author     Ido Barnea <ibarnea@cisco.com>  2015-12-09 05:07:44 +0200
committer  Ido Barnea <ibarnea@cisco.com>  2015-12-27 08:50:12 +0200
commit     509648b87434b9032d38b8ca5ad470ba3edcc036 (patch)
tree       920548ce9e2e5aeed4c88c1b288290505e7d7987
parent     b161dc672544a913f7f1ddf3a086dd75f2f1134a (diff)
Replacing DPDK 1.8 with DPDK 2.2 and making the changes needed for it to compile.
40G and 10G filters do not work yet.
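
The 40G and 10G filters mentioned above are the hardware flow rules TRex programs on the i40e and ixgbe NICs. Both go through the filter-control interface in rte_eth_ctrl.h, which this commit carries over in a heavily reworked form (459 lines in src/dpdk_lib18/librte_ether/rte_eth_ctrl.h versus 811 in src/dpdk22/lib/librte_ether/rte_eth_ctrl.h), so the filter code has to be ported rather than just recompiled. As a reference point, a minimal sketch of installing one flow-director rule through the DPDK 2.2 API is shown below; the port id, queue and match values are hypothetical, not taken from this commit.

    /*
     * Sketch: one flow-director rule via the DPDK 2.2 filter API
     * (rte_eth_dev_filter_ctrl, declared via rte_ethdev.h / rte_eth_ctrl.h).
     * Port, queue and match values below are hypothetical.
     */
    #include <stdint.h>
    #include <string.h>
    #include <rte_ethdev.h>
    #include <rte_byteorder.h>

    static int add_udp4_fdir_rule(uint8_t port_id, uint16_t rx_queue)
    {
        struct rte_eth_fdir_filter f;

        /* Not every PMD implements flow director; probe first. */
        if (rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR) != 0)
            return -1;

        memset(&f, 0, sizeof(f));
        f.soft_id = 1;                        /* id echoed back in the rx descriptor */
        f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        f.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(0x30000001); /* 48.0.0.1 */
        f.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(1024);

        f.action.rx_queue = rx_queue;         /* steer matching packets to this queue */
        f.action.behavior = RTE_ETH_FDIR_ACCEPT;
        f.action.report_status = RTE_ETH_FDIR_REPORT_ID;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                                       RTE_ETH_FILTER_ADD, &f);
    }

On the drivers in this tree the call is dispatched to the PMD-specific code in src/dpdk22/drivers/net/i40e/i40e_fdir.c and src/dpdk22/drivers/net/ixgbe/ixgbe_fdir.c, which is presumably where the remaining 40G/10G filter work lies.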
-rw-r--r-- .gitignore 8
-rwxr-xr-x linux_dpdk/ws_main.py 441
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/af_packet/rte_eth_af_packet.c (renamed from src/dpdk_lib18/librte_pmd_af_packet/rte_eth_af_packet.c) 97
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/af_packet/rte_eth_af_packet.h (renamed from src/dpdk_lib18/librte_pmd_af_packet/rte_eth_af_packet.h) 0
-rw-r--r-- src/dpdk22/drivers/net/bnx2x/bnx2x.h 1987
-rw-r--r-- src/dpdk22/drivers/net/bnx2x/bnx2x_ethdev.h 83
-rw-r--r-- src/dpdk22/drivers/net/bnx2x/bnx2x_logs.h 62
-rw-r--r-- src/dpdk22/drivers/net/bnx2x/bnx2x_rxtx.h 89
-rw-r--r-- src/dpdk22/drivers/net/bnx2x/bnx2x_stats.h 611
-rw-r--r-- src/dpdk22/drivers/net/bnx2x/bnx2x_vfpf.h 334
-rw-r--r-- src/dpdk22/drivers/net/bnx2x/ecore_fw_defs.h 403
-rw-r--r-- src/dpdk22/drivers/net/bnx2x/ecore_hsi.h 6330
-rw-r--r-- src/dpdk22/drivers/net/bnx2x/ecore_init.h 819
-rw-r--r-- src/dpdk22/drivers/net/bnx2x/ecore_init_ops.h 865
-rw-r--r-- src/dpdk22/drivers/net/bnx2x/ecore_mfw_req.h 187
-rw-r--r-- src/dpdk22/drivers/net/bnx2x/ecore_reg.h 3644
-rw-r--r-- src/dpdk22/drivers/net/bnx2x/ecore_sp.h 1768
-rw-r--r-- src/dpdk22/drivers/net/bnx2x/elink.h 588
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/bonding/rte_eth_bond.h (renamed from src/dpdk_lib18/librte_pmd_bond/rte_eth_bond.h) 26
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/bonding/rte_eth_bond_8023ad.h (renamed from src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad.h) 8
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/bonding/rte_eth_bond_8023ad_private.h (renamed from src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad_private.h) 0
-rw-r--r-- src/dpdk22/drivers/net/bonding/rte_eth_bond_alb.h 142
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/bonding/rte_eth_bond_private.h (renamed from src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_private.h) 61
-rw-r--r-- src/dpdk22/drivers/net/cxgbe/base/adapter.h 576
-rw-r--r-- src/dpdk22/drivers/net/cxgbe/base/common.h 401
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/cxgbe/base/t4_chip_type.h (renamed from src/dpdk_lib18/librte_sched/rte_approx.h) 76
-rw-r--r-- src/dpdk22/drivers/net/cxgbe/base/t4_hw.c 2686
-rw-r--r-- src/dpdk22/drivers/net/cxgbe/base/t4_hw.h 149
-rw-r--r-- src/dpdk22/drivers/net/cxgbe/base/t4_msg.h 345
-rw-r--r-- src/dpdk22/drivers/net/cxgbe/base/t4_pci_id_tbl.h 151
-rw-r--r-- src/dpdk22/drivers/net/cxgbe/base/t4_regs.h 795
-rw-r--r-- src/dpdk22/drivers/net/cxgbe/base/t4_regs_values.h 169
-rw-r--r-- src/dpdk22/drivers/net/cxgbe/base/t4fw_interface.h 1730
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/cxgbe/cxgbe.h (renamed from src/dpdk_lib18/librte_eal/bsdapp/eal/eal_interrupts.c) 60
-rw-r--r-- src/dpdk22/drivers/net/cxgbe/cxgbe_compat.h 266
-rw-r--r-- src/dpdk22/drivers/net/cxgbe/cxgbe_ethdev.c 879
-rw-r--r-- src/dpdk22/drivers/net/cxgbe/cxgbe_main.c 1216
-rw-r--r-- src/dpdk22/drivers/net/cxgbe/sge.c 2258
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/README (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/README) 17
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_80003es2lan.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_80003es2lan.c) 33
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_80003es2lan.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_80003es2lan.h) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_82540.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82540.c) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_82541.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82541.c) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_82541.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82541.h) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_82542.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82542.c) 8
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_82543.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82543.c) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_82543.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82543.h) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_82571.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82571.c) 8
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_82571.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82571.h) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_82575.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82575.c) 205
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_82575.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82575.h) 7
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_api.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_api.c) 12
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_api.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_api.h) 20
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_defines.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_defines.h) 8
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_hw.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_hw.h) 16
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_i210.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_i210.c) 49
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_i210.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_i210.h) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_ich8lan.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_ich8lan.c) 252
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_ich8lan.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_ich8lan.h) 43
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_mac.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mac.c) 14
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_mac.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mac.h) 4
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_manage.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_manage.c) 7
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_manage.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_manage.h) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_mbx.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mbx.c) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_mbx.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mbx.h) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_nvm.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_nvm.c) 11
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_nvm.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_nvm.h) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_osdep.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_osdep.c) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_osdep.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_osdep.h) 6
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_phy.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_phy.c) 21
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_phy.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_phy.h) 10
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_regs.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_regs.h) 13
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_vf.c (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_vf.c) 8
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/base/e1000_vf.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_vf.h) 4
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/e1000_ethdev.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000_ethdev.h) 173
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/e1000_logs.h (renamed from src/dpdk_lib18/librte_pmd_e1000/e1000_logs.h) 4
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/em_ethdev.c (renamed from src/dpdk_lib18/librte_pmd_e1000/em_ethdev.c) 297
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/em_rxtx.c (renamed from src/dpdk_lib18/librte_pmd_e1000/em_rxtx.c) 147
-rw-r--r-- src/dpdk22/drivers/net/e1000/igb_ethdev.c 4867
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/igb_pf.c (renamed from src/dpdk_lib18/librte_pmd_e1000/igb_pf.c) 60
-rw-r--r-- src/dpdk22/drivers/net/e1000/igb_regs.h 223
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/e1000/igb_rxtx.c (renamed from src/dpdk_lib18/librte_pmd_e1000/igb_rxtx.c) 488
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/LICENSE (renamed from src/dpdk_lib18/librte_pmd_enic/LICENSE) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/cq_desc.h (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/cq_desc.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/cq_enet_desc.h (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/cq_enet_desc.h) 1
-rw-r--r-- src/dpdk22/drivers/net/enic/base/enic_vnic_wq.h 79
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/rq_enet_desc.h (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/rq_enet_desc.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_cq.c (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_cq.c) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_cq.h (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_cq.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_dev.c (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_dev.c) 9
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_dev.h (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_dev.h) 5
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_devcmd.h (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_devcmd.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_enet.h (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_enet.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_intr.c (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_intr.c) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_intr.h (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_intr.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_nic.h (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_nic.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_resource.h (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_resource.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_rq.c (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rq.c) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_rq.h (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rq.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_rss.c (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rss.c) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_rss.h (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rss.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_stats.h (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_stats.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_wq.c (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_wq.c) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/vnic_wq.h (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/vnic_wq.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/base/wq_enet_desc.h (renamed from src/dpdk_lib18/librte_pmd_enic/vnic/wq_enet_desc.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/enic.h (renamed from src/dpdk_lib18/librte_pmd_enic/enic.h) 32
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/enic_clsf.c (renamed from src/dpdk_lib18/librte_pmd_enic/enic_clsf.c) 62
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/enic_compat.h (renamed from src/dpdk_lib18/librte_pmd_enic/enic_compat.h) 17
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/enic_ethdev.c (renamed from src/dpdk_lib18/librte_pmd_enic/enic_ethdev.c) 128
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/enic_main.c (renamed from src/dpdk_lib18/librte_pmd_enic/enic_main.c) 66
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/enic_res.c (renamed from src/dpdk_lib18/librte_pmd_enic/enic_res.c) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/enic/enic_res.h (renamed from src/dpdk_lib18/librte_pmd_enic/enic_res.h) 1
-rw-r--r-- src/dpdk22/drivers/net/fm10k/base/fm10k_api.c 361
-rw-r--r-- src/dpdk22/drivers/net/fm10k/base/fm10k_api.h 62
-rw-r--r-- src/dpdk22/drivers/net/fm10k/base/fm10k_common.c 572
-rw-r--r-- src/dpdk22/drivers/net/fm10k/base/fm10k_common.h 52
-rw-r--r-- src/dpdk22/drivers/net/fm10k/base/fm10k_mbx.c 2239
-rw-r--r-- src/dpdk22/drivers/net/fm10k/base/fm10k_mbx.h 331
-rw-r--r-- src/dpdk22/drivers/net/fm10k/base/fm10k_osdep.h 158
-rw-r--r-- src/dpdk22/drivers/net/fm10k/base/fm10k_pf.c 2094
-rw-r--r-- src/dpdk22/drivers/net/fm10k/base/fm10k_pf.h 189
-rw-r--r-- src/dpdk22/drivers/net/fm10k/base/fm10k_tlv.c 914
-rw-r--r-- src/dpdk22/drivers/net/fm10k/base/fm10k_tlv.h 199
-rw-r--r-- src/dpdk22/drivers/net/fm10k/base/fm10k_type.h 964
-rw-r--r-- src/dpdk22/drivers/net/fm10k/base/fm10k_vf.c 659
-rw-r--r-- src/dpdk22/drivers/net/fm10k/base/fm10k_vf.h 92
-rw-r--r-- src/dpdk22/drivers/net/fm10k/fm10k.h 362
-rw-r--r-- src/dpdk22/drivers/net/fm10k/fm10k_ethdev.c 2780
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/fm10k/fm10k_logs.h (renamed from src/dpdk_lib18/librte_power/channel_commands.h) 72
-rw-r--r-- src/dpdk22/drivers/net/fm10k/fm10k_rxtx.c 517
-rw-r--r-- src/dpdk22/drivers/net/fm10k/fm10k_rxtx_vec.c 829
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_adminq.c (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq.c) 185
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_adminq.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq.h) 23
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_adminq_cmd.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq_cmd.h) 388
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_alloc.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_alloc.h) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_common.c (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_common.c) 1427
-rw-r--r-- src/dpdk22/drivers/net/i40e/base/i40e_dcb.c 1297
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_dcb.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_dcb.h) 72
-rw-r--r-- src/dpdk22/drivers/net/i40e/base/i40e_devids.h 70
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_diag.c (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_diag.c) 13
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_diag.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_diag.h) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_hmc.c (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_hmc.c) 69
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_hmc.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_hmc.h) 12
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_lan_hmc.c (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_lan_hmc.c) 62
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_lan_hmc.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_lan_hmc.h) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_nvm.c (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_nvm.c) 843
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_osdep.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_osdep.h) 8
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_prototype.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_prototype.h) 99
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_register.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_register.h) 1997
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_status.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_status.h) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_type.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_type.h) 333
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/base/i40e_virtchnl.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_virtchnl.h) 81
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/i40e_ethdev.c (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev.c) 4308
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/i40e_ethdev.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev.h) 177
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/i40e_ethdev_vf.c (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev_vf.c) 767
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/i40e_fdir.c (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e_fdir.c) 251
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/i40e_logs.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e_logs.h) 6
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/i40e_pf.c (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e_pf.c) 49
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/i40e_pf.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e_pf.h) 3
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/i40e_rxtx.c (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e_rxtx.c) 1566
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/i40e/i40e_rxtx.h (renamed from src/dpdk_lib18/librte_pmd_i40e/i40e_rxtx.h) 69
-rw-r--r-- src/dpdk22/drivers/net/i40e/i40e_rxtx_vec.c 777
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/README (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/README) 20
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_82598.c (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c) 66
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_82598.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82598.h) 3
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_82599.c (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c) 429
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_82599.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82599.h) 8
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_api.c (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_api.c) 229
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_api.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_api.h) 21
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_common.c (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_common.c) 404
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_common.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_common.h) 12
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb.c (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.c) 3
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.h) 4
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb_82598.c (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.c) 3
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb_82598.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.h) 3
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb_82599.c (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.c) 5
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb_82599.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.h) 3
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_mbx.c (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_mbx.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_osdep.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h) 12
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_phy.c (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c) 441
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_phy.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h) 37
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_type.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_type.h) 396
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_vf.c (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c) 4
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_vf.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h) 7
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_x540.c (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c) 120
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_x540.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h) 3
-rw-r--r-- src/dpdk22/drivers/net/ixgbe/base/ixgbe_x550.c 3209
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/base/ixgbe_x550.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x550.h) 30
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/ixgbe_bypass.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/ixgbe_bypass_api.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass_api.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/ixgbe_bypass_defines.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass_defines.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/ixgbe_ethdev.c (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_ethdev.c) 3068
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/ixgbe_ethdev.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_ethdev.h) 187
-rw-r--r-- src/dpdk22/drivers/net/ixgbe/ixgbe_fdir.c 1361
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/ixgbe_logs.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_logs.h) 5
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/ixgbe_pf.c (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_pf.c) 135
-rw-r--r-- src/dpdk22/drivers/net/ixgbe/ixgbe_regs.h 376
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/ixgbe_rxtx.c (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx.c) 1996
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/ixgbe_rxtx.h (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx.h) 113
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ixgbe/ixgbe_rxtx_vec.c (renamed from src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx_vec.c) 281
-rw-r--r-- src/dpdk22/drivers/net/mlx4/mlx4.h 163
-rw-r--r-- src/dpdk22/drivers/net/mlx5/mlx5.h 220
-rw-r--r-- src/dpdk22/drivers/net/mlx5/mlx5_defs.h 84
-rw-r--r-- src/dpdk22/drivers/net/mlx5/mlx5_rxtx.h 274
-rw-r--r-- src/dpdk22/drivers/net/mlx5/mlx5_utils.h 184
-rw-r--r-- src/dpdk22/drivers/net/null/rte_eth_null.c 703
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/null/rte_eth_null.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_feature_defs.h) 17
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ring/rte_eth_ring.c (renamed from src/dpdk_lib18/librte_pmd_ring/rte_eth_ring.c) 288
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/ring/rte_eth_ring.h (renamed from src/dpdk_lib18/librte_power/guest_channel.h) 67
-rw-r--r-- src/dpdk22/drivers/net/szedata2/rte_eth_szedata2.h 102
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/virtio/virtio_ethdev.c (renamed from src/dpdk_lib18/librte_pmd_virtio/virtio_ethdev.c) 878
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/virtio/virtio_ethdev.h (renamed from src/dpdk_lib18/librte_pmd_virtio/virtio_ethdev.h) 50
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/virtio/virtio_logs.h (renamed from src/dpdk_lib18/librte_pmd_virtio/virtio_logs.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/virtio/virtio_pci.c (renamed from src/dpdk_lib18/librte_pmd_virtio/virtio_pci.c) 20
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/virtio/virtio_pci.h (renamed from src/dpdk_lib18/librte_pmd_virtio/virtio_pci.h) 104
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/virtio/virtio_ring.h (renamed from src/dpdk_lib18/librte_pmd_virtio/virtio_ring.h) 4
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/virtio/virtio_rxtx.c (renamed from src/dpdk_lib18/librte_pmd_virtio/virtio_rxtx.c) 306
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/virtio/virtio_rxtx.h (renamed from src/dpdk_lib18/librte_power/rte_power_common.h) 10
-rw-r--r-- src/dpdk22/drivers/net/virtio/virtio_rxtx_simple.c 418
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/virtio/virtqueue.c (renamed from src/dpdk_lib18/librte_pmd_virtio/virtqueue.c) 14
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/virtio/virtqueue.h (renamed from src/dpdk_lib18/librte_pmd_virtio/virtqueue.h) 71
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/vmxnet3/base/README (renamed from src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/README) 13
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/vmxnet3/base/includeCheck.h (renamed from src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/includeCheck.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/vmxnet3/base/upt1_defs.h (renamed from src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/upt1_defs.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/vmxnet3/base/vmware_pack_begin.h (renamed from src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmware_pack_begin.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/vmxnet3/base/vmware_pack_end.h (renamed from src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmware_pack_end.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/vmxnet3/base/vmxnet3_defs.h (renamed from src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmxnet3_defs.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/vmxnet3/base/vmxnet3_osdep.h (renamed from src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmxnet3_osdep.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/vmxnet3/vmxnet3_ethdev.c (renamed from src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ethdev.c) 239
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/vmxnet3/vmxnet3_ethdev.h (renamed from src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ethdev.h) 16
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/vmxnet3/vmxnet3_logs.h (renamed from src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_logs.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/vmxnet3/vmxnet3_ring.h (renamed from src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ring.h) 17
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/vmxnet3/vmxnet3_rxtx.c (renamed from src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_rxtx.c) 320
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/xenvirt/rte_eth_xenvirt.h (renamed from src/dpdk_lib18/librte_pmd_xenvirt/rte_eth_xenvirt.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/xenvirt/rte_xen_lib.h (renamed from src/dpdk_lib18/librte_pmd_xenvirt/rte_xen_lib.h) 5
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/xenvirt/virtio_logs.h (renamed from src/dpdk_lib18/librte_pmd_xenvirt/virtio_logs.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/drivers/net/xenvirt/virtqueue.h (renamed from src/dpdk_lib18/librte_pmd_xenvirt/virtqueue.h) 9
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_acl/acl.h (renamed from src/dpdk_lib18/librte_acl/acl.h) 69
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_acl/acl_run.h (renamed from src/dpdk_lib18/librte_acl/acl_run.h) 7
-rw-r--r-- src/dpdk22/lib/librte_acl/acl_run_avx2.h 284
-rw-r--r-- src/dpdk22/lib/librte_acl/acl_run_neon.h 289
-rw-r--r-- src/dpdk22/lib/librte_acl/acl_run_sse.h 357
-rw-r--r-- src/dpdk22/lib/librte_acl/acl_vect.h 116
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_acl/rte_acl.h (renamed from src/dpdk_lib18/librte_acl/rte_acl.h) 115
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_acl/rte_acl_osdep.h (renamed from src/dpdk_lib18/librte_acl/rte_acl_osdep.h) 14
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_acl/tb_mem.h (renamed from src/dpdk_lib18/librte_acl/tb_mem.h) 3
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_cfgfile/rte_cfgfile.c (renamed from src/dpdk_lib18/librte_cfgfile/rte_cfgfile.c) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_cfgfile/rte_cfgfile.h (renamed from src/dpdk_lib18/librte_cfgfile/rte_cfgfile.h) 9
-rw-r--r-- src/dpdk22/lib/librte_compat/rte_compat.h 105
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_distributor/rte_distributor.h (renamed from src/dpdk_lib18/librte_distributor/rte_distributor.h) 3
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_common_cpuflags.c (renamed from src/dpdk_lib18/librte_eal/common/eal_common_cpuflags.c) 5
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_common_dev.c (renamed from src/dpdk_lib18/librte_eal/common/eal_common_dev.c) 75
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_common_devargs.c (renamed from src/dpdk_lib18/librte_eal/common/eal_common_devargs.c) 97
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_common_errno.c (renamed from src/dpdk_lib18/librte_eal/common/eal_common_errno.c) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_common_hexdump.c (renamed from src/dpdk_lib18/librte_eal/common/eal_common_hexdump.c) 3
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_common_launch.c (renamed from src/dpdk_lib18/librte_eal/common/eal_common_launch.c) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_common_lcore.c (renamed from src/dpdk_lib18/librte_eal/bsdapp/eal/eal_lcore.c) 57
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_common_log.c (renamed from src/dpdk_lib18/librte_eal/common/eal_common_log.c) 33
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_common_memory.c (renamed from src/dpdk_lib18/librte_eal/common/eal_common_memory.c) 39
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_common_memzone.c (renamed from src/dpdk_lib18/librte_eal/common/eal_common_memzone.c) 414
-rw-r--r-- src/dpdk22/lib/librte_eal/common/eal_common_options.c 1023
-rw-r--r-- src/dpdk22/lib/librte_eal/common/eal_common_pci.c 464
-rw-r--r-- src/dpdk22/lib/librte_eal/common/eal_common_pci_uio.c 222
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_common_string_fns.c (renamed from src/dpdk_lib18/librte_eal/common/eal_common_string_fns.c) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_common_tailqs.c (renamed from src/dpdk_lib18/librte_eal/common/eal_common_tailqs.c) 152
-rw-r--r-- src/dpdk22/lib/librte_eal/common/eal_common_thread.c 157
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_common_timer.c (renamed from src/dpdk_lib18/librte_eal/bsdapp/eal/eal_alarm.c) 54
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_filesystem.h (renamed from src/dpdk_lib18/librte_eal/common/eal_filesystem.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_hugepages.h (renamed from src/dpdk_lib18/librte_eal/common/eal_hugepages.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_internal_cfg.h (renamed from src/dpdk_lib18/librte_eal/common/eal_internal_cfg.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_options.h (renamed from src/dpdk_lib18/librte_eal/common/eal_options.h) 69
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_private.h (renamed from src/dpdk_lib18/librte_eal/common/eal_private.h) 149
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/eal_thread.h (renamed from src/dpdk_lib18/librte_hash/rte_hash_crc.h) 108
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_atomic.h (renamed from src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic.h) 6
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h (renamed from src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic_32.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h (renamed from src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic_64.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_byteorder.h (renamed from src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_byteorder_32.h (renamed from src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder_32.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_byteorder_64.h (renamed from src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder_64.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_cpuflags.h (renamed from src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_cpuflags.h) 210
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_cycles.h (renamed from src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_cycles.h) 0
-rw-r--r-- src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_memcpy.h 639
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_prefetch.h (renamed from src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_prefetch.h) 14
-rw-r--r-- src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_rtm.h 73
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_rwlock.h (renamed from src/dpdk_lib18/librte_pmd_ring/rte_eth_ring.h) 55
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_spinlock.h (renamed from src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_spinlock.h) 107
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_vect.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_common_vect.h) 51
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/generic/rte_atomic.h (renamed from src/dpdk_lib18/librte_eal/common/include/generic/rte_atomic.h) 27
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/generic/rte_byteorder.h (renamed from src/dpdk_lib18/librte_eal/common/include/generic/rte_byteorder.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/generic/rte_cpuflags.h (renamed from src/dpdk_lib18/librte_eal/common/include/generic/rte_cpuflags.h) 12
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/generic/rte_cycles.h (renamed from src/dpdk_lib18/librte_eal/common/include/generic/rte_cycles.h) 8
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/generic/rte_memcpy.h (renamed from src/dpdk_lib18/librte_eal/common/include/generic/rte_memcpy.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/generic/rte_prefetch.h (renamed from src/dpdk_lib18/librte_eal/common/include/generic/rte_prefetch.h) 8
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/generic/rte_rwlock.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_rwlock.h) 50
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/generic/rte_spinlock.h (renamed from src/dpdk_lib18/librte_eal/common/include/generic/rte_spinlock.h) 103
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_alarm.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_alarm.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_branch_prediction.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_branch_prediction.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_common.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_common.h) 62
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_debug.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_debug.h) 6
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_dev.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_dev.h) 81
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_devargs.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_devargs.h) 36
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_eal.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_eal.h) 92
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_eal_memconfig.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_eal_memconfig.h) 14
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_errno.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_errno.h) 1
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_hexdump.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_hexdump.h) 20
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_interrupts.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_interrupts.h) 1
-rw-r--r-- src/dpdk22/lib/librte_eal/common/include/rte_keepalive.h 146
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_launch.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_launch.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_lcore.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_lcore.h) 63
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_log.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_log.h) 15
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_malloc.h (renamed from src/dpdk_lib18/librte_malloc/rte_malloc.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_malloc_heap.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_malloc_heap.h) 3
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_memory.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_memory.h) 52
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_memzone.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_memzone.h) 71
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_pci.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_pci.h) 227
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_pci_dev_feature_defs.h (renamed from src/dpdk_lib18/librte_vhost/eventfd_link/eventfd_link.h) 52
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_pci_dev_features.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_features.h) 25
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_pci_dev_ids.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_ids.h) 133
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_per_lcore.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_per_lcore.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_random.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_random.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_string_fns.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_string_fns.h) 0
-rw-r--r-- src/dpdk22/lib/librte_eal/common/include/rte_tailq.h 162
-rw-r--r-- src/dpdk22/lib/librte_eal/common/include/rte_time.h 122
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_version.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_version.h) 5
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/include/rte_warnings.h (renamed from src/dpdk_lib18/librte_eal/common/include/rte_warnings.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/malloc_elem.c (renamed from src/dpdk_lib18/librte_malloc/malloc_elem.c) 69
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/malloc_elem.h (renamed from src/dpdk_lib18/librte_malloc/malloc_elem.h) 14
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/malloc_heap.c (renamed from src/dpdk_lib18/librte_malloc/malloc_heap.c) 170
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/malloc_heap.h (renamed from src/dpdk_lib18/librte_malloc/malloc_heap.h) 13
-rw-r--r-- src/dpdk22/lib/librte_eal/common/rte_keepalive.c 113
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/common/rte_malloc.c (renamed from src/dpdk_lib18/librte_malloc/rte_malloc.c) 13
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal.c (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal.c) 216
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_alarm.c (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal_alarm.c) 25
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_debug.c (renamed from src/dpdk_lib18/librte_eal/bsdapp/eal/eal_debug.c) 6
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal_hugepage_info.c) 170
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_interrupts.c (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal_interrupts.c) 544
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_ivshmem.c (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal_ivshmem.c) 22
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_lcore.c (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal_debug.c) 121
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_log.c (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal_log.c) 51
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_memory.c (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal_memory.c) 145
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_pci.c (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci.c) 311
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_pci_init.h (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_init.h) 43
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_pci_uio.c (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_uio.c) 355
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_vfio.c) 177
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c) 16
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_thread.c (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal_thread.c) 78
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_timer.c (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal_timer.c) 78
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/eal_vfio.h (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/eal_vfio.h) 8
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h) 0
-rw-r--r-- src/dpdk22/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h 228
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h (renamed from src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h) 4
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/igb_uio/compat.h (renamed from src/dpdk_lib18/librte_eal/linuxapp/igb_uio/compat.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/xen_dom0/compat.h (renamed from src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/compat.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_eal/linuxapp/xen_dom0/dom0_mm_dev.h (renamed from src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/dom0_mm_dev.h) 2
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_ether/rte_dev_info.h (renamed from src/dpdk_lib18/librte_eal/common/eal_thread.h) 36
-rw-r--r-- src/dpdk22/lib/librte_ether/rte_eth_ctrl.h 811
-rw-r--r-- src/dpdk22/lib/librte_ether/rte_ethdev.c 3241
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_ether/rte_ethdev.h (renamed from src/dpdk_lib18/librte_ether/rte_ethdev.h) 1921
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_ether/rte_ether.h (renamed from src/dpdk_lib18/librte_ether/rte_ether.h) 80
-rw-r--r-- src/dpdk22/lib/librte_hash/rte_cmp_arm64.h 114
-rw-r--r-- src/dpdk22/lib/librte_hash/rte_cmp_x86.h 109
-rw-r--r-- src/dpdk22/lib/librte_hash/rte_crc_arm64.h 151
-rw-r--r-- src/dpdk22/lib/librte_hash/rte_cuckoo_hash.c 1243
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_hash/rte_fbk_hash.h (renamed from src/dpdk_lib18/librte_hash/rte_fbk_hash.h) 3
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_hash/rte_hash.h (renamed from src/dpdk_lib18/librte_hash/rte_hash.h) 248
-rw-r--r-- src/dpdk22/lib/librte_hash/rte_hash_crc.h 568
-rw-r--r-- src/dpdk22/lib/librte_hash/rte_jhash.h 410
-rw-r--r-- src/dpdk22/lib/librte_hash/rte_thash.h 250
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_kvargs/rte_kvargs.c (renamed from src/dpdk_lib18/librte_kvargs/rte_kvargs.c) 4
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_kvargs/rte_kvargs.h (renamed from src/dpdk_lib18/librte_kvargs/rte_kvargs.h) 3
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_mbuf/rte_mbuf.c (renamed from src/dpdk_lib18/librte_mbuf/rte_mbuf.c) 82
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_mbuf/rte_mbuf.h (renamed from src/dpdk_lib18/librte_mbuf/rte_mbuf.h) 1000
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_mempool/rte_mempool.c (renamed from src/dpdk_lib18/librte_mempool/rte_mempool.c) 154
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_mempool/rte_mempool.h (renamed from src/dpdk_lib18/librte_mempool/rte_mempool.h) 238
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_net/rte_arp.h (renamed from src/dpdk_lib18/librte_net/rte_arp.h) 13
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_net/rte_icmp.h (renamed from src/dpdk_lib18/librte_net/rte_icmp.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_net/rte_ip.h (renamed from src/dpdk_lib18/librte_net/rte_ip.h) 17
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_net/rte_sctp.h (renamed from src/dpdk_lib18/librte_net/rte_sctp.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_net/rte_tcp.h (renamed from src/dpdk_lib18/librte_net/rte_tcp.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_net/rte_udp.h (renamed from src/dpdk_lib18/librte_net/rte_udp.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_pipeline/rte_pipeline.c (renamed from src/dpdk_lib18/librte_pipeline/rte_pipeline.c) 303
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_pipeline/rte_pipeline.h (renamed from src/dpdk_lib18/librte_pipeline/rte_pipeline.h) 165
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_port/rte_port.h (renamed from src/dpdk_lib18/librte_port/rte_port.h) 92
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_port/rte_port_ethdev.h (renamed from src/dpdk_lib18/librte_port/rte_port_ethdev.h) 19
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_port/rte_port_frag.h (renamed from src/dpdk_lib18/librte_port/rte_port_frag.h) 9
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_port/rte_port_ras.h (renamed from src/dpdk_lib18/librte_port/rte_port_ras.h) 9
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_port/rte_port_ring.h (renamed from src/dpdk_lib18/librte_port/rte_port_ring.h) 49
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_port/rte_port_sched.h (renamed from src/dpdk_lib18/librte_port/rte_port_sched.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_port/rte_port_source_sink.h (renamed from src/dpdk_lib18/librte_port/rte_port_source_sink.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_ring/rte_ring.c (renamed from src/dpdk_lib18/librte_ring/rte_ring.c) 75
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_ring/rte_ring.h (renamed from src/dpdk_lib18/librte_ring/rte_ring.h) 63
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_table/rte_lru.h (renamed from src/dpdk_lib18/librte_table/rte_lru.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_table/rte_table.h (renamed from src/dpdk_lib18/librte_table/rte_table.h) 111
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_table/rte_table_acl.h (renamed from src/dpdk_lib18/librte_table/rte_table_acl.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_table/rte_table_array.h (renamed from src/dpdk_lib18/librte_table/rte_table_array.h) 0
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_table/rte_table_hash.h (renamed from src/dpdk_lib18/librte_table/rte_table_hash.h) 20
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_table/rte_table_lpm.h (renamed from src/dpdk_lib18/librte_table/rte_table_lpm.h) 3
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_table/rte_table_lpm_ipv6.h (renamed from src/dpdk_lib18/librte_table/rte_table_lpm_ipv6.h) 3
-rw-r--r-- [-rwxr-xr-x] src/dpdk22/lib/librte_table/rte_table_stub.h (renamed from src/dpdk_lib18/librte_table/rte_table_stub.h) 0
-rwxr-xr-x src/dpdk_lib18/librte_acl/Makefile 63
-rwxr-xr-x src/dpdk_lib18/librte_acl/acl_bld.c 2008
-rwxr-xr-x src/dpdk_lib18/librte_acl/acl_gen.c 475
-rwxr-xr-x src/dpdk_lib18/librte_acl/acl_run_scalar.c 193
-rwxr-xr-x src/dpdk_lib18/librte_acl/acl_run_sse.c 626
-rwxr-xr-x src/dpdk_lib18/librte_acl/acl_vect.h 132
-rwxr-xr-x src/dpdk_lib18/librte_acl/rte_acl.c 516
-rwxr-xr-x src/dpdk_lib18/librte_acl/rte_acl_osdep_alone.h 278
-rwxr-xr-x src/dpdk_lib18/librte_acl/tb_mem.c 104
-rwxr-xr-x src/dpdk_lib18/librte_cfgfile/Makefile 53
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/Makefile 63
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline.c 264
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline.h 91
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_cirbuf.c 467
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_cirbuf.h 245
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_parse.c 564
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_parse.h 191
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.c 180
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.h 94
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.c 408
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.h 186
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_parse_num.c 402
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_parse_num.h 113
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.c 173
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.h 101
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_parse_string.c 253
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_parse_string.h 110
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_rdline.c 698
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_rdline.h 254
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_socket.c 119
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_socket.h 76
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_vt100.c 185
-rwxr-xr-x src/dpdk_lib18/librte_cmdline/cmdline_vt100.h 151
-rwxr-xr-x src/dpdk_lib18/librte_distributor/Makefile 50
-rwxr-xr-x src/dpdk_lib18/librte_distributor/rte_distributor.c 488
-rwxr-xr-x src/dpdk_lib18/librte_eal/Makefile 39
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/Makefile 38
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/contigmem/BSDmakefile 36
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/contigmem/Makefile 52
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/contigmem/contigmem.c 233
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/eal/Makefile 97
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/eal/eal.c 563
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/eal/eal_hugepage_info.c 133
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/eal/eal_log.c 57
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/eal/eal_memory.c 224
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/eal/eal_pci.c 510
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/eal/eal_thread.c 233
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/eal/eal_timer.c 149
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_dom0_common.h 107
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h 54
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/nic_uio/BSDmakefile 36
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/nic_uio/Makefile 52
-rwxr-xr-x src/dpdk_lib18/librte_eal/bsdapp/nic_uio/nic_uio.c 329
-rwxr-xr-x src/dpdk_lib18/librte_eal/common/Makefile 61
-rwxr-xr-x src/dpdk_lib18/librte_eal/common/eal_common_options.c 611
-rwxr-xr-x src/dpdk_lib18/librte_eal/common/eal_common_pci.c 207
-rwxr-xr-x src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_atomic.h 426
-rwxr-xr-x src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_byteorder.h 149
-rwxr-xr-x src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cpuflags.h 187
-rwxr-xr-x src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cycles.h 87
-rwxr-xr-x src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_memcpy.h 225
-rwxr-xr-x src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_prefetch.h 61
-rwxr-xr-x src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_spinlock.h 73
-rwxr-xr-x src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_memcpy.h 297
-rwxr-xr-x src/dpdk_lib18/librte_eal/common/include/rte_tailq.h 215
-rwxr-xr-x src/dpdk_lib18/librte_eal/common/include/rte_tailq_elem.h 90
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/Makefile 45
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/eal/Makefile 112
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/eal/eal_lcore.c 191
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/eal/eal_xen_memory.c 370
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h 58
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/igb_uio/Makefile 53
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/igb_uio/igb_uio.c 643
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/Makefile 93
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/compat.h 21
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/README 100
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/COPYING 339
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c 3665
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.h 509
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.c 1160
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.h 157
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_defines.h 1380
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_hw.h 793
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.c 909
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.h 91
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c 2096
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.h 80
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.c 556
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.h 89
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c 526
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.h 87
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c 967
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.h 75
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_osdep.h 136
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c 3405
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.h 256
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_regs.h 646
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb.h 859
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_debugfs.c 29
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c 2859
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_hwmon.c 260
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c 10263
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_param.c 848
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_procfs.c 363
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c 944
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_regtest.h 251
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c 437
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.h 46
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c 1482
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h 3884
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c 1172
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/COPYING 339
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h 925
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c 1296
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.h 44
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c 2314
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.h 58
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c 1158
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.h 168
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c 4083
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.h 140
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_dcb.h 168
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c 2901
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_fcoe.h 91
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c 2975
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h 105
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h 132
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c 1847
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.h 137
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_sriov.h 74
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_type.h 3254
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.c 938
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.h 58
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c 1246
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h 3143
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/kni_dev.h 150
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/kni_ethtool.c 217
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/kni_fifo.h 108
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/kni_misc.c 606
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/kni_net.c 687
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/kni/kni_vhost.c 811
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/Makefile 56
-rwxr-xr-x src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c 781
-rwxr-xr-x src/dpdk_lib18/librte_ether/Makefile 54
-rwxr-xr-x src/dpdk_lib18/librte_ether/rte_eth_ctrl.h 459
-rwxr-xr-x src/dpdk_lib18/librte_ether/rte_ethdev.c 3271
-rwxr-xr-x src/dpdk_lib18/librte_hash/Makefile 53
-rwxr-xr-x src/dpdk_lib18/librte_hash/rte_fbk_hash.c 240
-rwxr-xr-x src/dpdk_lib18/librte_hash/rte_hash.c 483
-rwxr-xr-x src/dpdk_lib18/librte_hash/rte_jhash.h 253
-rwxr-xr-x src/dpdk_lib18/librte_ip_frag/Makefile 59
-rwxr-xr-x src/dpdk_lib18/librte_ip_frag/ip_frag_common.h 192
-rwxr-xr-x src/dpdk_lib18/librte_ip_frag/ip_frag_internal.c 418
-rwxr-xr-x src/dpdk_lib18/librte_ip_frag/rte_ip_frag.h 353
-rwxr-xr-x src/dpdk_lib18/librte_ip_frag/rte_ip_frag_common.c 139
-rwxr-xr-x src/dpdk_lib18/librte_ip_frag/rte_ipv4_fragmentation.c 209
-rwxr-xr-x src/dpdk_lib18/librte_ip_frag/rte_ipv4_reassembly.c 183
-rwxr-xr-x src/dpdk_lib18/librte_ip_frag/rte_ipv6_fragmentation.c 215
-rwxr-xr-x src/dpdk_lib18/librte_ip_frag/rte_ipv6_reassembly.c 222
-rwxr-xr-x src/dpdk_lib18/librte_ivshmem/Makefile 48
-rwxr-xr-x src/dpdk_lib18/librte_ivshmem/rte_ivshmem.c 884
-rwxr-xr-x src/dpdk_lib18/librte_ivshmem/rte_ivshmem.h 165
-rwxr-xr-x src/dpdk_lib18/librte_kni/Makefile 49
-rwxr-xr-x src/dpdk_lib18/librte_kni/rte_kni.c 747
-rwxr-xr-x src/dpdk_lib18/librte_kni/rte_kni.h 306
-rwxr-xr-x src/dpdk_lib18/librte_kni/rte_kni_fifo.h 93
-rwxr-xr-x src/dpdk_lib18/librte_kvargs/Makefile 51
-rwxr-xr-x src/dpdk_lib18/librte_lpm/Makefile 49
-rwxr-xr-x src/dpdk_lib18/librte_lpm/rte_lpm.c 1017
-rwxr-xr-x src/dpdk_lib18/librte_lpm/rte_lpm.h 472
-rwxr-xr-x src/dpdk_lib18/librte_lpm/rte_lpm6.c 892
-rwxr-xr-x src/dpdk_lib18/librte_lpm/rte_lpm6.h 228
-rwxr-xr-x src/dpdk_lib18/librte_malloc/Makefile 48
-rwxr-xr-x src/dpdk_lib18/librte_mbuf/Makefile 48
-rwxr-xr-x src/dpdk_lib18/librte_mempool/Makefile 51
-rwxr-xr-x src/dpdk_lib18/librte_mempool/rte_dom0_mempool.c 134
-rwxr-xr-x src/dpdk_lib18/librte_meter/Makefile 53
-rwxr-xr-x src/dpdk_lib18/librte_meter/rte_meter.c 120
-rwxr-xr-x src/dpdk_lib18/librte_meter/rte_meter.h 387
-rwxr-xr-x src/dpdk_lib18/librte_net/Makefile 40
-rwxr-xr-x src/dpdk_lib18/librte_pipeline/Makefile 54
-rwxr-xr-x src/dpdk_lib18/librte_pmd_af_packet/Makefile 60
-rwxr-xr-x src/dpdk_lib18/librte_pmd_bond/Makefile 67
-rwxr-xr-x src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad.c 1216
-rwxr-xr-x src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_api.c 822
-rwxr-xr-x src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_args.c 279
-rwxr-xr-x src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_pmd.c 1881
-rwxr-xr-x src/dpdk_lib18/librte_pmd_e1000/Makefile 95
-rwxr-xr-x src/dpdk_lib18/librte_pmd_e1000/igb_ethdev.c 3164
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/Makefile67
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/Makefile101
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_dcb.c479
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/Makefile117
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x550.c1809
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_82599_bypass.c314
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass.c414
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_fdir.c922
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_pcap/Makefile59
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_pcap/rte_eth_pcap.c936
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ring/Makefile57
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_virtio/Makefile57
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_vmxnet3/Makefile80
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_xenvirt/Makefile58
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_xenvirt/rte_eth_xenvirt.c716
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_xenvirt/rte_mempool_gntalloc.c298
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_xenvirt/rte_xen_lib.c428
-rwxr-xr-xsrc/dpdk_lib18/librte_port/Makefile77
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_ethdev.c305
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_frag.c241
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_ras.c252
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_ring.c237
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_sched.c239
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_source_sink.c158
-rwxr-xr-xsrc/dpdk_lib18/librte_power/Makefile49
-rwxr-xr-xsrc/dpdk_lib18/librte_power/guest_channel.c162
-rwxr-xr-xsrc/dpdk_lib18/librte_power/rte_power.c143
-rwxr-xr-xsrc/dpdk_lib18/librte_power/rte_power.h251
-rwxr-xr-xsrc/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.c545
-rwxr-xr-xsrc/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.h192
-rwxr-xr-xsrc/dpdk_lib18/librte_power/rte_power_kvm_vm.c136
-rwxr-xr-xsrc/dpdk_lib18/librte_power/rte_power_kvm_vm.h179
-rwxr-xr-xsrc/dpdk_lib18/librte_ring/Makefile48
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/Makefile56
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/rte_approx.c196
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/rte_bitmap.h563
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/rte_red.c158
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/rte_red.h453
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/rte_sched.c2150
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/rte_sched.h442
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/rte_sched_common.h129
-rwxr-xr-xsrc/dpdk_lib18/librte_table/Makefile82
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_acl.c491
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_array.c205
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_hash_ext.c1122
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_hash_key16.c1101
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_hash_key32.c1121
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_hash_key8.c1399
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_hash_lru.c1065
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_lpm.c348
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_lpm_ipv6.c362
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_stub.c65
-rwxr-xr-xsrc/dpdk_lib18/librte_timer/Makefile48
-rwxr-xr-xsrc/dpdk_lib18/librte_timer/rte_timer.c610
-rwxr-xr-xsrc/dpdk_lib18/librte_timer/rte_timer.h335
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/Makefile50
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/eventfd_link/Makefile39
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/eventfd_link/eventfd_link.c195
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/libvirt/qemu-wrap.py367
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/rte_virtio_net.h215
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/vhost-net-cdev.c389
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/vhost-net-cdev.h113
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/vhost_rxtx.c730
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/virtio-net.c1163
-rwxr-xr-xsrc/main_dpdk.cpp42
-rw-r--r--src/pal/linux_dpdk/dpdk22/rte_config.h322
655 files changed, 96684 insertions, 145860 deletions
diff --git a/.gitignore b/.gitignore
index f67a9194..61cc588a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,8 +7,12 @@
*.o
*.so
.lock*
-linux_dpdk*
-linux*
+linux_dpdk/.waf*
+linux_dpdk/build*
+linux_dpdk/version*
+linux/.waf*
+linux/build*
+linux/version*
scripts/_t-rex-*
scripts/bp-sim-*
scripts/doc/*
diff --git a/linux_dpdk/ws_main.py b/linux_dpdk/ws_main.py
index 4fd30d4e..4c8d821d 100755
--- a/linux_dpdk/ws_main.py
+++ b/linux_dpdk/ws_main.py
@@ -211,152 +211,143 @@ version_src = SrcGroup(
])
-dpdk_src = SrcGroup(dir='src/dpdk_lib18/',
+dpdk_src = SrcGroup(dir='src/dpdk22/',
src_list=[
- 'librte_ring/rte_ring.c',
- 'librte_timer/rte_timer.c',
- #'librte_pmd_ixgbe/ixgbe_82599_bypass.c',
- #'librte_pmd_ixgbe/ixgbe_bypass.c',
- 'librte_pmd_ixgbe/ixgbe_ethdev.c',
- 'librte_pmd_ixgbe/ixgbe_fdir.c',
- 'librte_pmd_ixgbe/ixgbe_pf.c',
- 'librte_pmd_ixgbe/ixgbe_rxtx.c',
- 'librte_pmd_ixgbe/ixgbe_rxtx_vec.c',
- 'librte_pmd_ixgbe/ixgbe/ixgbe_82598.c',
- 'librte_pmd_ixgbe/ixgbe/ixgbe_82599.c',
- 'librte_pmd_ixgbe/ixgbe/ixgbe_api.c',
- 'librte_pmd_ixgbe/ixgbe/ixgbe_common.c',
- 'librte_pmd_ixgbe/ixgbe/ixgbe_dcb.c',
- 'librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.c',
- 'librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.c',
- 'librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c',
- 'librte_pmd_ixgbe/ixgbe/ixgbe_phy.c',
- 'librte_pmd_ixgbe/ixgbe/ixgbe_vf.c',
- 'librte_pmd_ixgbe/ixgbe/ixgbe_x540.c',
- 'librte_pmd_ixgbe/ixgbe/ixgbe_x550.c',
-
- 'librte_pmd_vmxnet3/vmxnet3_ethdev.c',
- 'librte_pmd_vmxnet3/vmxnet3_rxtx.c',
-
- 'librte_pmd_virtio/virtio_ethdev.c',
- 'librte_pmd_virtio/virtio_pci.c',
- 'librte_pmd_virtio/virtio_rxtx.c',
- 'librte_pmd_virtio/virtqueue.c',
-
- 'librte_pmd_enic/enic_clsf.c',
- 'librte_pmd_enic/enic_ethdev.c',
- 'librte_pmd_enic/enic_main.c',
- 'librte_pmd_enic/enic_res.c',
- 'librte_pmd_enic/vnic/vnic_cq.c',
- 'librte_pmd_enic/vnic/vnic_dev.c',
- 'librte_pmd_enic/vnic/vnic_intr.c',
- 'librte_pmd_enic/vnic/vnic_rq.c',
- 'librte_pmd_enic/vnic/vnic_rss.c',
- 'librte_pmd_enic/vnic/vnic_wq.c',
-
- 'librte_malloc/malloc_elem.c',
- 'librte_malloc/malloc_heap.c',
- 'librte_malloc/rte_malloc.c',
-
- 'librte_mbuf/rte_mbuf.c',
- 'librte_mempool/rte_mempool.c',
-
- 'librte_pmd_e1000/em_ethdev.c',
- 'librte_pmd_e1000/em_rxtx.c',
- 'librte_pmd_e1000/igb_ethdev.c',
- 'librte_pmd_e1000/igb_pf.c',
- 'librte_pmd_e1000/igb_rxtx.c',
- 'librte_pmd_e1000/e1000/e1000_80003es2lan.c',
- 'librte_pmd_e1000/e1000/e1000_82540.c',
- 'librte_pmd_e1000/e1000/e1000_82541.c',
- 'librte_pmd_e1000/e1000/e1000_82542.c',
- 'librte_pmd_e1000/e1000/e1000_82543.c',
- 'librte_pmd_e1000/e1000/e1000_82571.c',
- 'librte_pmd_e1000/e1000/e1000_82575.c',
- 'librte_pmd_e1000/e1000/e1000_api.c',
- 'librte_pmd_e1000/e1000/e1000_i210.c',
- 'librte_pmd_e1000/e1000/e1000_ich8lan.c',
- 'librte_pmd_e1000/e1000/e1000_mac.c',
- 'librte_pmd_e1000/e1000/e1000_manage.c',
- 'librte_pmd_e1000/e1000/e1000_mbx.c',
- 'librte_pmd_e1000/e1000/e1000_nvm.c',
- 'librte_pmd_e1000/e1000/e1000_osdep.c',
- 'librte_pmd_e1000/e1000/e1000_phy.c',
- 'librte_pmd_e1000/e1000/e1000_vf.c',
- 'librte_hash/rte_fbk_hash.c',
- 'librte_hash/rte_hash.c',
- 'librte_cmdline/cmdline.c',
- 'librte_cmdline/cmdline_cirbuf.c',
- 'librte_cmdline/cmdline_parse.c',
- 'librte_cmdline/cmdline_parse_etheraddr.c',
- 'librte_cmdline/cmdline_parse_ipaddr.c',
- 'librte_cmdline/cmdline_parse_num.c',
- 'librte_cmdline/cmdline_parse_portlist.c',
- 'librte_cmdline/cmdline_parse_string.c',
- 'librte_cmdline/cmdline_rdline.c',
- 'librte_cmdline/cmdline_socket.c',
- 'librte_cmdline/cmdline_vt100.c',
-
- 'librte_eal/common/eal_common_cpuflags.c',
- 'librte_eal/common/eal_common_dev.c',
- 'librte_eal/common/eal_common_devargs.c',
- 'librte_eal/common/eal_common_errno.c',
- 'librte_eal/common/eal_common_hexdump.c',
- 'librte_eal/common/eal_common_launch.c',
- 'librte_eal/common/eal_common_log.c',
- 'librte_eal/common/eal_common_memory.c',
- 'librte_eal/common/eal_common_memzone.c',
- 'librte_eal/common/eal_common_options.c',
- 'librte_eal/common/eal_common_pci.c',
- 'librte_eal/common/eal_common_string_fns.c',
- 'librte_eal/common/eal_common_tailqs.c',
-
- 'librte_ether/rte_ethdev.c',
-
- 'librte_pmd_ring/rte_eth_ring.c',
- 'librte_kvargs/rte_kvargs.c',
-
-
- 'librte_eal/linuxapp/eal/eal.c',
- 'librte_eal/linuxapp/eal/eal_alarm.c',
- 'librte_eal/linuxapp/eal/eal_debug.c',
- 'librte_eal/linuxapp/eal/eal_hugepage_info.c',
- 'librte_eal/linuxapp/eal/eal_interrupts.c',
- 'librte_eal/linuxapp/eal/eal_ivshmem.c',
- 'librte_eal/linuxapp/eal/eal_lcore.c',
- 'librte_eal/linuxapp/eal/eal_log.c',
- 'librte_eal/linuxapp/eal/eal_memory.c',
- 'librte_eal/linuxapp/eal/eal_pci.c',
- 'librte_eal/linuxapp/eal/eal_pci_uio.c',
- 'librte_eal/linuxapp/eal/eal_pci_vfio.c',
- 'librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c',
- 'librte_eal/linuxapp/eal/eal_thread.c',
- 'librte_eal/linuxapp/eal/eal_timer.c',
- #'librte_eal/linuxapp/eal/eal_xen_memory.c'
-
- 'librte_pmd_i40e/i40e_ethdev.c',
- 'librte_pmd_i40e/i40e_ethdev_vf.c',
- 'librte_pmd_i40e/i40e_fdir.c',
- 'librte_pmd_i40e/i40e_pf.c',
- 'librte_pmd_i40e/i40e_rxtx.c',
- 'librte_pmd_i40e/i40e/i40e_adminq.c',
- 'librte_pmd_i40e/i40e/i40e_common.c',
- 'librte_pmd_i40e/i40e/i40e_dcb.c',
- 'librte_pmd_i40e/i40e/i40e_diag.c',
- 'librte_pmd_i40e/i40e/i40e_hmc.c',
- 'librte_pmd_i40e/i40e/i40e_lan_hmc.c',
- 'librte_pmd_i40e/i40e/i40e_nvm.c'
+ 'drivers/net/af_packet/rte_eth_af_packet.c',
+ 'drivers/net/cxgbe/base/t4_hw.c',
+ 'drivers/net/cxgbe/cxgbe_ethdev.c',
+ 'drivers/net/cxgbe/cxgbe_main.c',
+ 'drivers/net/cxgbe/sge.c',
+ 'drivers/net/e1000/base/e1000_80003es2lan.c',
+ 'drivers/net/e1000/base/e1000_82540.c',
+ 'drivers/net/e1000/base/e1000_82541.c',
+ 'drivers/net/e1000/base/e1000_82542.c',
+ 'drivers/net/e1000/base/e1000_82543.c',
+ 'drivers/net/e1000/base/e1000_82571.c',
+ 'drivers/net/e1000/base/e1000_82575.c',
+ 'drivers/net/e1000/base/e1000_api.c',
+ 'drivers/net/e1000/base/e1000_i210.c',
+ 'drivers/net/e1000/base/e1000_ich8lan.c',
+ 'drivers/net/e1000/base/e1000_mac.c',
+ 'drivers/net/e1000/base/e1000_manage.c',
+ 'drivers/net/e1000/base/e1000_mbx.c',
+ 'drivers/net/e1000/base/e1000_nvm.c',
+ 'drivers/net/e1000/base/e1000_osdep.c',
+ 'drivers/net/e1000/base/e1000_phy.c',
+ 'drivers/net/e1000/base/e1000_vf.c',
+ 'drivers/net/e1000/em_ethdev.c',
+ 'drivers/net/e1000/em_rxtx.c',
+ 'drivers/net/e1000/igb_ethdev.c',
+ 'drivers/net/e1000/igb_pf.c',
+ 'drivers/net/e1000/igb_rxtx.c',
+ 'drivers/net/enic/base/vnic_cq.c',
+ 'drivers/net/enic/base/vnic_dev.c',
+ 'drivers/net/enic/base/vnic_intr.c',
+ 'drivers/net/enic/base/vnic_rq.c',
+ 'drivers/net/enic/base/vnic_rss.c',
+ 'drivers/net/enic/base/vnic_wq.c',
+ 'drivers/net/enic/enic_clsf.c',
+ 'drivers/net/enic/enic_ethdev.c',
+ 'drivers/net/enic/enic_main.c',
+ 'drivers/net/enic/enic_res.c',
+ 'drivers/net/fm10k/base/fm10k_api.c',
+ 'drivers/net/fm10k/base/fm10k_common.c',
+ 'drivers/net/fm10k/base/fm10k_mbx.c',
+ 'drivers/net/fm10k/base/fm10k_pf.c',
+ 'drivers/net/fm10k/base/fm10k_tlv.c',
+ 'drivers/net/fm10k/base/fm10k_vf.c',
+ 'drivers/net/fm10k/fm10k_ethdev.c',
+ 'drivers/net/fm10k/fm10k_rxtx.c',
+ 'drivers/net/fm10k/fm10k_rxtx_vec.c',
+ 'drivers/net/ixgbe/base/ixgbe_82598.c',
+ 'drivers/net/ixgbe/base/ixgbe_82599.c',
+ 'drivers/net/ixgbe/base/ixgbe_api.c',
+ 'drivers/net/ixgbe/base/ixgbe_common.c',
+ 'drivers/net/ixgbe/base/ixgbe_dcb.c',
+ 'drivers/net/ixgbe/base/ixgbe_dcb_82598.c',
+ 'drivers/net/ixgbe/base/ixgbe_dcb_82599.c',
+ 'drivers/net/ixgbe/base/ixgbe_mbx.c',
+ 'drivers/net/ixgbe/base/ixgbe_phy.c',
+ 'drivers/net/ixgbe/base/ixgbe_vf.c',
+ 'drivers/net/ixgbe/base/ixgbe_x540.c',
+ 'drivers/net/ixgbe/base/ixgbe_x550.c',
+ 'drivers/net/ixgbe/ixgbe_ethdev.c',
+ 'drivers/net/ixgbe/ixgbe_fdir.c',
+ 'drivers/net/ixgbe/ixgbe_pf.c',
+ 'drivers/net/ixgbe/ixgbe_rxtx.c',
+ 'drivers/net/ixgbe/ixgbe_rxtx_vec.c',
+ 'drivers/net/i40e/base/i40e_adminq.c',
+ 'drivers/net/i40e/base/i40e_common.c',
+ 'drivers/net/i40e/base/i40e_dcb.c',
+ 'drivers/net/i40e/base/i40e_diag.c',
+ 'drivers/net/i40e/base/i40e_hmc.c',
+ 'drivers/net/i40e/base/i40e_lan_hmc.c',
+ 'drivers/net/i40e/base/i40e_nvm.c',
+ 'drivers/net/i40e/i40e_ethdev_vf.c',
+ 'drivers/net/i40e/i40e_pf.c',
+ 'drivers/net/i40e/i40e_rxtx.c',
+ 'drivers/net/i40e/i40e_rxtx_vec.c',
+ 'drivers/net/i40e/i40e_fdir.c',
+ 'drivers/net/i40e/i40e_ethdev.c',
+ 'drivers/net/null/rte_eth_null.c',
+ 'drivers/net/ring/rte_eth_ring.c',
+ 'drivers/net/virtio/virtio_ethdev.c',
+ 'drivers/net/virtio/virtio_pci.c',
+ 'drivers/net/virtio/virtio_rxtx.c',
+ 'drivers/net/virtio/virtio_rxtx_simple.c',
+ 'drivers/net/virtio/virtqueue.c',
+ 'drivers/net/vmxnet3/vmxnet3_ethdev.c',
+ 'drivers/net/vmxnet3/vmxnet3_rxtx.c',
+ 'lib/librte_cfgfile/rte_cfgfile.c',
+ 'lib/librte_eal/common/eal_common_cpuflags.c',
+ 'lib/librte_eal/common/eal_common_dev.c',
+ 'lib/librte_eal/common/eal_common_devargs.c',
+ 'lib/librte_eal/common/eal_common_errno.c',
+ 'lib/librte_eal/common/eal_common_hexdump.c',
+ 'lib/librte_eal/common/eal_common_launch.c',
+ 'lib/librte_eal/common/eal_common_lcore.c',
+ 'lib/librte_eal/common/eal_common_log.c',
+ 'lib/librte_eal/common/eal_common_memory.c',
+ 'lib/librte_eal/common/eal_common_memzone.c',
+ 'lib/librte_eal/common/eal_common_options.c',
+ 'lib/librte_eal/common/eal_common_pci.c',
+ 'lib/librte_eal/common/eal_common_pci_uio.c',
+ 'lib/librte_eal/common/eal_common_string_fns.c',
+ 'lib/librte_eal/common/eal_common_tailqs.c',
+ 'lib/librte_eal/common/eal_common_thread.c',
+ 'lib/librte_eal/common/eal_common_timer.c',
+ 'lib/librte_eal/common/malloc_elem.c',
+ 'lib/librte_eal/common/malloc_heap.c',
+ 'lib/librte_eal/common/rte_keepalive.c',
+ 'lib/librte_eal/common/rte_malloc.c',
+ 'lib/librte_eal/linuxapp/eal/eal.c',
+ 'lib/librte_eal/linuxapp/eal/eal_alarm.c',
+ 'lib/librte_eal/linuxapp/eal/eal_debug.c',
+ 'lib/librte_eal/linuxapp/eal/eal_hugepage_info.c',
+ 'lib/librte_eal/linuxapp/eal/eal_interrupts.c',
+ 'lib/librte_eal/linuxapp/eal/eal_ivshmem.c',
+ 'lib/librte_eal/linuxapp/eal/eal_lcore.c',
+ 'lib/librte_eal/linuxapp/eal/eal_log.c',
+ 'lib/librte_eal/linuxapp/eal/eal_memory.c',
+ 'lib/librte_eal/linuxapp/eal/eal_pci.c',
+ 'lib/librte_eal/linuxapp/eal/eal_pci_uio.c',
+ 'lib/librte_eal/linuxapp/eal/eal_pci_vfio.c',
+ 'lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c',
+ 'lib/librte_eal/linuxapp/eal/eal_thread.c',
+ 'lib/librte_eal/linuxapp/eal/eal_timer.c',
+ 'lib/librte_ether/rte_ethdev.c',
+ 'lib/librte_hash/rte_cuckoo_hash.c',
+ 'lib/librte_kvargs/rte_kvargs.c',
+ 'lib/librte_mbuf/rte_mbuf.c',
+ 'lib/librte_mempool/rte_mempool.c',
+ 'lib/librte_pipeline/rte_pipeline.c',
+ 'lib/librte_ring/rte_ring.c',
]);
-
-
-
bp_dpdk =SrcGroups([
dpdk_src
]);
-
-
# this is the library dp going to falcon (and maybe other platforms)
bp =SrcGroups([
main_src,
@@ -388,7 +379,7 @@ common_flags = ['-DWIN_UCODE_SIM',
'-Wno-format',
'-Wno-deprecated-declarations',
'-DRTE_DPDK',
- '-include','../src/pal/linux_dpdk/dpdk180/rte_config.h'
+ '-include','../src/pal/linux_dpdk/dpdk22/rte_config.h'
]
common_flags_new = common_flags + [
@@ -428,67 +419,120 @@ includes_path =''' ../src/pal/linux_dpdk/
../external_libs/zmq/include/
../external_libs/json/
- ../src/dpdk_lib18/librte_eal/linuxapp/eal/include/
- ../src/dpdk_lib18/librte_eal/common/include/
- ../src/dpdk_lib18/librte_eal/common/
- ../src/dpdk_lib18/librte_eal/common/include/arch/x86
-
- ../src/dpdk_lib18/librte_ring/
- ../src/dpdk_lib18/librte_mempool/
- ../src/dpdk_lib18/librte_malloc/
- ../src/dpdk_lib18/librte_ether/
-
- ../src/dpdk_lib18/librte_cmdline/
- ../src/dpdk_lib18/librte_hash/
- ../src/dpdk_lib18/librte_lpm/
- ../src/dpdk_lib18/librte_mbuf/
- ../src/dpdk_lib18/librte_pmd_igb/
- ../src/dpdk_lib18/librte_pmd_ixgbe/
-
- ../src/dpdk_lib18/librte_pmd_enic/
- ../src/dpdk_lib18/librte_pmd_enic/vnic/
- ../src/dpdk_lib18/librte_pmd_virtio/
- ../src/dpdk_lib18/librte_pmd_vmxnet3/
-
- ../src/dpdk_lib18/librte_timer/
- ../src/dpdk_lib18/librte_net/
- ../src/dpdk_lib18/librte_pmd_ring/
- ../src/dpdk_lib18/librte_kvargs/
-
-
+../src/dpdk22/drivers/
+../src/dpdk22/drivers/net/
+../src/dpdk22/drivers/net/af_packet/
+../src/dpdk22/drivers/net/bnx2x/
+../src/dpdk22/drivers/net/bonding/
+../src/dpdk22/drivers/net/cxgbe/
+../src/dpdk22/drivers/net/cxgbe/base/
+../src/dpdk22/drivers/net/e1000/
+../src/dpdk22/drivers/net/e1000/base/
+../src/dpdk22/drivers/net/fm10k/
+../src/dpdk22/drivers/net/fm10k/base/
+../src/dpdk22/drivers/net/i40e/
+../src/dpdk22/drivers/net/i40e/base/
+../src/dpdk22/drivers/net/ixgbe/
+../src/dpdk22/drivers/net/ixgbe/base/
+../src/dpdk22/drivers/net/mlx4/
+../src/dpdk22/drivers/net/mlx5/
+../src/dpdk22/drivers/net/mpipe/
+../src/dpdk22/drivers/net/null/
+../src/dpdk22/drivers/net/pcap/
+../src/dpdk22/drivers/net/ring/
+../src/dpdk22/drivers/net/szedata2/
+../src/dpdk22/drivers/net/virtio/
+../src/dpdk22/drivers/net/xenvirt/
+../src/dpdk22/lib/
+../src/dpdk22/lib/librte_acl/
+../src/dpdk22/lib/librte_cfgfile/
+../src/dpdk22/lib/librte_compat/
+../src/dpdk22/lib/librte_distributor/
+../src/dpdk22/lib/librte_eal/
+../src/dpdk22/lib/librte_eal/common/
+../src/dpdk22/lib/librte_eal/common/include/
+../src/dpdk22/lib/librte_eal/common/include/arch/
+../src/dpdk22/lib/librte_eal/common/include/arch/x86/
+../src/dpdk22/lib/librte_eal/common/include/generic/
+../src/dpdk22/lib/librte_eal/linuxapp/
+../src/dpdk22/lib/librte_eal/linuxapp/eal/
+../src/dpdk22/lib/librte_eal/linuxapp/eal/include/
+../src/dpdk22/lib/librte_eal/linuxapp/eal/include/exec-env/
+../src/dpdk22/lib/librte_eal/linuxapp/igb_uio/
+../src/dpdk22/lib/librte_eal/linuxapp/xen_dom0/
+../src/dpdk22/lib/librte_ether/
+../src/dpdk22/lib/librte_hash/
+../src/dpdk22/lib/librte_kvargs/
+../src/dpdk22/lib/librte_mbuf/
+../src/dpdk22/lib/librte_mempool/
+../src/dpdk22/lib/librte_pipeline/
+../src/dpdk22/lib/librte_ring/
+../src/dpdk22/
''';
dpdk_includes_path =''' ../src/
../src/pal/linux_dpdk/
- ../src/dpdk_lib18/librte_eal/linuxapp/eal/include/
- ../src/dpdk_lib18/librte_eal/common/include/
- ../src/dpdk_lib18/librte_eal/common/
- ../src/dpdk_lib18/librte_eal/common/include/arch/x86
-
- ../src/dpdk_lib18/librte_pmd_enic/
- ../src/dpdk_lib18/librte_pmd_virtio/
- ../src/dpdk_lib18/librte_pmd_vmxnet3/
- ../src/dpdk_lib18/librte_pmd_enic/vnic/
-
- ../src/dpdk_lib18/librte_ring/
- ../src/dpdk_lib18/librte_mempool/
- ../src/dpdk_lib18/librte_malloc/
- ../src/dpdk_lib18/librte_ether/
- ../src/dpdk_lib18/librte_cmdline/
- ../src/dpdk_lib18/librte_hash/
- ../src/dpdk_lib18/librte_lpm/
- ../src/dpdk_lib18/librte_mbuf/
- ../src/dpdk_lib18/librte_pmd_igb/
- ../src/dpdk_lib18/librte_pmd_ixgbe/
- ../src/dpdk_lib18/librte_timer/
- ../src/dpdk_lib18/librte_net/
- ../src/dpdk_lib18/librte_pmd_ring/
- ../src/dpdk_lib18/librte_kvargs/
-
- ''';
-
-
-DPDK_WARNING=['-D_GNU_SOURCE'];
+ ../src/pal/linux_dpdk/dpdk22
+../src/dpdk22/drivers/
+../src/dpdk22/drivers/net/
+../src/dpdk22/drivers/net/af_packet/
+../src/dpdk22/drivers/net/bnx2x/
+../src/dpdk22/drivers/net/bonding/
+../src/dpdk22/drivers/net/cxgbe/
+../src/dpdk22/drivers/net/cxgbe/base/
+../src/dpdk22/drivers/net/e1000/
+../src/dpdk22/drivers/net/e1000/base/
+../src/dpdk22/drivers/net/enic/
+../src/dpdk22/drivers/net/enic/base/
+../src/dpdk22/drivers/net/fm10k/
+../src/dpdk22/drivers/net/fm10k/base/
+../src/dpdk22/drivers/net/i40e/
+../src/dpdk22/drivers/net/i40e/base/
+../src/dpdk22/drivers/net/ixgbe/
+../src/dpdk22/drivers/net/ixgbe/base/
+../src/dpdk22/drivers/net/mlx4/
+../src/dpdk22/drivers/net/mlx5/
+../src/dpdk22/drivers/net/mpipe/
+../src/dpdk22/drivers/net/null/
+../src/dpdk22/drivers/net/pcap/
+../src/dpdk22/drivers/net/ring/
+../src/dpdk22/drivers/net/virtio/
+../src/dpdk22/drivers/net/vmxnet3/
+../src/dpdk22/drivers/net/vmxnet3/base/
+../src/dpdk22/drivers/net/xenvirt/
+../src/dpdk22/lib/
+../src/dpdk22/lib/librte_acl/
+../src/dpdk22/lib/librte_cfgfile/
+../src/dpdk22/lib/librte_compat/
+../src/dpdk22/lib/librte_distributor/
+../src/dpdk22/lib/librte_eal/
+../src/dpdk22/lib/librte_eal/common/
+../src/dpdk22/lib/librte_eal/common/include/
+../src/dpdk22/lib/librte_eal/common/include/arch/
+../src/dpdk22/lib/librte_eal/common/include/arch/x86/
+../src/dpdk22/lib/librte_eal/common/include/generic/
+../src/dpdk22/lib/librte_eal/linuxapp/
+../src/dpdk22/lib/librte_eal/linuxapp/eal/
+../src/dpdk22/lib/librte_eal/linuxapp/eal/include/
+../src/dpdk22/lib/librte_eal/linuxapp/eal/include/exec-env/
+../src/dpdk22/lib/librte_eal/linuxapp/igb_uio/
+../src/dpdk22/lib/librte_eal/linuxapp/xen_dom0/
+../src/dpdk22/lib/librte_ether/
+../src/dpdk22/lib/librte_hash/
+../src/dpdk22/lib/librte_kvargs/
+../src/dpdk22/lib/librte_mbuf/
+../src/dpdk22/lib/librte_mempool/
+../src/dpdk22/lib/librte_pipeline/
+../src/dpdk22/lib/librte_ring/
+../src/dpdk22/lib/librte_net/
+../src/dpdk22/lib/librte_port/
+../src/dpdk22/lib/librte_table/
+../src/dpdk22/
+''';
+
+
+DPDK_FLAGS=['-D_GNU_SOURCE', '-DPF_DRIVER', '-DX722_SUPPORT', '-DVF_DRIVER', '-DINTEGRATED_VF'];
@@ -605,6 +649,7 @@ class build_option:
base_flags = [];
if self.is64Platform():
base_flags += ['-m64'];
+ base_flags += ['-lrt'];
else:
base_flags += ['-lrt'];
@@ -637,7 +682,7 @@ def build_prog (bld, build_obj):
features='c ',
includes = dpdk_includes_path,
- cflags = (build_obj.get_c_flags()+DPDK_WARNING ),
+ cflags = (build_obj.get_c_flags()+DPDK_FLAGS ),
source = bp_dpdk.file_list(top),
target=build_obj.get_dpdk_target()
);
diff --git a/src/dpdk_lib18/librte_pmd_af_packet/rte_eth_af_packet.c b/src/dpdk22/drivers/net/af_packet/rte_eth_af_packet.c
index 755780a1..767f36b6 100755..100644
--- a/src/dpdk_lib18/librte_pmd_af_packet/rte_eth_af_packet.c
+++ b/src/dpdk22/drivers/net/af_packet/rte_eth_af_packet.c
@@ -5,7 +5,7 @@
*
* Originally based upon librte_pmd_pcap code:
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* Copyright(c) 2014 6WIND S.A.
* All rights reserved.
*
@@ -74,6 +74,7 @@ struct pkt_rx_queue {
unsigned int framenum;
struct rte_mempool *mb_pool;
+ uint8_t in_port;
volatile unsigned long rx_pkts;
volatile unsigned long err_pkts;
@@ -160,6 +161,7 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
ppd->tp_status = TP_STATUS_KERNEL;
if (++framenum >= framecount)
framenum = 0;
+ mbuf->port = pkt_q->in_port;
/* account for the receive frame */
bufs[i] = mbuf;
@@ -220,7 +222,8 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
/* kick-off transmits */
- sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0);
+ if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1)
+ return 0; /* error sending -- no packets transmitted */
pkt_q->framenum = framenum;
pkt_q->tx_pkts += num_tx;
@@ -285,8 +288,6 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
const struct pmd_internals *internal = dev->data->dev_private;
- memset(igb_stats, 0, sizeof(*igb_stats));
-
imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
for (i = 0; i < imax; i++) {
@@ -350,15 +351,13 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
{
struct pmd_internals *internals = dev->data->dev_private;
struct pkt_rx_queue *pkt_q = &internals->rx_queue[rx_queue_id];
- struct rte_pktmbuf_pool_private *mbp_priv;
uint16_t buf_size;
pkt_q->mb_pool = mb_pool;
/* Now get the space available for data in the mbuf */
- mbp_priv = rte_mempool_get_priv(pkt_q->mb_pool);
- buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM);
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(pkt_q->mb_pool) -
+ RTE_PKTMBUF_HEADROOM);
if (ETH_FRAME_LEN > buf_size) {
RTE_LOG(ERR, PMD,
@@ -368,6 +367,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
}
dev->data->rx_queues[rx_queue_id] = pkt_q;
+ pkt_q->in_port = dev->data->port_id;
return 0;
}
@@ -386,7 +386,7 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
return 0;
}
-static struct eth_dev_ops ops = {
+static const struct eth_dev_ops ops = {
.dev_start = eth_dev_start,
.dev_stop = eth_dev_stop,
.dev_close = eth_dev_close,
@@ -435,7 +435,6 @@ rte_pmd_init_internals(const char *name,
struct rte_kvargs *kvlist)
{
struct rte_eth_dev_data *data = NULL;
- struct rte_pci_device *pci_dev = NULL;
struct rte_kvargs_pair *pair = NULL;
struct ifreq ifr;
size_t ifnamelen;
@@ -444,7 +443,8 @@ rte_pmd_init_internals(const char *name,
struct tpacket_req *req;
struct pkt_rx_queue *rx_queue;
struct pkt_tx_queue *tx_queue;
- int rc, qsockfd, tpver, discard;
+ int rc, tpver, discard;
+ int qsockfd = -1;
unsigned int i, q, rdsize;
int fanout_arg __rte_unused, bypass __rte_unused;
@@ -457,7 +457,7 @@ rte_pmd_init_internals(const char *name,
RTE_LOG(ERR, PMD,
"%s: no interface specified for AF_PACKET ethdev\n",
name);
- goto error;
+ goto error_early;
}
RTE_LOG(INFO, PMD,
@@ -470,16 +470,12 @@ rte_pmd_init_internals(const char *name,
*/
data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
if (data == NULL)
- goto error;
-
- pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
- if (pci_dev == NULL)
- goto error;
+ goto error_early;
*internals = rte_zmalloc_socket(name, sizeof(**internals),
0, numa_node);
if (*internals == NULL)
- goto error;
+ goto error_early;
for (q = 0; q < nb_queues; q++) {
(*internals)->rx_queue[q].map = MAP_FAILED;
@@ -501,13 +497,13 @@ rte_pmd_init_internals(const char *name,
RTE_LOG(ERR, PMD,
"%s: I/F name too long (%s)\n",
name, pair->value);
- goto error;
+ goto error_early;
}
if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) {
RTE_LOG(ERR, PMD,
"%s: ioctl failed (SIOCGIFINDEX)\n",
name);
- goto error;
+ goto error_early;
}
(*internals)->if_index = ifr.ifr_ifindex;
@@ -515,7 +511,7 @@ rte_pmd_init_internals(const char *name,
RTE_LOG(ERR, PMD,
"%s: ioctl failed (SIOCGIFHWADDR)\n",
name);
- goto error;
+ goto error_early;
}
memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN);
@@ -651,15 +647,15 @@ rte_pmd_init_internals(const char *name,
}
/* reserve an ethdev entry */
- *eth_dev = rte_eth_dev_allocate(name);
+ *eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
if (*eth_dev == NULL)
goto error;
/*
* now put it all together
* - store queue data in internals,
- * - store numa_node info in pci_driver
- * - point eth_dev_data to internals and pci_driver
+ * - store numa_node in eth_dev
+ * - point eth_dev_data to internals
* - and point eth_dev structure to new eth_dev_data structure
*/
@@ -672,30 +668,32 @@ rte_pmd_init_internals(const char *name,
data->dev_link = pmd_link;
data->mac_addrs = &(*internals)->eth_addr;
- pci_dev->numa_node = numa_node;
-
(*eth_dev)->data = data;
(*eth_dev)->dev_ops = &ops;
- (*eth_dev)->pci_dev = pci_dev;
+ (*eth_dev)->driver = NULL;
+ (*eth_dev)->data->dev_flags = 0;
+ (*eth_dev)->data->drv_name = drivername;
+ (*eth_dev)->data->kdrv = RTE_KDRV_NONE;
+ (*eth_dev)->data->numa_node = numa_node;
return 0;
error:
- if (data)
- rte_free(data);
- if (pci_dev)
- rte_free(pci_dev);
- if (*internals) {
- for (q = 0; q < nb_queues; q++) {
- munmap((*internals)->rx_queue[q].map,
- 2 * req->tp_block_size * req->tp_block_nr);
- if ((*internals)->rx_queue[q].rd)
- rte_free((*internals)->rx_queue[q].rd);
- if ((*internals)->tx_queue[q].rd)
- rte_free((*internals)->tx_queue[q].rd);
- }
- rte_free(*internals);
+ if (qsockfd != -1)
+ close(qsockfd);
+ for (q = 0; q < nb_queues; q++) {
+ munmap((*internals)->rx_queue[q].map,
+ 2 * req->tp_block_size * req->tp_block_nr);
+
+ rte_free((*internals)->rx_queue[q].rd);
+ rte_free((*internals)->tx_queue[q].rd);
+ if (((*internals)->rx_queue[q].sockfd != 0) &&
+ ((*internals)->rx_queue[q].sockfd != qsockfd))
+ close((*internals)->rx_queue[q].sockfd);
}
+ rte_free(*internals);
+error_early:
+ rte_free(data);
return -1;
}
@@ -804,7 +802,7 @@ int
rte_pmd_af_packet_devinit(const char *name, const char *params)
{
unsigned numa_node;
- int ret;
+ int ret = 0;
struct rte_kvargs *kvlist;
int sockfd = -1;
@@ -813,8 +811,10 @@ rte_pmd_af_packet_devinit(const char *name, const char *params)
numa_node = rte_socket_id();
kvlist = rte_kvargs_parse(params, valid_arguments);
- if (kvlist == NULL)
- return -1;
+ if (kvlist == NULL) {
+ ret = -1;
+ goto exit;
+ }
/*
* If iface argument is passed we open the NICs and use them for
@@ -825,16 +825,15 @@ rte_pmd_af_packet_devinit(const char *name, const char *params)
ret = rte_kvargs_process(kvlist, ETH_AF_PACKET_IFACE_ARG,
&open_packet_iface, &sockfd);
if (ret < 0)
- return -1;
+ goto exit;
}
ret = rte_eth_from_packet(name, &sockfd, numa_node, kvlist);
close(sockfd); /* no longer needed */
- if (ret < 0)
- return -1;
-
- return 0;
+exit:
+ rte_kvargs_free(kvlist);
+ return ret;
}
static struct rte_driver pmd_af_packet_drv = {
diff --git a/src/dpdk_lib18/librte_pmd_af_packet/rte_eth_af_packet.h b/src/dpdk22/drivers/net/af_packet/rte_eth_af_packet.h
index 5d1bc7e6..5d1bc7e6 100755..100644
--- a/src/dpdk_lib18/librte_pmd_af_packet/rte_eth_af_packet.h
+++ b/src/dpdk22/drivers/net/af_packet/rte_eth_af_packet.h
diff --git a/src/dpdk22/drivers/net/bnx2x/bnx2x.h b/src/dpdk22/drivers/net/bnx2x/bnx2x.h
new file mode 100644
index 00000000..2abab0c4
--- /dev/null
+++ b/src/dpdk22/drivers/net/bnx2x/bnx2x.h
@@ -0,0 +1,1987 @@
+/*-
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ */
+
+#ifndef __BNX2X_H__
+#define __BNX2X_H__
+
+#include <rte_byteorder.h>
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN RTE_LITTLE_ENDIAN
+#endif
+#undef __BIG_ENDIAN
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN RTE_BIG_ENDIAN
+#endif
+#undef __LITTLE_ENDIAN
+#endif
+
+#include "bnx2x_ethdev.h"
+#include "ecore_mfw_req.h"
+#include "ecore_fw_defs.h"
+#include "ecore_hsi.h"
+#include "ecore_reg.h"
+#include "bnx2x_stats.h"
+#include "bnx2x_vfpf.h"
+
+#include "elink.h"
+
+#ifndef __FreeBSD__
+#include <linux/pci_regs.h>
+
+#define PCIY_PMG PCI_CAP_ID_PM
+#define PCIY_MSI PCI_CAP_ID_MSI
+#define PCIY_EXPRESS PCI_CAP_ID_EXP
+#define PCIY_MSIX PCI_CAP_ID_MSIX
+#define PCIR_EXPRESS_DEVICE_STA PCI_EXP_TYPE_RC_EC
+#define PCIM_EXP_STA_TRANSACTION_PND PCI_EXP_DEVSTA_TRPND
+#define PCIR_EXPRESS_LINK_STA PCI_EXP_LNKSTA
+#define PCIM_LINK_STA_WIDTH PCI_EXP_LNKSTA_NLW
+#define PCIM_LINK_STA_SPEED PCI_EXP_LNKSTA_CLS
+#define PCIR_EXPRESS_DEVICE_CTL PCI_EXP_DEVCTL
+#define PCIM_EXP_CTL_MAX_PAYLOAD PCI_EXP_DEVCTL_PAYLOAD
+#define PCIM_EXP_CTL_MAX_READ_REQUEST PCI_EXP_DEVCTL_READRQ
+#define PCIR_POWER_STATUS PCI_PM_CTRL
+#define PCIM_PSTAT_DMASK PCI_PM_CTRL_STATE_MASK
+#define PCIM_PSTAT_PME PCI_PM_CTRL_PME_STATUS
+#define PCIM_PSTAT_D3 0x3
+#define PCIM_PSTAT_PMEENABLE PCI_PM_CTRL_PME_ENABLE
+#define PCIR_MSIX_CTRL PCI_MSIX_FLAGS
+#define PCIM_MSIXCTRL_TABLE_SIZE PCI_MSIX_FLAGS_QSIZE
+#else
+#include <dev/pci/pcireg.h>
+#endif
+
+#define IFM_10G_CX4 20 /* 10GBase CX4 copper */
+#define IFM_10G_TWINAX 22 /* 10GBase Twinax copper */
+#define IFM_10G_T 26 /* 10GBase-T - RJ45 */
+
+#ifndef __FreeBSD__
+#define PCIR_EXPRESS_DEVICE_STA PCI_EXP_TYPE_RC_EC
+#define PCIM_EXP_STA_TRANSACTION_PND PCI_EXP_DEVSTA_TRPND
+#define PCIR_EXPRESS_LINK_STA PCI_EXP_LNKSTA
+#define PCIM_LINK_STA_WIDTH PCI_EXP_LNKSTA_NLW
+#define PCIM_LINK_STA_SPEED PCI_EXP_LNKSTA_CLS
+#define PCIR_EXPRESS_DEVICE_CTL PCI_EXP_DEVCTL
+#define PCIM_EXP_CTL_MAX_PAYLOAD PCI_EXP_DEVCTL_PAYLOAD
+#define PCIM_EXP_CTL_MAX_READ_REQUEST PCI_EXP_DEVCTL_READRQ
+#else
+#define PCIR_EXPRESS_DEVICE_STA PCIER_DEVICE_STA
+#define PCIM_EXP_STA_TRANSACTION_PND PCIEM_STA_TRANSACTION_PND
+#define PCIR_EXPRESS_LINK_STA PCIER_LINK_STA
+#define PCIM_LINK_STA_WIDTH PCIEM_LINK_STA_WIDTH
+#define PCIM_LINK_STA_SPEED PCIEM_LINK_STA_SPEED
+#define PCIR_EXPRESS_DEVICE_CTL PCIER_DEVICE_CTL
+#define PCIM_EXP_CTL_MAX_PAYLOAD PCIEM_CTL_MAX_PAYLOAD
+#define PCIM_EXP_CTL_MAX_READ_REQUEST PCIEM_CTL_MAX_READ_REQUEST
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+#ifndef ARRSIZE
+#define ARRSIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef roundup
+#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#endif
+#ifndef ilog2
+static inline
+int bnx2x_ilog2(int x)
+{
+ int log = 0;
+ x >>= 1;
+
+ while(x) {
+ log++;
+ x >>= 1;
+ }
+ return log;
+}
+#define ilog2(x) bnx2x_ilog2(x)
+#endif
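+/* For instance, with the fallback above, bnx2x_ilog2(4096) == 12. */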
+
+#include "ecore_sp.h"
+
+struct bnx2x_device_type {
+ uint16_t bnx2x_vid;
+ uint16_t bnx2x_did;
+ uint16_t bnx2x_svid;
+ uint16_t bnx2x_sdid;
+ char *bnx2x_name;
+};
+
+#define RTE_MBUF_DATA_DMA_ADDR(mb) \
+ ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
+
+#define BNX2X_PAGE_SHIFT 12
+#define BNX2X_PAGE_SIZE (1 << BNX2X_PAGE_SHIFT)
+#define BNX2X_PAGE_MASK (~(BNX2X_PAGE_SIZE - 1))
+#define BNX2X_PAGE_ALIGN(addr) (((addr) + BNX2X_PAGE_SIZE - 1) & BNX2X_PAGE_MASK)
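+/* For instance, BNX2X_PAGE_ALIGN(0x1001) == 0x2000: any address is rounded
+ * up to the next 4KB boundary (already-aligned addresses are unchanged). */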
+
+#if BNX2X_PAGE_SIZE != 4096
+#error Page sizes other than 4KB are unsupported!
+#endif
+
+#define U64_LO(addr) ((uint32_t)(((uint64_t)(addr)) & 0xFFFFFFFF))
+#define U64_HI(addr) ((uint32_t)(((uint64_t)(addr)) >> 32))
+#define HILO_U64(hi, lo) ((((uint64_t)(hi)) << 32) + (lo))
+
+/* dropless fc FW/HW related params */
+#define BRB_SIZE(sc) (CHIP_IS_E3(sc) ? 1024 : 512)
+#define MAX_AGG_QS(sc) ETH_MAX_AGGREGATION_QUEUES_E1H_E2
+#define FW_DROP_LEVEL(sc) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(sc))
+#define FW_PREFETCH_CNT 16U
+#define DROPLESS_FC_HEADROOM 100
+
+#ifndef MCLSHIFT
+#define MCLSHIFT 11
+#endif
+#define MCLBYTES (1 << MCLSHIFT)
+
+#if !defined(MJUMPAGESIZE)
+#if BNX2X_PAGE_SIZE < 2048
+#define MJUMPAGESIZE MCLBYTES
+#elif BNX2X_PAGE_SIZE <= 8192
+#define MJUMPAGESIZE BNX2X_PAGE_SIZE
+#else
+#define MJUMPAGESIZE (8 * 1024)
+#endif
+#endif
+#define MJUM9BYTES (9 * 1024)
+#define MJUM16BYTES (16 * 1024)
+
+/*
+ * Transmit Buffer Descriptor (tx_bd) definitions
+ */
+/* NUM_TX_PAGES must be a power of 2. */
+#define TOTAL_TX_BD_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(union eth_tx_bd_types)) /* 256 */
+#define USABLE_TX_BD_PER_PAGE (TOTAL_TX_BD_PER_PAGE - 1) /* 255 */
+
+#define TOTAL_TX_BD(q) (TOTAL_TX_BD_PER_PAGE * q->nb_tx_pages) /* 512 */
+#define USABLE_TX_BD(q) (USABLE_TX_BD_PER_PAGE * q->nb_tx_pages) /* 510 */
+#define MAX_TX_BD(q) (TOTAL_TX_BD(q) - 1) /* 511 */
+
+#define NEXT_TX_BD(x) \
+ ((((x) & USABLE_TX_BD_PER_PAGE) == \
+ (USABLE_TX_BD_PER_PAGE - 1)) ? (x) + 2 : (x) + 1)
+
+#define TX_BD(x, q) ((x) & MAX_TX_BD(q))
+#define TX_PAGE(x) (((x) & ~USABLE_TX_BD_PER_PAGE) >> 8)
+#define TX_IDX(x) ((x) & USABLE_TX_BD_PER_PAGE)
+
+/*
+ * Trigger pending transmits when the number of available BDs is greater
+ * than 1/8 of the total number of usable BDs.
+ */
+#define BNX2X_TX_CLEANUP_THRESHOLD(q) (USABLE_TX_BD(q) / 8)
+#define BNX2X_TX_TIMEOUT 5
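+
+/*
+ * Worked example (assuming 16-byte BDs, as the counts above imply): a 4KB
+ * page holds 256 BDs, the last of which links to the next page, leaving
+ * 255 usable. NEXT_TX_BD() therefore steps by 2 from the last usable BD
+ * of a page so the link descriptor is skipped:
+ *   NEXT_TX_BD(253) == 254   (normal step)
+ *   NEXT_TX_BD(254) == 256   (skips BD 255, the next-page pointer)
+ */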
+
+/*
+ * Receive Buffer Descriptor (rx_bd) definitions
+ */
+//#define NUM_RX_PAGES 1
+#define TOTAL_RX_BD_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(struct eth_rx_bd)) /* 512 */
+#define USABLE_RX_BD_PER_PAGE (TOTAL_RX_BD_PER_PAGE - 2) /* 510 */
+#define RX_BD_PER_PAGE_MASK (TOTAL_RX_BD_PER_PAGE - 1) /* 511 */
+#define TOTAL_RX_BD(q) (TOTAL_RX_BD_PER_PAGE * q->nb_rx_pages) /* 512 */
+#define USABLE_RX_BD(q) (USABLE_RX_BD_PER_PAGE * q->nb_rx_pages) /* 510 */
+#define MAX_RX_BD(q) (TOTAL_RX_BD(q) - 1) /* 511 */
+#define RX_BD_NEXT_PAGE_DESC_CNT 2
+
+#define NEXT_RX_BD(x) \
+ ((((x) & RX_BD_PER_PAGE_MASK) == \
+ (USABLE_RX_BD_PER_PAGE - 1)) ? (x) + 3 : (x) + 1)
+
+/* x & 0x3ff */
+#define RX_BD(x, q) ((x) & MAX_RX_BD(q))
+#define RX_PAGE(x) (((x) & ~RX_BD_PER_PAGE_MASK) >> 9)
+#define RX_IDX(x) ((x) & RX_BD_PER_PAGE_MASK)
+
+/*
+ * Receive Completion Queue definitions
+ */
+//#define NUM_RCQ_PAGES (NUM_RX_PAGES * 4)
+#define TOTAL_RCQ_ENTRIES_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(union eth_rx_cqe)) /* 128 */
+#define USABLE_RCQ_ENTRIES_PER_PAGE (TOTAL_RCQ_ENTRIES_PER_PAGE - 1) /* 127 */
+#define TOTAL_RCQ_ENTRIES(q) (TOTAL_RCQ_ENTRIES_PER_PAGE * q->nb_cq_pages) /* 512 */
+#define USABLE_RCQ_ENTRIES(q) (USABLE_RCQ_ENTRIES_PER_PAGE * q->nb_cq_pages) /* 508 */
+#define MAX_RCQ_ENTRIES(q) (TOTAL_RCQ_ENTRIES(q) - 1) /* 511 */
+#define RCQ_NEXT_PAGE_DESC_CNT 1
+
+#define NEXT_RCQ_IDX(x) \
+ ((((x) & USABLE_RCQ_ENTRIES_PER_PAGE) == \
+ (USABLE_RCQ_ENTRIES_PER_PAGE - 1)) ? (x) + 2 : (x) + 1)
+
+#define CQE_BD_REL \
+ (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
+
+#define RCQ_BD_PAGES(q) \
+ (q->nb_rx_pages * CQE_BD_REL)
+
+#define RCQ_ENTRY(x, q) ((x) & MAX_RCQ_ENTRIES(q))
+#define RCQ_PAGE(x) (((x) & ~USABLE_RCQ_ENTRIES_PER_PAGE) >> 7)
+#define RCQ_IDX(x) ((x) & USABLE_RCQ_ENTRIES_PER_PAGE)
+
+/*
+ * dropless fc calculations for BDs
+ * The number of BDs should equal the number of buffers in the BRB;
+ * the low threshold also accounts for the RX_BD_NEXT_PAGE_DESC_CNT
+ * "next" elements on each page.
+ */
+#define NUM_BD_REQ(sc) \
+ BRB_SIZE(sc)
+#define NUM_BD_PG_REQ(sc) \
+ ((NUM_BD_REQ(sc) + USABLE_RX_BD_PER_PAGE - 1) / USABLE_RX_BD_PER_PAGE)
+#define BD_TH_LO(sc) \
+ (NUM_BD_REQ(sc) + \
+ NUM_BD_PG_REQ(sc) * RX_BD_NEXT_PAGE_DESC_CNT + \
+ FW_DROP_LEVEL(sc))
+#define BD_TH_HI(sc) \
+ (BD_TH_LO(sc) + DROPLESS_FC_HEADROOM)
+#define MIN_RX_AVAIL(sc) \
+ ((sc)->dropless_fc ? BD_TH_HI(sc) + 128 : 128)
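+
+/*
+ * Worked example: on a non-E3 chip BRB_SIZE == 512, so
+ * NUM_BD_PG_REQ == ceil(512 / 510) == 2 and
+ * BD_TH_LO == 512 + 2 * RX_BD_NEXT_PAGE_DESC_CNT + FW_DROP_LEVEL
+ *          == 516 + FW_DROP_LEVEL,
+ * where FW_DROP_LEVEL depends on MAX_SPQ_PENDING and MAX_AGG_QS.
+ * BD_TH_HI then adds the 100-entry DROPLESS_FC_HEADROOM on top.
+ */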
+
+/*
+ * dropless fc calculations for RCQs
+ * The number of RCQs should equal the number of buffers in the BRB;
+ * the low threshold also accounts for the RCQ_NEXT_PAGE_DESC_CNT
+ * "next" elements on each page.
+ */
+#define NUM_RCQ_REQ(sc) \
+ BRB_SIZE(sc)
+#define NUM_RCQ_PG_REQ(sc) \
+ ((NUM_RCQ_REQ(sc) + USABLE_RCQ_ENTRIES_PER_PAGE - 1) / USABLE_RCQ_ENTRIES_PER_PAGE)
+#define RCQ_TH_LO(sc) \
+ (NUM_RCQ_REQ(sc) + \
+ NUM_RCQ_PG_REQ(sc) * RCQ_NEXT_PAGE_DESC_CNT + \
+ FW_DROP_LEVEL(sc))
+#define RCQ_TH_HI(sc) \
+ (RCQ_TH_LO(sc) + DROPLESS_FC_HEADROOM)
+
+/* Load / Unload modes */
+#define LOAD_NORMAL 0
+#define LOAD_OPEN 1
+#define LOAD_DIAG 2
+#define LOAD_LOOPBACK_EXT 3
+#define UNLOAD_NORMAL 0
+#define UNLOAD_CLOSE 1
+#define UNLOAD_RECOVERY 2
+
+/* Some constants... */
+//#define MAX_PATH_NUM 2
+//#define E2_MAX_NUM_OF_VFS 64
+//#define E1H_FUNC_MAX 8
+//#define E2_FUNC_MAX 4 /* per path */
+#define MAX_VNIC_NUM 4
+#define MAX_FUNC_NUM 8 /* common to all chips */
+//#define MAX_NDSB HC_SB_MAX_SB_E2 /* max non-default status block */
+#define MAX_RSS_CHAINS 16 /* a constant for HW limit */
+#define MAX_MSI_VECTOR 8 /* a constant for HW limit */
+
+#define ILT_NUM_PAGE_ENTRIES 3072
+/*
+ * On the 57711 we use the whole table since we have 8 functions.
+ * The 57712 has only 4 functions, but uses the same size per function,
+ * so only half of the table is used.
+ */
+#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES / 8)
+#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
+/*
+ * The physical address is shifted right 12 bits and has a 1 (valid)
+ * bit added at the 53rd bit; since this is a wide register(TM)
+ * we split it into two 32-bit writes.
+ */
+#define ONCHIP_ADDR1(x) ((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF))
+#define ONCHIP_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)x >> 44)))
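+/*
+ * Worked example: for x == 0x0001234567890000,
+ *   ONCHIP_ADDR1(x) == 0x34567890   (address bits 12..43)
+ *   ONCHIP_ADDR2(x) == 0x100012     (address bits 44..63 plus the valid bit)
+ */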
+
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_HLEN 14
+#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
+#define ETH_MIN_PACKET_SIZE 60
+#define ETH_MAX_PACKET_SIZE ETHERMTU /* 1500 */
+#define ETH_MAX_JUMBO_PACKET_SIZE 9600
+/* TCP with Timestamp Option (32) + IPv6 (40) */
+
+/* max supported alignment is 256 (8 shift) */
+#define BNX2X_RX_ALIGN_SHIFT 8
+/* FW uses 2 cache lines alignment for start packet and size */
+#define BNX2X_FW_RX_ALIGN_START (1 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_FW_RX_ALIGN_END (1 << BNX2X_RX_ALIGN_SHIFT)
+
+#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
+
+struct bnx2x_bar {
+ void *base_addr;
+};
+
+/* Used to manage DMA allocations. */
+struct bnx2x_dma {
+ struct bnx2x_softc *sc;
+ phys_addr_t paddr;
+ void *vaddr;
+ int nseg;
+ char msg[RTE_MEMZONE_NAMESIZE - 6];
+};
+
+/* attn group wiring */
+#define MAX_DYNAMIC_ATTN_GRPS 8
+
+struct attn_route {
+ uint32_t sig[5];
+};
+
+struct iro {
+ uint32_t base;
+ uint16_t m1;
+ uint16_t m2;
+ uint16_t m3;
+ uint16_t size;
+};
+
+union bnx2x_host_hc_status_block {
+ /* pointer to fp status block e2 */
+ struct host_hc_status_block_e2 *e2_sb;
+ /* pointer to fp status block e1x */
+ struct host_hc_status_block_e1x *e1x_sb;
+};
+
+union bnx2x_db_prod {
+ struct doorbell_set_prod data;
+ uint32_t raw;
+};
+
+struct bnx2x_sw_tx_bd {
+ struct mbuf *m;
+ uint16_t first_bd;
+ uint8_t flags;
+/* set on the first BD descriptor when there is a split BD */
+#define BNX2X_TSO_SPLIT_BD (1 << 0)
+};
+
+/*
+ * This is the HSI fastpath data structure. There can be up to MAX_RSS_CHAINS
+ * instances of the fastpath structure when using multiple queues.
+ */
+struct bnx2x_fastpath {
+ /* pointer back to parent structure */
+ struct bnx2x_softc *sc;
+
+ /* status block */
+ struct bnx2x_dma sb_dma;
+ union bnx2x_host_hc_status_block status_block;
+
+ phys_addr_t tx_desc_mapping;
+
+ phys_addr_t rx_desc_mapping;
+ phys_addr_t rx_comp_mapping;
+
+ uint16_t *sb_index_values;
+ uint16_t *sb_running_index;
+ uint32_t ustorm_rx_prods_offset;
+
+ uint8_t igu_sb_id; /* status block number in HW */
+ uint8_t fw_sb_id; /* status block number in FW */
+
+ uint32_t rx_buf_size;
+ int mbuf_alloc_size;
+
+ int state;
+#define BNX2X_FP_STATE_CLOSED 0x01
+#define BNX2X_FP_STATE_IRQ 0x02
+#define BNX2X_FP_STATE_OPENING 0x04
+#define BNX2X_FP_STATE_OPEN 0x08
+#define BNX2X_FP_STATE_HALTING 0x10
+#define BNX2X_FP_STATE_HALTED 0x20
+
+ /* reference back to this fastpath queue number */
+ uint8_t index; /* this is also the 'cid' */
+#define FP_IDX(fp) (fp->index)
+
+ /* ethernet client ID (each fastpath set of RX/TX/CQE is a client) */
+ uint8_t cl_id;
+#define FP_CL_ID(fp) (fp->cl_id)
+ uint8_t cl_qzone_id;
+
+ uint16_t fp_hc_idx;
+
+ union bnx2x_db_prod tx_db;
+
+ struct tstorm_per_queue_stats old_tclient;
+ struct ustorm_per_queue_stats old_uclient;
+ struct xstorm_per_queue_stats old_xclient;
+ struct bnx2x_eth_q_stats eth_q_stats;
+ struct bnx2x_eth_q_stats_old eth_q_stats_old;
+
+ /* Pointer to the receive consumer in the status block */
+ uint16_t *rx_cq_cons_sb;
+
+ /* Pointer to the transmit consumer in the status block */
+ uint16_t *tx_cons_sb;
+
+ /* transmit timeout until chip reset */
+ int watchdog_timer;
+
+}; /* struct bnx2x_fastpath */
+
+#define BNX2X_MAX_NUM_OF_VFS 64
+#define BNX2X_VF_ID_INVALID 0xFF
+
+/* maximum number of fast-path interrupt contexts */
+#define FP_SB_MAX_E1x 16
+#define FP_SB_MAX_E2 HC_SB_MAX_SB_E2
+
+union cdu_context {
+ struct eth_context eth;
+ char pad[1024];
+};
+
+/* CDU host DB constants */
+#define CDU_ILT_PAGE_SZ_HW 2
+#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
+#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
+
+#define CNIC_ISCSI_CID_MAX 256
+#define CNIC_FCOE_CID_MAX 2048
+#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
+#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
+
+#define QM_ILT_PAGE_SZ_HW 0
+#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
+#define QM_CID_ROUND 1024
+
+/* TM (timers) host DB constants */
+#define TM_ILT_PAGE_SZ_HW 0
+#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
+/*#define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
+#define TM_CONN_NUM 1024
+#define TM_ILT_SZ (8 * TM_CONN_NUM)
+#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
+
+/* SRC (Searcher) host DB constants */
+#define SRC_ILT_PAGE_SZ_HW 0
+#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */
+#define SRC_HASH_BITS 10
+#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */
+#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM)
+#define SRC_T2_SZ SRC_ILT_SZ
+#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
+
+struct hw_context {
+ struct bnx2x_dma vcxt_dma;
+ union cdu_context *vcxt;
+ //phys_addr_t cxt_mapping;
+ size_t size;
+};
+
+#define SM_RX_ID 0
+#define SM_TX_ID 1
+
+/* defines for multiple tx priority indices */
+#define FIRST_TX_ONLY_COS_INDEX 1
+#define FIRST_TX_COS_INDEX 0
+
+#define CID_TO_FP(cid, sc) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(sc))
+
+#define HC_INDEX_ETH_RX_CQ_CONS 1
+#define HC_INDEX_OOO_TX_CQ_CONS 4
+#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5
+#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6
+#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7
+#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
+
+/* congestion management fairness mode */
+#define CMNG_FNS_NONE 0
+#define CMNG_FNS_MINMAX 1
+
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE 100
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC 400
+/* number of bytes in single QM arbitration cycle -
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES 160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+/* how many bytes above threshold for the minimal credit of the Min algorithm */
+#define MIN_ABOVE_THRESH 32768
+/* fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+/* memory of fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
+
+#define HC_SEG_ACCESS_DEF 0 /* Driver decision 0-3 */
+#define HC_SEG_ACCESS_ATTN 4
+#define HC_SEG_ACCESS_NORM 0 /* Driver decision 0-1 */
+
+/*
+ * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
+ * controlled by the number of fast-path status blocks supported by the
+ * device (HW/FW). Each fast-path status block (FP-SB), aka non-default
+ * status block, represents an independent interrupt context that can
+ * serve a regular L2 networking queue. However, special L2 queues such
+ * as the FCoE queue do not require a FP-SB, and other components like
+ * the CNIC may consume FP-SBs, reducing the number of possible L2 queues.
+ *
+ * If the maximum number of FP-SB available is X then:
+ * a. If CNIC is supported it consumes 1 FP-SB thus the max number of
+ * regular L2 queues is Y=X-1
+ * b. In MF mode the actual number of L2 queues is Y = (X-1)/MF_factor
+ * c. If the FCoE L2 queue is supported the actual number of L2 queues
+ * is Y+1
+ * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
+ * slow-path interrupts) or Y+2 if CNIC is supported (one additional
+ * FP interrupt context for the CNIC).
+ * e. The number of HW contexts (CID count) is always X, or X+1 if the FCoE
+ *    L2 queue is supported. The CID for the FCoE L2 queue is always X.
+ *
+ * So this is quite simple for now as no ULPs are supported yet. :-)
+ */
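+/*
+ * Worked example: with X == FP_SB_MAX_E1x == 16 and CNIC enabled,
+ * Y == 15 regular L2 queues, 17 MSIX vectors (15 queues + slow-path +
+ * CNIC), and 16 CIDs -- or 17 if the FCoE L2 queue is supported.
+ */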
+#define BNX2X_NUM_QUEUES(sc) ((sc)->num_queues)
+#define BNX2X_NUM_ETH_QUEUES(sc) BNX2X_NUM_QUEUES(sc)
+#define BNX2X_NUM_NON_CNIC_QUEUES(sc) BNX2X_NUM_QUEUES(sc)
+#define BNX2X_NUM_RX_QUEUES(sc) BNX2X_NUM_QUEUES(sc)
+
+#define FOR_EACH_QUEUE(sc, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(sc); (var)++)
+
+#define FOR_EACH_NONDEFAULT_QUEUE(sc, var) \
+ for ((var) = 1; (var) < BNX2X_NUM_QUEUES(sc); (var)++)
+
+#define FOR_EACH_ETH_QUEUE(sc, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(sc); (var)++)
+
+#define FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, var) \
+ for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(sc); (var)++)
+
+#define FOR_EACH_COS_IN_TX_QUEUE(sc, var) \
+ for ((var) = 0; (var) < (sc)->max_cos; (var)++)
+
+#define FOR_EACH_CNIC_QUEUE(sc, var) \
+ for ((var) = BNX2X_NUM_ETH_QUEUES(sc); \
+ (var) < BNX2X_NUM_QUEUES(sc); \
+ (var)++)
+
+enum {
+ OOO_IDX_OFFSET,
+ FCOE_IDX_OFFSET,
+ FWD_IDX_OFFSET,
+};
+
+#define FCOE_IDX(sc) (BNX2X_NUM_NON_CNIC_QUEUES(sc) + FCOE_IDX_OFFSET)
+#define bnx2x_fcoe_fp(sc) (&sc->fp[FCOE_IDX(sc)])
+#define bnx2x_fcoe(sc, var) (bnx2x_fcoe_fp(sc)->var)
+#define bnx2x_fcoe_inner_sp_obj(sc) (&sc->sp_objs[FCOE_IDX(sc)])
+#define bnx2x_fcoe_sp_obj(sc, var) (bnx2x_fcoe_inner_sp_obj(sc)->var)
+#define bnx2x_fcoe_tx(sc, var) (bnx2x_fcoe_fp(sc)->txdata_ptr[FIRST_TX_COS_INDEX]->var)
+
+#define OOO_IDX(sc) (BNX2X_NUM_NON_CNIC_QUEUES(sc) + OOO_IDX_OFFSET)
+#define bnx2x_ooo_fp(sc) (&sc->fp[OOO_IDX(sc)])
+#define bnx2x_ooo(sc, var) (bnx2x_ooo_fp(sc)->var)
+#define bnx2x_ooo_inner_sp_obj(sc) (&sc->sp_objs[OOO_IDX(sc)])
+#define bnx2x_ooo_sp_obj(sc, var) (bnx2x_ooo_inner_sp_obj(sc)->var)
+
+#define FWD_IDX(sc) (BNX2X_NUM_NON_CNIC_QUEUES(sc) + FWD_IDX_OFFSET)
+#define bnx2x_fwd_fp(sc) (&sc->fp[FWD_IDX(sc)])
+#define bnx2x_fwd(sc, var) (bnx2x_fwd_fp(sc)->var)
+#define bnx2x_fwd_inner_sp_obj(sc) (&sc->sp_objs[FWD_IDX(sc)])
+#define bnx2x_fwd_sp_obj(sc, var) (bnx2x_fwd_inner_sp_obj(sc)->var)
+#define bnx2x_fwd_txdata(fp) (fp->txdata_ptr[FIRST_TX_COS_INDEX])
+
+#define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->sc))
+#define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->sc))
+#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(sc))
+#define IS_FWD_FP(fp) ((fp)->index == FWD_IDX((fp)->sc))
+#define IS_FWD_IDX(idx) ((idx) == FWD_IDX(sc))
+#define IS_OOO_FP(fp) ((fp)->index == OOO_IDX((fp)->sc))
+#define IS_OOO_IDX(idx) ((idx) == OOO_IDX(sc))
+
+enum {
+ BNX2X_PORT_QUERY_IDX,
+ BNX2X_PF_QUERY_IDX,
+ BNX2X_FCOE_QUERY_IDX,
+ BNX2X_FIRST_QUEUE_QUERY_IDX,
+};
+
+struct bnx2x_fw_stats_req {
+ struct stats_query_header hdr;
+ struct stats_query_entry query[FP_SB_MAX_E1x +
+ BNX2X_FIRST_QUEUE_QUERY_IDX];
+};
+
+struct bnx2x_fw_stats_data {
+ struct stats_counter storm_counters;
+ struct per_port_stats port;
+ struct per_pf_stats pf;
+ struct per_queue_stats queue_stats[1];
+};
+
+/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
+#define BNX2X_IGU_STAS_MSG_VF_CNT 64
+#define BNX2X_IGU_STAS_MSG_PF_CNT 4
+
+#define MAX_DMAE_C 8
+
+/*
+ * This is the slowpath data structure. It is mapped into non-paged memory
+ * so that the hardware can access its contents directly; it must be page
+ * aligned.
+ */
+struct bnx2x_slowpath {
+
+ /* used by the DMAE command executer */
+ struct dmae_command dmae[MAX_DMAE_C];
+
+ /* statistics completion */
+ uint32_t stats_comp;
+
+ /* firmware defined statistics blocks */
+ union mac_stats mac_stats;
+ struct nig_stats nig_stats;
+ struct host_port_stats port_stats;
+ struct host_func_stats func_stats;
+
+ /* DMAE completion value and data source/sink */
+ uint32_t wb_comp;
+ uint32_t wb_data[4];
+
+ union {
+ struct mac_configuration_cmd e1x;
+ struct eth_classify_rules_ramrod_data e2;
+ } mac_rdata;
+
+ union {
+ struct tstorm_eth_mac_filter_config e1x;
+ struct eth_filter_rules_ramrod_data e2;
+ } rx_mode_rdata;
+
+ struct eth_rss_update_ramrod_data rss_rdata;
+
+ union {
+ struct mac_configuration_cmd e1;
+ struct eth_multicast_rules_ramrod_data e2;
+ } mcast_rdata;
+
+ union {
+ struct function_start_data func_start;
+ struct flow_control_configuration pfc_config; /* for DCBX ramrod */
+ } func_rdata;
+
+ /* Queue State related ramrods */
+ union {
+ struct client_init_ramrod_data init_data;
+ struct client_update_ramrod_data update_data;
+ } q_rdata;
+
+ /*
+ * The AFEX ramrod cannot be part of the func_rdata union because these
+ * events might arrive in parallel to other events from func_rdata.
+ * If they were defined in the same union the data could get corrupted.
+ */
+ struct afex_vif_list_ramrod_data func_afex_rdata;
+
+ union drv_info_to_mcp drv_info_to_mcp;
+}; /* struct bnx2x_slowpath */
+
+/*
+ * Port specific data structure.
+ */
+struct bnx2x_port {
+ /*
+ * Port Management Function (for 57711E only).
+ * When this field is set the driver instance is
+ * responsible for managing port specific
+ * configurations such as handling link attentions.
+ */
+ uint32_t pmf;
+
+ /* Ethernet maximum transmission unit. */
+ uint16_t ether_mtu;
+
+ uint32_t link_config[ELINK_LINK_CONFIG_SIZE];
+
+ uint32_t ext_phy_config;
+
+ /* Port feature config.*/
+ uint32_t config;
+
+ /* Defines the features supported by the PHY. */
+ uint32_t supported[ELINK_LINK_CONFIG_SIZE];
+
+ /* Defines the features advertised by the PHY. */
+ uint32_t advertising[ELINK_LINK_CONFIG_SIZE];
+#define ADVERTISED_10baseT_Half (1 << 1)
+#define ADVERTISED_10baseT_Full (1 << 2)
+#define ADVERTISED_100baseT_Half (1 << 3)
+#define ADVERTISED_100baseT_Full (1 << 4)
+#define ADVERTISED_1000baseT_Half (1 << 5)
+#define ADVERTISED_1000baseT_Full (1 << 6)
+#define ADVERTISED_TP (1 << 7)
+#define ADVERTISED_FIBRE (1 << 8)
+#define ADVERTISED_Autoneg (1 << 9)
+#define ADVERTISED_Asym_Pause (1 << 10)
+#define ADVERTISED_Pause (1 << 11)
+#define ADVERTISED_2500baseX_Full (1 << 15)
+#define ADVERTISED_10000baseT_Full (1 << 16)
+
+ uint32_t phy_addr;
+
+ /*
+ * MCP scratchpad address for port specific statistics.
+ * The device is responsible for writing statistics
+ * back to the MCP for use with management firmware such
+ * as UMP/NC-SI.
+ */
+ uint32_t port_stx;
+
+ struct nig_stats old_nig_stats;
+}; /* struct bnx2x_port */
+
+struct bnx2x_mf_info {
+ uint32_t mf_config[E1HVN_MAX];
+
+ uint32_t vnics_per_port; /* 1, 2 or 4 */
+ uint32_t multi_vnics_mode; /* can be set even if vnics_per_port = 1 */
+ uint32_t path_has_ovlan; /* MF mode in the path (can be different from the MF mode of the function) */
+
+#define IS_MULTI_VNIC(sc) ((sc)->devinfo.mf_info.multi_vnics_mode)
+#define VNICS_PER_PORT(sc) ((sc)->devinfo.mf_info.vnics_per_port)
+#define VNICS_PER_PATH(sc) \
+ ((sc)->devinfo.mf_info.vnics_per_port * \
+ ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 1 ))
+
+ uint8_t min_bw[MAX_VNIC_NUM];
+ uint8_t max_bw[MAX_VNIC_NUM];
+
+ uint16_t ext_id; /* vnic outer vlan or VIF ID */
+#define VALID_OVLAN(ovlan) ((ovlan) <= 4096)
+#define INVALID_VIF_ID 0xFFFF
+#define OVLAN(sc) ((sc)->devinfo.mf_info.ext_id)
+#define VIF_ID(sc) ((sc)->devinfo.mf_info.ext_id)
+
+ uint16_t default_vlan;
+#define NIV_DEFAULT_VLAN(sc) ((sc)->devinfo.mf_info.default_vlan)
+
+ uint8_t niv_allowed_priorities;
+#define NIV_ALLOWED_PRIORITIES(sc) ((sc)->devinfo.mf_info.niv_allowed_priorities)
+
+ uint8_t niv_default_cos;
+#define NIV_DEFAULT_COS(sc) ((sc)->devinfo.mf_info.niv_default_cos)
+
+ uint8_t niv_mba_enabled;
+
+ enum mf_cfg_afex_vlan_mode afex_vlan_mode;
+#define AFEX_VLAN_MODE(sc) ((sc)->devinfo.mf_info.afex_vlan_mode)
+ int afex_def_vlan_tag;
+ uint32_t pending_max;
+
+ uint16_t flags;
+#define MF_INFO_VALID_MAC 0x0001
+
+ uint16_t mf_ov;
+ uint8_t mf_mode; /* Switch-Dependent or Switch-Independent */
+#define IS_MF(sc) \
+ (IS_MULTI_VNIC(sc) && \
+ ((sc)->devinfo.mf_info.mf_mode != 0))
+#define IS_MF_SD(sc) \
+ (IS_MULTI_VNIC(sc) && \
+ ((sc)->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD))
+#define IS_MF_SI(sc) \
+ (IS_MULTI_VNIC(sc) && \
+ ((sc)->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI))
+#define IS_MF_AFEX(sc) \
+ (IS_MULTI_VNIC(sc) && \
+ ((sc)->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX))
+#define IS_MF_SD_MODE(sc) IS_MF_SD(sc)
+#define IS_MF_SI_MODE(sc) IS_MF_SI(sc)
+#define IS_MF_AFEX_MODE(sc) IS_MF_AFEX(sc)
+
+ uint32_t mf_protos_supported;
+ #define MF_PROTO_SUPPORT_ETHERNET 0x1
+ #define MF_PROTO_SUPPORT_ISCSI 0x2
+ #define MF_PROTO_SUPPORT_FCOE 0x4
+}; /* struct bnx2x_mf_info */
+
+/* Device information data structure. */
+struct bnx2x_devinfo {
+ /* PCIe info */
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint16_t subvendor_id;
+ uint16_t subdevice_id;
+
+ /*
+ * chip_id = 0b'CCCCCCCCCCCCCCCCRRRRMMMMMMMMBBBB'
+ * C = Chip Number (bits 16-31)
+ * R = Chip Revision (bits 12-15)
+ * M = Chip Metal (bits 4-11)
+ * B = Chip Bond ID (bits 0-3)
+ */
+ uint32_t chip_id;
+#define CHIP_ID(sc) ((sc)->devinfo.chip_id & 0xffff0000)
+#define CHIP_NUM(sc) ((sc)->devinfo.chip_id >> 16)
+/* device ids */
+#define CHIP_NUM_57711 0x164f
+#define CHIP_NUM_57711E 0x1650
+#define CHIP_NUM_57712 0x1662
+#define CHIP_NUM_57712_MF 0x1663
+#define CHIP_NUM_57712_VF 0x166f
+#define CHIP_NUM_57800 0x168a
+#define CHIP_NUM_57800_MF 0x16a5
+#define CHIP_NUM_57800_VF 0x16a9
+#define CHIP_NUM_57810 0x168e
+#define CHIP_NUM_57810_MF 0x16ae
+#define CHIP_NUM_57810_VF 0x16af
+#define CHIP_NUM_57811 0x163d
+#define CHIP_NUM_57811_MF 0x163e
+#define CHIP_NUM_57811_VF 0x163f
+#define CHIP_NUM_57840_OBS 0x168d
+#define CHIP_NUM_57840_OBS_MF 0x16ab
+#define CHIP_NUM_57840_4_10 0x16a1
+#define CHIP_NUM_57840_2_20 0x16a2
+#define CHIP_NUM_57840_MF 0x16a4
+#define CHIP_NUM_57840_VF 0x16ad
+
+#define CHIP_REV_SHIFT 12
+#define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT)
+#define CHIP_REV(sc) ((sc)->devinfo.chip_id & CHIP_REV_MASK)
+
+#define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT)
+#define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT)
+#define CHIP_REV_Cx (0x2 << CHIP_REV_SHIFT)
+
+#define CHIP_REV_IS_SLOW(sc) \
+ (CHIP_REV(sc) > 0x00005000)
+#define CHIP_REV_IS_FPGA(sc) \
+ (CHIP_REV_IS_SLOW(sc) && (CHIP_REV(sc) & 0x00001000))
+#define CHIP_REV_IS_EMUL(sc) \
+ (CHIP_REV_IS_SLOW(sc) && !(CHIP_REV(sc) & 0x00001000))
+#define CHIP_REV_IS_ASIC(sc) \
+ (!CHIP_REV_IS_SLOW(sc))
+
+#define CHIP_METAL(sc) ((sc->devinfo.chip_id) & 0x00000ff0)
+#define CHIP_BOND_ID(sc) ((sc->devinfo.chip_id) & 0x0000000f)
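+
+/*
+ * Worked example of the chip_id layout above (hypothetical value): for
+ * chip_id = 0x168e1014,
+ *	CHIP_NUM(sc)     = 0x168e  -> 57810
+ *	CHIP_REV(sc)     = 0x1000  -> CHIP_REV_Bx
+ *	CHIP_METAL(sc)   = 0x0010
+ *	CHIP_BOND_ID(sc) = 0x4
+ */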
+
+#define CHIP_IS_57711(sc) (CHIP_NUM(sc) == CHIP_NUM_57711)
+#define CHIP_IS_57711E(sc) (CHIP_NUM(sc) == CHIP_NUM_57711E)
+#define CHIP_IS_E1H(sc) ((CHIP_IS_57711(sc)) || \
+ (CHIP_IS_57711E(sc)))
+#define CHIP_IS_E1x(sc) CHIP_IS_E1H(sc)
+
+#define CHIP_IS_57712(sc) (CHIP_NUM(sc) == CHIP_NUM_57712)
+#define CHIP_IS_57712_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57712_MF)
+#define CHIP_IS_57712_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57712_VF)
+#define CHIP_IS_E2(sc) (CHIP_IS_57712(sc) || \
+ CHIP_IS_57712_MF(sc))
+
+#define CHIP_IS_57800(sc) (CHIP_NUM(sc) == CHIP_NUM_57800)
+#define CHIP_IS_57800_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57800_MF)
+#define CHIP_IS_57800_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57800_VF)
+#define CHIP_IS_57810(sc) (CHIP_NUM(sc) == CHIP_NUM_57810)
+#define CHIP_IS_57810_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57810_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57810_VF)
+#define CHIP_IS_57811(sc) (CHIP_NUM(sc) == CHIP_NUM_57811)
+#define CHIP_IS_57811_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57811_MF)
+#define CHIP_IS_57811_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57811_VF)
+#define CHIP_IS_57840(sc) ((CHIP_NUM(sc) == CHIP_NUM_57840_OBS) || \
+ (CHIP_NUM(sc) == CHIP_NUM_57840_4_10) || \
+ (CHIP_NUM(sc) == CHIP_NUM_57840_2_20))
+#define CHIP_IS_57840_MF(sc) ((CHIP_NUM(sc) == CHIP_NUM_57840_OBS_MF) || \
+ (CHIP_NUM(sc) == CHIP_NUM_57840_MF))
+#define CHIP_IS_57840_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57840_VF)
+
+#define CHIP_IS_E3(sc) (CHIP_IS_57800(sc) || \
+ CHIP_IS_57800_MF(sc) || \
+ CHIP_IS_57800_VF(sc) || \
+ CHIP_IS_57810(sc) || \
+ CHIP_IS_57810_MF(sc) || \
+ CHIP_IS_57810_VF(sc) || \
+ CHIP_IS_57811(sc) || \
+ CHIP_IS_57811_MF(sc) || \
+ CHIP_IS_57811_VF(sc) || \
+ CHIP_IS_57840(sc) || \
+ CHIP_IS_57840_MF(sc) || \
+ CHIP_IS_57840_VF(sc))
+#define CHIP_IS_E3A0(sc) (CHIP_IS_E3(sc) && \
+ (CHIP_REV(sc) == CHIP_REV_Ax))
+#define CHIP_IS_E3B0(sc) (CHIP_IS_E3(sc) && \
+ (CHIP_REV(sc) == CHIP_REV_Bx))
+
+#define USES_WARPCORE(sc) (CHIP_IS_E3(sc))
+#define CHIP_IS_E2E3(sc) (CHIP_IS_E2(sc) || \
+ CHIP_IS_E3(sc))
+
+#define CHIP_IS_MF_CAP(sc) (CHIP_IS_57711E(sc) || \
+ CHIP_IS_57712_MF(sc) || \
+ CHIP_IS_E3(sc))
+
+#define IS_VF(sc) ((sc)->flags & BNX2X_IS_VF_FLAG)
+#define IS_PF(sc) (!IS_VF(sc))
+
+/*
+ * This define is used in two main places:
+ * 1. In the early stages of nic_load, to decide whether to configure the
+ * Parser/Searcher for nic-only mode or for offload mode. Offload mode is
+ * configured if either the chip is E1x (where the NIC_MODE register is not
+ * applicable), or if cnic has already registered for this port (which means
+ * that the user wants storage services).
+ * 2. During cnic-related load, to know whether offload mode is already
+ * configured in the HW or still needs to be configured. Since the transition
+ * from nic-mode to offload-mode in HW causes traffic corruption, nic-mode is
+ * configured only on ports on which storage services were never requested.
+ */
+#define CONFIGURE_NIC_MODE(sc) (!CHIP_IS_E1x(sc) && !CNIC_ENABLED(sc))
+
+ uint8_t chip_port_mode;
+#define CHIP_4_PORT_MODE 0x0
+#define CHIP_2_PORT_MODE 0x1
+#define CHIP_PORT_MODE_NONE 0x2
+#define CHIP_PORT_MODE(sc) ((sc)->devinfo.chip_port_mode)
+#define CHIP_IS_MODE_4_PORT(sc) (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE)
+
+ uint8_t int_block;
+#define INT_BLOCK_HC 0
+#define INT_BLOCK_IGU 1
+#define INT_BLOCK_MODE_NORMAL 0
+#define INT_BLOCK_MODE_BW_COMP 2
+#define CHIP_INT_MODE_IS_NBC(sc) \
+ (!CHIP_IS_E1x(sc) && \
+ !((sc)->devinfo.int_block & INT_BLOCK_MODE_BW_COMP))
+#define CHIP_INT_MODE_IS_BC(sc) (!CHIP_INT_MODE_IS_NBC(sc))
+
+ uint32_t shmem_base;
+ uint32_t shmem2_base;
+ uint32_t bc_ver;
+ char bc_ver_str[32];
+ uint32_t mf_cfg_base; /* bootcode shmem address in BAR memory */
+ struct bnx2x_mf_info mf_info;
+
+ uint32_t flash_size;
+#define NVRAM_1MB_SIZE 0x20000
+#define NVRAM_TIMEOUT_COUNT 30000
+#define NVRAM_PAGE_SIZE 256
+
+ /* PCIe capability information */
+ uint32_t pcie_cap_flags;
+#define BNX2X_PM_CAPABLE_FLAG 0x00000001
+#define BNX2X_PCIE_CAPABLE_FLAG 0x00000002
+#define BNX2X_MSI_CAPABLE_FLAG 0x00000004
+#define BNX2X_MSIX_CAPABLE_FLAG 0x00000008
+ uint16_t pcie_pm_cap_reg;
+ uint16_t pcie_link_width;
+ uint16_t pcie_link_speed;
+ uint16_t pcie_msi_cap_reg;
+ uint16_t pcie_msix_cap_reg;
+
+ /* device configuration read from bootcode shared memory */
+ uint32_t hw_config;
+ uint32_t hw_config2;
+}; /* struct bnx2x_devinfo */
+
+struct bnx2x_sp_objs {
+ struct ecore_vlan_mac_obj mac_obj; /* MACs object */
+ struct ecore_queue_sp_obj q_obj; /* Queue State object */
+}; /* struct bnx2x_sp_objs */
+
+/*
+ * Data that will be used to create a link report message. We will keep the
+ * data used for the last link report in order to prevent reporting the same
+ * link parameters twice.
+ */
+struct bnx2x_link_report_data {
+ uint16_t line_speed; /* Effective line speed */
+ unsigned long link_report_flags; /* BNX2X_LINK_REPORT_XXX flags */
+};
+
+enum {
+ BNX2X_LINK_REPORT_FULL_DUPLEX,
+ BNX2X_LINK_REPORT_LINK_DOWN,
+ BNX2X_LINK_REPORT_RX_FC_ON,
+ BNX2X_LINK_REPORT_TX_FC_ON
+};
+
+#define BNX2X_RX_CHAIN_PAGE_SZ BNX2X_PAGE_SIZE
+
+struct bnx2x_pci_cap {
+ struct bnx2x_pci_cap *next;
+ uint16_t id;
+ uint16_t type;
+ uint16_t addr;
+};
+
+struct bnx2x_vfdb;
+
+/* Top level device private data structure. */
+struct bnx2x_softc {
+
+ void **rx_queues;
+ void **tx_queues;
+ uint32_t max_tx_queues;
+ uint32_t max_rx_queues;
+ const struct rte_pci_device *pci_dev;
+ uint32_t pci_val;
+ struct bnx2x_pci_cap *pci_caps;
+#define BNX2X_INTRS_POLL_PERIOD 1
+
+ void *firmware;
+ uint64_t fw_len;
+
+ /* MAC address operations */
+ struct bnx2x_mac_ops mac_ops;
+
+ /* structures for VF mbox/response/bulletin */
+ struct bnx2x_vf_mbx_msg *vf2pf_mbox;
+ struct bnx2x_dma vf2pf_mbox_mapping;
+ struct vf_acquire_resp_tlv acquire_resp;
+ struct bnx2x_vf_bulletin *pf2vf_bulletin;
+ struct bnx2x_dma pf2vf_bulletin_mapping;
+ struct bnx2x_vf_bulletin old_bulletin;
+
+ int media;
+
+ int state; /* device state */
+#define BNX2X_STATE_CLOSED 0x0000
+#define BNX2X_STATE_OPENING_WAITING_LOAD 0x1000
+#define BNX2X_STATE_OPENING_WAITING_PORT 0x2000
+#define BNX2X_STATE_OPEN 0x3000
+#define BNX2X_STATE_CLOSING_WAITING_HALT 0x4000
+#define BNX2X_STATE_CLOSING_WAITING_DELETE 0x5000
+#define BNX2X_STATE_CLOSING_WAITING_UNLOAD 0x6000
+#define BNX2X_STATE_DISABLED 0xD000
+#define BNX2X_STATE_DIAG 0xE000
+#define BNX2X_STATE_ERROR 0xF000
+
+ int flags;
+#define BNX2X_ONE_PORT_FLAG 0x1
+#define BNX2X_NO_FCOE_FLAG 0x2
+#define BNX2X_NO_WOL_FLAG 0x4
+#define BNX2X_NO_MCP_FLAG 0x8
+#define BNX2X_NO_ISCSI_OOO_FLAG 0x10
+#define BNX2X_NO_ISCSI_FLAG 0x20
+#define BNX2X_MF_FUNC_DIS 0x40
+#define BNX2X_TX_SWITCHING 0x80
+#define BNX2X_IS_VF_FLAG 0x100
+
+#define BNX2X_ONE_PORT(sc) (sc->flags & BNX2X_ONE_PORT_FLAG)
+#define BNX2X_NOFCOE(sc) (sc->flags & BNX2X_NO_FCOE_FLAG)
+#define BNX2X_NOMCP(sc) (sc->flags & BNX2X_NO_MCP_FLAG)
+
+#define MAX_BARS 5
+ struct bnx2x_bar bar[MAX_BARS]; /* map BARs 0, 2, 4 */
+
+ uint16_t doorbell_size;
+
+ /* periodic timer callout */
+#define PERIODIC_STOP 0
+#define PERIODIC_GO 1
+ volatile unsigned long periodic_flags;
+
+ struct bnx2x_fastpath fp[MAX_RSS_CHAINS];
+ struct bnx2x_sp_objs sp_objs[MAX_RSS_CHAINS];
+
+ uint8_t unit; /* driver instance number */
+
+ int pcie_bus; /* PCIe bus number */
+ int pcie_device; /* PCIe device/slot number */
+ int pcie_func; /* PCIe function number */
+
+ uint8_t pfunc_rel; /* function relative */
+ uint8_t pfunc_abs; /* function absolute */
+ uint8_t path_id; /* function absolute */
+#define SC_PATH(sc) (sc->path_id)
+#define SC_PORT(sc) (sc->pfunc_rel & 1)
+#define SC_FUNC(sc) (sc->pfunc_rel)
+#define SC_ABS_FUNC(sc) (sc->pfunc_abs)
+#define SC_VN(sc) (sc->pfunc_rel >> 1)
+#define SC_L_ID(sc) (SC_VN(sc) << 2)
+#define PORT_ID(sc) SC_PORT(sc)
+#define PATH_ID(sc) SC_PATH(sc)
+#define VNIC_ID(sc) SC_VN(sc)
+#define FUNC_ID(sc) SC_FUNC(sc)
+#define ABS_FUNC_ID(sc) SC_ABS_FUNC(sc)
+#define SC_FW_MB_IDX_VN(sc, vn) \
+ (SC_PORT(sc) + (vn) * \
+ ((CHIP_IS_E1x(sc) || (CHIP_IS_MODE_4_PORT(sc))) ? 2 : 1))
+#define SC_FW_MB_IDX(sc) SC_FW_MB_IDX_VN(sc, SC_VN(sc))
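+
+/*
+ * Worked example (hypothetical value): with pfunc_rel = 5,
+ * SC_PORT(sc) = 5 & 1 = 1 and SC_VN(sc) = 5 >> 1 = 2, i.e. relative
+ * function 5 maps to vnic 2 on port 1, and SC_L_ID(sc) = 2 << 2 = 8.
+ */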
+
+ int if_capen; /* enabled interface capabilities */
+
+ struct bnx2x_devinfo devinfo;
+ char fw_ver_str[32];
+ char mf_mode_str[32];
+ char pci_link_str[32];
+
+ struct iro *iro_array;
+
+ int dmae_ready;
+#define DMAE_READY(sc) (sc->dmae_ready)
+
+ struct ecore_credit_pool_obj vlans_pool;
+ struct ecore_credit_pool_obj macs_pool;
+ struct ecore_rx_mode_obj rx_mode_obj;
+ struct ecore_mcast_obj mcast_obj;
+ struct ecore_rss_config_obj rss_conf_obj;
+ struct ecore_func_sp_obj func_obj;
+
+ uint16_t fw_seq;
+ uint16_t fw_drv_pulse_wr_seq;
+ uint32_t func_stx;
+
+ struct elink_params link_params;
+ struct elink_vars link_vars;
+ uint32_t link_cnt;
+ struct bnx2x_link_report_data last_reported_link;
+ char mac_addr_str[32];
+
+ uint32_t tx_ring_size;
+ uint32_t rx_ring_size;
+ int wol;
+
+ int is_leader;
+ int recovery_state;
+#define BNX2X_RECOVERY_DONE 1
+#define BNX2X_RECOVERY_INIT 2
+#define BNX2X_RECOVERY_WAIT 3
+#define BNX2X_RECOVERY_FAILED 4
+#define BNX2X_RECOVERY_NIC_LOADING 5
+
+ uint32_t rx_mode;
+#define BNX2X_RX_MODE_NONE 0
+#define BNX2X_RX_MODE_NORMAL 1
+#define BNX2X_RX_MODE_ALLMULTI 2
+#define BNX2X_RX_MODE_PROMISC 3
+#define BNX2X_MAX_MULTICAST 64
+
+ struct bnx2x_port port;
+
+ struct cmng_init cmng;
+
+ /* user configs */
+ uint8_t num_queues;
+ int hc_rx_ticks;
+ int hc_tx_ticks;
+ uint32_t rx_budget;
+ int interrupt_mode;
+#define INTR_MODE_INTX 0
+#define INTR_MODE_MSI 1
+#define INTR_MODE_MSIX 2
+#define INTR_MODE_SINGLE_MSIX 3
+ int udp_rss;
+
+ uint8_t igu_dsb_id;
+ uint8_t igu_base_sb;
+ uint8_t igu_sb_cnt;
+ uint32_t igu_base_addr;
+ uint8_t base_fw_ndsb;
+#define DEF_SB_IGU_ID 16
+#define DEF_SB_ID HC_SP_SB_ID
+
+ /* default status block */
+ struct bnx2x_dma def_sb_dma;
+ struct host_sp_status_block *def_sb;
+ uint16_t def_idx;
+ uint16_t def_att_idx;
+ uint32_t attn_state;
+ struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
+
+ /* general SP events - stats query, cfc delete, etc */
+#define HC_SP_INDEX_ETH_DEF_CONS 3
+ /* EQ completions */
+#define HC_SP_INDEX_EQ_CONS 7
+ /* FCoE L2 connection completions */
+#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6
+#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4
+ /* iSCSI L2 */
+#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
+#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
+
+ /* event queue */
+ struct bnx2x_dma eq_dma;
+ union event_ring_elem *eq;
+ uint16_t eq_prod;
+ uint16_t eq_cons;
+ uint16_t *eq_cons_sb;
+#define NUM_EQ_PAGES 1 /* must be a power of 2 */
+#define EQ_DESC_CNT_PAGE (BNX2X_PAGE_SIZE / sizeof(union event_ring_elem))
+#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1)
+#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
+#define EQ_DESC_MASK (NUM_EQ_DESC - 1)
+#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
+ /* depends on EQ_DESC_CNT_PAGE being a power of 2 */
+#define NEXT_EQ_IDX(x) \
+ ((((x) & EQ_DESC_MAX_PAGE) == (EQ_DESC_MAX_PAGE - 1)) ? \
+ ((x) + 2) : ((x) + 1))
+ /* depends on the above and on NUM_EQ_PAGES being a power of 2 */
+#define EQ_DESC(x) ((x) & EQ_DESC_MASK)
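+
+/*
+ * Worked example of the EQ indexing above, assuming BNX2X_PAGE_SIZE is 4KB
+ * and sizeof(union event_ring_elem) is 64 bytes (both are assumptions):
+ * EQ_DESC_CNT_PAGE = 64 and EQ_DESC_MAX_PAGE = 63, so the last element of
+ * each page is reserved as a next-page pointer. NEXT_EQ_IDX(62) therefore
+ * returns 64, skipping descriptor 63, while NEXT_EQ_IDX(10) returns 11.
+ */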
+
+ /* slow path */
+ struct bnx2x_dma sp_dma;
+ struct bnx2x_slowpath *sp;
+ unsigned long sp_state;
+
+ /* slow path queue */
+ struct bnx2x_dma spq_dma;
+ struct eth_spe *spq;
+#define SP_DESC_CNT (BNX2X_PAGE_SIZE / sizeof(struct eth_spe))
+#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
+#define MAX_SPQ_PENDING 8
+
+ uint16_t spq_prod_idx;
+ struct eth_spe *spq_prod_bd;
+ struct eth_spe *spq_last_bd;
+ uint16_t *dsb_sp_prod;
+
+ volatile unsigned long eq_spq_left; /* COMMON_xxx ramrod credit */
+ volatile unsigned long cq_spq_left; /* ETH_xxx ramrod credit */
+
+ /* fw decompression buffer */
+ struct bnx2x_dma gz_buf_dma;
+ void *gz_buf;
+ uint32_t gz_outlen;
+#define GUNZIP_BUF(sc) (sc->gz_buf)
+#define GUNZIP_OUTLEN(sc) (sc->gz_outlen)
+#define GUNZIP_PHYS(sc) (phys_addr_t)(sc->gz_buf_dma.paddr)
+#define FW_BUF_SIZE 0x40000
+
+ struct raw_op *init_ops;
+ uint16_t *init_ops_offsets; /* init block offsets inside init_ops */
+ uint32_t *init_data; /* data blob, 32 bit granularity */
+ uint32_t init_mode_flags;
+#define INIT_MODE_FLAGS(sc) (sc->init_mode_flags)
+ /* PRAM blobs - raw data */
+ const uint8_t *tsem_int_table_data;
+ const uint8_t *tsem_pram_data;
+ const uint8_t *usem_int_table_data;
+ const uint8_t *usem_pram_data;
+ const uint8_t *xsem_int_table_data;
+ const uint8_t *xsem_pram_data;
+ const uint8_t *csem_int_table_data;
+ const uint8_t *csem_pram_data;
+#define INIT_OPS(sc) (sc->init_ops)
+#define INIT_OPS_OFFSETS(sc) (sc->init_ops_offsets)
+#define INIT_DATA(sc) (sc->init_data)
+#define INIT_TSEM_INT_TABLE_DATA(sc) (sc->tsem_int_table_data)
+#define INIT_TSEM_PRAM_DATA(sc) (sc->tsem_pram_data)
+#define INIT_USEM_INT_TABLE_DATA(sc) (sc->usem_int_table_data)
+#define INIT_USEM_PRAM_DATA(sc) (sc->usem_pram_data)
+#define INIT_XSEM_INT_TABLE_DATA(sc) (sc->xsem_int_table_data)
+#define INIT_XSEM_PRAM_DATA(sc) (sc->xsem_pram_data)
+#define INIT_CSEM_INT_TABLE_DATA(sc) (sc->csem_int_table_data)
+#define INIT_CSEM_PRAM_DATA(sc) (sc->csem_pram_data)
+
+#define PHY_FW_VER_LEN 20
+ char fw_ver[32];
+
+ /* ILT
+ * For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+ * context size we need 8 ILT entries.
+ */
+#define ILT_MAX_L2_LINES 8
+ struct hw_context context[ILT_MAX_L2_LINES];
+ struct ecore_ilt *ilt;
+#define ILT_MAX_LINES 256
+
+ /* max supported number of RSS queues: IGU SBs minus one for CNIC */
+#define BNX2X_MAX_RSS_COUNT(sc) ((sc)->igu_sb_cnt - CNIC_SUPPORT(sc))
+ /* max CID count: Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI */
+#define BNX2X_L2_MAX_CID(sc) \
+ (BNX2X_MAX_RSS_COUNT(sc) * ECORE_MULTI_TX_COS + 2 * CNIC_SUPPORT(sc))
+#define BNX2X_L2_CID_COUNT(sc) \
+ (BNX2X_NUM_ETH_QUEUES(sc) * ECORE_MULTI_TX_COS + 2 * CNIC_SUPPORT(sc))
+#define L2_ILT_LINES(sc) \
+ (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(sc), ILT_PAGE_CIDS))
+
+ int qm_cid_count;
+
+ uint8_t dropless_fc;
+
+ /* total number of FW statistics requests */
+ uint8_t fw_stats_num;
+ /*
+ * This is a memory buffer that will contain both statistics ramrod
+ * request and data.
+ */
+ struct bnx2x_dma fw_stats_dma;
+ /*
+ * FW statistics request shortcut (points at the beginning of fw_stats
+ * buffer).
+ */
+ int fw_stats_req_size;
+ struct bnx2x_fw_stats_req *fw_stats_req;
+ phys_addr_t fw_stats_req_mapping;
+ /*
+ * FW statistics data shortcut (points at the beginning of fw_stats
+ * buffer + fw_stats_req_size).
+ */
+ int fw_stats_data_size;
+ struct bnx2x_fw_stats_data *fw_stats_data;
+ phys_addr_t fw_stats_data_mapping;
+
+ /* tracking a pending STAT_QUERY ramrod */
+ uint16_t stats_pending;
+ /* number of completed statistics ramrods */
+ uint16_t stats_comp;
+ uint16_t stats_counter;
+ uint8_t stats_init;
+ int stats_state;
+
+ struct bnx2x_eth_stats eth_stats;
+ struct host_func_stats func_stats;
+ struct bnx2x_eth_stats_old eth_stats_old;
+ struct bnx2x_net_stats_old net_stats_old;
+ struct bnx2x_fw_port_stats_old fw_stats_old;
+
+ struct dmae_command stats_dmae; /* used by dmae command loader */
+ int executer_idx;
+
+ int mtu;
+
+ /* DCB support on/off */
+ int dcb_state;
+#define BNX2X_DCB_STATE_OFF 0
+#define BNX2X_DCB_STATE_ON 1
+ /* DCBX engine mode */
+ int dcbx_enabled;
+#define BNX2X_DCBX_ENABLED_OFF 0
+#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1
+#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2
+#define BNX2X_DCBX_ENABLED_INVALID -1
+
+ uint8_t cnic_support;
+ uint8_t cnic_enabled;
+ uint8_t cnic_loaded;
+#define CNIC_SUPPORT(sc) 0 /* ((sc)->cnic_support) */
+#define CNIC_ENABLED(sc) 0 /* ((sc)->cnic_enabled) */
+#define CNIC_LOADED(sc) 0 /* ((sc)->cnic_loaded) */
+
+ /* multiple tx classes of service */
+ uint8_t max_cos;
+#define BNX2X_MAX_PRIORITY 8
+ /* priority to cos mapping */
+ uint8_t prio_to_cos[BNX2X_MAX_PRIORITY];
+
+ int panic;
+}; /* struct bnx2x_softc */
+
+/* IOCTL sub-commands for edebug and firmware upgrade */
+#define BNX2X_IOC_RD_NVRAM 1
+#define BNX2X_IOC_WR_NVRAM 2
+#define BNX2X_IOC_STATS_SHOW_NUM 3
+#define BNX2X_IOC_STATS_SHOW_STR 4
+#define BNX2X_IOC_STATS_SHOW_CNT 5
+
+struct bnx2x_nvram_data {
+ uint32_t op; /* ioctl sub-command */
+ uint32_t offset;
+ uint32_t len;
+ uint32_t value[1]; /* variable */
+};
+
+union bnx2x_stats_show_data {
+ uint32_t op; /* ioctl sub-command */
+
+ struct {
+ uint32_t num; /* return number of stats */
+ uint32_t len; /* length of each string item */
+ } desc;
+
+ /* variable length... */
+ char str[1]; /* holds names of desc.num stats, each desc.len in length */
+
+ /* variable length... */
+ uint64_t stats[1]; /* holds all stats */
+};
+
+/* function init flags */
+#define FUNC_FLG_RSS 0x0001
+#define FUNC_FLG_STATS 0x0002
+/* FUNC_FLG_UNMATCHED 0x0004 */
+#define FUNC_FLG_SPQ 0x0010
+#define FUNC_FLG_LEADING 0x0020 /* PF only */
+
+struct bnx2x_func_init_params {
+ phys_addr_t fw_stat_map; /* (dma) valid if FUNC_FLG_STATS */
+ phys_addr_t spq_map; /* (dma) valid if FUNC_FLG_SPQ */
+ uint16_t func_flgs;
+ uint16_t func_id; /* abs function id */
+ uint16_t pf_id;
+ uint16_t spq_prod; /* valid if FUNC_FLG_SPQ */
+};
+
+/* memory resources reside at BARs 0, 2, 4 */
+/* Run `pciconf -lb` to see mappings */
+#define BAR0 0
+#define BAR1 2
+#define BAR2 4
+
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
+uint8_t bnx2x_reg_read8(struct bnx2x_softc *sc, size_t offset);
+uint16_t bnx2x_reg_read16(struct bnx2x_softc *sc, size_t offset);
+uint32_t bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset);
+
+void bnx2x_reg_write8(struct bnx2x_softc *sc, size_t offset, uint8_t val);
+void bnx2x_reg_write16(struct bnx2x_softc *sc, size_t offset, uint16_t val);
+void bnx2x_reg_write32(struct bnx2x_softc *sc, size_t offset, uint32_t val);
+#else
+#define bnx2x_reg_write8(sc, offset, val)\
+ *((volatile uint8_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val
+
+#define bnx2x_reg_write16(sc, offset, val)\
+ *((volatile uint16_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val
+
+#define bnx2x_reg_write32(sc, offset, val)\
+ *((volatile uint32_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val
+
+#define bnx2x_reg_read8(sc, offset)\
+ (*((volatile uint8_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)))
+
+#define bnx2x_reg_read16(sc, offset)\
+ (*((volatile uint16_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)))
+
+#define bnx2x_reg_read32(sc, offset)\
+ (*((volatile uint32_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)))
+#endif
+
+#define REG_ADDR(sc, offset) (((uint64_t)sc->bar[BAR0].base_addr) + (offset))
+
+#define REG_RD8(sc, offset) bnx2x_reg_read8(sc, (offset))
+#define REG_RD16(sc, offset) bnx2x_reg_read16(sc, (offset))
+#define REG_RD32(sc, offset) bnx2x_reg_read32(sc, (offset))
+
+#define REG_WR8(sc, offset, val) bnx2x_reg_write8(sc, (offset), val)
+#define REG_WR16(sc, offset, val) bnx2x_reg_write16(sc, (offset), val)
+#define REG_WR32(sc, offset, val) bnx2x_reg_write32(sc, (offset), val)
+
+#define REG_RD(sc, offset) REG_RD32(sc, offset)
+#define REG_WR(sc, offset, val) REG_WR32(sc, offset, val)
+
+#define BNX2X_SP(sc, var) (&(sc)->sp->var)
+#define BNX2X_SP_MAPPING(sc, var) \
+ (sc->sp_dma.paddr + offsetof(struct bnx2x_slowpath, var))
+
+#define BNX2X_FP(sc, nr, var) ((sc)->fp[(nr)].var)
+#define BNX2X_SP_OBJ(sc, fp) ((sc)->sp_objs[(fp)->index])
+
+#define bnx2x_fp(sc, nr, var) ((sc)->fp[nr].var)
+
+#define REG_RD_DMAE(sc, offset, valp, len32) \
+ do { \
+ (void)bnx2x_read_dmae(sc, offset, len32); \
+ (void)rte_memcpy(valp, BNX2X_SP(sc, wb_data[0]), (len32) * 4); \
+ } while (0)
+
+#define REG_WR_DMAE(sc, offset, valp, len32) \
+ do { \
+ (void)rte_memcpy(BNX2X_SP(sc, wb_data[0]), valp, (len32) * 4); \
+ (void)bnx2x_write_dmae(sc, BNX2X_SP_MAPPING(sc, wb_data), offset, len32); \
+ } while (0)
+
+#define REG_WR_DMAE_LEN(sc, offset, valp, len32) \
+ REG_WR_DMAE(sc, offset, valp, len32)
+
+#define REG_RD_DMAE_LEN(sc, offset, valp, len32) \
+ REG_RD_DMAE(sc, offset, valp, len32)
+
+#define VIRT_WR_DMAE_LEN(sc, data, addr, len32, le32_swap) \
+ do { \
+ /* if (le32_swap) { */ \
+ /* PMD_PWARN_LOG(sc, "VIRT_WR_DMAE_LEN with le32_swap=1"); */ \
+ /* } */ \
+ rte_memcpy(GUNZIP_BUF(sc), data, len32 * 4); \
+ ecore_write_big_buf_wb(sc, addr, len32); \
+ } while (0)
+
+#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
+#define BNX2X_DB_SHIFT 7 /* 128 bytes */
+#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
+#error "Minimum DB doorbell stride is 8"
+#endif
+#define DPM_TRIGGER_TYPE 0x40
+
+/* Doorbell macro */
+#define BNX2X_DB_WRITE(db_bar, val) \
+ *((volatile uint32_t *)(db_bar)) = (val)
+
+#define BNX2X_DB_READ(db_bar) \
+ *((volatile uint32_t *)(db_bar))
+
+#define DOORBELL_ADDR(sc, offset) \
+ (volatile uint32_t *)(((char *)(sc)->bar[BAR1].base_addr + (offset)))
+
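+/*
+ * PF doorbells ring through the DPM trigger window (DPM_TRIGGER_TYPE bytes
+ * into the per-CID doorbell page), while VF doorbells write the base of the
+ * page directly.
+ */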
+#define DOORBELL(sc, cid, val) \
+ if (IS_PF(sc)) \
+ BNX2X_DB_WRITE((DOORBELL_ADDR(sc, sc->doorbell_size * (cid) + DPM_TRIGGER_TYPE)), (val)); \
+ else \
+ BNX2X_DB_WRITE((DOORBELL_ADDR(sc, sc->doorbell_size * (cid))), (val)) \
+
+#define SHMEM_ADDR(sc, field) \
+ (sc->devinfo.shmem_base + offsetof(struct shmem_region, field))
+#define SHMEM_RD(sc, field) REG_RD(sc, SHMEM_ADDR(sc, field))
+#define SHMEM_RD16(sc, field) REG_RD16(sc, SHMEM_ADDR(sc, field))
+#define SHMEM_WR(sc, field, val) REG_WR(sc, SHMEM_ADDR(sc, field), val)
+
+#define SHMEM2_ADDR(sc, field) \
+ (sc->devinfo.shmem2_base + offsetof(struct shmem2_region, field))
+#define SHMEM2_HAS(sc, field) \
+ (sc->devinfo.shmem2_base && (REG_RD(sc, SHMEM2_ADDR(sc, size)) > \
+ offsetof(struct shmem2_region, field)))
+#define SHMEM2_RD(sc, field) REG_RD(sc, SHMEM2_ADDR(sc, field))
+#define SHMEM2_WR(sc, field, val) REG_WR(sc, SHMEM2_ADDR(sc, field), val)
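+
+/*
+ * SHMEM2_HAS() guards reads of newer shmem2 fields against older bootcode:
+ * the region advertises its own length in its size field, so a field is
+ * only valid when its offset falls inside that length. A hedged usage
+ * sketch with a hypothetical field name:
+ *
+ *	if (SHMEM2_HAS(sc, new_feature_cfg))
+ *		val = SHMEM2_RD(sc, new_feature_cfg);
+ */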
+
+#define MFCFG_ADDR(sc, field) \
+ (sc->devinfo.mf_cfg_base + offsetof(struct mf_cfg, field))
+#define MFCFG_RD(sc, field) REG_RD(sc, MFCFG_ADDR(sc, field))
+#define MFCFG_RD16(sc, field) REG_RD16(sc, MFCFG_ADDR(sc, field))
+#define MFCFG_WR(sc, field, val) REG_WR(sc, MFCFG_ADDR(sc, field), val)
+
+/* DMAE command defines */
+
+#define DMAE_TIMEOUT -1
+#define DMAE_PCI_ERROR -2 /* E2 and onward */
+#define DMAE_NOT_RDY -3
+#define DMAE_PCI_ERR_FLAG 0x80000000
+
+#define DMAE_SRC_PCI 0
+#define DMAE_SRC_GRC 1
+
+#define DMAE_DST_NONE 0
+#define DMAE_DST_PCI 1
+#define DMAE_DST_GRC 2
+
+#define DMAE_COMP_PCI 0
+#define DMAE_COMP_GRC 1
+
+#define DMAE_COMP_REGULAR 0
+#define DMAE_COM_SET_ERR 1
+
+#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << DMAE_COMMAND_SRC_SHIFT)
+#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << DMAE_COMMAND_SRC_SHIFT)
+#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << DMAE_COMMAND_DST_SHIFT)
+#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << DMAE_COMMAND_DST_SHIFT)
+
+#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << DMAE_COMMAND_C_DST_SHIFT)
+#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << DMAE_COMMAND_C_DST_SHIFT)
+
+#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT)
+
+#define DMAE_CMD_PORT_0 0
+#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT
+
+#define DMAE_SRC_PF 0
+#define DMAE_SRC_VF 1
+
+#define DMAE_DST_PF 0
+#define DMAE_DST_VF 1
+
+#define DMAE_C_SRC 0
+#define DMAE_C_DST 1
+
+#define DMAE_LEN32_RD_MAX 0x80
+#define DMAE_LEN32_WR_MAX(sc) 0x2000
+
+#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and beyond, upper bit indicates error */
+
+#define MAX_DMAE_C_PER_PORT 8
+#define INIT_DMAE_C(sc) ((SC_PORT(sc) * MAX_DMAE_C_PER_PORT) + SC_VN(sc))
+#define PMF_DMAE_C(sc) ((SC_PORT(sc) * MAX_DMAE_C_PER_PORT) + E1HVN_MAX)
+
+static const uint32_t dmae_reg_go_c[] = {
+ DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
+ DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
+ DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+ DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+};
+
+#define ATTN_NIG_FOR_FUNC (1L << 8)
+#define ATTN_SW_TIMER_4_FUNC (1L << 9)
+#define GPIO_2_FUNC (1L << 10)
+#define GPIO_3_FUNC (1L << 11)
+#define GPIO_4_FUNC (1L << 12)
+#define ATTN_GENERAL_ATTN_1 (1L << 13)
+#define ATTN_GENERAL_ATTN_2 (1L << 14)
+#define ATTN_GENERAL_ATTN_3 (1L << 15)
+#define ATTN_GENERAL_ATTN_4 (1L << 13)
+#define ATTN_GENERAL_ATTN_5 (1L << 14)
+#define ATTN_GENERAL_ATTN_6 (1L << 15)
+#define ATTN_HARD_WIRED_MASK 0xff00
+#define ATTENTION_ID 4
+
+#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
+
+#define MAX_IGU_ATTN_ACK_TO 100
+
+#define STORM_ASSERT_ARRAY_SIZE 50
+
+#define BNX2X_PMF_LINK_ASSERT(sc) \
+ GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + SC_FUNC(sc))
+
+#define BNX2X_MC_ASSERT_BITS \
+ (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+ GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+ GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+ GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))
+
+#define BNX2X_MCP_ASSERT \
+ GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)
+
+#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC)
+#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
+
+#define MULTI_MASK 0x7f
+
+#define PFS_PER_PORT(sc) \
+ ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4)
+#define SC_MAX_VN_NUM(sc) PFS_PER_PORT(sc)
+
+#define FIRST_ABS_FUNC_IN_PORT(sc) \
+ ((CHIP_PORT_MODE(sc) == CHIP_PORT_MODE_NONE) ? \
+ PORT_ID(sc) : (PATH_ID(sc) + (2 * PORT_ID(sc))))
+
+#define FOREACH_ABS_FUNC_IN_PORT(sc, i) \
+ for ((i) = FIRST_ABS_FUNC_IN_PORT(sc); \
+ (i) < MAX_FUNC_NUM; \
+ (i) += (MAX_FUNC_NUM / PFS_PER_PORT(sc)))
+
+#define BNX2X_SWCID_SHIFT 17
+#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1)
+
+#define SW_CID(x) (le32toh(x) & BNX2X_SWCID_MASK)
+#define CQE_CMD(x) (le32toh(x) >> COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
+
+#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
+#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG)
+#define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG)
+#define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD)
+#define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH)
+
+/* must be used on a CID before placing it on a HW ring */
+#define HW_CID(sc, x) \
+ ((SC_PORT(sc) << 23) | (SC_VN(sc) << BNX2X_SWCID_SHIFT) | (x))
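+
+/*
+ * Worked example (hypothetical values): for SC_PORT(sc) = 1, SC_VN(sc) = 2
+ * and software CID 5, HW_CID(sc, 5) = (1 << 23) | (2 << 17) | 5 = 0x840005.
+ */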
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define SPEED_10000 10000
+
+#define PCI_PM_D0 1
+#define PCI_PM_D3hot 2
+
+int bnx2x_test_bit(int nr, volatile unsigned long * addr);
+void bnx2x_set_bit(unsigned int nr, volatile unsigned long * addr);
+void bnx2x_clear_bit(int nr, volatile unsigned long * addr);
+int bnx2x_test_and_clear_bit(int nr, volatile unsigned long * addr);
+int bnx2x_cmpxchg(volatile int *addr, int old, int new);
+
+int bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size,
+ struct bnx2x_dma *dma, const char *msg, uint32_t align);
+
+uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type);
+uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode);
+uint32_t bnx2x_dmae_opcode(struct bnx2x_softc *sc, uint8_t src_type,
+ uint8_t dst_type, uint8_t with_comp,
+ uint8_t comp_type);
+void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx);
+void bnx2x_read_dmae(struct bnx2x_softc *sc, uint32_t src_addr, uint32_t len32);
+void bnx2x_write_dmae(struct bnx2x_softc *sc, phys_addr_t dma_addr,
+ uint32_t dst_addr, uint32_t len32);
+void bnx2x_set_ctx_validation(struct bnx2x_softc *sc, struct eth_context *cxt,
+ uint32_t cid);
+void bnx2x_update_coalesce_sb_index(struct bnx2x_softc *sc, uint8_t fw_sb_id,
+ uint8_t sb_index, uint8_t disable,
+ uint16_t usec);
+
+int bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid,
+ uint32_t data_hi, uint32_t data_lo, int cmd_type);
+
+void ecore_init_e1h_firmware(struct bnx2x_softc *sc);
+void ecore_init_e2_firmware(struct bnx2x_softc *sc);
+
+void ecore_storm_memset_struct(struct bnx2x_softc *sc, uint32_t addr,
+ size_t size, uint32_t *data);
+
+#define CATC_TRIGGER(sc, data) REG_WR((sc), 0x2000, (data));
+#define CATC_TRIGGER_START(sc) CATC_TRIGGER((sc), 0xcafecafe)
+
+#define BNX2X_MAC_FMT "%pM"
+#define BNX2X_MAC_PRN_LIST(mac) (mac)
+
+/***********/
+/* INLINES */
+/***********/
+
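+/*
+ * Poll a register until it reads back the expected value: re-read every
+ * 'wait' milliseconds for at most 'ms' milliseconds and return the last
+ * value read, so the caller can detect a timeout by comparing the result
+ * against 'expected'.
+ */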
+static inline uint32_t
+reg_poll(struct bnx2x_softc *sc, uint32_t reg, uint32_t expected, int ms, int wait)
+{
+ uint32_t val;
+ do {
+ val = REG_RD(sc, reg);
+ if (val == expected) {
+ break;
+ }
+ ms -= wait;
+ DELAY(wait * 1000);
+ } while (ms > 0);
+
+ return val;
+}
+
+static inline void
+bnx2x_update_fp_sb_idx(struct bnx2x_fastpath *fp)
+{
+ mb(); /* status block is written to by the chip */
+ fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
+}
+
+static inline void
+bnx2x_igu_ack_sb_gen(struct bnx2x_softc *sc, uint8_t segment,
+ uint16_t index, uint8_t op, uint8_t update, uint32_t igu_addr)
+{
+ struct igu_regular cmd_data = {0};
+
+ cmd_data.sb_id_and_flags =
+ ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
+ (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+ (update << IGU_REGULAR_BUPDATE_SHIFT) |
+ (op << IGU_REGULAR_ENABLE_INT_SHIFT));
+
+ REG_WR(sc, igu_addr, cmd_data.sb_id_and_flags);
+
+ /* Make sure that ACK is written */
+ mb();
+}
+
+static inline void
+bnx2x_hc_ack_sb(struct bnx2x_softc *sc, uint8_t sb_id, uint8_t storm,
+ uint16_t index, uint8_t op, uint8_t update)
+{
+ uint32_t hc_addr = (HC_REG_COMMAND_REG + SC_PORT(sc) * 32 +
+ COMMAND_REG_INT_ACK);
+ union igu_ack_register igu_ack;
+
+ igu_ack.sb.status_block_index = index;
+ igu_ack.sb.sb_id_and_flags =
+ ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
+ (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
+ (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
+ (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
+
+ REG_WR(sc, hc_addr, igu_ack.raw_data);
+
+ /* Make sure that ACK is written */
+ mb();
+}
+
+static inline uint32_t
+bnx2x_hc_ack_int(struct bnx2x_softc *sc)
+{
+ uint32_t hc_addr = (HC_REG_COMMAND_REG + SC_PORT(sc) * 32 +
+ COMMAND_REG_SIMD_MASK);
+ uint32_t result = REG_RD(sc, hc_addr);
+
+ mb();
+ return result;
+}
+
+static inline uint32_t
+bnx2x_igu_ack_int(struct bnx2x_softc *sc)
+{
+ uint32_t igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER * 8);
+ uint32_t result = REG_RD(sc, igu_addr);
+
+ /* PMD_PDEBUG_LOG(sc, DBG_INTR, "read 0x%08x from IGU addr 0x%x",
+ result, igu_addr); */
+
+ mb();
+ return result;
+}
+
+static inline uint32_t
+bnx2x_ack_int(struct bnx2x_softc *sc)
+{
+ mb();
+ if (sc->devinfo.int_block == INT_BLOCK_HC) {
+ return bnx2x_hc_ack_int(sc);
+ } else {
+ return bnx2x_igu_ack_int(sc);
+ }
+}
+
+static inline int
+func_by_vn(struct bnx2x_softc *sc, int vn)
+{
+ return (2 * vn + SC_PORT(sc));
+}
+
+/*
+ * Send a notification to the other functions on the same port.
+ */
+static inline void
+bnx2x_link_sync_notify(struct bnx2x_softc *sc)
+{
+ int func, vn;
+
+ /* Set the attention towards other drivers on the same port */
+ for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
+ if (vn == SC_VN(sc))
+ continue;
+
+ func = func_by_vn(sc, vn);
+ REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_0 +
+ (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func) * 4, 1);
+ }
+}
+
+/*
+ * Statistics ID are global per chip/path, while Client IDs for E1x
+ * are per port.
+ */
+static inline uint8_t
+bnx2x_stats_id(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x_softc *sc = fp->sc;
+
+ if (!CHIP_IS_E1x(sc)) {
+ return fp->cl_id;
+ }
+
+ return (fp->cl_id + SC_PORT(sc) * FP_SB_MAX_E1x);
+}
+
+int bnx2x_init(struct bnx2x_softc *sc);
+void bnx2x_load_firmware(struct bnx2x_softc *sc);
+int bnx2x_attach(struct bnx2x_softc *sc);
+int bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link);
+int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc);
+int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc);
+void bnx2x_free_ilt_mem(struct bnx2x_softc *sc);
+void bnx2x_dump_tx_chain(struct bnx2x_fastpath * fp, int bd_prod, int count);
+int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf **m_head, int m_pkts);
+uint8_t bnx2x_txeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp);
+void bnx2x_print_adapter_info(struct bnx2x_softc *sc);
+int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp);
+void bnx2x_link_status_update(struct bnx2x_softc *sc);
+int bnx2x_complete_sp(struct bnx2x_softc *sc);
+int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc);
+void bnx2x_periodic_callout(struct bnx2x_softc *sc);
+
+int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count);
+void bnx2x_vf_close(struct bnx2x_softc *sc);
+int bnx2x_vf_init(struct bnx2x_softc *sc);
+void bnx2x_vf_unload(struct bnx2x_softc *sc);
+int bnx2x_vf_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
+ int leading);
+void bnx2x_free_hsi_mem(struct bnx2x_softc *sc);
+int bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc);
+int bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode,
+ unsigned long *rx_accept_flags, unsigned long *tx_accept_flags);
+int bnx2x_check_bull(struct bnx2x_softc *sc);
+
+//#define BNX2X_PULSE
+
+#define BNX2X_PCI_CAP 1
+#define BNX2X_PCI_ECAP 2
+
+static inline struct bnx2x_pci_cap*
+pci_find_cap(struct bnx2x_softc *sc, uint8_t id, uint8_t type)
+{
+ struct bnx2x_pci_cap *cap = sc->pci_caps;
+
+ while (cap) {
+ if (cap->id == id && cap->type == type)
+ return cap;
+ cap = cap->next;
+ }
+
+ return NULL;
+}
+
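+/*
+ * Minimal sanity check: rejects only the all-zeroes MAC address; broadcast
+ * and multicast addresses are not screened here.
+ */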
+static inline int is_valid_ether_addr(uint8_t *addr)
+{
+ if (!(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]))
+ return 0;
+ else
+ return 1;
+}
+
+static inline void
+bnx2x_set_rx_mode(struct bnx2x_softc *sc)
+{
+ if (sc->state == BNX2X_STATE_OPEN) {
+ if (IS_PF(sc)) {
+ bnx2x_set_storm_rx_mode(sc);
+ } else {
+ sc->rx_mode = BNX2X_RX_MODE_PROMISC;
+ bnx2x_vf_set_rx_mode(sc);
+ }
+ } else {
+ PMD_DRV_LOG(NOTICE, "Card is not ready to change mode");
+ }
+}
+
+static inline int pci_read(struct bnx2x_softc *sc, size_t addr,
+ void *val, uint8_t size)
+{
+ if (rte_eal_pci_read_config(sc->pci_dev, val, size, addr) <= 0) {
+ PMD_DRV_LOG(ERR, "Can't read from PCI config space");
+ return ENXIO;
+ }
+
+ return 0;
+}
+
+static inline int pci_write_word(struct bnx2x_softc *sc, size_t addr, off_t val)
+{
+ uint16_t val16 = val;
+
+ if (rte_eal_pci_write_config(sc->pci_dev, &val16,
+ sizeof(val16), addr) <= 0) {
+ PMD_DRV_LOG(ERR, "Can't write to PCI config space");
+ return ENXIO;
+ }
+
+ return 0;
+}
+
+static inline int pci_write_long(struct bnx2x_softc *sc, size_t addr, off_t val)
+{
+ uint32_t val32 = val;
+ if (rte_eal_pci_write_config(sc->pci_dev, &val32,
+ sizeof(val32), addr) <= 0) {
+ PMD_DRV_LOG(ERR, "Can't write to PCI config space");
+ return ENXIO;
+ }
+
+ return 0;
+}
+
+#endif /* __BNX2X_H__ */
diff --git a/src/dpdk22/drivers/net/bnx2x/bnx2x_ethdev.h b/src/dpdk22/drivers/net/bnx2x/bnx2x_ethdev.h
new file mode 100644
index 00000000..a9da9de8
--- /dev/null
+++ b/src/dpdk22/drivers/net/bnx2x/bnx2x_ethdev.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ *
+ * Copyright (c) 2015 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ */
+
+#ifndef PMD_BNX2X_ETHDEV_H
+#define PMD_BNX2X_ETHDEV_H
+
+#include <sys/queue.h>
+#include <sys/param.h>
+#include <sys/user.h>
+#include <sys/stat.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <assert.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+#include <rte_ethdev.h>
+#include <rte_spinlock.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+#include "bnx2x_rxtx.h"
+#include "bnx2x_logs.h"
+
+#define DELAY(x) rte_delay_us(x)
+#define DELAY_MS(x) rte_delay_ms(x)
+#define usec_delay(x) DELAY(x)
+#define msec_delay(x) DELAY(1000*(x))
+
+#define FALSE 0
+#define TRUE 1
+
+#define false 0
+#define true 1
+#define min(a,b) RTE_MIN(a,b)
+
+#define mb() rte_mb()
+#define wmb() rte_wmb()
+#define rmb() rte_rmb()
+
+
+#define MAX_QUEUES sysconf(_SC_NPROCESSORS_CONF)
+
+#define BNX2X_MIN_RX_BUF_SIZE 1024
+#define BNX2X_MAX_RX_PKT_LEN 15872
+#define BNX2X_MAX_MAC_ADDRS 1
+
+/* Hardware RX tick timer (usecs) */
+#define BNX2X_RX_TICKS 25
+/* Hardware TX tick timer (usecs) */
+#define BNX2X_TX_TICKS 50
+/* Maximum number of Rx packets to process at a time */
+#define BNX2X_RX_BUDGET 0xffffffff
+
+/* MAC address operations */
+struct bnx2x_mac_ops {
+	void (*mac_addr_add)(struct rte_eth_dev *dev, struct ether_addr *addr,
+			uint16_t index, uint32_t pool); /* not implemented yet */
+	void (*mac_addr_remove)(struct rte_eth_dev *dev, uint16_t index); /* not implemented yet */
+};
+
+#endif /* PMD_BNX2X_ETHDEV_H */
diff --git a/src/dpdk22/drivers/net/bnx2x/bnx2x_logs.h b/src/dpdk22/drivers/net/bnx2x/bnx2x_logs.h
new file mode 100644
index 00000000..dff014d7
--- /dev/null
+++ b/src/dpdk22/drivers/net/bnx2x/bnx2x_logs.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ *
+ * Copyright (c) 2015 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ */
+
+#ifndef _PMD_LOGS_H_
+#define _PMD_LOGS_H_
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
+
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_BNX2X_DEBUG
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
+#define PMD_DEBUG_PERIODIC_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_DEBUG_PERIODIC_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+
+#endif /* _PMD_LOGS_H_ */
diff --git a/src/dpdk22/drivers/net/bnx2x/bnx2x_rxtx.h b/src/dpdk22/drivers/net/bnx2x/bnx2x_rxtx.h
new file mode 100644
index 00000000..ccb22fc1
--- /dev/null
+++ b/src/dpdk22/drivers/net/bnx2x/bnx2x_rxtx.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ *
+ * Copyright (c) 2015 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ */
+
+#ifndef _BNX2X_RXTX_H_
+#define _BNX2X_RXTX_H_
+
+
+#define DEFAULT_RX_FREE_THRESH 0
+#define DEFAULT_TX_FREE_THRESH 512
+#define RTE_PMD_BNX2X_TX_MAX_BURST 1
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+struct bnx2x_rx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct bnx2x_rx_queue {
+ struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+ union eth_rx_cqe *cq_ring; /**< RCQ ring virtual address. */
+ uint64_t cq_ring_phys_addr; /**< RCQ ring DMA address. */
+ uint64_t *rx_ring; /**< RX ring virtual address. */
+ uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
+ struct rte_mbuf **sw_ring; /**< address of RX software ring. */
+ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+ uint16_t nb_cq_pages; /**< number of RCQ pages. */
+ uint16_t nb_rx_desc; /**< number of RX descriptors. */
+ uint16_t nb_rx_pages; /**< number of RX pages. */
+ uint16_t rx_bd_head; /**< Index of current rx bd. */
+ uint16_t rx_bd_tail; /**< Index of last rx bd. */
+ uint16_t rx_cq_head; /**< Index of current rcq bd. */
+ uint16_t rx_cq_tail; /**< Index of last rcq bd. */
+ uint16_t nb_rx_hold; /**< number of held free RX desc. */
+ uint16_t rx_free_thresh; /**< max free RX desc to hold. */
+ uint16_t queue_id; /**< RX queue index. */
+ uint8_t port_id; /**< Device port identifier. */
+ uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
+ struct bnx2x_softc *sc; /**< Ptr to dev_private data. */
+ uint64_t rx_mbuf_alloc; /**< Number of allocated mbufs. */
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct bnx2x_tx_queue {
+ /** TX ring virtual address. */
+ union eth_tx_bd_types *tx_ring; /**< TX ring virtual address. */
+ uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
+ struct rte_mbuf **sw_ring; /**< virtual address of SW ring. */
+ uint16_t tx_pkt_tail; /**< Index of current tx pkt. */
+ uint16_t tx_pkt_head; /**< Index of last pkt counted by txeof. */
+ uint16_t tx_bd_tail; /**< Index of current tx bd. */
+ uint16_t tx_bd_head; /**< Index of last bd counted by txeof. */
+ uint16_t nb_tx_desc; /**< number of TX descriptors. */
+ uint16_t tx_free_thresh; /**< minimum TX before freeing. */
+ uint16_t nb_tx_avail; /**< Number of TX descriptors available. */
+ uint16_t nb_tx_pages; /**< number of TX pages */
+ uint16_t queue_id; /**< TX queue index. */
+ uint8_t port_id; /**< Device port identifier. */
+ struct bnx2x_softc *sc; /**< Ptr to dev_private data */
+};
+
+int bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+
+int bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+void bnx2x_dev_rx_queue_release(void *rxq);
+void bnx2x_dev_tx_queue_release(void *txq);
+int bnx2x_dev_rx_init(struct rte_eth_dev *dev);
+void bnx2x_dev_clear_queues(struct rte_eth_dev *dev);
+
+#endif /* _BNX2X_RXTX_H_ */
diff --git a/src/dpdk22/drivers/net/bnx2x/bnx2x_stats.h b/src/dpdk22/drivers/net/bnx2x/bnx2x_stats.h
new file mode 100644
index 00000000..3396de31
--- /dev/null
+++ b/src/dpdk22/drivers/net/bnx2x/bnx2x_stats.h
@@ -0,0 +1,611 @@
+/*-
+ * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ */
+
+#ifndef BNX2X_STATS_H
+#define BNX2X_STATS_H
+
+#include <sys/types.h>
+
+struct nig_stats {
+ uint32_t brb_discard;
+ uint32_t brb_packet;
+ uint32_t brb_truncate;
+ uint32_t flow_ctrl_discard;
+ uint32_t flow_ctrl_octets;
+ uint32_t flow_ctrl_packet;
+ uint32_t mng_discard;
+ uint32_t mng_octet_inp;
+ uint32_t mng_octet_out;
+ uint32_t mng_packet_inp;
+ uint32_t mng_packet_out;
+ uint32_t pbf_octets;
+ uint32_t pbf_packet;
+ uint32_t safc_inp;
+ uint32_t egress_mac_pkt0_lo;
+ uint32_t egress_mac_pkt0_hi;
+ uint32_t egress_mac_pkt1_lo;
+ uint32_t egress_mac_pkt1_hi;
+};
+
+
+enum bnx2x_stats_event {
+ STATS_EVENT_PMF = 0,
+ STATS_EVENT_LINK_UP,
+ STATS_EVENT_UPDATE,
+ STATS_EVENT_STOP,
+ STATS_EVENT_MAX
+};
+
+enum bnx2x_stats_state {
+ STATS_STATE_DISABLED = 0,
+ STATS_STATE_ENABLED,
+ STATS_STATE_MAX
+};
+
+struct bnx2x_eth_stats {
+ uint32_t total_bytes_received_hi;
+ uint32_t total_bytes_received_lo;
+ uint32_t total_bytes_transmitted_hi;
+ uint32_t total_bytes_transmitted_lo;
+ uint32_t total_unicast_packets_received_hi;
+ uint32_t total_unicast_packets_received_lo;
+ uint32_t total_multicast_packets_received_hi;
+ uint32_t total_multicast_packets_received_lo;
+ uint32_t total_broadcast_packets_received_hi;
+ uint32_t total_broadcast_packets_received_lo;
+ uint32_t total_unicast_packets_transmitted_hi;
+ uint32_t total_unicast_packets_transmitted_lo;
+ uint32_t total_multicast_packets_transmitted_hi;
+ uint32_t total_multicast_packets_transmitted_lo;
+ uint32_t total_broadcast_packets_transmitted_hi;
+ uint32_t total_broadcast_packets_transmitted_lo;
+ uint32_t valid_bytes_received_hi;
+ uint32_t valid_bytes_received_lo;
+
+ uint32_t error_bytes_received_hi;
+ uint32_t error_bytes_received_lo;
+ uint32_t etherstatsoverrsizepkts_hi;
+ uint32_t etherstatsoverrsizepkts_lo;
+ uint32_t no_buff_discard_hi;
+ uint32_t no_buff_discard_lo;
+
+ uint32_t rx_stat_ifhcinbadoctets_hi;
+ uint32_t rx_stat_ifhcinbadoctets_lo;
+ uint32_t tx_stat_ifhcoutbadoctets_hi;
+ uint32_t tx_stat_ifhcoutbadoctets_lo;
+ uint32_t rx_stat_dot3statsfcserrors_hi;
+ uint32_t rx_stat_dot3statsfcserrors_lo;
+ uint32_t rx_stat_dot3statsalignmenterrors_hi;
+ uint32_t rx_stat_dot3statsalignmenterrors_lo;
+ uint32_t rx_stat_dot3statscarriersenseerrors_hi;
+ uint32_t rx_stat_dot3statscarriersenseerrors_lo;
+ uint32_t rx_stat_falsecarriererrors_hi;
+ uint32_t rx_stat_falsecarriererrors_lo;
+ uint32_t rx_stat_etherstatsundersizepkts_hi;
+ uint32_t rx_stat_etherstatsundersizepkts_lo;
+ uint32_t rx_stat_dot3statsframestoolong_hi;
+ uint32_t rx_stat_dot3statsframestoolong_lo;
+ uint32_t rx_stat_etherstatsfragments_hi;
+ uint32_t rx_stat_etherstatsfragments_lo;
+ uint32_t rx_stat_etherstatsjabbers_hi;
+ uint32_t rx_stat_etherstatsjabbers_lo;
+ uint32_t rx_stat_maccontrolframesreceived_hi;
+ uint32_t rx_stat_maccontrolframesreceived_lo;
+ uint32_t rx_stat_bmac_xpf_hi;
+ uint32_t rx_stat_bmac_xpf_lo;
+ uint32_t rx_stat_bmac_xcf_hi;
+ uint32_t rx_stat_bmac_xcf_lo;
+ uint32_t rx_stat_xoffstateentered_hi;
+ uint32_t rx_stat_xoffstateentered_lo;
+ uint32_t rx_stat_xonpauseframesreceived_hi;
+ uint32_t rx_stat_xonpauseframesreceived_lo;
+ uint32_t rx_stat_xoffpauseframesreceived_hi;
+ uint32_t rx_stat_xoffpauseframesreceived_lo;
+ uint32_t tx_stat_outxonsent_hi;
+ uint32_t tx_stat_outxonsent_lo;
+ uint32_t tx_stat_outxoffsent_hi;
+ uint32_t tx_stat_outxoffsent_lo;
+ uint32_t tx_stat_flowcontroldone_hi;
+ uint32_t tx_stat_flowcontroldone_lo;
+ uint32_t tx_stat_etherstatscollisions_hi;
+ uint32_t tx_stat_etherstatscollisions_lo;
+ uint32_t tx_stat_dot3statssinglecollisionframes_hi;
+ uint32_t tx_stat_dot3statssinglecollisionframes_lo;
+ uint32_t tx_stat_dot3statsmultiplecollisionframes_hi;
+ uint32_t tx_stat_dot3statsmultiplecollisionframes_lo;
+ uint32_t tx_stat_dot3statsdeferredtransmissions_hi;
+ uint32_t tx_stat_dot3statsdeferredtransmissions_lo;
+ uint32_t tx_stat_dot3statsexcessivecollisions_hi;
+ uint32_t tx_stat_dot3statsexcessivecollisions_lo;
+ uint32_t tx_stat_dot3statslatecollisions_hi;
+ uint32_t tx_stat_dot3statslatecollisions_lo;
+ uint32_t tx_stat_etherstatspkts64octets_hi;
+ uint32_t tx_stat_etherstatspkts64octets_lo;
+ uint32_t tx_stat_etherstatspkts65octetsto127octets_hi;
+ uint32_t tx_stat_etherstatspkts65octetsto127octets_lo;
+ uint32_t tx_stat_etherstatspkts128octetsto255octets_hi;
+ uint32_t tx_stat_etherstatspkts128octetsto255octets_lo;
+ uint32_t tx_stat_etherstatspkts256octetsto511octets_hi;
+ uint32_t tx_stat_etherstatspkts256octetsto511octets_lo;
+ uint32_t tx_stat_etherstatspkts512octetsto1023octets_hi;
+ uint32_t tx_stat_etherstatspkts512octetsto1023octets_lo;
+ uint32_t tx_stat_etherstatspkts1024octetsto1522octets_hi;
+ uint32_t tx_stat_etherstatspkts1024octetsto1522octets_lo;
+ uint32_t tx_stat_etherstatspktsover1522octets_hi;
+ uint32_t tx_stat_etherstatspktsover1522octets_lo;
+ uint32_t tx_stat_bmac_2047_hi;
+ uint32_t tx_stat_bmac_2047_lo;
+ uint32_t tx_stat_bmac_4095_hi;
+ uint32_t tx_stat_bmac_4095_lo;
+ uint32_t tx_stat_bmac_9216_hi;
+ uint32_t tx_stat_bmac_9216_lo;
+ uint32_t tx_stat_bmac_16383_hi;
+ uint32_t tx_stat_bmac_16383_lo;
+ uint32_t tx_stat_dot3statsinternalmactransmiterrors_hi;
+ uint32_t tx_stat_dot3statsinternalmactransmiterrors_lo;
+ uint32_t tx_stat_bmac_ufl_hi;
+ uint32_t tx_stat_bmac_ufl_lo;
+
+ uint32_t pause_frames_received_hi;
+ uint32_t pause_frames_received_lo;
+ uint32_t pause_frames_sent_hi;
+ uint32_t pause_frames_sent_lo;
+
+ uint32_t etherstatspkts1024octetsto1522octets_hi;
+ uint32_t etherstatspkts1024octetsto1522octets_lo;
+ uint32_t etherstatspktsover1522octets_hi;
+ uint32_t etherstatspktsover1522octets_lo;
+
+ uint32_t brb_drop_hi;
+ uint32_t brb_drop_lo;
+ uint32_t brb_truncate_hi;
+ uint32_t brb_truncate_lo;
+
+ uint32_t mac_filter_discard;
+ uint32_t mf_tag_discard;
+ uint32_t brb_truncate_discard;
+ uint32_t mac_discard;
+
+ uint32_t nig_timer_max;
+
+ /* PFC */
+ uint32_t pfc_frames_received_hi;
+ uint32_t pfc_frames_received_lo;
+ uint32_t pfc_frames_sent_hi;
+ uint32_t pfc_frames_sent_lo;
+
+ /* Recovery */
+ uint32_t recoverable_error;
+ uint32_t unrecoverable_error;
+
+ /* src: Clear-on-Read register; Will not survive PMF Migration */
+ uint32_t eee_tx_lpi;
+
+ /* receive path driver statistics */
+ uint32_t rx_calls;
+ uint32_t rx_pkts;
+ uint32_t rx_soft_errors;
+ uint32_t rx_hw_csum_errors;
+ uint32_t rx_ofld_frames_csum_ip;
+ uint32_t rx_ofld_frames_csum_tcp_udp;
+ uint32_t rx_budget_reached;
+
+ /* tx path driver statistics */
+ uint32_t tx_pkts;
+ uint32_t tx_soft_errors;
+ uint32_t tx_ofld_frames_csum_ip;
+ uint32_t tx_ofld_frames_csum_tcp;
+ uint32_t tx_ofld_frames_csum_udp;
+ uint32_t tx_encap_failures;
+ uint32_t tx_hw_queue_full;
+ uint32_t tx_hw_max_queue_depth;
+ uint32_t tx_dma_mapping_failure;
+ uint32_t tx_max_drbr_queue_depth;
+ uint32_t tx_window_violation_std;
+ uint32_t tx_chain_lost_mbuf;
+ uint32_t tx_frames_deferred;
+ uint32_t tx_queue_xoff;
+
+ /* mbuf driver statistics */
+ uint32_t mbuf_defrag_attempts;
+ uint32_t mbuf_defrag_failures;
+ uint32_t mbuf_rx_bd_alloc_failed;
+ uint32_t mbuf_rx_bd_mapping_failed;
+
+ /* track the number of allocated mbufs */
+ uint32_t mbuf_alloc_tx;
+ uint32_t mbuf_alloc_rx;
+};
+
+
+struct bnx2x_eth_q_stats {
+ uint32_t total_unicast_bytes_received_hi;
+ uint32_t total_unicast_bytes_received_lo;
+ uint32_t total_broadcast_bytes_received_hi;
+ uint32_t total_broadcast_bytes_received_lo;
+ uint32_t total_multicast_bytes_received_hi;
+ uint32_t total_multicast_bytes_received_lo;
+ uint32_t total_bytes_received_hi;
+ uint32_t total_bytes_received_lo;
+ uint32_t total_unicast_bytes_transmitted_hi;
+ uint32_t total_unicast_bytes_transmitted_lo;
+ uint32_t total_broadcast_bytes_transmitted_hi;
+ uint32_t total_broadcast_bytes_transmitted_lo;
+ uint32_t total_multicast_bytes_transmitted_hi;
+ uint32_t total_multicast_bytes_transmitted_lo;
+ uint32_t total_bytes_transmitted_hi;
+ uint32_t total_bytes_transmitted_lo;
+ uint32_t total_unicast_packets_received_hi;
+ uint32_t total_unicast_packets_received_lo;
+ uint32_t total_multicast_packets_received_hi;
+ uint32_t total_multicast_packets_received_lo;
+ uint32_t total_broadcast_packets_received_hi;
+ uint32_t total_broadcast_packets_received_lo;
+ uint32_t total_unicast_packets_transmitted_hi;
+ uint32_t total_unicast_packets_transmitted_lo;
+ uint32_t total_multicast_packets_transmitted_hi;
+ uint32_t total_multicast_packets_transmitted_lo;
+ uint32_t total_broadcast_packets_transmitted_hi;
+ uint32_t total_broadcast_packets_transmitted_lo;
+ uint32_t valid_bytes_received_hi;
+ uint32_t valid_bytes_received_lo;
+
+ uint32_t etherstatsoverrsizepkts_hi;
+ uint32_t etherstatsoverrsizepkts_lo;
+ uint32_t no_buff_discard_hi;
+ uint32_t no_buff_discard_lo;
+
+ uint32_t total_packets_received_checksum_discarded_hi;
+ uint32_t total_packets_received_checksum_discarded_lo;
+ uint32_t total_packets_received_ttl0_discarded_hi;
+ uint32_t total_packets_received_ttl0_discarded_lo;
+ uint32_t total_transmitted_dropped_packets_error_hi;
+ uint32_t total_transmitted_dropped_packets_error_lo;
+
+ /* receive path driver statistics */
+ uint32_t rx_calls;
+ uint32_t rx_pkts;
+ uint32_t rx_soft_errors;
+ uint32_t rx_hw_csum_errors;
+ uint32_t rx_ofld_frames_csum_ip;
+ uint32_t rx_ofld_frames_csum_tcp_udp;
+ uint32_t rx_budget_reached;
+
+ /* tx path driver statistics */
+ uint32_t tx_pkts;
+ uint32_t tx_soft_errors;
+ uint32_t tx_ofld_frames_csum_ip;
+ uint32_t tx_ofld_frames_csum_tcp;
+ uint32_t tx_ofld_frames_csum_udp;
+ uint32_t tx_encap_failures;
+ uint32_t tx_hw_queue_full;
+ uint32_t tx_hw_max_queue_depth;
+ uint32_t tx_dma_mapping_failure;
+ uint32_t tx_max_drbr_queue_depth;
+ uint32_t tx_window_violation_std;
+ uint32_t tx_chain_lost_mbuf;
+ uint32_t tx_frames_deferred;
+ uint32_t tx_queue_xoff;
+
+ /* mbuf driver statistics */
+ uint32_t mbuf_defrag_attempts;
+ uint32_t mbuf_defrag_failures;
+ uint32_t mbuf_rx_bd_alloc_failed;
+ uint32_t mbuf_rx_bd_mapping_failed;
+
+ /* track the number of allocated mbufs */
+ uint32_t mbuf_alloc_tx;
+ uint32_t mbuf_alloc_rx;
+};
+
+struct bnx2x_eth_stats_old {
+ uint32_t rx_stat_dot3statsframestoolong_hi;
+ uint32_t rx_stat_dot3statsframestoolong_lo;
+};
+
+struct bnx2x_eth_q_stats_old {
+ /* Fields to preserve over fw reset */
+ uint32_t total_unicast_bytes_received_hi;
+ uint32_t total_unicast_bytes_received_lo;
+ uint32_t total_broadcast_bytes_received_hi;
+ uint32_t total_broadcast_bytes_received_lo;
+ uint32_t total_multicast_bytes_received_hi;
+ uint32_t total_multicast_bytes_received_lo;
+ uint32_t total_unicast_bytes_transmitted_hi;
+ uint32_t total_unicast_bytes_transmitted_lo;
+ uint32_t total_broadcast_bytes_transmitted_hi;
+ uint32_t total_broadcast_bytes_transmitted_lo;
+ uint32_t total_multicast_bytes_transmitted_hi;
+ uint32_t total_multicast_bytes_transmitted_lo;
+
+ /* Fields to preserve the last value of */
+ uint32_t total_bytes_received_hi;
+ uint32_t total_bytes_received_lo;
+ uint32_t total_bytes_transmitted_hi;
+ uint32_t total_bytes_transmitted_lo;
+ uint32_t total_unicast_packets_received_hi;
+ uint32_t total_unicast_packets_received_lo;
+ uint32_t total_multicast_packets_received_hi;
+ uint32_t total_multicast_packets_received_lo;
+ uint32_t total_broadcast_packets_received_hi;
+ uint32_t total_broadcast_packets_received_lo;
+ uint32_t total_unicast_packets_transmitted_hi;
+ uint32_t total_unicast_packets_transmitted_lo;
+ uint32_t total_multicast_packets_transmitted_hi;
+ uint32_t total_multicast_packets_transmitted_lo;
+ uint32_t total_broadcast_packets_transmitted_hi;
+ uint32_t total_broadcast_packets_transmitted_lo;
+ uint32_t valid_bytes_received_hi;
+ uint32_t valid_bytes_received_lo;
+
+ /* receive path driver statistics */
+ uint32_t rx_calls_old;
+ uint32_t rx_pkts_old;
+ uint32_t rx_soft_errors_old;
+ uint32_t rx_hw_csum_errors_old;
+ uint32_t rx_ofld_frames_csum_ip_old;
+ uint32_t rx_ofld_frames_csum_tcp_udp_old;
+ uint32_t rx_budget_reached_old;
+
+ /* tx path driver statistics */
+ uint32_t tx_pkts_old;
+ uint32_t tx_soft_errors_old;
+ uint32_t tx_ofld_frames_csum_ip_old;
+ uint32_t tx_ofld_frames_csum_tcp_old;
+ uint32_t tx_ofld_frames_csum_udp_old;
+ uint32_t tx_encap_failures_old;
+ uint32_t tx_hw_queue_full_old;
+ uint32_t tx_hw_max_queue_depth_old;
+ uint32_t tx_dma_mapping_failure_old;
+ uint32_t tx_max_drbr_queue_depth_old;
+ uint32_t tx_window_violation_std_old;
+ uint32_t tx_chain_lost_mbuf_old;
+ uint32_t tx_frames_deferred_old;
+ uint32_t tx_queue_xoff_old;
+
+ /* mbuf driver statistics */
+ uint32_t mbuf_defrag_attempts_old;
+ uint32_t mbuf_defrag_failures_old;
+ uint32_t mbuf_rx_bd_alloc_failed_old;
+ uint32_t mbuf_rx_bd_mapping_failed_old;
+
+ /* track the number of allocated mbufs */
+ int mbuf_alloc_tx_old;
+ int mbuf_alloc_rx_old;
+};
+
+struct bnx2x_net_stats_old {
+ uint32_t rx_dropped;
+};
+
+struct bnx2x_fw_port_stats_old {
+ uint32_t pfc_frames_tx_hi;
+ uint32_t pfc_frames_tx_lo;
+ uint32_t pfc_frames_rx_hi;
+ uint32_t pfc_frames_rx_lo;
+
+ uint32_t mac_filter_discard;
+ uint32_t mf_tag_discard;
+ uint32_t brb_truncate_discard;
+ uint32_t mac_discard;
+};
+
+/* sum[hi:lo] += add[hi:lo] */
+#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+ do { \
+ s_lo += a_lo; \
+ s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
+ } while (0)
+
+#define LE32_0 ((uint32_t) 0)
+#define LE16_0 ((uint16_t) 0)
+
+/* Little-endian variants of ADD_64; pass LE32_0/LE16_0 when the high word is 0 */
+#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \
+ ADD_64(s_hi, le32toh(a_hi_le), \
+ s_lo, le32toh(a_lo_le))
+
+#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \
+ ADD_64(s_hi, le16toh(a_hi_le), \
+ s_lo, le16toh(a_lo_le))
+
+/* difference = minuend - subtrahend */
+#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
+ do { \
+ if (m_lo < s_lo) { \
+ /* underflow */ \
+ d_hi = m_hi - s_hi; \
+ if (d_hi > 0) { \
+ /* we can 'loan' 1 */ \
+ d_hi--; \
+ d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+ } else { \
+ /* m_hi <= s_hi */ \
+ d_hi = 0; \
+ d_lo = 0; \
+ } \
+ } else { \
+ /* m_lo >= s_lo */ \
+ if (m_hi < s_hi) { \
+ d_hi = 0; \
+ d_lo = 0; \
+ } else { \
+ /* m_hi >= s_hi */ \
+ d_hi = m_hi - s_hi; \
+ d_lo = m_lo - s_lo; \
+ } \
+ } \
+ } while (0)
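+
+/*
+ * Editorial usage sketch (illustrative, not part of the original driver):
+ * the stats blocks above keep 64-bit counters as {hi, lo} pairs of
+ * uint32_t, so ADD_64 must propagate the carry out of the low word and
+ * DIFF_64 clamps toward zero when the subtrahend exceeds the minuend.
+ *
+ *   uint32_t s_hi = 0, s_lo = 0xFFFFFFF0;
+ *   ADD_64(s_hi, 0, s_lo, 0x20);          // s_hi == 1, s_lo == 0x10
+ *
+ *   uint32_t d_hi, d_lo;
+ *   DIFF_64(d_hi, 1, 0, d_lo, 5, 9);      // ((1 << 32) | 5) - 9:
+ *                                         // d_hi == 0, d_lo == 0xFFFFFFFC
+ */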
+
+#define UPDATE_STAT64(s, t) \
+ do { \
+ DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
+ diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
+ pstats->mac_stx[0].t##_hi = new->s##_hi; \
+ pstats->mac_stx[0].t##_lo = new->s##_lo; \
+ ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
+ pstats->mac_stx[1].t##_lo, diff.lo); \
+ } while (0)
+
+#define UPDATE_STAT64_NIG(s, t) \
+ do { \
+ DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
+ diff.lo, new->s##_lo, old->s##_lo); \
+ ADD_64(estats->t##_hi, diff.hi, \
+ estats->t##_lo, diff.lo); \
+ } while (0)
+
+/* sum[hi:lo] += add */
+#define ADD_EXTEND_64(s_hi, s_lo, a) \
+ do { \
+ s_lo += a; \
+ s_hi += (s_lo < a) ? 1 : 0; \
+ } while (0)
+
+#define ADD_STAT64(diff, t) \
+ do { \
+ ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
+ pstats->mac_stx[1].t##_lo, new->diff##_lo); \
+ } while (0)
+
+#define UPDATE_EXTEND_STAT(s) \
+ do { \
+ ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
+ pstats->mac_stx[1].s##_lo, \
+ new->s); \
+ } while (0)
+
+#define UPDATE_EXTEND_TSTAT_X(s, t, size) \
+ do { \
+ diff = le##size##toh(tclient->s) - \
+ le##size##toh(old_tclient->s); \
+ old_tclient->s = tclient->s; \
+ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32)
+
+#define UPDATE_EXTEND_E_TSTAT(s, t, size) \
+ do { \
+ UPDATE_EXTEND_TSTAT_X(s, t, size); \
+ ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_EXTEND_USTAT(s, t) \
+ do { \
+ diff = le32toh(uclient->s) - le32toh(old_uclient->s); \
+ old_uclient->s = uclient->s; \
+ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_EXTEND_E_USTAT(s, t) \
+ do { \
+ UPDATE_EXTEND_USTAT(s, t); \
+ ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_EXTEND_XSTAT(s, t) \
+ do { \
+ diff = le32toh(xclient->s) - le32toh(old_xclient->s); \
+ old_xclient->s = xclient->s; \
+ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_QSTAT(s, t) \
+ do { \
+ qstats->t##_hi = qstats_old->t##_hi + le32toh(s.hi); \
+ qstats->t##_lo = qstats_old->t##_lo + le32toh(s.lo); \
+ } while (0)
+
+#define UPDATE_QSTAT_OLD(f) \
+ do { \
+ qstats_old->f = qstats->f; \
+ } while (0)
+
+#define UPDATE_ESTAT_QSTAT_64(s) \
+ do { \
+ ADD_64(estats->s##_hi, qstats->s##_hi, \
+ estats->s##_lo, qstats->s##_lo); \
+ SUB_64(estats->s##_hi, qstats_old->s##_hi_old, \
+ estats->s##_lo, qstats_old->s##_lo_old); \
+ qstats_old->s##_hi_old = qstats->s##_hi; \
+ qstats_old->s##_lo_old = qstats->s##_lo; \
+ } while (0)
+
+#define UPDATE_ESTAT_QSTAT(s) \
+ do { \
+ estats->s += qstats->s; \
+ estats->s -= qstats_old->s##_old; \
+ qstats_old->s##_old = qstats->s; \
+ } while (0)
+
+#define UPDATE_FSTAT_QSTAT(s) \
+ do { \
+ ADD_64(fstats->s##_hi, qstats->s##_hi, \
+ fstats->s##_lo, qstats->s##_lo); \
+ SUB_64(fstats->s##_hi, qstats_old->s##_hi, \
+ fstats->s##_lo, qstats_old->s##_lo); \
+ estats->s##_hi = fstats->s##_hi; \
+ estats->s##_lo = fstats->s##_lo; \
+ qstats_old->s##_hi = qstats->s##_hi; \
+ qstats_old->s##_lo = qstats->s##_lo; \
+ } while (0)
+
+#define UPDATE_FW_STAT(s) \
+ do { \
+ estats->s = le32toh(tport->s) + fwstats->s; \
+ } while (0)
+
+#define UPDATE_FW_STAT_OLD(f) \
+ do { \
+ fwstats->f = estats->f; \
+ } while (0)
+
+#define UPDATE_ESTAT(s, t) \
+ do { \
+ SUB_64(estats->s##_hi, estats_old->t##_hi, \
+ estats->s##_lo, estats_old->t##_lo); \
+ ADD_64(estats->s##_hi, estats->t##_hi, \
+ estats->s##_lo, estats->t##_lo); \
+ estats_old->t##_hi = estats->t##_hi; \
+ estats_old->t##_lo = estats->t##_lo; \
+ } while (0)
+
+/* minuend -= subtrahend */
+#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
+ do { \
+ DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
+ } while (0)
+
+/* minuend[hi:lo] -= subtrahend */
+#define SUB_EXTEND_64(m_hi, m_lo, s) \
+ do { \
+ uint32_t s_hi = 0; \
+ SUB_64(m_hi, s_hi, m_lo, s); \
+ } while (0)
+
+#define SUB_EXTEND_USTAT(s, t) \
+ do { \
+ diff = le32toh(uclient->s) - le32toh(old_uclient->s); \
+ SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+struct bnx2x_softc;
+void bnx2x_stats_init(struct bnx2x_softc *sc);
+void bnx2x_stats_handle(struct bnx2x_softc *sc, enum bnx2x_stats_event event);
+void bnx2x_save_statistics(struct bnx2x_softc *sc);
+void bnx2x_memset_stats(struct bnx2x_softc *sc);
+
+#endif /* BNX2X_STATS_H */
diff --git a/src/dpdk22/drivers/net/bnx2x/bnx2x_vfpf.h b/src/dpdk22/drivers/net/bnx2x/bnx2x_vfpf.h
new file mode 100644
index 00000000..966240cc
--- /dev/null
+++ b/src/dpdk22/drivers/net/bnx2x/bnx2x_vfpf.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ *
+ * Copyright (c) 2015 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ */
+
+#ifndef BNX2X_VFPF_H
+#define BNX2X_VFPF_H
+
+#include "ecore_sp.h"
+
+#define VLAN_HLEN 4
+
+struct vf_resource_query {
+ uint8_t num_rxqs;
+ uint8_t num_txqs;
+ uint8_t num_sbs;
+ uint8_t num_mac_filters;
+ uint8_t num_vlan_filters;
+ uint8_t num_mc_filters;
+};
+
+#define BNX2X_VF_STATUS_SUCCESS 1
+#define BNX2X_VF_STATUS_FAILURE 2
+#define BNX2X_VF_STATUS_NO_RESOURCES 4
+#define BNX2X_VF_BULLETIN_TRIES 5
+
+#define BNX2X_VF_Q_FLAG_CACHE_ALIGN 0x0008
+#define BNX2X_VF_Q_FLAG_STATS 0x0010
+#define BNX2X_VF_Q_FLAG_OV 0x0020
+#define BNX2X_VF_Q_FLAG_VLAN 0x0040
+#define BNX2X_VF_Q_FLAG_COS 0x0080
+#define BNX2X_VF_Q_FLAG_HC 0x0100
+#define BNX2X_VF_Q_FLAG_DHC 0x0200
+#define BNX2X_VF_Q_FLAG_LEADING_RSS 0x0400
+
+#define TLV_BUFFER_SIZE 1024
+
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+ uint16_t type;
+ uint16_t length;
+};
+
+struct vf_first_tlv {
+ uint16_t type;
+ uint16_t length;
+ uint32_t reply_offset;
+};
+
+struct tlv_buffer_size {
+ uint8_t tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+/* tlv struct for all PF replies except acquire */
+struct vf_common_reply_tlv {
+ uint16_t type;
+ uint16_t length;
+ uint8_t status;
+ uint8_t pad[3];
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+ uint16_t type;
+ uint16_t length;
+ uint32_t pad;
+};
+
+/* Acquire */
+struct vf_acquire_tlv {
+ struct vf_first_tlv first_tlv;
+
+ uint8_t vf_id;
+ uint8_t pad[3];
+
+ struct vf_resource_query res_query;
+
+ uint64_t bulletin_addr;
+};
+
+/* simple operation request on queue */
+struct vf_q_op_tlv {
+ struct vf_first_tlv first_tlv;
+ uint8_t vf_qid;
+ uint8_t pad[3];
+};
+
+/* receive side scaling tlv */
+struct vf_rss_tlv {
+ struct vf_first_tlv first_tlv;
+ uint32_t rss_flags;
+ uint8_t rss_result_mask;
+ uint8_t ind_table_size;
+ uint8_t rss_key_size;
+ uint8_t pad;
+ uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ uint32_t rss_key[T_ETH_RSS_KEY]; /* hash values */
+};
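+
+/*
+ * Editorial fill-in sketch (illustrative; num_rx_queues is a hypothetical
+ * input): the indirection table maps each hash bucket to a VF queue, and
+ * rss_result_mask selects how many hash bits index that table.
+ *
+ *   struct vf_rss_tlv rss = { { 0 } };
+ *   rss.rss_result_mask = T_ETH_INDIRECTION_TABLE_SIZE - 1;
+ *   rss.ind_table_size  = T_ETH_INDIRECTION_TABLE_SIZE;
+ *   rss.rss_key_size    = T_ETH_RSS_KEY;
+ *   for (int i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
+ *           rss.ind_table[i] = i % num_rx_queues;
+ */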
+
+struct vf_resc {
+#define BNX2X_VF_MAX_QUEUES_PER_VF 16
+#define BNX2X_VF_MAX_SBS_PER_VF 16
+ uint16_t hw_sbs[BNX2X_VF_MAX_SBS_PER_VF];
+ uint8_t hw_qid[BNX2X_VF_MAX_QUEUES_PER_VF];
+ uint8_t num_rxqs;
+ uint8_t num_txqs;
+ uint8_t num_sbs;
+ uint8_t num_mac_filters;
+ uint8_t num_vlan_filters;
+ uint8_t num_mc_filters;
+ uint8_t permanent_mac_addr[ETH_ALEN];
+ uint8_t current_mac_addr[ETH_ALEN];
+ uint16_t pf_link_speed;
+ uint32_t pf_link_supported;
+};
+
+/* tlv struct holding reply for acquire */
+struct vf_acquire_resp_tlv {
+ uint16_t type;
+ uint16_t length;
+ uint8_t status;
+ uint8_t pad1[3];
+ uint32_t chip_num;
+ uint8_t pad2[4];
+ char fw_ver[32];
+ uint16_t db_size;
+ uint8_t pad3[2];
+ struct vf_resc resc;
+};
+
+/* Init VF */
+struct vf_init_tlv {
+ struct vf_first_tlv first_tlv;
+ uint64_t sb_addr[BNX2X_VF_MAX_SBS_PER_VF];
+ uint64_t spq_addr;
+ uint64_t stats_addr;
+ uint16_t stats_step;
+ uint32_t flags;
+ uint32_t pad[2];
+};
+
+struct vf_rxq_params {
+ /* physical addresses */
+ uint64_t rcq_addr;
+ uint64_t rcq_np_addr;
+ uint64_t rxq_addr;
+ uint64_t pad1;
+
+ /* sb + hc info */
+ uint8_t vf_sb_id;
+ uint8_t sb_cq_index;
+ uint16_t hc_rate; /* desired interrupts per sec. */
+ /* rx buffer info */
+ uint16_t mtu;
+ uint16_t buf_sz;
+ uint16_t flags; /* for BNX2X_VF_Q_FLAG_X flags */
+ uint16_t stat_id; /* valid if BNX2X_VF_Q_FLAG_STATS */
+
+ uint8_t pad2[5];
+
+ uint8_t drop_flags;
+ uint8_t cache_line_log; /* BNX2X_VF_Q_FLAG_CACHE_ALIGN */
+ uint8_t pad3;
+};
+
+struct vf_txq_params {
+ /* physical addresses */
+ uint64_t txq_addr;
+
+ /* sb + hc info */
+ uint8_t vf_sb_id; /* index in hw_sbs[] */
+ uint8_t sb_index; /* Index in the SB */
+ uint16_t hc_rate; /* desired interrupts per sec. */
+ uint32_t flags; /* for BNX2X_VF_Q_FLAG_X flags */
+ uint16_t stat_id; /* valid if BNX2X_VF_Q_FLAG_STATS */
+ uint8_t traffic_type; /* see in setup_context() */
+ uint8_t pad;
+};
+
+/* Setup Queue */
+struct vf_setup_q_tlv {
+ struct vf_first_tlv first_tlv;
+
+ struct vf_rxq_params rxq;
+ struct vf_txq_params txq;
+
+ uint8_t vf_qid; /* index in hw_qid[] */
+ uint8_t param_valid;
+ #define VF_RXQ_VALID 0x01
+ #define VF_TXQ_VALID 0x02
+ uint8_t pad[2];
+};
+
+/* Set Queue Filters */
+struct vf_q_mac_vlan_filter {
+ uint32_t flags;
+ #define BNX2X_VF_Q_FILTER_DEST_MAC_VALID 0x01
+ #define BNX2X_VF_Q_FILTER_VLAN_TAG_VALID 0x02
+ #define BNX2X_VF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+ uint8_t mac[ETH_ALEN];
+ uint16_t vlan_tag;
+};
+
+
+#define _UP_ETH_ALEN (6)
+
+/* configure queue filters */
+struct vf_set_q_filters_tlv {
+ struct vf_first_tlv first_tlv;
+
+ uint32_t flags;
+ #define BNX2X_VF_MAC_VLAN_CHANGED 0x01
+ #define BNX2X_VF_MULTICAST_CHANGED 0x02
+ #define BNX2X_VF_RX_MASK_CHANGED 0x04
+
+ uint8_t vf_qid; /* index in hw_qid[] */
+ uint8_t mac_filters_cnt;
+ uint8_t multicast_cnt;
+ uint8_t pad;
+
+ #define VF_MAX_MAC_FILTERS 16
+ #define VF_MAX_VLAN_FILTERS 16
+ #define VF_MAX_FILTERS (VF_MAX_MAC_FILTERS +\
+ VF_MAX_VLAN_FILTERS)
+ struct vf_q_mac_vlan_filter filters[VF_MAX_FILTERS];
+
+ #define VF_MAX_MULTICAST_PER_VF 32
+ uint8_t multicast[VF_MAX_MULTICAST_PER_VF][_UP_ETH_ALEN];
+ unsigned long rx_mask;
+};
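+
+/*
+ * Editorial fill-in sketch (illustrative; mac_addr is a hypothetical
+ * 6-byte source array): program a single unicast MAC filter on queue 0.
+ *
+ *   struct vf_set_q_filters_tlv f = { { 0 } };
+ *   f.flags = BNX2X_VF_MAC_VLAN_CHANGED;
+ *   f.vf_qid = 0;
+ *   f.mac_filters_cnt = 1;
+ *   f.filters[0].flags = BNX2X_VF_Q_FILTER_DEST_MAC_VALID |
+ *                        BNX2X_VF_Q_FILTER_SET_MAC;
+ *   memcpy(f.filters[0].mac, mac_addr, ETH_ALEN);
+ */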
+
+
+/* close VF (disable VF) */
+struct vf_close_tlv {
+ struct vf_first_tlv first_tlv;
+ uint16_t vf_id; /* for debug */
+ uint8_t pad[2];
+};
+
+/* release the VF's acquired resources */
+struct vf_release_tlv {
+ struct vf_first_tlv first_tlv;
+ uint16_t vf_id; /* for debug */
+ uint8_t pad[2];
+};
+
+union query_tlvs {
+ struct vf_first_tlv first_tlv;
+ struct vf_acquire_tlv acquire;
+ struct vf_init_tlv init;
+ struct vf_close_tlv close;
+ struct vf_q_op_tlv q_op;
+ struct vf_setup_q_tlv setup_q;
+ struct vf_set_q_filters_tlv set_q_filters;
+ struct vf_release_tlv release;
+ struct vf_rss_tlv update_rss;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+union resp_tlvs {
+ struct vf_common_reply_tlv common_reply;
+ struct vf_acquire_resp_tlv acquire_resp;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+/* struct allocated by VF driver, PF sends updates to VF via bulletin */
+struct bnx2x_vf_bulletin {
+ uint32_t crc; /* crc of structure to ensure it is not
+ * read mid-update
+ */
+ uint16_t version;
+ uint16_t length;
+
+ uint64_t valid_bitmap; /* bitmap indicating which fields
+ * hold valid values
+ */
+
+#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
+ * is available for it
+ */
+#define VLAN_VALID 1 /* when set, the vf should not access the
+ * vf channel
+ */
+#define CHANNEL_DOWN 2 /* vf channel is disabled. VFs are not
+ * to attempt to send messages on the
+ * channel after this bit is set
+ */
+ uint8_t mac[ETH_ALEN];
+ uint8_t mac_pad[2];
+
+ uint16_t vlan;
+ uint8_t vlan_pad[6];
+};
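+
+/*
+ * Editorial consumption sketch (illustrative; bnx2x_crc32() is a
+ * hypothetical helper): because the PF may rewrite the bulletin at any
+ * time, the VF re-reads it until the CRC over the payload matches,
+ * bounded by BNX2X_VF_BULLETIN_TRIES.
+ *
+ *   struct bnx2x_vf_bulletin copy;
+ *   for (int i = 0; i < BNX2X_VF_BULLETIN_TRIES; i++) {
+ *           copy = *bulletin;   // snapshot of the shared-memory board
+ *           uint32_t crc = bnx2x_crc32((uint8_t *)&copy + sizeof(copy.crc),
+ *                                      copy.length - sizeof(copy.crc));
+ *           if (crc == copy.crc)
+ *                   break;      // consistent snapshot obtained
+ *   }
+ */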
+
+#define MAX_TLVS_IN_LIST 50
+enum channel_tlvs {
+ BNX2X_VF_TLV_NONE, /* ends tlv sequence */
+ BNX2X_VF_TLV_ACQUIRE,
+ BNX2X_VF_TLV_INIT,
+ BNX2X_VF_TLV_SETUP_Q,
+ BNX2X_VF_TLV_SET_Q_FILTERS,
+ BNX2X_VF_TLV_ACTIVATE_Q,
+ BNX2X_VF_TLV_DEACTIVATE_Q,
+ BNX2X_VF_TLV_TEARDOWN_Q,
+ BNX2X_VF_TLV_CLOSE,
+ BNX2X_VF_TLV_RELEASE,
+ BNX2X_VF_TLV_UPDATE_RSS_OLD,
+ BNX2X_VF_TLV_PF_RELEASE_VF,
+ BNX2X_VF_TLV_LIST_END,
+ BNX2X_VF_TLV_FLR,
+ BNX2X_VF_TLV_PF_SET_MAC,
+ BNX2X_VF_TLV_PF_SET_VLAN,
+ BNX2X_VF_TLV_UPDATE_RSS,
+ BNX2X_VF_TLV_PHYS_PORT_ID,
+ BNX2X_VF_TLV_MAX
+};
+
+struct bnx2x_vf_mbx_msg {
+ union query_tlvs query[BNX2X_VF_MAX_QUEUES_PER_VF];
+ union resp_tlvs resp;
+};
+
+void bnx2x_add_tlv(void *tlvs_list, uint16_t offset, uint16_t type, uint16_t length);
+int bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set);
+int bnx2x_vf_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *params);
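+
+/*
+ * Editorial request-building sketch (illustrative; it assumes
+ * bnx2x_add_tlv() writes a struct channel_tlv header at the given byte
+ * offset, and sc->vf2pf_mbox / vf_id are hypothetical names): every
+ * vf->pf message is a TLV list terminated by BNX2X_VF_TLV_LIST_END.
+ *
+ *   struct bnx2x_vf_mbx_msg *mbx = sc->vf2pf_mbox;
+ *   bnx2x_add_tlv(&mbx->query[0].acquire, 0, BNX2X_VF_TLV_ACQUIRE,
+ *                 sizeof(struct vf_acquire_tlv));
+ *   mbx->query[0].acquire.vf_id = vf_id;   // plus the other acquire fields
+ *   bnx2x_add_tlv(&mbx->query[0].acquire, sizeof(struct vf_acquire_tlv),
+ *                 BNX2X_VF_TLV_LIST_END,
+ *                 sizeof(struct channel_list_end_tlv));
+ */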
+
+#endif /* BNX2X_VFPF_H */
diff --git a/src/dpdk22/drivers/net/bnx2x/ecore_fw_defs.h b/src/dpdk22/drivers/net/bnx2x/ecore_fw_defs.h
new file mode 100644
index 00000000..ab490efa
--- /dev/null
+++ b/src/dpdk22/drivers/net/bnx2x/ecore_fw_defs.h
@@ -0,0 +1,403 @@
+/*-
+ * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2014-2015 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ */
+
+#ifndef ECORE_FW_DEFS_H
+#define ECORE_FW_DEFS_H
+
+
+#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base)
+#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[147].base + ((assertListEntry) * IRO[147].m1))
+#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
+ (IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \
+ IRO[153].m2))
+#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
+ (IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \
+ IRO[154].m2))
+#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
+ (IRO[155].base + ((vfId) * IRO[155].m1))
+#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
+ (IRO[156].base + ((vfId) * IRO[156].m1))
+#define CSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[150].base + ((funcId) * IRO[150].m1))
+#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
+ (IRO[159].base + ((funcId) * IRO[159].m1))
+#define CSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[149].base + ((funcId) * IRO[149].m1))
+#define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \
+ (IRO[139].base + ((hcIndex) * IRO[139].m1) + ((sbId) * IRO[139].m2))
+#define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \
+ (IRO[138].base + (((hcIndex)>>2) * IRO[138].m1) + (((hcIndex)&3) \
+ * IRO[138].m2) + ((sbId) * IRO[138].m3))
+#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
+#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
+ (IRO[317].base + ((pfId) * IRO[317].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[318].base + ((pfId) * IRO[318].m1))
+#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
+ (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
+ (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
+ (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
+ (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
+ (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
+ (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
+#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
+ (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
+#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
+ (IRO[316].base + ((pfId) * IRO[316].m1))
+#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[308].base + ((pfId) * IRO[308].m1))
+#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[307].base + ((pfId) * IRO[307].m1))
+#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[306].base + ((pfId) * IRO[306].m1))
+#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[151].base + ((funcId) * IRO[151].m1))
+#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
+ (IRO[142].base + ((pfId) * IRO[142].m1))
+#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
+ (IRO[143].base + ((pfId) * IRO[143].m1))
+#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
+ (IRO[141].base + ((pfId) * IRO[141].m1))
+#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size)
+#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
+ (IRO[144].base + ((pfId) * IRO[144].m1))
+#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size)
+#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
+ (IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2))
+#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
+ (IRO[133].base + ((sbId) * IRO[133].m1))
+#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
+ (IRO[134].base + ((sbId) * IRO[134].m1))
+#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
+ (IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2))
+#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
+ (IRO[132].base + ((sbId) * IRO[132].m1))
+#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size)
+#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
+ (IRO[137].base + ((sbId) * IRO[137].m1))
+#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size)
+#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
+#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
+ (IRO[203].base + ((pfId) * IRO[203].m1))
+#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
+#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[101].base + ((assertListEntry) * IRO[101].m1))
+#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
+ (IRO[201].base + ((pfId) * IRO[201].m1))
+#define TSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[103].base + ((funcId) * IRO[103].m1))
+#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+ (IRO[272].base + ((pfId) * IRO[272].m1))
+#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[271].base + ((pfId) * IRO[271].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[270].base + ((pfId) * IRO[270].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[269].base + ((pfId) * IRO[269].m1))
+#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[268].base + ((pfId) * IRO[268].m1))
+#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
+ (IRO[278].base + ((pfId) * IRO[278].m1))
+#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+ (IRO[264].base + ((pfId) * IRO[264].m1))
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[265].base + ((pfId) * IRO[265].m1))
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[266].base + ((pfId) * IRO[266].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[267].base + ((pfId) * IRO[267].m1))
+#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
+ (IRO[202].base + ((pfId) * IRO[202].m1))
+#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[105].base + ((funcId) * IRO[105].m1))
+#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
+ (IRO[217].base + ((pfId) * IRO[217].m1))
+#define TSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[104].base + ((funcId) * IRO[104].m1))
+#define USTORM_AGG_DATA_OFFSET (IRO[206].base)
+#define USTORM_AGG_DATA_SIZE (IRO[206].size)
+#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
+#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[176].base + ((assertListEntry) * IRO[176].m1))
+#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
+ (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * IRO[205].m2))
+#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
+ (IRO[183].base + ((portId) * IRO[183].m1))
+#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
+ (IRO[319].base + ((pfId) * IRO[319].m1))
+#define USTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[178].base + ((funcId) * IRO[178].m1))
+#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
+ (IRO[283].base + ((pfId) * IRO[283].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[284].base + ((pfId) * IRO[284].m1))
+#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+ (IRO[288].base + ((pfId) * IRO[288].m1))
+#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
+ (IRO[285].base + ((pfId) * IRO[285].m1))
+#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[281].base + ((pfId) * IRO[281].m1))
+#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[280].base + ((pfId) * IRO[280].m1))
+#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[279].base + ((pfId) * IRO[279].m1))
+#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+ (IRO[282].base + ((pfId) * IRO[282].m1))
+#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
+ (IRO[286].base + ((pfId) * IRO[286].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[287].base + ((pfId) * IRO[287].m1))
+#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
+ (IRO[182].base + ((pfId) * IRO[182].m1))
+#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[180].base + ((funcId) * IRO[180].m1))
+#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
+ (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \
+ IRO[209].m2))
+#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
+ (IRO[210].base + ((qzoneId) * IRO[210].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[207].base)
+#define USTORM_TPA_BTR_SIZE (IRO[207].size)
+#define USTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[179].base + ((funcId) * IRO[179].m1))
+#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
+#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
+#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base)
+#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[50].base + ((assertListEntry) * IRO[50].m1))
+#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
+ (IRO[43].base + ((portId) * IRO[43].m1))
+#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
+ (IRO[45].base + ((pfId) * IRO[45].m1))
+#define XSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[47].base + ((funcId) * IRO[47].m1))
+#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
+ (IRO[296].base + ((pfId) * IRO[296].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
+ (IRO[299].base + ((pfId) * IRO[299].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
+ (IRO[300].base + ((pfId) * IRO[300].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+ (IRO[301].base + ((pfId) * IRO[301].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+ (IRO[302].base + ((pfId) * IRO[302].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+ (IRO[303].base + ((pfId) * IRO[303].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+ (IRO[304].base + ((pfId) * IRO[304].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+ (IRO[305].base + ((pfId) * IRO[305].m1))
+#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[295].base + ((pfId) * IRO[295].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[294].base + ((pfId) * IRO[294].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[293].base + ((pfId) * IRO[293].m1))
+#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+ (IRO[298].base + ((pfId) * IRO[298].m1))
+#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
+ (IRO[297].base + ((pfId) * IRO[297].m1))
+#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
+ (IRO[292].base + ((pfId) * IRO[292].m1))
+#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+ (IRO[291].base + ((pfId) * IRO[291].m1))
+#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
+ (IRO[290].base + ((pfId) * IRO[290].m1))
+#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
+ (IRO[289].base + ((pfId) * IRO[289].m1))
+#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
+ (IRO[44].base + ((pfId) * IRO[44].m1))
+#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[49].base + ((funcId) * IRO[49].m1))
+#define XSTORM_SPQ_DATA_OFFSET(funcId) \
+ (IRO[32].base + ((funcId) * IRO[32].m1))
+#define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
+#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \
+ (IRO[30].base + ((funcId) * IRO[30].m1))
+#define XSTORM_SPQ_PROD_OFFSET(funcId) \
+ (IRO[31].base + ((funcId) * IRO[31].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
+ (IRO[211].base + ((portId) * IRO[211].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
+ (IRO[212].base + ((portId) * IRO[212].m1))
+#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
+ (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \
+ IRO[214].m2))
+#define XSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[48].base + ((funcId) * IRO[48].m1))
+#define COMMON_ASM_INVALID_ASSERT_OPCODE (IRO[7].base)
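+
+/*
+ * Editorial note: each offset macro above resolves a per-function or
+ * per-queue firmware RAM address as base + index * stride, where the
+ * IRO[] table of {base, m1, m2, m3, size} entries is provided by the
+ * firmware image. A sketch of the assumed entry layout (the
+ * authoritative definition lives in the ecore HSI headers):
+ *
+ *   struct iro {
+ *           uint32_t base;   // RAM base offset
+ *           uint16_t m1;     // stride for the first index
+ *           uint16_t m2;     // stride for the second index
+ *           uint16_t m3;     // stride for the third index
+ *           uint16_t size;   // region size
+ *   };
+ */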
+
+
+/* Ethernet Ring parameters */
+#define X_ETH_LOCAL_RING_SIZE 13
+#define FIRST_BD_IN_PKT 0
+#define PARSE_BD_INDEX 1
+#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
+
+/* Rx ring params */
+#define U_ETH_LOCAL_BD_RING_SIZE 8
+#define U_ETH_SGL_SIZE 8
+ /* The fw will pad the buffer with this value, so the IP header \
+ will be aligned to 4 bytes */
+#define IP_HEADER_ALIGNMENT_PADDING 2
+
+#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
+#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
+
+#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
+#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
+
+#define U_ETH_UNDEFINED_Q 0xFF
+
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY 10
+#define ETH_NUM_OF_RSS_ENGINES_E2 72
+
+#define FILTER_RULES_COUNT 16
+#define MULTICAST_RULES_COUNT 16
+#define CLASSIFY_RULES_COUNT 16
+
+/* The CRC32 seed used for the hash (reduction) of multicast addresses */
+#define ETH_CRC32_HASH_SEED 0x00000000
+
+#define ETH_CRC32_HASH_BIT_SIZE (8)
+#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1)
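+
+/*
+ * Editorial usage sketch (illustrative; calc_crc32() is a hypothetical
+ * helper): a multicast MAC address is reduced to one of
+ * 2^ETH_CRC32_HASH_BIT_SIZE = 256 bins by hashing it with the seed above
+ * and masking the low bits of the result.
+ *
+ *   uint32_t crc = calc_crc32(ETH_CRC32_HASH_SEED, mac, 6);
+ *   uint32_t bin = crc & ETH_CRC32_HASH_MASK;   // 0..255
+ */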
+
+/* Maximal L2 clients supported */
+#define ETH_MAX_RX_CLIENTS_E1H 28
+#define ETH_MAX_RX_CLIENTS_E2 152
+
+/* Maximal statistics client Ids */
+#define MAX_STAT_COUNTER_ID_E1H 56
+#define MAX_STAT_COUNTER_ID_E2 140
+
+#define MAX_MAC_CREDIT_E1H 256 /* Per Chip */
+#define MAX_MAC_CREDIT_E2 272 /* Per Path */
+#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
+
+
+/* Maximal aggregation queues supported */
+#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
+
+
+#define ETH_NUM_OF_MCAST_BINS 256
+#define ETH_NUM_OF_MCAST_ENGINES_E2 72
+
+#define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \
+ (ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA)
+
+#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
+
+
+/* This file defines HSI constants common to all microcode flows */
+
+/* offset in bits of protocol in the state context parameter */
+#define PROTOCOL_STATE_BIT_OFFSET 6
+
+#define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+
+/* microcode fixed page size 4K (chains and ring segments) */
+#define MC_PAGE_SIZE 4096
+
+/* Number of indices per slow-path SB */
+#define HC_SP_SB_MAX_INDICES 16 /* The Maximum of all */
+
+/* Number of indices per SB */
+#define HC_SB_MAX_INDICES_E1X 8 /* Multiple of 4 */
+#define HC_SB_MAX_INDICES_E2 8 /* Multiple of 4 */
+
+/* Number of SB */
+#define HC_SB_MAX_SB_E1X 32
+#define HC_SB_MAX_SB_E2 136 /* include PF */
+
+/* ID of slow path status block */
+#define HC_SP_SB_ID 0xde
+
+/* Num of State machines */
+#define HC_SB_MAX_SM 2 /* Fixed */
+
+/* Num of dynamic indices */
+#define HC_SB_MAX_DYNAMIC_INDICES 4 /* 0..3 fixed */
+
+/* max number of slow path commands per port */
+#define MAX_RAMRODS_PER_PORT 8
+
+
+/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
+
+/* chip timers frequency constants */
+#define TIMERS_TICK_SIZE_CHIP (1e-3)
+
+/* used in toe: TsRecentAge, MaxRt, and temporarily RTT */
+#define TSEMI_CLK1_RESUL_CHIP (1e-3)
+
+/* temporarily used for RTT */
+#define XSEMI_CLK1_RESUL_CHIP (1e-3)
+
+/* used for Host Coalescing */
+#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
+
+/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
+
+#define XSTORM_IP_ID_ROLL_HALF 0x8000
+#define XSTORM_IP_ID_ROLL_ALL 0
+
+/* assert list: number of entries */
+#define FW_LOG_LIST_SIZE 50
+
+#define NUM_OF_SAFC_BITS 16
+#define MAX_COS_NUMBER 4
+#define MAX_TRAFFIC_TYPES 8
+#define MAX_PFC_PRIORITIES 8
+
+ /* used by array traffic_type_to_priority[] to mark a traffic type \
+ that is not mapped to a priority */
+#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
+
+/* Event Ring definitions */
+#define C_ERES_PER_PAGE \
+ (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
+#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
+
+/* number of statistics commands */
+#define STATS_QUERY_CMD_COUNT 16
+
+/* niv list table size */
+#define AFEX_LIST_TABLE_SIZE 4096
+
+/* invalid VNIC Id. used in VNIC classification */
+#define INVALID_VNIC_ID 0xFF
+
+/* used for indicating an undefined RAM offset in the IRO arrays */
+#define UNDEF_IRO 0x80000000
+
+/* used for defining the amount of FCoE tasks supported for PF */
+#define MAX_FCOE_FUNCS_PER_ENGINE 2
+#define MAX_NUM_FCOE_TASKS_PER_ENGINE \
+ 4096 /* Each port can have at most 1 function */
+
+
+#endif /* ECORE_FW_DEFS_H */
diff --git a/src/dpdk22/drivers/net/bnx2x/ecore_hsi.h b/src/dpdk22/drivers/net/bnx2x/ecore_hsi.h
new file mode 100644
index 00000000..5808e1ae
--- /dev/null
+++ b/src/dpdk22/drivers/net/bnx2x/ecore_hsi.h
@@ -0,0 +1,6330 @@
+/*-
+ * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2014-2015 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ */
+
+#ifndef ECORE_HSI_H
+#define ECORE_HSI_H
+
+#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
+
+struct license_key {
+ uint32_t reserved[6];
+
+ uint32_t max_iscsi_conn;
+#define LICENSE_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF
+#define LICENSE_MAX_ISCSI_TRGT_CONN_SHIFT 0
+#define LICENSE_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000
+#define LICENSE_MAX_ISCSI_INIT_CONN_SHIFT 16
+
+ uint32_t reserved_a;
+
+ uint32_t max_fcoe_conn;
+#define LICENSE_MAX_FCOE_TRGT_CONN_MASK 0xFFFF
+#define LICENSE_MAX_FCOE_TRGT_CONN_SHIFT 0
+#define LICENSE_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000
+#define LICENSE_MAX_FCOE_INIT_CONN_SHIFT 16
+
+ uint32_t reserved_b[4];
+};
+
+typedef struct license_key license_key_t;
+
+
+/****************************************************************************
+ * Shared HW configuration *
+ ****************************************************************************/
+#define PIN_CFG_NA 0x00000000
+#define PIN_CFG_GPIO0_P0 0x00000001
+#define PIN_CFG_GPIO1_P0 0x00000002
+#define PIN_CFG_GPIO2_P0 0x00000003
+#define PIN_CFG_GPIO3_P0 0x00000004
+#define PIN_CFG_GPIO0_P1 0x00000005
+#define PIN_CFG_GPIO1_P1 0x00000006
+#define PIN_CFG_GPIO2_P1 0x00000007
+#define PIN_CFG_GPIO3_P1 0x00000008
+#define PIN_CFG_EPIO0 0x00000009
+#define PIN_CFG_EPIO1 0x0000000a
+#define PIN_CFG_EPIO2 0x0000000b
+#define PIN_CFG_EPIO3 0x0000000c
+#define PIN_CFG_EPIO4 0x0000000d
+#define PIN_CFG_EPIO5 0x0000000e
+#define PIN_CFG_EPIO6 0x0000000f
+#define PIN_CFG_EPIO7 0x00000010
+#define PIN_CFG_EPIO8 0x00000011
+#define PIN_CFG_EPIO9 0x00000012
+#define PIN_CFG_EPIO10 0x00000013
+#define PIN_CFG_EPIO11 0x00000014
+#define PIN_CFG_EPIO12 0x00000015
+#define PIN_CFG_EPIO13 0x00000016
+#define PIN_CFG_EPIO14 0x00000017
+#define PIN_CFG_EPIO15 0x00000018
+#define PIN_CFG_EPIO16 0x00000019
+#define PIN_CFG_EPIO17 0x0000001a
+#define PIN_CFG_EPIO18 0x0000001b
+#define PIN_CFG_EPIO19 0x0000001c
+#define PIN_CFG_EPIO20 0x0000001d
+#define PIN_CFG_EPIO21 0x0000001e
+#define PIN_CFG_EPIO22 0x0000001f
+#define PIN_CFG_EPIO23 0x00000020
+#define PIN_CFG_EPIO24 0x00000021
+#define PIN_CFG_EPIO25 0x00000022
+#define PIN_CFG_EPIO26 0x00000023
+#define PIN_CFG_EPIO27 0x00000024
+#define PIN_CFG_EPIO28 0x00000025
+#define PIN_CFG_EPIO29 0x00000026
+#define PIN_CFG_EPIO30 0x00000027
+#define PIN_CFG_EPIO31 0x00000028
+
+/* EPIO definition */
+#define EPIO_CFG_NA 0x00000000
+#define EPIO_CFG_EPIO0 0x00000001
+#define EPIO_CFG_EPIO1 0x00000002
+#define EPIO_CFG_EPIO2 0x00000003
+#define EPIO_CFG_EPIO3 0x00000004
+#define EPIO_CFG_EPIO4 0x00000005
+#define EPIO_CFG_EPIO5 0x00000006
+#define EPIO_CFG_EPIO6 0x00000007
+#define EPIO_CFG_EPIO7 0x00000008
+#define EPIO_CFG_EPIO8 0x00000009
+#define EPIO_CFG_EPIO9 0x0000000a
+#define EPIO_CFG_EPIO10 0x0000000b
+#define EPIO_CFG_EPIO11 0x0000000c
+#define EPIO_CFG_EPIO12 0x0000000d
+#define EPIO_CFG_EPIO13 0x0000000e
+#define EPIO_CFG_EPIO14 0x0000000f
+#define EPIO_CFG_EPIO15 0x00000010
+#define EPIO_CFG_EPIO16 0x00000011
+#define EPIO_CFG_EPIO17 0x00000012
+#define EPIO_CFG_EPIO18 0x00000013
+#define EPIO_CFG_EPIO19 0x00000014
+#define EPIO_CFG_EPIO20 0x00000015
+#define EPIO_CFG_EPIO21 0x00000016
+#define EPIO_CFG_EPIO22 0x00000017
+#define EPIO_CFG_EPIO23 0x00000018
+#define EPIO_CFG_EPIO24 0x00000019
+#define EPIO_CFG_EPIO25 0x0000001a
+#define EPIO_CFG_EPIO26 0x0000001b
+#define EPIO_CFG_EPIO27 0x0000001c
+#define EPIO_CFG_EPIO28 0x0000001d
+#define EPIO_CFG_EPIO29 0x0000001e
+#define EPIO_CFG_EPIO30 0x0000001f
+#define EPIO_CFG_EPIO31 0x00000020
+
+struct mac_addr {
+ uint32_t upper;
+ uint32_t lower;
+};
+
+
+struct shared_hw_cfg { /* NVRAM Offset */
+ /* Up to 16 bytes of NULL-terminated string */
+ uint8_t part_num[16]; /* 0x104 */
+
+ uint32_t config; /* 0x114 */
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001
+
+ #define SHARED_HW_CFG_PORT_SWAP 0x00000004
+
+ #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008
+
+ #define SHARED_HW_CFG_PCIE_GEN3_DISABLED 0x00000000
+ #define SHARED_HW_CFG_PCIE_GEN3_ENABLED 0x00000010
+
+ #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700
+ #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8
+ /* Whichever MFW is found in NVM
+ (if multiple are found, the priority order is: NC-SI, UMP, IPMI) */
+ #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000
+ #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100
+ #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200
+ #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300
+ /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI
+ (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
+ #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400
+ /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI
+ (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
+ #define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500
+ /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP
+ (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
+ #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600
+
+ /* Adjust the PCIe G2 Tx amplitude driver for all Tx lanes. For
+ backwards compatibility, a value of 0 disables this feature,
+ which means that although 0 is a valid value, it cannot
+ actually be configured. */
+ #define SHARED_HW_CFG_G2_TX_DRIVE_MASK 0x0000F000
+ #define SHARED_HW_CFG_G2_TX_DRIVE_SHIFT 12
+
+ #define SHARED_HW_CFG_LED_MODE_MASK 0x000F0000
+ #define SHARED_HW_CFG_LED_MODE_SHIFT 16
+ #define SHARED_HW_CFG_LED_MAC1 0x00000000
+ #define SHARED_HW_CFG_LED_PHY1 0x00010000
+ #define SHARED_HW_CFG_LED_PHY2 0x00020000
+ #define SHARED_HW_CFG_LED_PHY3 0x00030000
+ #define SHARED_HW_CFG_LED_MAC2 0x00040000
+ #define SHARED_HW_CFG_LED_PHY4 0x00050000
+ #define SHARED_HW_CFG_LED_PHY5 0x00060000
+ #define SHARED_HW_CFG_LED_PHY6 0x00070000
+ #define SHARED_HW_CFG_LED_MAC3 0x00080000
+ #define SHARED_HW_CFG_LED_PHY7 0x00090000
+ #define SHARED_HW_CFG_LED_PHY9 0x000a0000
+ #define SHARED_HW_CFG_LED_PHY11 0x000b0000
+ #define SHARED_HW_CFG_LED_MAC4 0x000c0000
+ #define SHARED_HW_CFG_LED_PHY8 0x000d0000
+ #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
+ #define SHARED_HW_CFG_LED_EXTPHY2 0x000f0000
+
+ #define SHARED_HW_CFG_SRIOV_MASK 0x40000000
+ #define SHARED_HW_CFG_SRIOV_DISABLED 0x00000000
+ #define SHARED_HW_CFG_SRIOV_ENABLED 0x40000000
+
+ #define SHARED_HW_CFG_ATC_MASK 0x80000000
+ #define SHARED_HW_CFG_ATC_DISABLED 0x00000000
+ #define SHARED_HW_CFG_ATC_ENABLED 0x80000000
+
+ uint32_t config2; /* 0x118 */
+
+ #define SHARED_HW_CFG_PCIE_GEN2_MASK 0x00000100
+ #define SHARED_HW_CFG_PCIE_GEN2_SHIFT 8
+ #define SHARED_HW_CFG_PCIE_GEN2_DISABLED 0x00000000
+ #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100
+
+ #define SHARED_HW_CFG_SMBUS_TIMING_MASK 0x00001000
+ #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000
+ #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000
+
+ #define SHARED_HW_CFG_HIDE_PORT1 0x00002000
+
+
+ /* Output low when PERST is asserted */
+ #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK 0x00008000
+ #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED 0x00000000
+ #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED 0x00008000
+
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK 0x00070000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT 16
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW 0x00000000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB 0x00010000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB 0x00020000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB 0x00030000
+
+ /* The fan failure mechanism is usually related to the PHY type
+ since the power consumption of the board is determined by the PHY.
+ Currently, a fan is required for most designs with SFX7101, BNX2X8727
+ and BNX2X8481. If a fan is not required for a board which uses one
+ of those PHYs, this field should be set to "Disabled". If a fan is
+ required for a different PHY type, this option should be set to
+ "Enabled". The fan failure indication is expected on SPIO5 */
+ #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000
+ #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19
+ #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000
+ #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000
+ #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000
+
+ /* ASPM Power Management support */
+ #define SHARED_HW_CFG_ASPM_SUPPORT_MASK 0x00600000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT 21
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED 0x00000000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED 0x00200000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED 0x00400000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED 0x00600000
+
+ /* The value of PM_TL_IGNORE_REQS (bit0) in PCI register
+ tl_control_0 (register 0x2800) */
+ #define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK 0x00800000
+ #define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED 0x00000000
+ #define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED 0x00800000
+
+
+ /* Set the MDC/MDIO access for the first external phy */
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000
+
+ /* Set the MDC/MDIO access for the second external phy */
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000
+
+ /* Max number of PF MSIX vectors */
+ uint32_t config_3; /* 0x11C */
+ #define SHARED_HW_CFG_PF_MSIX_MAX_NUM_MASK 0x0000007F
+ #define SHARED_HW_CFG_PF_MSIX_MAX_NUM_SHIFT 0
+
+ uint32_t ump_nc_si_config; /* 0x120 */
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002
+
+ /* Reserved bits: 226-230 */
+
+ /* The output pin template BSC_SEL which selects the I2C for this
+ port in the I2C Mux */
+ uint32_t board; /* 0x124 */
+ #define SHARED_HW_CFG_E3_I2C_MUX0_MASK 0x0000003F
+ #define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT 0
+
+ #define SHARED_HW_CFG_E3_I2C_MUX1_MASK 0x00000FC0
+ #define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT 6
+ /* Use the PIN_CFG_XXX defines on top */
+ #define SHARED_HW_CFG_BOARD_REV_MASK 0x00FF0000
+ #define SHARED_HW_CFG_BOARD_REV_SHIFT 16
+
+ #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0F000000
+ #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24
+
+ #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xF0000000
+ #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28
+
+ uint32_t wc_lane_config; /* 0x128 */
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT 0
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_32103210 0x00001b1b
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_32100123 0x00001be4
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_31200213 0x000027d8
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_02133120 0x0000d827
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_01233210 0x0000e41b
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_01230123 0x0000e4e4
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8
+
+ /* TX lane Polarity swap */
+ #define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED 0x00010000
+ #define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED 0x00020000
+ #define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED 0x00040000
+ #define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED 0x00080000
+ /* TX lane Polarity swap */
+ #define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED 0x00100000
+ #define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED 0x00200000
+ #define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED 0x00400000
+ #define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED 0x00800000
+
+ /* Selects the port layout of the board */
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK 0x0F000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT 24
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01 0x00000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10 0x01000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123 0x02000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032 0x03000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301 0x04000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210 0x05000000
+};
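+
+/*
+ * Editorial access sketch (illustrative; cfg is a hypothetical value read
+ * from the shared_hw_cfg.config word): every multi-bit field in these
+ * NVRAM structures is decoded with its matching _MASK/_SHIFT pair.
+ *
+ *   uint32_t led_mode = (cfg & SHARED_HW_CFG_LED_MODE_MASK) >>
+ *                       SHARED_HW_CFG_LED_MODE_SHIFT;
+ */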
+
+
+/****************************************************************************
+ * Port HW configuration *
+ ****************************************************************************/
+struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
+
+ uint32_t pci_id;
+ #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000FFFF
+ #define PORT_HW_CFG_PCI_DEVICE_ID_SHIFT 0
+
+ #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xFFFF0000
+ #define PORT_HW_CFG_PCI_VENDOR_ID_SHIFT 16
+
+ uint32_t pci_sub_id;
+ #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000FFFF
+ #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_SHIFT 0
+
+ #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xFFFF0000
+ #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_SHIFT 16
+
+ uint32_t power_dissipated;
+ #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000FF
+ #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0
+ #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000FF00
+ #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8
+ #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00FF0000
+ #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16
+ #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xFF000000
+ #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24
+
+ uint32_t power_consumed;
+ #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000FF
+ #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0
+ #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000FF00
+ #define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8
+ #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00FF0000
+ #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16
+ #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xFF000000
+ #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24
+
+ uint32_t mac_upper;
+ uint32_t mac_lower; /* 0x140 */
+ #define PORT_HW_CFG_UPPERMAC_MASK 0x0000FFFF
+ #define PORT_HW_CFG_UPPERMAC_SHIFT 0
+
+
+ uint32_t iscsi_mac_upper; /* Upper 16 bits are always zeroes */
+ uint32_t iscsi_mac_lower;
+
+ uint32_t rdma_mac_upper; /* Upper 16 bits are always zeroes */
+ uint32_t rdma_mac_lower;
+
+ uint32_t serdes_config;
+ #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000FFFF
+ #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0
+
+ #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xFFFF0000
+ #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
+
+
+ /* Default values: 2P-64, 4P-32 */
+ uint32_t reserved;
+
+ uint32_t vf_config; /* 0x15C */
+ #define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK 0xFFFF0000
+ #define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT 16
+
+ uint32_t mf_pci_id; /* 0x160 */
+ #define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK 0x0000FFFF
+ #define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT 0
+
+ /* Controls the TX laser of the SFP+ module */
+ uint32_t sfp_ctrl; /* 0x164 */
+ #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
+ #define PORT_HW_CFG_TX_LASER_SHIFT 0
+ #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
+ #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
+ #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
+ #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
+ #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
+
+ /* Controls the fault module LED of the SFP+ */
+ #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
+ #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
+ #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
+
+ /* The output pin TX_DIS that controls the TX laser of the SFP+
+ module. Use the PIN_CFG_XXX defines on top */
+ uint32_t e3_sfp_ctrl; /* 0x168 */
+ #define PORT_HW_CFG_E3_TX_LASER_MASK 0x000000FF
+ #define PORT_HW_CFG_E3_TX_LASER_SHIFT 0
+
+ /* The output pin for SFPP_TYPE which turns on the Fault module LED */
+ #define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK 0x0000FF00
+ #define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT 8
+
+ /* The input pin MOD_ABS that indicates whether an SFP+ module is
+ present or not. Use the PIN_CFG_XXX defines on top */
+ #define PORT_HW_CFG_E3_MOD_ABS_MASK 0x00FF0000
+ #define PORT_HW_CFG_E3_MOD_ABS_SHIFT 16
+
+ /* The output pin PWRDIS_SFP_X which disables the power of the SFP+
+ module. Use the PIN_CFG_XXX defines on top */
+ #define PORT_HW_CFG_E3_PWR_DIS_MASK 0xFF000000
+ #define PORT_HW_CFG_E3_PWR_DIS_SHIFT 24
+
+ /*
+ * The input pin which signals module transmit fault. Use the
+ * PIN_CFG_XXX defines on top
+ */
+ uint32_t e3_cmn_pin_cfg; /* 0x16C */
+ #define PORT_HW_CFG_E3_TX_FAULT_MASK 0x000000FF
+ #define PORT_HW_CFG_E3_TX_FAULT_SHIFT 0
+
+ /* The output pin which resets the PHY. Use the PIN_CFG_XXX defines on
+ top */
+ #define PORT_HW_CFG_E3_PHY_RESET_MASK 0x0000FF00
+ #define PORT_HW_CFG_E3_PHY_RESET_SHIFT 8
+
+ /*
+ * The output pin which powers down the PHY. Use the PIN_CFG_XXX
+ * defines on top
+ */
+ #define PORT_HW_CFG_E3_PWR_DOWN_MASK 0x00FF0000
+ #define PORT_HW_CFG_E3_PWR_DOWN_SHIFT 16
+
+ /* The output pin values BSC_SEL which selects the I2C for this port
+ in the I2C Mux */
+ #define PORT_HW_CFG_E3_I2C_MUX0_MASK 0x01000000
+ #define PORT_HW_CFG_E3_I2C_MUX1_MASK 0x02000000
+
+
+ /*
+ * The input pin I_FAULT which indicates that an over-current has occurred.
+ * Use the PIN_CFG_XXX defines on top
+ */
+ uint32_t e3_cmn_pin_cfg1; /* 0x170 */
+ #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF
+ #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0
+
+ /* pause on host ring */
+ uint32_t generic_features; /* 0x174 */
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_MASK 0x00000001
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_SHIFT 0
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001
+
+ /* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2
+ * LOM recommended and tested value is 0xBEB2. Using a different
+ * value means using a value not tested by BRCM
+ */
+ uint32_t sfi_tap_values; /* 0x178 */
+ #define PORT_HW_CFG_TX_EQUALIZATION_MASK 0x0000FFFF
+ #define PORT_HW_CFG_TX_EQUALIZATION_SHIFT 0
+
+ /* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested
+ * value is 0x2. LOM recommended and tested value is 0x2. Using a
+ * different value means using a value not tested by BRCM
+ */
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16
+
+ uint32_t reserved0[5]; /* 0x17c */
+
+ uint32_t aeu_int_mask; /* 0x190 */
+
+ uint32_t media_type; /* 0x194 */
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK 0x000000FF
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT 0
+
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK 0x0000FF00
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT 8
+
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK 0x00FF0000
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT 16
+
+ /* 4 times 16 bits for all 4 lanes. In case an external PHY is present
+ (not direct mode), those values will not take effect on the 4 XGXS
+ lanes. For some external PHYs (such as 8706 and 8726) the values
+ will be used to configure the external PHY; in those cases, not
+ all 4 values are needed. */
+ uint16_t xgxs_config_rx[4]; /* 0x198 */
+ uint16_t xgxs_config_tx[4]; /* 0x1A0 */
+
+
+ /* For storing FCOE mac on shared memory */
+ uint32_t fcoe_fip_mac_upper;
+ #define PORT_HW_CFG_FCOE_UPPERMAC_MASK 0x0000ffff
+ #define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT 0
+ uint32_t fcoe_fip_mac_lower;
+
+ uint32_t fcoe_wwn_port_name_upper;
+ uint32_t fcoe_wwn_port_name_lower;
+
+ uint32_t fcoe_wwn_node_name_upper;
+ uint32_t fcoe_wwn_node_name_lower;
+
+ /* wwpn for npiv enabled */
+ uint32_t wwpn_for_npiv_config; /* 0x1C0 */
+ #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_MASK 0x00000001
+ #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_SHIFT 0
+ #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_DISABLED 0x00000000
+ #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_ENABLED 0x00000001
+
+ /* wwpn for npiv valid addresses */
+ uint32_t wwpn_for_npiv_valid_addresses; /* 0x1C4 */
+ #define PORT_HW_CFG_WWPN_FOR_NPIV_ADDRESS_BITMAP_MASK 0x0000FFFF
+ #define PORT_HW_CFG_WWPN_FOR_NPIV_ADDRESS_BITMAP_SHIFT 0
+
+ struct mac_addr wwpn_for_niv_macs[16];
+
+ /* Reserved bits: 2272-2336. For storing FCOE mac on shared memory */
+ uint32_t Reserved1[14];
+
+ uint32_t pf_allocation; /* 0x280 */
+ /* number of vfs per PF, if 0 - sriov disabled */
+ #define PORT_HW_CFG_NUMBER_OF_VFS_MASK 0x000000FF
+ #define PORT_HW_CFG_NUMBER_OF_VFS_SHIFT 0
+
+ /* Enable RJ45 magjack pair swapping on 10GBase-T PHY (0=default),
+ 84833 only */
+ uint32_t xgbt_phy_cfg; /* 0x284 */
+ #define PORT_HW_CFG_RJ45_PAIR_SWAP_MASK 0x000000FF
+ #define PORT_HW_CFG_RJ45_PAIR_SWAP_SHIFT 0
+
+ uint32_t default_cfg; /* 0x288 */
+ #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
+ #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
+ #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
+ #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
+ #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
+
+ #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
+ #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
+ #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
+ #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
+ #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
+
+ #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
+ #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
+ #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
+ #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
+ #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
+
+ #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
+ #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
+ #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
+ #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
+ #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
+
+ /* When the KR link must be forced (which is not KR-compliant),
+ this parameter determines the trigger for it. When GPIO is
+ selected, a low input will force the speed. Currently the
+ default speed is 1G. In the future, this may be widened to select
+ the forced speed with another parameter. Note that when force-1G
+ is enabled, it overrides option 56: Link Speed option. */
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
+ /* Determines which GPIO (if any) is used to reset the external phy */
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
+
+ /* Enable BAM on KR */
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
+
+ /* Enable Common Mode Sense */
+ #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
+ #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
+ #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
+ #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
+
+ /* Determines the Serdes electrical interface */
+ #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000
+ #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24
+ #define PORT_HW_CFG_NET_SERDES_IF_SGMII 0x00000000
+ #define PORT_HW_CFG_NET_SERDES_IF_XFI 0x01000000
+ #define PORT_HW_CFG_NET_SERDES_IF_SFI 0x02000000
+ #define PORT_HW_CFG_NET_SERDES_IF_KR 0x03000000
+ #define PORT_HW_CFG_NET_SERDES_IF_DXGXS 0x04000000
+ #define PORT_HW_CFG_NET_SERDES_IF_KR2 0x05000000
+
+ /* SFP+ main TAP and post TAP levels */
+ #define PORT_HW_CFG_TAP_LEVELS_MASK 0x70000000
+ #define PORT_HW_CFG_TAP_LEVELS_SHIFT 28
+ #define PORT_HW_CFG_TAP_LEVELS_POST_15_MAIN_43 0x00000000
+ #define PORT_HW_CFG_TAP_LEVELS_POST_14_MAIN_44 0x10000000
+ #define PORT_HW_CFG_TAP_LEVELS_POST_13_MAIN_45 0x20000000
+ #define PORT_HW_CFG_TAP_LEVELS_POST_12_MAIN_46 0x30000000
+ #define PORT_HW_CFG_TAP_LEVELS_POST_11_MAIN_47 0x40000000
+ #define PORT_HW_CFG_TAP_LEVELS_POST_10_MAIN_48 0x50000000
+
+ uint32_t speed_capability_mask2; /* 0x28C */
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_HALF 0x00000002
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_HALF 0x00000004
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_5G 0x00000020
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G 0x00000080
+
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_HALF 0x00020000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_HALF 0x00040000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_5G 0x00200000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G 0x00800000
+
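+ /* A usage sketch (illustrative; `cfg` stands for a value read from
+ speed_capability_mask2): unlike the _MASK/_SHIFT fields, each
+ capability here is a single flag, so a plain bit test suffices:
+ if (cfg & PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G)
+ ... 10G is supported in the D0 power state ...
+ */
+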
+
+ /* In the case where two media types (e.g. copper and fiber) are
+ present and electrically active at the same time, PHY Selection
+ will determine which of the two PHYs will be designated as the
+ Active PHY and used for a connection to the network. */
+ uint32_t multi_phy_config; /* 0x290 */
+ #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007
+ #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0
+ #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000
+ #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001
+ #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002
+ #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
+ #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
+
+ /* When enabled, all second phy nvram parameters will be swapped
+ with the first phy parameters */
+ #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008
+ #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3
+ #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000
+ #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008
+
+
+ /* Address of the second external phy */
+ uint32_t external_phy_config2; /* 0x294 */
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0
+
+ /* The second XGXS external PHY type */
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8071 0x00000100
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8072 0x00000200
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8073 0x00000300
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8705 0x00000400
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8706 0x00000500
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8726 0x00000600
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8481 0x00000700
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8727 0x00000900
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8727_NOC 0x00000a00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X84823 0x00000b00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54640 0x00000c00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X84833 0x00000d00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE 0x00000e00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8722 0x00000f00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54616 0x00001000
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X84834 0x00001100
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
+
+
+ /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as
+ 8706, 8726 and 8727) not all 4 values are needed. */
+ uint16_t xgxs_config2_rx[4]; /* 0x296 */
+ uint16_t xgxs_config2_tx[4]; /* 0x2A0 */
+
+ uint32_t lane_config;
+ #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF
+ #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0
+ /* AN and forced */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b
+ /* forced only */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4
+ /* forced only */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8
+ /* forced only */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4
+ #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF
+ #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
+ #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00
+ #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8
+ #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000C000
+ #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14
+
+ /* Indicate whether to swap the external phy polarity */
+ #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000
+ #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000
+ #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000
+
+
+ uint32_t external_phy_config;
+ #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000FF
+ #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0
+
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000FF00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8071 0x00000100
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8072 0x00000200
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073 0x00000300
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8705 0x00000400
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8706 0x00000500
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726 0x00000600
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8481 0x00000700
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727 0x00000900
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727_NOC 0x00000a00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823 0x00000b00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54640 0x00000c00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833 0x00000d00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE 0x00000e00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722 0x00000f00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54616 0x00001000
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834 0x00001100
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
+
+ #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00FF0000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16
+
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xFF000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BNX2X5482 0x01000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD 0x02000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000
+
+ uint32_t speed_capability_mask;
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000FFFF
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G 0x00000080
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000
+
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xFFFF0000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G 0x00800000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000
+
+ /* A place to hold the original MAC address as a backup */
+ uint32_t backup_mac_upper; /* 0x2B4 */
+ uint32_t backup_mac_lower; /* 0x2B8 */
+
+};
+
+
+/****************************************************************************
+ * Shared Feature configuration *
+ ****************************************************************************/
+struct shared_feat_cfg { /* NVRAM Offset */
+
+ uint32_t config; /* 0x450 */
+ #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001
+
+ /* Use NVRAM values instead of HW default values */
+ #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \
+ 0x00000002
+ #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \
+ 0x00000000
+ #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \
+ 0x00000002
+
+ #define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK 0x00000008
+ #define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO 0x00000000
+ #define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM 0x00000008
+
+ #define SHARED_FEAT_CFG_NCSI_ID_MASK 0x00000030
+ #define SHARED_FEAT_CFG_NCSI_ID_SHIFT 4
+
+ /* Override the OTP back to single function mode. When using GPIO,
+ high means SF only; low means according to CLP configuration */
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400
+
+ /* Act as if the FCoE license is invalid */
+ #define SHARED_FEAT_CFG_PREVENT_FCOE 0x00001000
+
+ /* Force FLR capability to all ports */
+ #define SHARED_FEAT_CFG_FORCE_FLR_CAPABILITY 0x00002000
+
+ /* Act as if the iSCSI license is invalid */
+ #define SHARED_FEAT_CFG_PREVENT_ISCSI_MASK 0x00004000
+ #define SHARED_FEAT_CFG_PREVENT_ISCSI_SHIFT 14
+ #define SHARED_FEAT_CFG_PREVENT_ISCSI_DISABLED 0x00000000
+ #define SHARED_FEAT_CFG_PREVENT_ISCSI_ENABLED 0x00004000
+
+ /* The interval in seconds between sending LLDP packets. Set to zero
+ to disable the feature */
+ #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK 0x00FF0000
+ #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT 16
+
+ /* The assigned device type ID for LLDP usage */
+ #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK 0xFF000000
+ #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT 24
+
+};
+
+
+/****************************************************************************
+ * Port Feature configuration *
+ ****************************************************************************/
+struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
+
+ uint32_t config;
+ #define PORT_FEAT_CFG_BAR1_SIZE_MASK 0x0000000F
+ #define PORT_FEAT_CFG_BAR1_SIZE_SHIFT 0
+ #define PORT_FEAT_CFG_BAR1_SIZE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_BAR1_SIZE_64K 0x00000001
+ #define PORT_FEAT_CFG_BAR1_SIZE_128K 0x00000002
+ #define PORT_FEAT_CFG_BAR1_SIZE_256K 0x00000003
+ #define PORT_FEAT_CFG_BAR1_SIZE_512K 0x00000004
+ #define PORT_FEAT_CFG_BAR1_SIZE_1M 0x00000005
+ #define PORT_FEAT_CFG_BAR1_SIZE_2M 0x00000006
+ #define PORT_FEAT_CFG_BAR1_SIZE_4M 0x00000007
+ #define PORT_FEAT_CFG_BAR1_SIZE_8M 0x00000008
+ #define PORT_FEAT_CFG_BAR1_SIZE_16M 0x00000009
+ #define PORT_FEAT_CFG_BAR1_SIZE_32M 0x0000000a
+ #define PORT_FEAT_CFG_BAR1_SIZE_64M 0x0000000b
+ #define PORT_FEAT_CFG_BAR1_SIZE_128M 0x0000000c
+ #define PORT_FEAT_CFG_BAR1_SIZE_256M 0x0000000d
+ #define PORT_FEAT_CFG_BAR1_SIZE_512M 0x0000000e
+ #define PORT_FEAT_CFG_BAR1_SIZE_1G 0x0000000f
+ #define PORT_FEAT_CFG_BAR2_SIZE_MASK 0x000000F0
+ #define PORT_FEAT_CFG_BAR2_SIZE_SHIFT 4
+ #define PORT_FEAT_CFG_BAR2_SIZE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_BAR2_SIZE_64K 0x00000010
+ #define PORT_FEAT_CFG_BAR2_SIZE_128K 0x00000020
+ #define PORT_FEAT_CFG_BAR2_SIZE_256K 0x00000030
+ #define PORT_FEAT_CFG_BAR2_SIZE_512K 0x00000040
+ #define PORT_FEAT_CFG_BAR2_SIZE_1M 0x00000050
+ #define PORT_FEAT_CFG_BAR2_SIZE_2M 0x00000060
+ #define PORT_FEAT_CFG_BAR2_SIZE_4M 0x00000070
+ #define PORT_FEAT_CFG_BAR2_SIZE_8M 0x00000080
+ #define PORT_FEAT_CFG_BAR2_SIZE_16M 0x00000090
+ #define PORT_FEAT_CFG_BAR2_SIZE_32M 0x000000a0
+ #define PORT_FEAT_CFG_BAR2_SIZE_64M 0x000000b0
+ #define PORT_FEAT_CFG_BAR2_SIZE_128M 0x000000c0
+ #define PORT_FEAT_CFG_BAR2_SIZE_256M 0x000000d0
+ #define PORT_FEAT_CFG_BAR2_SIZE_512M 0x000000e0
+ #define PORT_FEAT_CFG_BAR2_SIZE_1G 0x000000f0
+
+ #define PORT_FEAT_CFG_DCBX_MASK 0x00000100
+ #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100
+
+ #define PORT_FEAT_CFG_AUTOGREEEN_MASK 0x00000200
+ #define PORT_FEAT_CFG_AUTOGREEEN_SHIFT 9
+ #define PORT_FEAT_CFG_AUTOGREEEN_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_AUTOGREEEN_ENABLED 0x00000200
+
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK 0x00000C00
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_SHIFT 10
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_DEFAULT 0x00000000
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE 0x00000400
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI 0x00000800
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_BOTH 0x00000c00
+
+ #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000
+ #define PORT_FEATURE_EN_SIZE_SHIFT 24
+ #define PORT_FEATURE_WOL_ENABLED 0x01000000
+ #define PORT_FEATURE_MBA_ENABLED 0x02000000
+ #define PORT_FEATURE_MFW_ENABLED 0x04000000
+
+ /* Advertise expansion ROM even if MBA is disabled */
+ #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK 0x08000000
+ #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED 0x08000000
+
+ /* Check the optic vendor via i2c against a list of approved modules
+ in a separate nvram image */
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xE0000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \
+ 0x00000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \
+ 0x20000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000
+
+ uint32_t wol_config;
+ /* Default is used when driver sets to "auto" mode */
+ #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010
+
+ uint32_t mba_config;
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000007
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT 0x00000004
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE 0x00000007
+
+ #define PORT_FEATURE_MBA_BOOT_RETRY_MASK 0x00000038
+ #define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT 3
+
+ #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400
+ #define PORT_FEATURE_MBA_HOTKEY_MASK 0x00000800
+ #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000
+ #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800
+
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000FF000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000
+ #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00F00000
+ #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3C000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26
+ #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_10M_HALF 0x04000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_10M_FULL 0x08000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_100M_HALF 0x0c000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_100M_FULL 0x10000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_1G 0x14000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_2_5G 0x18000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_10G 0x1c000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_20G 0x20000000
+
+ uint32_t Reserved0; /* 0x460 */
+
+ uint32_t mba_vlan_cfg;
+ #define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000FFFF
+ #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0
+ #define PORT_FEATURE_MBA_VLAN_EN 0x00010000
+
+ uint32_t Reserved1;
+ uint32_t smbus_config;
+ #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe
+ #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1
+
+ uint32_t vf_config;
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK 0x0000000F
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT 0
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_4K 0x00000001
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_8K 0x00000002
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_16K 0x00000003
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_32K 0x00000004
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_64K 0x00000005
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_128K 0x00000006
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_256K 0x00000007
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_512K 0x00000008
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_1M 0x00000009
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_2M 0x0000000a
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_4M 0x0000000b
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_8M 0x0000000c
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_16M 0x0000000d
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_32M 0x0000000e
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_64M 0x0000000f
+
+ uint32_t link_config; /* Used as HW defaults for the driver */
+
+ #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700
+ #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8
+ #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000
+ #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100
+ #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200
+ #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300
+ #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400
+ #define PORT_FEATURE_FLOW_CONTROL_SAFC_RX 0x00000500
+ #define PORT_FEATURE_FLOW_CONTROL_SAFC_TX 0x00000600
+ #define PORT_FEATURE_FLOW_CONTROL_SAFC_BOTH 0x00000700
+
+ #define PORT_FEATURE_LINK_SPEED_MASK 0x000F0000
+ #define PORT_FEATURE_LINK_SPEED_SHIFT 16
+ #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000
+ #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000
+ #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000
+ #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000
+ #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000
+ #define PORT_FEATURE_LINK_SPEED_1G 0x00050000
+ #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000
+ #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000
+ #define PORT_FEATURE_LINK_SPEED_20G 0x00080000
+
+ #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000
+ #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24
+ /* (forced) low speed switch (< 10G) */
+ #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000
+ /* (forced) high speed switch (>= 10G) */
+ #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000
+ #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000
+ #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000
+
+
+ /* The default for MCP link configuration;
+ uses the same defines as link_config */
+ uint32_t mfw_wol_link_cfg;
+
+ /* The default for the driver of the second external phy;
+ uses the same defines as link_config */
+ uint32_t link_config2; /* 0x47C */
+
+ /* The default for MCP of the second external phy;
+ uses the same defines as link_config */
+ uint32_t mfw_wol_link_cfg2; /* 0x480 */
+
+
+ /* EEE power saving mode */
+ uint32_t eee_power_mode; /* 0x484 */
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_MASK 0x000000FF
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT 0
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED 0x00000001
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE 0x00000002
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY 0x00000003
+
+
+ uint32_t Reserved2[16]; /* 0x488 */
+};
+
+/****************************************************************************
+ * Device Information *
+ ****************************************************************************/
+struct shm_dev_info { /* size */
+
+ uint32_t bc_rev; /* 8 bits each: major, minor, build */ /* 4 */
+
+ struct shared_hw_cfg shared_hw_config; /* 40 */
+
+ struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */
+
+ struct shared_feat_cfg shared_feature_config; /* 4 */
+
+ struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */
+
+};
+
+struct extended_dev_info_shared_cfg { /* NVRAM OFFSET */
+
+ /* Threshold in Celsius to start using the fan */
+ uint32_t temperature_monitor1; /* 0x4000 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_THRESH_MASK 0x0000007F
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_THRESH_SHIFT 0
+
+ /* Threshold in Celsius to shut down the board */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_THRESH_MASK 0x00007F00
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_THRESH_SHIFT 8
+
+ /* EPIO of fan temperature status */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_MASK 0x00FF0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_SHIFT 16
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_NA 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO0 0x00010000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO1 0x00020000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO2 0x00030000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO3 0x00040000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO4 0x00050000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO5 0x00060000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO6 0x00070000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO7 0x00080000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO8 0x00090000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO9 0x000a0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO10 0x000b0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO11 0x000c0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO12 0x000d0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO13 0x000e0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO14 0x000f0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO15 0x00100000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO16 0x00110000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO17 0x00120000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO18 0x00130000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO19 0x00140000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO20 0x00150000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO21 0x00160000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO22 0x00170000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO23 0x00180000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO24 0x00190000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO25 0x001a0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO26 0x001b0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO27 0x001c0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO28 0x001d0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO29 0x001e0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO30 0x001f0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO31 0x00200000
+
+ /* EPIO of shut down temperature status */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_MASK 0xFF000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_SHIFT 24
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_NA 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO0 0x01000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO1 0x02000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO2 0x03000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO3 0x04000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO4 0x05000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO5 0x06000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO6 0x07000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO7 0x08000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO8 0x09000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO9 0x0a000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO10 0x0b000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO11 0x0c000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO12 0x0d000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO13 0x0e000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO14 0x0f000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO15 0x10000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO16 0x11000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO17 0x12000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO18 0x13000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO19 0x14000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO20 0x15000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO21 0x16000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO22 0x17000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO23 0x18000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO24 0x19000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO25 0x1a000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO26 0x1b000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO27 0x1c000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO28 0x1d000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO29 0x1e000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO30 0x1f000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO31 0x20000000
+
+
+ /* Temperature monitoring period */
+ uint32_t temperature_monitor2; /* 0x4004 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_PERIOD_MASK 0x0000FFFF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_PERIOD_SHIFT 0
+
+
+ /* MFW flavor to be used */
+ uint32_t mfw_cfg; /* 0x4008 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_MASK 0x000000FF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_NA 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_A 0x00000001
+
+ /* Should NIC data query remain enabled upon last drv unload */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_MASK 0x00000100
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_SHIFT 8
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_ENABLED 0x00000100
+
+ /* Hide DCBX feature in CCM/BACS menus */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_MASK 0x00010000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_SHIFT 16
+ #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_ENABLED 0x00010000
+
+ uint32_t smbus_config; /* 0x400C */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SMBUS_ADDR_MASK 0x000000FF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SMBUS_ADDR_SHIFT 0
+
+ /* Switching regulator loop gain */
+ uint32_t board_cfg; /* 0x4010 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_MASK 0x0000000F
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_HW_DEFAULT 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X2 0x00000008
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X4 0x00000009
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X8 0x0000000a
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X16 0x0000000b
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_DIV8 0x0000000c
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_DIV4 0x0000000d
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_DIV2 0x0000000e
+ #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X1 0x0000000f
+
+ /* whether shadow swim feature is supported */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_MASK 0x00000100
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_SHIFT 8
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_ENABLED 0x00000100
+
+ /* whether to show/hide SRIOV menu in CCM */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_SHOW_MENU_MASK 0x00000200
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_SHOW_MENU_SHIFT 9
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_SHOW_MENU 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_HIDE_MENU 0x00000200
+
+ /* Threshold in Celsius for max continuous operation */
+ uint32_t temperature_report; /* 0x4014 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_MCOT_MASK 0x0000007F
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_MCOT_SHIFT 0
+
+ /* Threshold in Celsius for sensor caution */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SCT_MASK 0x00007F00
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SCT_SHIFT 8
+
+ /* wwn node prefix to be used (unless value is 0) */
+ uint32_t wwn_prefix; /* 0x4018 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX0_MASK 0x000000FF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX0_SHIFT 0
+
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX1_MASK 0x0000FF00
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX1_SHIFT 8
+
+ /* wwn port prefix to be used (unless value is 0) */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX0_MASK 0x00FF0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX0_SHIFT 16
+
+ /* wwn port prefix to be used (unless value is 0) */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX1_MASK 0xFF000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX1_SHIFT 24
+
+ /* General debug nvm cfg */
+ uint32_t dbg_cfg_flags; /* 0x401C */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_MASK 0x000FFFFF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_ENABLE 0x00000001
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_EN_SIGDET_FILTER 0x00000002
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_LP_TX_PRESET7 0x00000004
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_TX_ANA_DEFAULT 0x00000008
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_PLL_ANA_DEFAULT 0x00000010
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_FORCE_G1PLL_RETUNE 0x00000020
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_RX_ANA_DEFAULT 0x00000040
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_FORCE_SERDES_RX_CLK 0x00000080
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_DIS_RX_LP_EIEOS 0x00000100
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_FINALIZE_UCODE 0x00000200
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_HOLDOFF_REQ 0x00000400
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_RX_SIGDET_OVERRIDE 0x00000800
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_GP_PORG_UC_RESET 0x00001000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SUPPRESS_COMPEN_EVT 0x00002000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_ADJ_TXEQ_P0_P1 0x00004000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_G3_PLL_RETUNE 0x00008000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_MAC_PHY_CTL8 0x00010000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_DIS_MAC_G3_FRM_ERR 0x00020000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_INFERRED_EI 0x00040000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_GEN3_COMPLI_ENA 0x00080000
+
+ /* Debug rx sigdet (signal detect) threshold */
+ uint32_t dbg_rx_sigdet_threshold; /* 0x4020 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_RX_SIGDET_MASK 0x00000007
+ #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_RX_SIGDET_SHIFT 0
+
+ /* Enable IFFE feature */
+ uint32_t iffe_features; /* 0x4024 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_MASK 0x00000001
+ #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_ENABLED 0x00000001
+
+ /* Allowable port enablement (bitmask for ports 3-1) */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_PORT_MASK 0x0000000E
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_PORT_SHIFT 1
+
+ /* Allow iSCSI offload override */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_MASK 0x00000010
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_SHIFT 4
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_ENABLED 0x00000010
+
+ /* Allow FCoE offload override */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_MASK 0x00000020
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_SHIFT 5
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_ENABLED 0x00000020
+
+ /* Tie to adaptor */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_MASK 0x00008000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_SHIFT 15
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_ENABLED 0x00008000
+
+ /* Currently enabled port(s) (bitmask for ports 3-1) */
+ uint32_t current_iffe_mask; /* 0x4028 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_CFG_MASK 0x0000000E
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_CFG_SHIFT 1
+
+ /* Current iSCSI offload */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_MASK 0x00000010
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_SHIFT 4
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_ENABLED 0x00000010
+
+ /* Current FCoE offload */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_MASK 0x00000020
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_SHIFT 5
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_DISABLED 0x00000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_ENABLED 0x00000020
+
+ /* FW sets these pins to "0" (asserted) if either of its MAC
+ * or PHY specific threshold values is exceeded.
+ * Values are standard GPIO/EPIO pins.
+ */
+ uint32_t threshold_pin; /* 0x402C */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TCONTROL_PIN_MASK 0x000000FF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TCONTROL_PIN_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TWARNING_PIN_MASK 0x0000FF00
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TWARNING_PIN_SHIFT 8
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TCRITICAL_PIN_MASK 0x00FF0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_TCRITICAL_PIN_SHIFT 16
+
+ /* MAC die temperature threshold in Celsius. */
+ uint32_t mac_threshold_val; /* 0x4030 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_MAC_THRESH_MASK 0x000000FF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_MAC_THRESH_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_MAC_THRESH_MASK 0x0000FF00
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_MAC_THRESH_SHIFT 8
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_MAC_THRESH_MASK 0x00FF0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_MAC_THRESH_SHIFT 16
+
+ /* PHY die temperature threshold in Celsius. */
+ uint32_t phy_threshold_val; /* 0x4034 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_PHY_THRESH_MASK 0x000000FF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_PHY_THRESH_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_PHY_THRESH_MASK 0x0000FF00
+ #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_PHY_THRESH_SHIFT 8
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_PHY_THRESH_MASK 0x00FF0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_PHY_THRESH_SHIFT 16
+
+ /* External pins to communicate with host.
+ * Values are standard GPIO/EPIO pins.
+ */
+ uint32_t host_pin; /* 0x4038 */
+ #define EXTENDED_DEV_INFO_SHARED_CFG_I2C_ISOLATE_MASK 0x000000FF
+ #define EXTENDED_DEV_INFO_SHARED_CFG_I2C_ISOLATE_SHIFT 0
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_FAULT_MASK 0x0000FF00
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_FAULT_SHIFT 8
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_VPD_UPDATE_MASK 0x00FF0000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_VPD_UPDATE_SHIFT 16
+ #define EXTENDED_DEV_INFO_SHARED_CFG_VPD_CACHE_COMP_MASK 0xFF000000
+ #define EXTENDED_DEV_INFO_SHARED_CFG_VPD_CACHE_COMP_SHIFT 24
+};
+
+
+#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
+ #error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition."
+#endif
+
+#define FUNC_0 0
+#define FUNC_1 1
+#define FUNC_2 2
+#define FUNC_3 3
+#define FUNC_4 4
+#define FUNC_5 5
+#define FUNC_6 6
+#define FUNC_7 7
+#define E1H_FUNC_MAX 8
+#define E2_FUNC_MAX 4 /* per path */
+
+#define VN_0 0
+#define VN_1 1
+#define VN_2 2
+#define VN_3 3
+#define E1VN_MAX 1
+#define E1HVN_MAX 4
+
+#define E2_VF_MAX 64 /* HC_REG_VF_CONFIGURATION_SIZE */
+/* This value (in milliseconds) determines how often the driver
+ * issues the PULSE message code. The firmware monitors this periodic
+ * pulse to determine when to switch to an OS-absent mode. */
+#define DRV_PULSE_PERIOD_MS 250
+
+/* This value (in milliseconds) determines how long the driver should
+ * wait for an acknowledgement from the firmware before timing out. Once
+ * this wait has timed out, the driver will assume there is no firmware
+ * running and there won't be any firmware-driver synchronization during a
+ * driver reset. */
+#define FW_ACK_TIME_OUT_MS 5000
+
+#define FW_ACK_POLL_TIME_MS 1
+
+#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
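+
+/* An illustrative sketch of the handshake these constants describe
+ * (shmem_rd() and msleep() stand in for whatever register-read and
+ * delay helpers the driver actually uses):
+ *
+ * for (cnt = 0; cnt < FW_ACK_NUM_OF_POLL; cnt++) {
+ *         rc = shmem_rd(func_mb.fw_mb_header);
+ *         if ((rc & FW_MSG_SEQ_NUMBER_MASK) == expected_seq)
+ *                 break;
+ *         msleep(FW_ACK_POLL_TIME_MS);
+ * }
+ */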
+
+#define MFW_TRACE_SIGNATURE 0x54524342
+
+/****************************************************************************
+ * Driver <-> FW Mailbox *
+ ****************************************************************************/
+struct drv_port_mb {
+
+ uint32_t link_status;
+ /* Driver should update this field on any link change event */
+
+ #define LINK_STATUS_NONE (0<<0)
+ #define LINK_STATUS_LINK_FLAG_MASK 0x00000001
+ #define LINK_STATUS_LINK_UP 0x00000001
+ #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
+ #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD (11<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD (11<<1)
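+
+ /* A usage sketch (illustrative): the speed/duplex field is decoded
+ by masking first, then comparing against the values above:
+ switch (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+ case LINK_STATUS_SPEED_AND_DUPLEX_10GXFD:
+ ...
+ }
+ */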
+
+ #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020
+ #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+
+ #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+ #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080
+ #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+
+ #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+ #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+ #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800
+ #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000
+ #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000
+ #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000
+ #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000
+
+ #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000
+ #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000
+
+ #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000
+ #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000
+
+ #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
+ #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18)
+ #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18)
+ #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18)
+ #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18)
+
+ #define LINK_STATUS_SERDES_LINK 0x00100000
+
+ #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000
+ #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000
+ #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000
+ #define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE 0x10000000
+
+ #define LINK_STATUS_PFC_ENABLED 0x20000000
+
+ #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
+ #define LINK_STATUS_SFP_TX_FAULT 0x80000000
+
+ uint32_t port_stx;
+
+ uint32_t stat_nig_timer;
+
+ /* MCP firmware does not use this field */
+ uint32_t ext_phy_fw_version;
+
+};
+
+
+struct drv_func_mb {
+
+ uint32_t drv_mb_header;
+ #define DRV_MSG_CODE_MASK 0xffff0000
+ #define DRV_MSG_CODE_LOAD_REQ 0x10000000
+ #define DRV_MSG_CODE_LOAD_DONE 0x11000000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000
+ #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+ #define DRV_MSG_CODE_DCC_OK 0x30000000
+ #define DRV_MSG_CODE_DCC_FAILURE 0x31000000
+ #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000
+ #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000
+ #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000
+ #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000
+ #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
+ #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
+ #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
+
+ /*
+ * The optic module verification command requires bootcode
+ * v5.0.6 or later; the specific optic module verification command
+ * requires bootcode v5.2.12 or later
+ */
+ #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000
+ #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
+ #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
+ #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
+ #define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED 0xa2000000
+ #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
+ #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
+ #define REQ_BC_VER_4_MT_SUPPORTED 0x00070201
+ #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
+ #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209
+
+ #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
+ #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
+ #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401
+
+ #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+
+ #define DRV_MSG_CODE_AFEX_DRIVER_SETMAC 0xd0000000
+ #define DRV_MSG_CODE_AFEX_LISTGET_ACK 0xd1000000
+ #define DRV_MSG_CODE_AFEX_LISTSET_ACK 0xd2000000
+ #define DRV_MSG_CODE_AFEX_STATSGET_ACK 0xd3000000
+ #define DRV_MSG_CODE_AFEX_VIFSET_ACK 0xd4000000
+
+ #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
+ #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
+
+ #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
+
+ #define DRV_MSG_CODE_RMMOD 0xdb000000
+ #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f
+
+ #define DRV_MSG_CODE_SET_MF_BW 0xe0000000
+ #define REQ_BC_VER_4_SET_MF_BW 0x00060202
+ #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
+
+ #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000
+
+ #define DRV_MSG_CODE_INITIATE_FLR 0x02000000
+ #define REQ_BC_VER_4_INITIATE_FLR 0x00070213
+
+ #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
+ #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
+ #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
+ #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
+
+ #define DRV_MSG_CODE_IMG_OFFSET_REQ 0xe2000000
+ #define DRV_MSG_CODE_IMG_SIZE_REQ 0xe3000000
+
+ #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
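+
+ /* A usage sketch (illustrative): a driver command is written as a
+ command code OR'd with an incrementing sequence number, e.g.
+ drv_mb_header = DRV_MSG_CODE_LOAD_REQ |
+ (seq & DRV_MSG_SEQ_NUMBER_MASK);
+ The firmware echoes the sequence number in fw_mb_header when the
+ command completes. */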
+
+ uint32_t drv_mb_param;
+ #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000
+ #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000
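+
+ /* A usage sketch (illustrative; `min_bw`/`max_bw` are assumed inputs
+ in the 0..100 range): the two limits are packed into drv_mb_param as
+ param = (min_bw << 16) | (max_bw << 24);
+ before issuing DRV_MSG_CODE_SET_MF_BW. */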
+
+ #define DRV_MSG_CODE_UNLOAD_NON_D3_POWER 0x00000001
+ #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002
+
+ #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a
+ #define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA 0x00002000
+
+ #define DRV_MSG_CODE_USR_BLK_IMAGE_REQ 0x00000001
+
+ uint32_t fw_mb_header;
+ #define FW_MSG_CODE_MASK 0xffff0000
+ #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
+ #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
+ #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+ /* Load common chip is supported from bc 6.0.0 */
+ #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000
+ #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000
+
+ #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
+ #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
+ #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
+ #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000
+ #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000
+ #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+ #define FW_MSG_CODE_DCC_DONE 0x30100000
+ #define FW_MSG_CODE_LLDP_DONE 0x40100000
+ #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000
+ #define FW_MSG_CODE_DIAG_REFUSE 0x50200000
+ #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000
+ #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000
+ #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000
+ #define FW_MSG_CODE_GET_KEY_DONE 0x80100000
+ #define FW_MSG_CODE_NO_KEY 0x80f00000
+ #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000
+ #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000
+ #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000
+ #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000
+ #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000
+ #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
+ #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+ #define FW_MSG_CODE_HW_SET_INVALID_IMAGE 0xb0100000
+
+ #define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE 0xd0100000
+ #define FW_MSG_CODE_AFEX_LISTGET_ACK 0xd1100000
+ #define FW_MSG_CODE_AFEX_LISTSET_ACK 0xd2100000
+ #define FW_MSG_CODE_AFEX_STATSGET_ACK 0xd3100000
+ #define FW_MSG_CODE_AFEX_VIFSET_ACK 0xd4100000
+
+ #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
+ #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
+
+ #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
+
+ #define FW_MSG_CODE_RMMOD_ACK 0xdb100000
+
+ #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
+ #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
+
+ #define FW_MSG_CODE_LINK_CHANGED_ACK 0x01100000
+
+ #define FW_MSG_CODE_FLR_ACK 0x02000000
+ #define FW_MSG_CODE_FLR_NACK 0x02100000
+
+ #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000
+ #define FW_MSG_CODE_LIC_RESPONSE 0xff020000
+ #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000
+ #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
+
+ #define FW_MSG_CODE_IMG_OFFSET_RESPONSE 0xe2100000
+ #define FW_MSG_CODE_IMG_SIZE_RESPONSE 0xe3100000
+
+ #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ uint32_t fw_mb_param;
+
+ #define FW_PARAM_INVALID_IMG 0xffffffff
+
+ uint32_t drv_pulse_mb;
+ #define DRV_PULSE_SEQ_MASK 0x00007fff
+ #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+ /*
+ * The system time is in the format of
+ * (year-2001)*12*32 + month*32 + day.
+ */
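+ /*
+ * For example, 9 March 2015 encodes as
+ * (2015-2001)*12*32 + 3*32 + 9 = 5481,
+ * stored in the upper 16 bits per the mask above.
+ */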
+ #define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+ /*
+ * Indicates to the firmware not to go into
+ * OS-absent mode when it is not getting a driver pulse.
+ * This is used for debugging as well as for PXE(MBA).
+ */
+
+ uint32_t mcp_pulse_mb;
+ #define MCP_PULSE_SEQ_MASK 0x00007fff
+ #define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+ /* Indicates to the driver not to assert due to lack
+ * of MCP response */
+ #define MCP_EVENT_MASK 0xffff0000
+ #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+
+ uint32_t iscsi_boot_signature;
+ uint32_t iscsi_boot_block_offset;
+
+ uint32_t drv_status;
+ #define DRV_STATUS_PMF 0x00000001
+ #define DRV_STATUS_VF_DISABLED 0x00000002
+ #define DRV_STATUS_SET_MF_BW 0x00000004
+ #define DRV_STATUS_LINK_EVENT 0x00000008
+
+ #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
+ #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
+ #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200
+ #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400
+ #define DRV_STATUS_DCC_RESERVED1 0x00000800
+ #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
+ #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
+
+ #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
+ #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
+ #define DRV_STATUS_AFEX_EVENT_MASK 0x03f00000
+ #define DRV_STATUS_AFEX_LISTGET_REQ 0x00100000
+ #define DRV_STATUS_AFEX_LISTSET_REQ 0x00200000
+ #define DRV_STATUS_AFEX_STATSGET_REQ 0x00400000
+ #define DRV_STATUS_AFEX_VIFSET_REQ 0x00800000
+
+ #define DRV_STATUS_DRV_INFO_REQ 0x04000000
+
+ #define DRV_STATUS_EEE_NEGOTIATION_RESULTS 0x08000000
+
+ uint32_t virt_mac_upper;
+ #define VIRT_MAC_SIGN_MASK 0xffff0000
+ #define VIRT_MAC_SIGNATURE 0x564d0000
+ uint32_t virt_mac_lower;
+
+};
+
+
+/****************************************************************************
+ * Management firmware state *
+ ****************************************************************************/
+/* Allocate 440 bytes for management firmware */
+#define MGMTFW_STATE_WORD_SIZE 110
+
+struct mgmtfw_state {
+ uint32_t opaque[MGMTFW_STATE_WORD_SIZE];
+};
+
+
+/****************************************************************************
+ * Multi-Function configuration *
+ ****************************************************************************/
+struct shared_mf_cfg {
+
+ uint32_t clp_mb;
+ #define SHARED_MF_CLP_SET_DEFAULT 0x00000000
+ /* set by CLP */
+ #define SHARED_MF_CLP_EXIT 0x00000001
+ /* set by MCP */
+ #define SHARED_MF_CLP_EXIT_DONE 0x00010000
+
+};
+
+struct port_mf_cfg {
+
+ uint32_t dynamic_cfg; /* device control channel */
+ #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
+ #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0
+ #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK
+
+ uint32_t reserved[1];
+
+};
+
+struct func_mf_cfg {
+
+ uint32_t config;
+ /* E/R/I/D */
+ /* function 0 of each port cannot be hidden */
+ #define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+
+ #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000006
+ #define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000000
+ #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002
+ #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004
+ #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006
+ #define FUNC_MF_CFG_PROTOCOL_DEFAULT \
+ FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA
+
+ #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008
+ #define FUNC_MF_CFG_FUNC_DELETED 0x00000010
+
+ #define FUNC_MF_CFG_FUNC_BOOT_MASK 0x00000060
+ #define FUNC_MF_CFG_FUNC_BOOT_BIOS_CTRL 0x00000000
+ #define FUNC_MF_CFG_FUNC_BOOT_VCM_DISABLED 0x00000020
+ #define FUNC_MF_CFG_FUNC_BOOT_VCM_ENABLED 0x00000040
+
+ /* PRI */
+ /* 0 - low priority, 3 - high priority */
+ #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300
+ #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8
+ #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000
+
+ /* MINBW, MAXBW */
+ /* value range 0..100, in increments of 100Mbps */
+ #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000
+ #define FUNC_MF_CFG_MIN_BW_SHIFT 16
+ #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+ #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000
+ #define FUNC_MF_CFG_MAX_BW_SHIFT 24
+ #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000
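+ /* For example, the default FUNC_MF_CFG_MAX_BW_DEFAULT above decodes
+ to (0x64000000 >> 24) = 100, i.e. 100 * 100Mbps = 10Gbps. */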
+
+ uint32_t mac_upper; /* MAC */
+ #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+ #define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+ #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ uint32_t mac_lower;
+ #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+
+ uint32_t e1hov_tag; /* VNI */
+ #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
+ #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0
+ #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK
+
+ /* afex default VLAN ID - 12 bits */
+ #define FUNC_MF_CFG_AFEX_VLAN_MASK 0x0fff0000
+ #define FUNC_MF_CFG_AFEX_VLAN_SHIFT 16
+
+ uint32_t afex_config;
+ #define FUNC_MF_CFG_AFEX_COS_FILTER_MASK 0x000000ff
+ #define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT 0
+ #define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK 0x0000ff00
+ #define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT 8
+ #define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL 0x00000100
+ #define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK 0x000f0000
+ #define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT 16
+
+ uint32_t pf_allocation;
+ /* number of vfs in function, if 0 - sriov disabled */
+ #define FUNC_MF_CFG_NUMBER_OF_VFS_MASK 0x000000FF
+ #define FUNC_MF_CFG_NUMBER_OF_VFS_SHIFT 0
+};
+
+enum mf_cfg_afex_vlan_mode {
+ FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0,
+ FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE,
+ FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE
+};
+
+/* This structure is not applicable and should not be accessed on 57711 */
+struct func_ext_cfg {
+ uint32_t func_cfg;
+ #define MACP_FUNC_CFG_FLAGS_MASK 0x0000007F
+ #define MACP_FUNC_CFG_FLAGS_SHIFT 0
+ #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
+ #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
+ #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
+ #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
+ #define MACP_FUNC_CFG_PAUSE_ON_HOST_RING 0x00000080
+
+ uint32_t iscsi_mac_addr_upper;
+ uint32_t iscsi_mac_addr_lower;
+
+ uint32_t fcoe_mac_addr_upper;
+ uint32_t fcoe_mac_addr_lower;
+
+ uint32_t fcoe_wwn_port_name_upper;
+ uint32_t fcoe_wwn_port_name_lower;
+
+ uint32_t fcoe_wwn_node_name_upper;
+ uint32_t fcoe_wwn_node_name_lower;
+
+ uint32_t preserve_data;
+ #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0)
+ #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1)
+ #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2)
+ #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3)
+ #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4)
+ #define MF_FUNC_CFG_PRESERVE_TX_BW (1<<5)
+};
+
+struct mf_cfg {
+
+ struct shared_mf_cfg shared_mf_config; /* 0x4 */
+ struct port_mf_cfg port_mf_config[NVM_PATH_MAX][PORT_MAX];
+ /* 0x10*2=0x20 */
+ /* for all chips, there are 8 mf functions */
+ struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */
+ /*
+ * Extended configuration per function - this array does not exist and
+ * should not be accessed on 57711
+ */
+ struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/
+}; /* 0x224 */
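+
+/*
+ * Compile-time layout check (an illustrative sketch, assuming the sizes in
+ * the offset comments above: NVM_PATH_MAX * PORT_MAX == 4 and
+ * E1H_FUNC_MAX == 8, i.e. 0x4 + 0x20 + 0xc0 + 0x140 == 0x224):
+ */
+typedef char mf_cfg_size_check[(sizeof(struct mf_cfg) == 0x224) ? 1 : -1];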
+
+/****************************************************************************
+ * Shared Memory Region *
+ ****************************************************************************/
+struct shmem_region { /* SharedMem Offset (size) */
+
+ uint32_t validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */
+ #define SHR_MEM_FORMAT_REV_MASK 0xff000000
+ #define SHR_MEM_FORMAT_REV_ID ('A'<<24)
+ /* validity bits */
+ #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000
+ #define SHR_MEM_VALIDITY_MB 0x00200000
+ #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000
+ #define SHR_MEM_VALIDITY_RESERVED 0x00000007
+ /* One licensing bit should be set */
+ #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
+ #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
+ #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
+ #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
+ /* Active MFW */
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
+
+ struct shm_dev_info dev_info; /* 0x8 (0x438) */
+
+ license_key_t drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
+
+ /* FW information (for internal FW use) */
+ uint32_t fw_info_fio_offset; /* 0x4a8 (0x4) */
+ struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
+
+ struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
+
+
+#ifdef BMAPI
+ /* This is a variable length array */
+ /* the number of functions depends on the chip type */
+ struct drv_func_mb func_mb[1]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#else
+ /* the number of functions depends on the chip type */
+ struct drv_func_mb func_mb[]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#endif /* BMAPI */
+
+}; /* 57711 = 0x7E4 | 57712 = 0x734 */
+
+/****************************************************************************
+ * Shared Memory 2 Region *
+ ****************************************************************************/
+/* The fw_flr_ack is actually built in the following way: */
+/* 8 bit: PF ack */
+/* 64 bit: VF ack */
+/* 8 bit: iov_dis_ack */
+/* In order to maintain endianness in the mailbox hsi, we want to keep */
+/* using uint32_t. The fw must have the VF ack right after the PF ack, */
+/* since this is how it accesses arrays (it always expects the VF to */
+/* reside after the PF, which makes the calculation much easier for it). */
+/* In order to satisfy both constraints, and keep the struct small, the */
+/* code abuses the structure defined here to achieve the actual */
+/* partitioning above. */
+/****************************************************************************/
+struct fw_flr_ack {
+ uint32_t pf_ack;
+ uint32_t vf_ack[1];
+ uint32_t iov_dis_ack;
+};
+
+struct fw_flr_mb {
+ uint32_t aggint;
+ uint32_t opgen_addr;
+ struct fw_flr_ack ack;
+};
+
+struct eee_remote_vals {
+ uint32_t tx_tw;
+ uint32_t rx_tw;
+};
+
+/**** SUPPORT FOR SHMEM ARRAYS ***
+ * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
+ * define arrays with storage types smaller than unsigned dwords.
+ * The macros below add generic support for SHMEM arrays with numeric elements
+ * that can span 2, 4, 8 or 16 bits. The underlying array type is a 32 bit
+ * dword array with individual bit-field elements accessed using shifts and
+ * masks.
+ */
+
+/* eb is the bitwidth of a single element */
+#define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1)
+#define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb)))
+
+/* the bit-position macro allows the user to flip the order of the array's
+ * elements on a per byte or word boundary.
+ *
+ * example: an array with 8 entries, each 4 bits wide. This array will fit
+ * into a single dword. The diagrams below show the array order of the
+ * nibbles.
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
+ *
+ * | | | |
+ * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | | | |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
+ *
+ * | | | |
+ * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 |
+ * | | | |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
+ *
+ * | | | |
+ * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 |
+ * | | | |
+ */
+#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
+ ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
+ (((i)%((fb)/(eb))) * (eb)))
+
+#define SHMEM_ARRAY_GET(a, i, eb, fb) \
+ ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
+ SHMEM_ARRAY_MASK(eb))
+
+#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
+do { \
+ a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+ a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+} while (0)
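+
+/*
+ * Worked example (illustrative, not part of the HSI): with eb=4 and fb=8,
+ * SHMEM_ARRAY_ENTRY(0, 4) == 0 and SHMEM_ARRAY_BITPOS(0, 4, 8) == 24, so
+ * element 0 lives in bits 27:24 of dword 0 and element 1 in bits 31:28,
+ * which is exactly the per-byte flip drawn above:
+ *
+ *	uint32_t a[1] = { 0 };
+ *	SHMEM_ARRAY_SET(a, 0, 4, 8, 0xA);	-> a[0] == 0x0A000000
+ *	SHMEM_ARRAY_GET(a, 0, 4, 8)		-> 0xA
+ */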
+
+
+/****START OF DCBX STRUCTURES DECLARATIONS****/
+#define DCBX_MAX_NUM_PRI_PG_ENTRIES 8
+#define DCBX_PRI_PG_BITWIDTH 4
+#define DCBX_PRI_PG_FBITS 8
+#define DCBX_PRI_PG_GET(a, i) \
+ SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS)
+#define DCBX_PRI_PG_SET(a, i, val) \
+ SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val)
+#define DCBX_MAX_NUM_PG_BW_ENTRIES 8
+#define DCBX_BW_PG_BITWIDTH 8
+#define DCBX_PG_BW_GET(a, i) \
+ SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH)
+#define DCBX_PG_BW_SET(a, i, val) \
+ SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val)
+#define DCBX_STRICT_PRI_PG 15
+#define DCBX_MAX_APP_PROTOCOL 16
+#define DCBX_MAX_APP_LOCAL 32
+#define FCOE_APP_IDX 0
+#define ISCSI_APP_IDX 1
+#define PREDEFINED_APP_IDX_MAX 2
+
+
+/* Big/Little endian have the same representation. */
+struct dcbx_ets_feature {
+ /*
+ * For Admin MIB - is this feature supported by the
+ * driver | For Local MIB - should this feature be enabled.
+ */
+ uint32_t enabled;
+ uint32_t pg_bw_tbl[2];
+ uint32_t pri_pg_tbl[1];
+};
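+
+/*
+ * Usage sketch (illustrative): pri_pg_tbl packs the eight 4-bit
+ * priority-to-PG entries into a single dword, so mapping priority 3 to
+ * priority group 2 and reading it back is:
+ *
+ *	DCBX_PRI_PG_SET(ets->pri_pg_tbl, 3, 2);
+ *	DCBX_PRI_PG_GET(ets->pri_pg_tbl, 3)	-> 2
+ */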
+
+/* Driver structure in LE */
+struct dcbx_pfc_feature {
+#ifdef __BIG_ENDIAN
+ uint8_t pri_en_bitmap;
+ #define DCBX_PFC_PRI_0 0x01
+ #define DCBX_PFC_PRI_1 0x02
+ #define DCBX_PFC_PRI_2 0x04
+ #define DCBX_PFC_PRI_3 0x08
+ #define DCBX_PFC_PRI_4 0x10
+ #define DCBX_PFC_PRI_5 0x20
+ #define DCBX_PFC_PRI_6 0x40
+ #define DCBX_PFC_PRI_7 0x80
+ uint8_t pfc_caps;
+ uint8_t reserved;
+ uint8_t enabled;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t enabled;
+ uint8_t reserved;
+ uint8_t pfc_caps;
+ uint8_t pri_en_bitmap;
+ #define DCBX_PFC_PRI_0 0x01
+ #define DCBX_PFC_PRI_1 0x02
+ #define DCBX_PFC_PRI_2 0x04
+ #define DCBX_PFC_PRI_3 0x08
+ #define DCBX_PFC_PRI_4 0x10
+ #define DCBX_PFC_PRI_5 0x20
+ #define DCBX_PFC_PRI_6 0x40
+ #define DCBX_PFC_PRI_7 0x80
+#endif
+};
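+
+/*
+ * Sketch (illustrative): pri_en_bitmap carries one bit per priority, so
+ * priority p is PFC-enabled when pfc->enabled is set and
+ * (pfc->pri_en_bitmap & (DCBX_PFC_PRI_0 << p)) is non-zero.
+ */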
+
+struct dcbx_app_priority_entry {
+#ifdef __BIG_ENDIAN
+ uint16_t app_id;
+ uint8_t pri_bitmap;
+ uint8_t appBitfield;
+ #define DCBX_APP_ENTRY_VALID 0x01
+ #define DCBX_APP_ENTRY_SF_MASK 0x30
+ #define DCBX_APP_ENTRY_SF_SHIFT 4
+ #define DCBX_APP_SF_ETH_TYPE 0x10
+ #define DCBX_APP_SF_PORT 0x20
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t appBitfield;
+ #define DCBX_APP_ENTRY_VALID 0x01
+ #define DCBX_APP_ENTRY_SF_MASK 0x30
+ #define DCBX_APP_ENTRY_SF_SHIFT 4
+ #define DCBX_APP_SF_ETH_TYPE 0x10
+ #define DCBX_APP_SF_PORT 0x20
+ uint8_t pri_bitmap;
+ uint16_t app_id;
+#endif
+};
+
+
+/* FW structure in BE */
+struct dcbx_app_priority_feature {
+#ifdef __BIG_ENDIAN
+ uint8_t reserved;
+ uint8_t default_pri;
+ uint8_t tc_supported;
+ uint8_t enabled;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t enabled;
+ uint8_t tc_supported;
+ uint8_t default_pri;
+ uint8_t reserved;
+#endif
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE */
+struct dcbx_features {
+ /* PG feature */
+ struct dcbx_ets_feature ets;
+ /* PFC feature */
+ struct dcbx_pfc_feature pfc;
+ /* APP feature */
+ struct dcbx_app_priority_feature app;
+};
+
+/* LLDP protocol parameters */
+/* FW structure in BE */
+struct lldp_params {
+#ifdef __BIG_ENDIAN
+ uint8_t msg_fast_tx_interval;
+ uint8_t msg_tx_hold;
+ uint8_t msg_tx_interval;
+ uint8_t admin_status;
+ #define LLDP_TX_ONLY 0x01
+ #define LLDP_RX_ONLY 0x02
+ #define LLDP_TX_RX 0x03
+ #define LLDP_DISABLED 0x04
+ uint8_t reserved1;
+ uint8_t tx_fast;
+ uint8_t tx_crd_max;
+ uint8_t tx_crd;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t admin_status;
+ #define LLDP_TX_ONLY 0x01
+ #define LLDP_RX_ONLY 0x02
+ #define LLDP_TX_RX 0x03
+ #define LLDP_DISABLED 0x04
+ uint8_t msg_tx_interval;
+ uint8_t msg_tx_hold;
+ uint8_t msg_fast_tx_interval;
+ uint8_t tx_crd;
+ uint8_t tx_crd_max;
+ uint8_t tx_fast;
+ uint8_t reserved1;
+#endif
+ #define REM_CHASSIS_ID_STAT_LEN 4
+ #define REM_PORT_ID_STAT_LEN 4
+ /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
+ uint32_t peer_chassis_id[REM_CHASSIS_ID_STAT_LEN];
+ /* Holds remote Port ID TLV header, subtype and 9B of payload. */
+ uint32_t peer_port_id[REM_PORT_ID_STAT_LEN];
+};
+
+struct lldp_dcbx_stat {
+ #define LOCAL_CHASSIS_ID_STAT_LEN 2
+ #define LOCAL_PORT_ID_STAT_LEN 2
+ /* Holds local Chassis ID 8B payload of constant subtype 4. */
+ uint32_t local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN];
+ /* Holds local Port ID 8B payload of constant subtype 3. */
+ uint32_t local_port_id[LOCAL_PORT_ID_STAT_LEN];
+ /* Number of DCBX frames transmitted. */
+ uint32_t num_tx_dcbx_pkts;
+ /* Number of DCBX frames received. */
+ uint32_t num_rx_dcbx_pkts;
+};
+
+/* ADMIN MIB - DCBX local machine default configuration. */
+struct lldp_admin_mib {
+ uint32_t ver_cfg_flags;
+ #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001
+ #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002
+ #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004
+ #define DCBX_ETS_RECO_TX_ENABLED 0x00000008
+ #define DCBX_ETS_RECO_VALID 0x00000010
+ #define DCBX_ETS_WILLING 0x00000020
+ #define DCBX_PFC_WILLING 0x00000040
+ #define DCBX_APP_WILLING 0x00000080
+ #define DCBX_VERSION_CEE 0x00000100
+ #define DCBX_VERSION_IEEE 0x00000200
+ #define DCBX_DCBX_ENABLED 0x00000400
+ #define DCBX_CEE_VERSION_MASK 0x0000f000
+ #define DCBX_CEE_VERSION_SHIFT 12
+ #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000
+ #define DCBX_CEE_MAX_VERSION_SHIFT 16
+ struct dcbx_features features;
+};
+
+/* REMOTE MIB - remote machine DCBX configuration. */
+struct lldp_remote_mib {
+ uint32_t prefix_seq_num;
+ uint32_t flags;
+ #define DCBX_ETS_TLV_RX 0x00000001
+ #define DCBX_PFC_TLV_RX 0x00000002
+ #define DCBX_APP_TLV_RX 0x00000004
+ #define DCBX_ETS_RX_ERROR 0x00000010
+ #define DCBX_PFC_RX_ERROR 0x00000020
+ #define DCBX_APP_RX_ERROR 0x00000040
+ #define DCBX_ETS_REM_WILLING 0x00000100
+ #define DCBX_PFC_REM_WILLING 0x00000200
+ #define DCBX_APP_REM_WILLING 0x00000400
+ #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000
+ #define DCBX_REMOTE_MIB_VALID 0x00002000
+ struct dcbx_features features;
+ uint32_t suffix_seq_num;
+};
+
+/* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */
+struct lldp_local_mib {
+ uint32_t prefix_seq_num;
+ /* Indicates if there is mismatch with negotiation results. */
+ uint32_t error;
+ #define DCBX_LOCAL_ETS_ERROR 0x00000001
+ #define DCBX_LOCAL_PFC_ERROR 0x00000002
+ #define DCBX_LOCAL_APP_ERROR 0x00000004
+ #define DCBX_LOCAL_PFC_MISMATCH 0x00000010
+ #define DCBX_LOCAL_APP_MISMATCH 0x00000020
+ #define DCBX_REMOTE_MIB_ERROR 0x00000040
+ #define DCBX_REMOTE_ETS_TLV_NOT_FOUND 0x00000080
+ #define DCBX_REMOTE_PFC_TLV_NOT_FOUND 0x00000100
+ #define DCBX_REMOTE_APP_TLV_NOT_FOUND 0x00000200
+ struct dcbx_features features;
+ uint32_t suffix_seq_num;
+};
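+
+/*
+ * Consumer sketch (illustrative; copy_mib_from_shmem is a hypothetical
+ * helper): the MIB producer bumps prefix_seq_num before an update and
+ * suffix_seq_num after it, so a reader can detect a torn snapshot by
+ * re-reading until the two match:
+ *
+ *	do {
+ *		copy_mib_from_shmem(&mib);
+ *	} while (mib.prefix_seq_num != mib.suffix_seq_num);
+ */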
+
+struct lldp_local_mib_ext {
+ uint32_t prefix_seq_num;
+ /* APP TLV extension - 16 more entries for negotiation results */
+ struct dcbx_app_priority_entry app_pri_tbl_ext[DCBX_MAX_APP_PROTOCOL];
+ uint32_t suffix_seq_num;
+};
+/***END OF DCBX STRUCTURES DECLARATIONS***/
+
+/***********************************************************/
+/* Elink section */
+/***********************************************************/
+#define SHMEM_LINK_CONFIG_SIZE 2
+struct shmem_lfa {
+ uint32_t req_duplex;
+ #define REQ_DUPLEX_PHY0_MASK 0x0000ffff
+ #define REQ_DUPLEX_PHY0_SHIFT 0
+ #define REQ_DUPLEX_PHY1_MASK 0xffff0000
+ #define REQ_DUPLEX_PHY1_SHIFT 16
+ uint32_t req_flow_ctrl;
+ #define REQ_FLOW_CTRL_PHY0_MASK 0x0000ffff
+ #define REQ_FLOW_CTRL_PHY0_SHIFT 0
+ #define REQ_FLOW_CTRL_PHY1_MASK 0xffff0000
+ #define REQ_FLOW_CTRL_PHY1_SHIFT 16
+ uint32_t req_line_speed; /* Also determine AutoNeg */
+ #define REQ_LINE_SPD_PHY0_MASK 0x0000ffff
+ #define REQ_LINE_SPD_PHY0_SHIFT 0
+ #define REQ_LINE_SPD_PHY1_MASK 0xffff0000
+ #define REQ_LINE_SPD_PHY1_SHIFT 16
+ uint32_t speed_cap_mask[SHMEM_LINK_CONFIG_SIZE];
+ uint32_t additional_config;
+ #define REQ_FC_AUTO_ADV_MASK 0x0000ffff
+ #define REQ_FC_AUTO_ADV0_SHIFT 0
+ #define NO_LFA_DUE_TO_DCC_MASK 0x00010000
+ uint32_t lfa_sts;
+ #define LFA_LINK_FLAP_REASON_OFFSET 0
+ #define LFA_LINK_FLAP_REASON_MASK 0x000000ff
+ #define LFA_LINK_DOWN 0x1
+ #define LFA_LOOPBACK_ENABLED 0x2
+ #define LFA_DUPLEX_MISMATCH 0x3
+ #define LFA_MFW_IS_TOO_OLD 0x4
+ #define LFA_LINK_SPEED_MISMATCH 0x5
+ #define LFA_FLOW_CTRL_MISMATCH 0x6
+ #define LFA_SPEED_CAP_MISMATCH 0x7
+ #define LFA_DCC_LFA_DISABLED 0x8
+ #define LFA_EEE_MISMATCH 0x9
+
+ #define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
+ #define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
+
+ #define LINK_FLAP_COUNT_OFFSET 16
+ #define LINK_FLAP_COUNT_MASK 0x00ff0000
+
+ #define LFA_FLAGS_MASK 0xff000000
+ #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24)
+
+};
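+
+/*
+ * Decoding sketch (illustrative; the helper names are hypothetical):
+ * lfa_sts packs the last link-flap-avoidance reason and two counters.
+ */
+static inline uint32_t lfa_flap_reason(uint32_t lfa_sts)
+{
+	return (lfa_sts & LFA_LINK_FLAP_REASON_MASK) >>
+	       LFA_LINK_FLAP_REASON_OFFSET;
+}
+
+static inline uint32_t lfa_link_flap_count(uint32_t lfa_sts)
+{
+	return (lfa_sts & LINK_FLAP_COUNT_MASK) >> LINK_FLAP_COUNT_OFFSET;
+}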
+
+struct shmem2_region {
+
+ uint32_t size; /* 0x0000 */
+
+ uint32_t dcc_support; /* 0x0004 */
+ #define SHMEM_DCC_SUPPORT_NONE 0x00000000
+ #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001
+ #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004
+ #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008
+ #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040
+ #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
+
+ uint32_t ext_phy_fw_version2[PORT_MAX]; /* 0x0008 */
+ /*
+ * For backwards compatibility, if the mf_cfg_addr does not exist
+ * (the size field is smaller than 0xc) the mf_cfg resides at the
+ * end of struct shmem_region
+ */
+ uint32_t mf_cfg_addr; /* 0x0010 */
+ #define SHMEM_MF_CFG_ADDR_NONE 0x00000000
+
+ struct fw_flr_mb flr_mb; /* 0x0014 */
+ uint32_t dcbx_lldp_params_offset; /* 0x0028 */
+ #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000
+ uint32_t dcbx_neg_res_offset; /* 0x002c */
+ #define SHMEM_DCBX_NEG_RES_NONE 0x00000000
+ uint32_t dcbx_remote_mib_offset; /* 0x0030 */
+ #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000
+ /*
+ * The other_shmemX_base_addr fields hold the other path's shmem address,
+ * required for example in case of common phy init, or for path1 to know
+ * the address of the mcp debug trace, which is located at an offset from
+ * path0's shmem
+ */
+ uint32_t other_shmem_base_addr; /* 0x0034 */
+ uint32_t other_shmem2_base_addr; /* 0x0038 */
+ /*
+ * mcp_vf_disabled is set by the MCP to notify the driver of VFs
+ * which were disabled/FLRed
+ */
+ uint32_t mcp_vf_disabled[E2_VF_MAX / 32]; /* 0x003c */
+
+ /*
+ * drv_ack_vf_disabled is set by the PF driver to acknowledge that the
+ * disabled VFs were handled
+ */
+ uint32_t drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */
+
+ uint32_t dcbx_lldp_dcbx_stat_offset; /* 0x0064 */
+ #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000
+
+ /*
+ * edebug_driver_if field is used to transfer messages between edebug
+ * app to the driver through shmem2.
+ *
+ * message format:
+ * bits 0-2 - function number / instance of driver to perform request
+ * bits 3-5 - op code / is_ack?
+ * bits 6-63 - data
+ */
+ uint32_t edebug_driver_if[2]; /* 0x0068 */
+ #define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR 1
+ #define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR 2
+ #define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT 3
+
+ uint32_t nvm_retain_bitmap_addr; /* 0x0070 */
+
+ /* afex support of that driver */
+ uint32_t afex_driver_support; /* 0x0074 */
+ #define SHMEM_AFEX_VERSION_MASK 0x100f
+ #define SHMEM_AFEX_SUPPORTED_VERSION_ONE 0x1001
+ #define SHMEM_AFEX_REDUCED_DRV_LOADED 0x8000
+
+ /* driver receives addr in scratchpad to which it should respond */
+ uint32_t afex_scratchpad_addr_to_write[E2_FUNC_MAX];
+
+ /*
+ * generic params from MCP to driver (value depends on the msg sent
+ * to the driver)
+ */
+ uint32_t afex_param1_to_driver[E2_FUNC_MAX]; /* 0x0088 */
+ uint32_t afex_param2_to_driver[E2_FUNC_MAX]; /* 0x0098 */
+
+ uint32_t swim_base_addr; /* 0x0108 */
+ uint32_t swim_funcs;
+ uint32_t swim_main_cb;
+
+ /*
+ * bitmap notifying which VIF profiles stored in nvram are enabled by
+ * switch
+ */
+ uint32_t afex_profiles_enabled[2];
+
+ /* generic flags controlled by the driver */
+ uint32_t drv_flags;
+ #define DRV_FLAGS_DCB_CONFIGURED 0x0
+ #define DRV_FLAGS_DCB_CONFIGURATION_ABORTED 0x1
+ #define DRV_FLAGS_DCB_MFW_CONFIGURED 0x2
+
+ #define DRV_FLAGS_PORT_MASK ((1 << DRV_FLAGS_DCB_CONFIGURED) | \
+ (1 << DRV_FLAGS_DCB_CONFIGURATION_ABORTED) | \
+ (1 << DRV_FLAGS_DCB_MFW_CONFIGURED))
+ /* Port offset*/
+ #define DRV_FLAGS_P0_OFFSET 0
+ #define DRV_FLAGS_P1_OFFSET 16
+ #define DRV_FLAGS_GET_PORT_OFFSET(_port) ((0 == _port) ? \
+ DRV_FLAGS_P0_OFFSET : \
+ DRV_FLAGS_P1_OFFSET)
+
+ #define DRV_FLAGS_GET_PORT_MASK(_port) (DRV_FLAGS_PORT_MASK << \
+ DRV_FLAGS_GET_PORT_OFFSET(_port))
+
+ #define DRV_FLAGS_FILED_BY_PORT(_field_bit, _port) (1 << ( \
+ (_field_bit) + DRV_FLAGS_GET_PORT_OFFSET(_port)))
+
+ /* pointer to extended dev_info shared data copied from nvm image */
+ uint32_t extended_dev_info_shared_addr;
+ uint32_t ncsi_oem_data_addr;
+
+ uint32_t sensor_data_addr;
+ uint32_t buffer_block_addr;
+ uint32_t sensor_data_req_update_interval;
+ uint32_t temperature_in_half_celsius;
+ uint32_t glob_struct_in_host;
+
+ uint32_t dcbx_neg_res_ext_offset;
+ #define SHMEM_DCBX_NEG_RES_EXT_NONE 0x00000000
+
+ uint32_t drv_capabilities_flag[E2_FUNC_MAX];
+ #define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001
+ #define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002
+ #define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004
+ #define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008
+
+ uint32_t extended_dev_info_shared_cfg_size;
+
+ uint32_t dcbx_en[PORT_MAX];
+
+ /* The offset points to the multi threaded meta structure */
+ uint32_t multi_thread_data_offset;
+
+ /* address of DMAable host address holding values from the drivers */
+ uint32_t drv_info_host_addr_lo;
+ uint32_t drv_info_host_addr_hi;
+
+ /* general values written by the MFW (such as current version) */
+ uint32_t drv_info_control;
+ #define DRV_INFO_CONTROL_VER_MASK 0x000000ff
+ #define DRV_INFO_CONTROL_VER_SHIFT 0
+ #define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
+ #define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
+ uint32_t ibft_host_addr; /* initialized by option ROM */
+
+ struct eee_remote_vals eee_remote_vals[PORT_MAX];
+ uint32_t pf_allocation[E2_FUNC_MAX];
+ #define PF_ALLOACTION_MSIX_VECTORS_MASK 0x000000ff /* real value, as PCI config space can show only a maximum of 64 vectors */
+ #define PF_ALLOACTION_MSIX_VECTORS_SHIFT 0
+
+ /* the status of EEE auto-negotiation
+ * bits 15:0 the configured tx-lpi entry timer value. Depends on bit 31.
+ * bits 19:16 the supported modes for EEE.
+ * bits 23:20 the speeds advertised for EEE.
+ * bits 27:24 the speeds the Link partner advertised for EEE.
+ * The supported/adv. modes in bits 27:16 originate from the
+ * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
+ * bit 28 when 1'b1 EEE was requested.
+ * bit 29 when 1'b1 tx lpi was requested.
+ * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted iff
+ * 30:29 are 2'b11.
+ * bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as
+ * value. When 1'b1 those bits contains a value times 16 microseconds.
+ */
+ uint32_t eee_status[PORT_MAX];
+ #define SHMEM_EEE_TIMER_MASK 0x0000ffff
+ #define SHMEM_EEE_SUPPORTED_MASK 0x000f0000
+ #define SHMEM_EEE_SUPPORTED_SHIFT 16
+ #define SHMEM_EEE_ADV_STATUS_MASK 0x00f00000
+ #define SHMEM_EEE_100M_ADV (1<<0)
+ #define SHMEM_EEE_1G_ADV (1<<1)
+ #define SHMEM_EEE_10G_ADV (1<<2)
+ #define SHMEM_EEE_ADV_STATUS_SHIFT 20
+ #define SHMEM_EEE_LP_ADV_STATUS_MASK 0x0f000000
+ #define SHMEM_EEE_LP_ADV_STATUS_SHIFT 24
+ #define SHMEM_EEE_REQUESTED_BIT 0x10000000
+ #define SHMEM_EEE_LPI_REQUESTED_BIT 0x20000000
+ #define SHMEM_EEE_ACTIVE_BIT 0x40000000
+ #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000
+
+ uint32_t sizeof_port_stats;
+
+ /* Link Flap Avoidance */
+ uint32_t lfa_host_addr[PORT_MAX];
+
+ /* External PHY temperature in deg C. */
+ uint32_t extphy_temps_in_celsius;
+ #define EXTPHY1_TEMP_MASK 0x0000ffff
+ #define EXTPHY1_TEMP_SHIFT 0
+
+ uint32_t ocdata_info_addr; /* Offset 0x148 */
+ uint32_t drv_func_info_addr; /* Offset 0x14C */
+ uint32_t drv_func_info_size; /* Offset 0x150 */
+ uint32_t link_attr_sync[PORT_MAX]; /* Offset 0x154 */
+ #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0)
+};
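+
+/*
+ * Sketch (illustrative): drv_flags keeps a 16-bit flag group per port, so
+ * testing e.g. the DCB-configured bit for a given port combines the field
+ * bit with the port offset:
+ *
+ *	drv_flags & DRV_FLAGS_FILED_BY_PORT(DRV_FLAGS_DCB_CONFIGURED, port)
+ */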
+
+
+struct emac_stats {
+ uint32_t rx_stat_ifhcinoctets;
+ uint32_t rx_stat_ifhcinbadoctets;
+ uint32_t rx_stat_etherstatsfragments;
+ uint32_t rx_stat_ifhcinucastpkts;
+ uint32_t rx_stat_ifhcinmulticastpkts;
+ uint32_t rx_stat_ifhcinbroadcastpkts;
+ uint32_t rx_stat_dot3statsfcserrors;
+ uint32_t rx_stat_dot3statsalignmenterrors;
+ uint32_t rx_stat_dot3statscarriersenseerrors;
+ uint32_t rx_stat_xonpauseframesreceived;
+ uint32_t rx_stat_xoffpauseframesreceived;
+ uint32_t rx_stat_maccontrolframesreceived;
+ uint32_t rx_stat_xoffstateentered;
+ uint32_t rx_stat_dot3statsframestoolong;
+ uint32_t rx_stat_etherstatsjabbers;
+ uint32_t rx_stat_etherstatsundersizepkts;
+ uint32_t rx_stat_etherstatspkts64octets;
+ uint32_t rx_stat_etherstatspkts65octetsto127octets;
+ uint32_t rx_stat_etherstatspkts128octetsto255octets;
+ uint32_t rx_stat_etherstatspkts256octetsto511octets;
+ uint32_t rx_stat_etherstatspkts512octetsto1023octets;
+ uint32_t rx_stat_etherstatspkts1024octetsto1522octets;
+ uint32_t rx_stat_etherstatspktsover1522octets;
+
+ uint32_t rx_stat_falsecarriererrors;
+
+ uint32_t tx_stat_ifhcoutoctets;
+ uint32_t tx_stat_ifhcoutbadoctets;
+ uint32_t tx_stat_etherstatscollisions;
+ uint32_t tx_stat_outxonsent;
+ uint32_t tx_stat_outxoffsent;
+ uint32_t tx_stat_flowcontroldone;
+ uint32_t tx_stat_dot3statssinglecollisionframes;
+ uint32_t tx_stat_dot3statsmultiplecollisionframes;
+ uint32_t tx_stat_dot3statsdeferredtransmissions;
+ uint32_t tx_stat_dot3statsexcessivecollisions;
+ uint32_t tx_stat_dot3statslatecollisions;
+ uint32_t tx_stat_ifhcoutucastpkts;
+ uint32_t tx_stat_ifhcoutmulticastpkts;
+ uint32_t tx_stat_ifhcoutbroadcastpkts;
+ uint32_t tx_stat_etherstatspkts64octets;
+ uint32_t tx_stat_etherstatspkts65octetsto127octets;
+ uint32_t tx_stat_etherstatspkts128octetsto255octets;
+ uint32_t tx_stat_etherstatspkts256octetsto511octets;
+ uint32_t tx_stat_etherstatspkts512octetsto1023octets;
+ uint32_t tx_stat_etherstatspkts1024octetsto1522octets;
+ uint32_t tx_stat_etherstatspktsover1522octets;
+ uint32_t tx_stat_dot3statsinternalmactransmiterrors;
+};
+
+
+struct bmac1_stats {
+ uint32_t tx_stat_gtpkt_lo;
+ uint32_t tx_stat_gtpkt_hi;
+ uint32_t tx_stat_gtxpf_lo;
+ uint32_t tx_stat_gtxpf_hi;
+ uint32_t tx_stat_gtfcs_lo;
+ uint32_t tx_stat_gtfcs_hi;
+ uint32_t tx_stat_gtmca_lo;
+ uint32_t tx_stat_gtmca_hi;
+ uint32_t tx_stat_gtbca_lo;
+ uint32_t tx_stat_gtbca_hi;
+ uint32_t tx_stat_gtfrg_lo;
+ uint32_t tx_stat_gtfrg_hi;
+ uint32_t tx_stat_gtovr_lo;
+ uint32_t tx_stat_gtovr_hi;
+ uint32_t tx_stat_gt64_lo;
+ uint32_t tx_stat_gt64_hi;
+ uint32_t tx_stat_gt127_lo;
+ uint32_t tx_stat_gt127_hi;
+ uint32_t tx_stat_gt255_lo;
+ uint32_t tx_stat_gt255_hi;
+ uint32_t tx_stat_gt511_lo;
+ uint32_t tx_stat_gt511_hi;
+ uint32_t tx_stat_gt1023_lo;
+ uint32_t tx_stat_gt1023_hi;
+ uint32_t tx_stat_gt1518_lo;
+ uint32_t tx_stat_gt1518_hi;
+ uint32_t tx_stat_gt2047_lo;
+ uint32_t tx_stat_gt2047_hi;
+ uint32_t tx_stat_gt4095_lo;
+ uint32_t tx_stat_gt4095_hi;
+ uint32_t tx_stat_gt9216_lo;
+ uint32_t tx_stat_gt9216_hi;
+ uint32_t tx_stat_gt16383_lo;
+ uint32_t tx_stat_gt16383_hi;
+ uint32_t tx_stat_gtmax_lo;
+ uint32_t tx_stat_gtmax_hi;
+ uint32_t tx_stat_gtufl_lo;
+ uint32_t tx_stat_gtufl_hi;
+ uint32_t tx_stat_gterr_lo;
+ uint32_t tx_stat_gterr_hi;
+ uint32_t tx_stat_gtbyt_lo;
+ uint32_t tx_stat_gtbyt_hi;
+
+ uint32_t rx_stat_gr64_lo;
+ uint32_t rx_stat_gr64_hi;
+ uint32_t rx_stat_gr127_lo;
+ uint32_t rx_stat_gr127_hi;
+ uint32_t rx_stat_gr255_lo;
+ uint32_t rx_stat_gr255_hi;
+ uint32_t rx_stat_gr511_lo;
+ uint32_t rx_stat_gr511_hi;
+ uint32_t rx_stat_gr1023_lo;
+ uint32_t rx_stat_gr1023_hi;
+ uint32_t rx_stat_gr1518_lo;
+ uint32_t rx_stat_gr1518_hi;
+ uint32_t rx_stat_gr2047_lo;
+ uint32_t rx_stat_gr2047_hi;
+ uint32_t rx_stat_gr4095_lo;
+ uint32_t rx_stat_gr4095_hi;
+ uint32_t rx_stat_gr9216_lo;
+ uint32_t rx_stat_gr9216_hi;
+ uint32_t rx_stat_gr16383_lo;
+ uint32_t rx_stat_gr16383_hi;
+ uint32_t rx_stat_grmax_lo;
+ uint32_t rx_stat_grmax_hi;
+ uint32_t rx_stat_grpkt_lo;
+ uint32_t rx_stat_grpkt_hi;
+ uint32_t rx_stat_grfcs_lo;
+ uint32_t rx_stat_grfcs_hi;
+ uint32_t rx_stat_grmca_lo;
+ uint32_t rx_stat_grmca_hi;
+ uint32_t rx_stat_grbca_lo;
+ uint32_t rx_stat_grbca_hi;
+ uint32_t rx_stat_grxcf_lo;
+ uint32_t rx_stat_grxcf_hi;
+ uint32_t rx_stat_grxpf_lo;
+ uint32_t rx_stat_grxpf_hi;
+ uint32_t rx_stat_grxuo_lo;
+ uint32_t rx_stat_grxuo_hi;
+ uint32_t rx_stat_grjbr_lo;
+ uint32_t rx_stat_grjbr_hi;
+ uint32_t rx_stat_grovr_lo;
+ uint32_t rx_stat_grovr_hi;
+ uint32_t rx_stat_grflr_lo;
+ uint32_t rx_stat_grflr_hi;
+ uint32_t rx_stat_grmeg_lo;
+ uint32_t rx_stat_grmeg_hi;
+ uint32_t rx_stat_grmeb_lo;
+ uint32_t rx_stat_grmeb_hi;
+ uint32_t rx_stat_grbyt_lo;
+ uint32_t rx_stat_grbyt_hi;
+ uint32_t rx_stat_grund_lo;
+ uint32_t rx_stat_grund_hi;
+ uint32_t rx_stat_grfrg_lo;
+ uint32_t rx_stat_grfrg_hi;
+ uint32_t rx_stat_grerb_lo;
+ uint32_t rx_stat_grerb_hi;
+ uint32_t rx_stat_grfre_lo;
+ uint32_t rx_stat_grfre_hi;
+ uint32_t rx_stat_gripj_lo;
+ uint32_t rx_stat_gripj_hi;
+};
+
+struct bmac2_stats {
+ uint32_t tx_stat_gtpk_lo; /* gtpok */
+ uint32_t tx_stat_gtpk_hi; /* gtpok */
+ uint32_t tx_stat_gtxpf_lo; /* gtpf */
+ uint32_t tx_stat_gtxpf_hi; /* gtpf */
+ uint32_t tx_stat_gtpp_lo; /* NEW BMAC2 */
+ uint32_t tx_stat_gtpp_hi; /* NEW BMAC2 */
+ uint32_t tx_stat_gtfcs_lo;
+ uint32_t tx_stat_gtfcs_hi;
+ uint32_t tx_stat_gtuca_lo; /* NEW BMAC2 */
+ uint32_t tx_stat_gtuca_hi; /* NEW BMAC2 */
+ uint32_t tx_stat_gtmca_lo;
+ uint32_t tx_stat_gtmca_hi;
+ uint32_t tx_stat_gtbca_lo;
+ uint32_t tx_stat_gtbca_hi;
+ uint32_t tx_stat_gtovr_lo;
+ uint32_t tx_stat_gtovr_hi;
+ uint32_t tx_stat_gtfrg_lo;
+ uint32_t tx_stat_gtfrg_hi;
+ uint32_t tx_stat_gtpkt1_lo; /* gtpkt */
+ uint32_t tx_stat_gtpkt1_hi; /* gtpkt */
+ uint32_t tx_stat_gt64_lo;
+ uint32_t tx_stat_gt64_hi;
+ uint32_t tx_stat_gt127_lo;
+ uint32_t tx_stat_gt127_hi;
+ uint32_t tx_stat_gt255_lo;
+ uint32_t tx_stat_gt255_hi;
+ uint32_t tx_stat_gt511_lo;
+ uint32_t tx_stat_gt511_hi;
+ uint32_t tx_stat_gt1023_lo;
+ uint32_t tx_stat_gt1023_hi;
+ uint32_t tx_stat_gt1518_lo;
+ uint32_t tx_stat_gt1518_hi;
+ uint32_t tx_stat_gt2047_lo;
+ uint32_t tx_stat_gt2047_hi;
+ uint32_t tx_stat_gt4095_lo;
+ uint32_t tx_stat_gt4095_hi;
+ uint32_t tx_stat_gt9216_lo;
+ uint32_t tx_stat_gt9216_hi;
+ uint32_t tx_stat_gt16383_lo;
+ uint32_t tx_stat_gt16383_hi;
+ uint32_t tx_stat_gtmax_lo;
+ uint32_t tx_stat_gtmax_hi;
+ uint32_t tx_stat_gtufl_lo;
+ uint32_t tx_stat_gtufl_hi;
+ uint32_t tx_stat_gterr_lo;
+ uint32_t tx_stat_gterr_hi;
+ uint32_t tx_stat_gtbyt_lo;
+ uint32_t tx_stat_gtbyt_hi;
+
+ uint32_t rx_stat_gr64_lo;
+ uint32_t rx_stat_gr64_hi;
+ uint32_t rx_stat_gr127_lo;
+ uint32_t rx_stat_gr127_hi;
+ uint32_t rx_stat_gr255_lo;
+ uint32_t rx_stat_gr255_hi;
+ uint32_t rx_stat_gr511_lo;
+ uint32_t rx_stat_gr511_hi;
+ uint32_t rx_stat_gr1023_lo;
+ uint32_t rx_stat_gr1023_hi;
+ uint32_t rx_stat_gr1518_lo;
+ uint32_t rx_stat_gr1518_hi;
+ uint32_t rx_stat_gr2047_lo;
+ uint32_t rx_stat_gr2047_hi;
+ uint32_t rx_stat_gr4095_lo;
+ uint32_t rx_stat_gr4095_hi;
+ uint32_t rx_stat_gr9216_lo;
+ uint32_t rx_stat_gr9216_hi;
+ uint32_t rx_stat_gr16383_lo;
+ uint32_t rx_stat_gr16383_hi;
+ uint32_t rx_stat_grmax_lo;
+ uint32_t rx_stat_grmax_hi;
+ uint32_t rx_stat_grpkt_lo;
+ uint32_t rx_stat_grpkt_hi;
+ uint32_t rx_stat_grfcs_lo;
+ uint32_t rx_stat_grfcs_hi;
+ uint32_t rx_stat_gruca_lo;
+ uint32_t rx_stat_gruca_hi;
+ uint32_t rx_stat_grmca_lo;
+ uint32_t rx_stat_grmca_hi;
+ uint32_t rx_stat_grbca_lo;
+ uint32_t rx_stat_grbca_hi;
+ uint32_t rx_stat_grxpf_lo; /* grpf */
+ uint32_t rx_stat_grxpf_hi; /* grpf */
+ uint32_t rx_stat_grpp_lo;
+ uint32_t rx_stat_grpp_hi;
+ uint32_t rx_stat_grxuo_lo; /* gruo */
+ uint32_t rx_stat_grxuo_hi; /* gruo */
+ uint32_t rx_stat_grjbr_lo;
+ uint32_t rx_stat_grjbr_hi;
+ uint32_t rx_stat_grovr_lo;
+ uint32_t rx_stat_grovr_hi;
+ uint32_t rx_stat_grxcf_lo; /* grcf */
+ uint32_t rx_stat_grxcf_hi; /* grcf */
+ uint32_t rx_stat_grflr_lo;
+ uint32_t rx_stat_grflr_hi;
+ uint32_t rx_stat_grpok_lo;
+ uint32_t rx_stat_grpok_hi;
+ uint32_t rx_stat_grmeg_lo;
+ uint32_t rx_stat_grmeg_hi;
+ uint32_t rx_stat_grmeb_lo;
+ uint32_t rx_stat_grmeb_hi;
+ uint32_t rx_stat_grbyt_lo;
+ uint32_t rx_stat_grbyt_hi;
+ uint32_t rx_stat_grund_lo;
+ uint32_t rx_stat_grund_hi;
+ uint32_t rx_stat_grfrg_lo;
+ uint32_t rx_stat_grfrg_hi;
+ uint32_t rx_stat_grerb_lo; /* grerrbyt */
+ uint32_t rx_stat_grerb_hi; /* grerrbyt */
+ uint32_t rx_stat_grfre_lo; /* grfrerr */
+ uint32_t rx_stat_grfre_hi; /* grfrerr */
+ uint32_t rx_stat_gripj_lo;
+ uint32_t rx_stat_gripj_hi;
+};
+
+struct mstat_stats {
+ struct {
+ /* OTE MSTAT on E3 has a bug where this register's contents are
+ * actually tx_gtxpok + tx_gtxpf + (possibly)tx_gtxpp
+ */
+ uint32_t tx_gtxpok_lo;
+ uint32_t tx_gtxpok_hi;
+ uint32_t tx_gtxpf_lo;
+ uint32_t tx_gtxpf_hi;
+ uint32_t tx_gtxpp_lo;
+ uint32_t tx_gtxpp_hi;
+ uint32_t tx_gtfcs_lo;
+ uint32_t tx_gtfcs_hi;
+ uint32_t tx_gtuca_lo;
+ uint32_t tx_gtuca_hi;
+ uint32_t tx_gtmca_lo;
+ uint32_t tx_gtmca_hi;
+ uint32_t tx_gtgca_lo;
+ uint32_t tx_gtgca_hi;
+ uint32_t tx_gtpkt_lo;
+ uint32_t tx_gtpkt_hi;
+ uint32_t tx_gt64_lo;
+ uint32_t tx_gt64_hi;
+ uint32_t tx_gt127_lo;
+ uint32_t tx_gt127_hi;
+ uint32_t tx_gt255_lo;
+ uint32_t tx_gt255_hi;
+ uint32_t tx_gt511_lo;
+ uint32_t tx_gt511_hi;
+ uint32_t tx_gt1023_lo;
+ uint32_t tx_gt1023_hi;
+ uint32_t tx_gt1518_lo;
+ uint32_t tx_gt1518_hi;
+ uint32_t tx_gt2047_lo;
+ uint32_t tx_gt2047_hi;
+ uint32_t tx_gt4095_lo;
+ uint32_t tx_gt4095_hi;
+ uint32_t tx_gt9216_lo;
+ uint32_t tx_gt9216_hi;
+ uint32_t tx_gt16383_lo;
+ uint32_t tx_gt16383_hi;
+ uint32_t tx_gtufl_lo;
+ uint32_t tx_gtufl_hi;
+ uint32_t tx_gterr_lo;
+ uint32_t tx_gterr_hi;
+ uint32_t tx_gtbyt_lo;
+ uint32_t tx_gtbyt_hi;
+ uint32_t tx_collisions_lo;
+ uint32_t tx_collisions_hi;
+ uint32_t tx_singlecollision_lo;
+ uint32_t tx_singlecollision_hi;
+ uint32_t tx_multiplecollisions_lo;
+ uint32_t tx_multiplecollisions_hi;
+ uint32_t tx_deferred_lo;
+ uint32_t tx_deferred_hi;
+ uint32_t tx_excessivecollisions_lo;
+ uint32_t tx_excessivecollisions_hi;
+ uint32_t tx_latecollisions_lo;
+ uint32_t tx_latecollisions_hi;
+ } stats_tx;
+
+ struct {
+ uint32_t rx_gr64_lo;
+ uint32_t rx_gr64_hi;
+ uint32_t rx_gr127_lo;
+ uint32_t rx_gr127_hi;
+ uint32_t rx_gr255_lo;
+ uint32_t rx_gr255_hi;
+ uint32_t rx_gr511_lo;
+ uint32_t rx_gr511_hi;
+ uint32_t rx_gr1023_lo;
+ uint32_t rx_gr1023_hi;
+ uint32_t rx_gr1518_lo;
+ uint32_t rx_gr1518_hi;
+ uint32_t rx_gr2047_lo;
+ uint32_t rx_gr2047_hi;
+ uint32_t rx_gr4095_lo;
+ uint32_t rx_gr4095_hi;
+ uint32_t rx_gr9216_lo;
+ uint32_t rx_gr9216_hi;
+ uint32_t rx_gr16383_lo;
+ uint32_t rx_gr16383_hi;
+ uint32_t rx_grpkt_lo;
+ uint32_t rx_grpkt_hi;
+ uint32_t rx_grfcs_lo;
+ uint32_t rx_grfcs_hi;
+ uint32_t rx_gruca_lo;
+ uint32_t rx_gruca_hi;
+ uint32_t rx_grmca_lo;
+ uint32_t rx_grmca_hi;
+ uint32_t rx_grbca_lo;
+ uint32_t rx_grbca_hi;
+ uint32_t rx_grxpf_lo;
+ uint32_t rx_grxpf_hi;
+ uint32_t rx_grxpp_lo;
+ uint32_t rx_grxpp_hi;
+ uint32_t rx_grxuo_lo;
+ uint32_t rx_grxuo_hi;
+ uint32_t rx_grovr_lo;
+ uint32_t rx_grovr_hi;
+ uint32_t rx_grxcf_lo;
+ uint32_t rx_grxcf_hi;
+ uint32_t rx_grflr_lo;
+ uint32_t rx_grflr_hi;
+ uint32_t rx_grpok_lo;
+ uint32_t rx_grpok_hi;
+ uint32_t rx_grbyt_lo;
+ uint32_t rx_grbyt_hi;
+ uint32_t rx_grund_lo;
+ uint32_t rx_grund_hi;
+ uint32_t rx_grfrg_lo;
+ uint32_t rx_grfrg_hi;
+ uint32_t rx_grerb_lo;
+ uint32_t rx_grerb_hi;
+ uint32_t rx_grfre_lo;
+ uint32_t rx_grfre_hi;
+
+ uint32_t rx_alignmenterrors_lo;
+ uint32_t rx_alignmenterrors_hi;
+ uint32_t rx_falsecarrier_lo;
+ uint32_t rx_falsecarrier_hi;
+ uint32_t rx_llfcmsgcnt_lo;
+ uint32_t rx_llfcmsgcnt_hi;
+ } stats_rx;
+};
+
+union mac_stats {
+ struct emac_stats emac_stats;
+ struct bmac1_stats bmac1_stats;
+ struct bmac2_stats bmac2_stats;
+ struct mstat_stats mstat_stats;
+};
+
+
+struct mac_stx {
+ /* in_bad_octets */
+ uint32_t rx_stat_ifhcinbadoctets_hi;
+ uint32_t rx_stat_ifhcinbadoctets_lo;
+
+ /* out_bad_octets */
+ uint32_t tx_stat_ifhcoutbadoctets_hi;
+ uint32_t tx_stat_ifhcoutbadoctets_lo;
+
+ /* crc_receive_errors */
+ uint32_t rx_stat_dot3statsfcserrors_hi;
+ uint32_t rx_stat_dot3statsfcserrors_lo;
+ /* alignment_errors */
+ uint32_t rx_stat_dot3statsalignmenterrors_hi;
+ uint32_t rx_stat_dot3statsalignmenterrors_lo;
+ /* carrier_sense_errors */
+ uint32_t rx_stat_dot3statscarriersenseerrors_hi;
+ uint32_t rx_stat_dot3statscarriersenseerrors_lo;
+ /* false_carrier_detections */
+ uint32_t rx_stat_falsecarriererrors_hi;
+ uint32_t rx_stat_falsecarriererrors_lo;
+
+ /* runt_packets_received */
+ uint32_t rx_stat_etherstatsundersizepkts_hi;
+ uint32_t rx_stat_etherstatsundersizepkts_lo;
+ /* jabber_packets_received */
+ uint32_t rx_stat_dot3statsframestoolong_hi;
+ uint32_t rx_stat_dot3statsframestoolong_lo;
+
+ /* error_runt_packets_received */
+ uint32_t rx_stat_etherstatsfragments_hi;
+ uint32_t rx_stat_etherstatsfragments_lo;
+ /* error_jabber_packets_received */
+ uint32_t rx_stat_etherstatsjabbers_hi;
+ uint32_t rx_stat_etherstatsjabbers_lo;
+
+ /* control_frames_received */
+ uint32_t rx_stat_maccontrolframesreceived_hi;
+ uint32_t rx_stat_maccontrolframesreceived_lo;
+ uint32_t rx_stat_mac_xpf_hi;
+ uint32_t rx_stat_mac_xpf_lo;
+ uint32_t rx_stat_mac_xcf_hi;
+ uint32_t rx_stat_mac_xcf_lo;
+
+ /* xoff_state_entered */
+ uint32_t rx_stat_xoffstateentered_hi;
+ uint32_t rx_stat_xoffstateentered_lo;
+ /* pause_xon_frames_received */
+ uint32_t rx_stat_xonpauseframesreceived_hi;
+ uint32_t rx_stat_xonpauseframesreceived_lo;
+ /* pause_xoff_frames_received */
+ uint32_t rx_stat_xoffpauseframesreceived_hi;
+ uint32_t rx_stat_xoffpauseframesreceived_lo;
+ /* pause_xon_frames_transmitted */
+ uint32_t tx_stat_outxonsent_hi;
+ uint32_t tx_stat_outxonsent_lo;
+ /* pause_xoff_frames_transmitted */
+ uint32_t tx_stat_outxoffsent_hi;
+ uint32_t tx_stat_outxoffsent_lo;
+ /* flow_control_done */
+ uint32_t tx_stat_flowcontroldone_hi;
+ uint32_t tx_stat_flowcontroldone_lo;
+
+ /* ether_stats_collisions */
+ uint32_t tx_stat_etherstatscollisions_hi;
+ uint32_t tx_stat_etherstatscollisions_lo;
+ /* single_collision_transmit_frames */
+ uint32_t tx_stat_dot3statssinglecollisionframes_hi;
+ uint32_t tx_stat_dot3statssinglecollisionframes_lo;
+ /* multiple_collision_transmit_frames */
+ uint32_t tx_stat_dot3statsmultiplecollisionframes_hi;
+ uint32_t tx_stat_dot3statsmultiplecollisionframes_lo;
+ /* deferred_transmissions */
+ uint32_t tx_stat_dot3statsdeferredtransmissions_hi;
+ uint32_t tx_stat_dot3statsdeferredtransmissions_lo;
+ /* excessive_collision_frames */
+ uint32_t tx_stat_dot3statsexcessivecollisions_hi;
+ uint32_t tx_stat_dot3statsexcessivecollisions_lo;
+ /* late_collision_frames */
+ uint32_t tx_stat_dot3statslatecollisions_hi;
+ uint32_t tx_stat_dot3statslatecollisions_lo;
+
+ /* frames_transmitted_64_bytes */
+ uint32_t tx_stat_etherstatspkts64octets_hi;
+ uint32_t tx_stat_etherstatspkts64octets_lo;
+ /* frames_transmitted_65_127_bytes */
+ uint32_t tx_stat_etherstatspkts65octetsto127octets_hi;
+ uint32_t tx_stat_etherstatspkts65octetsto127octets_lo;
+ /* frames_transmitted_128_255_bytes */
+ uint32_t tx_stat_etherstatspkts128octetsto255octets_hi;
+ uint32_t tx_stat_etherstatspkts128octetsto255octets_lo;
+ /* frames_transmitted_256_511_bytes */
+ uint32_t tx_stat_etherstatspkts256octetsto511octets_hi;
+ uint32_t tx_stat_etherstatspkts256octetsto511octets_lo;
+ /* frames_transmitted_512_1023_bytes */
+ uint32_t tx_stat_etherstatspkts512octetsto1023octets_hi;
+ uint32_t tx_stat_etherstatspkts512octetsto1023octets_lo;
+ /* frames_transmitted_1024_1522_bytes */
+ uint32_t tx_stat_etherstatspkts1024octetsto1522octets_hi;
+ uint32_t tx_stat_etherstatspkts1024octetsto1522octets_lo;
+ /* frames_transmitted_1523_9022_bytes */
+ uint32_t tx_stat_etherstatspktsover1522octets_hi;
+ uint32_t tx_stat_etherstatspktsover1522octets_lo;
+ uint32_t tx_stat_mac_2047_hi;
+ uint32_t tx_stat_mac_2047_lo;
+ uint32_t tx_stat_mac_4095_hi;
+ uint32_t tx_stat_mac_4095_lo;
+ uint32_t tx_stat_mac_9216_hi;
+ uint32_t tx_stat_mac_9216_lo;
+ uint32_t tx_stat_mac_16383_hi;
+ uint32_t tx_stat_mac_16383_lo;
+
+ /* internal_mac_transmit_errors */
+ uint32_t tx_stat_dot3statsinternalmactransmiterrors_hi;
+ uint32_t tx_stat_dot3statsinternalmactransmiterrors_lo;
+
+ /* if_out_discards */
+ uint32_t tx_stat_mac_ufl_hi;
+ uint32_t tx_stat_mac_ufl_lo;
+};
+
+
+#define MAC_STX_IDX_MAX 2
+
+struct host_port_stats {
+ uint32_t host_port_stats_counter;
+
+ struct mac_stx mac_stx[MAC_STX_IDX_MAX];
+
+ uint32_t brb_drop_hi;
+ uint32_t brb_drop_lo;
+
+ uint32_t not_used; /* obsolete as of MFW 7.2.1 */
+
+ uint32_t pfc_frames_tx_hi;
+ uint32_t pfc_frames_tx_lo;
+ uint32_t pfc_frames_rx_hi;
+ uint32_t pfc_frames_rx_lo;
+
+ uint32_t eee_lpi_count_hi;
+ uint32_t eee_lpi_count_lo;
+};
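+
+/*
+ * Sketch (illustrative; the helper name is hypothetical): statistics are
+ * stored as _hi/_lo dword pairs that combine into 64-bit counters:
+ */
+static inline uint64_t stats_hilo_to_u64(uint32_t hi, uint32_t lo)
+{
+	return ((uint64_t)hi << 32) | lo;
+}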
+
+
+struct host_func_stats {
+ uint32_t host_func_stats_start;
+
+ uint32_t total_bytes_received_hi;
+ uint32_t total_bytes_received_lo;
+
+ uint32_t total_bytes_transmitted_hi;
+ uint32_t total_bytes_transmitted_lo;
+
+ uint32_t total_unicast_packets_received_hi;
+ uint32_t total_unicast_packets_received_lo;
+
+ uint32_t total_multicast_packets_received_hi;
+ uint32_t total_multicast_packets_received_lo;
+
+ uint32_t total_broadcast_packets_received_hi;
+ uint32_t total_broadcast_packets_received_lo;
+
+ uint32_t total_unicast_packets_transmitted_hi;
+ uint32_t total_unicast_packets_transmitted_lo;
+
+ uint32_t total_multicast_packets_transmitted_hi;
+ uint32_t total_multicast_packets_transmitted_lo;
+
+ uint32_t total_broadcast_packets_transmitted_hi;
+ uint32_t total_broadcast_packets_transmitted_lo;
+
+ uint32_t valid_bytes_received_hi;
+ uint32_t valid_bytes_received_lo;
+
+ uint32_t host_func_stats_end;
+};
+
+/* VIC definitions */
+#define VICSTATST_UIF_INDEX 2
+
+/*
+ * stats collected for afex.
+ * NOTE: structure is exactly as expected to be received by the switch.
+ * order must remain exactly as is unless protocol changes !
+ */
+struct afex_stats {
+ uint32_t tx_unicast_frames_hi;
+ uint32_t tx_unicast_frames_lo;
+ uint32_t tx_unicast_bytes_hi;
+ uint32_t tx_unicast_bytes_lo;
+ uint32_t tx_multicast_frames_hi;
+ uint32_t tx_multicast_frames_lo;
+ uint32_t tx_multicast_bytes_hi;
+ uint32_t tx_multicast_bytes_lo;
+ uint32_t tx_broadcast_frames_hi;
+ uint32_t tx_broadcast_frames_lo;
+ uint32_t tx_broadcast_bytes_hi;
+ uint32_t tx_broadcast_bytes_lo;
+ uint32_t tx_frames_discarded_hi;
+ uint32_t tx_frames_discarded_lo;
+ uint32_t tx_frames_dropped_hi;
+ uint32_t tx_frames_dropped_lo;
+
+ uint32_t rx_unicast_frames_hi;
+ uint32_t rx_unicast_frames_lo;
+ uint32_t rx_unicast_bytes_hi;
+ uint32_t rx_unicast_bytes_lo;
+ uint32_t rx_multicast_frames_hi;
+ uint32_t rx_multicast_frames_lo;
+ uint32_t rx_multicast_bytes_hi;
+ uint32_t rx_multicast_bytes_lo;
+ uint32_t rx_broadcast_frames_hi;
+ uint32_t rx_broadcast_frames_lo;
+ uint32_t rx_broadcast_bytes_hi;
+ uint32_t rx_broadcast_bytes_lo;
+ uint32_t rx_frames_discarded_hi;
+ uint32_t rx_frames_discarded_lo;
+ uint32_t rx_frames_dropped_hi;
+ uint32_t rx_frames_dropped_lo;
+};
+
+/* To maintain backward compatibility between FW and drivers, new elements */
+/* should be added to the end of the structure. */
+
+/* Per Port Statistics */
+struct port_info {
+ uint32_t size; /* size of this structure (i.e. sizeof(port_info)) */
+ uint32_t enabled; /* 0 = Disabled, 1 = Enabled */
+ uint32_t link_speed; /* multiplier of 100Mb */
+ uint32_t wol_support; /* WoL support (i.e. non-zero if WoL supported) */
+ uint32_t flow_control; /* 802.3X Flow Ctrl. 0=off 1=RX 2=TX 3=RX&TX. */
+ uint32_t flex10; /* Flex10 mode enabled. non-zero = yes */
+ uint32_t rx_drops; /* RX Discards. Counters roll over, never reset */
+ uint32_t rx_errors; /* RX Errors. Physical Port Stats L95, all PFs and
+ NC-SI. This is flagged by the consumer as an error. */
+ uint32_t rx_uncast_lo; /* RX Unicast Packets. Free running counters: */
+ uint32_t rx_uncast_hi; /* RX Unicast Packets. Free running counters: */
+ uint32_t rx_mcast_lo; /* RX Multicast Packets */
+ uint32_t rx_mcast_hi; /* RX Multicast Packets */
+ uint32_t rx_bcast_lo; /* RX Broadcast Packets */
+ uint32_t rx_bcast_hi; /* RX Broadcast Packets */
+ uint32_t tx_uncast_lo; /* TX Unicast Packets */
+ uint32_t tx_uncast_hi; /* TX Unicast Packets */
+ uint32_t tx_mcast_lo; /* TX Multicast Packets */
+ uint32_t tx_mcast_hi; /* TX Multicast Packets */
+ uint32_t tx_bcast_lo; /* TX Broadcast Packets */
+ uint32_t tx_bcast_hi; /* TX Broadcast Packets */
+ uint32_t tx_errors; /* TX Errors */
+ uint32_t tx_discards; /* TX Discards */
+ uint32_t rx_frames_lo; /* RX Frames received */
+ uint32_t rx_frames_hi; /* RX Frames received */
+ uint32_t rx_bytes_lo; /* RX Bytes received */
+ uint32_t rx_bytes_hi; /* RX Bytes received */
+ uint32_t tx_frames_lo; /* TX Frames sent */
+ uint32_t tx_frames_hi; /* TX Frames sent */
+ uint32_t tx_bytes_lo; /* TX Bytes sent */
+ uint32_t tx_bytes_hi; /* TX Bytes sent */
+ uint32_t link_status; /* Port P Link Status. bit 0: port enabled,
+ bit 1: link good,
+ bit 2: set if link changed since last poll. */
+ uint32_t tx_pfc_frames_lo; /* PFC Frames sent. */
+ uint32_t tx_pfc_frames_hi; /* PFC Frames sent. */
+ uint32_t rx_pfc_frames_lo; /* PFC Frames Received. */
+ uint32_t rx_pfc_frames_hi; /* PFC Frames Received. */
+};
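+
+/*
+ * Decoding sketch (illustrative), per the link_status comment above:
+ *
+ *	port_enabled = info->link_status & 0x1;
+ *	link_good    = (info->link_status >> 1) & 0x1;
+ *	link_changed = (info->link_status >> 2) & 0x1;
+ */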
+
+
+#define BNX2X_5710_FW_MAJOR_VERSION 7
+#define BNX2X_5710_FW_MINOR_VERSION 2
+#define BNX2X_5710_FW_REVISION_VERSION 51
+#define BNX2X_5710_FW_ENGINEERING_VERSION 0
+#define BNX2X_5710_FW_COMPILE_FLAGS 1
+
+
+/*
+ * attention bits $$KEEP_ENDIANNESS$$
+ */
+struct atten_sp_status_block
+{
+ uint32_t attn_bits /* 16 bit of attention signal lines */;
+ uint32_t attn_bits_ack /* 16 bit of attention signal ack */;
+ uint8_t status_block_id /* status block id */;
+ uint8_t reserved0 /* reserved for padding */;
+ uint16_t attn_bits_index /* attention bits running index */;
+ uint32_t reserved1 /* reserved for padding */;
+};
+
+
+/*
+ * The eth aggregative context of Cstorm
+ */
+struct cstorm_eth_ag_context
+{
+ uint32_t __reserved0[10];
+};
+
+
+/*
+ * dmae command structure
+ */
+struct dmae_command
+{
+ uint32_t opcode;
+#define DMAE_COMMAND_SRC (0x1<<0) /* BitField opcode Whether the source is the PCIe or the GRC. 0- The source is the PCIe 1- The source is the GRC. */
+#define DMAE_COMMAND_SRC_SHIFT 0
+#define DMAE_COMMAND_DST (0x3<<1) /* BitField opcode The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
+#define DMAE_COMMAND_DST_SHIFT 1
+#define DMAE_COMMAND_C_DST (0x1<<3) /* BitField opcode The destination of the completion: 0-PCIe 1-GRC */
+#define DMAE_COMMAND_C_DST_SHIFT 3
+#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4) /* BitField opcode Whether to write a completion word to the completion destination: 0-Do not write a completion word 1-Write the completion word */
+#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5) /* BitField opcode Whether to write a CRC word to the completion destination 0-Do not write a CRC word 1-Write a CRC word */
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6) /* BitField opcode The CRC word should be taken from the DMAE GRC space from address 9+X, where X is the value in these bits. */
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6
+#define DMAE_COMMAND_ENDIANITY (0x3<<9) /* BitField opcode swapping mode. */
+#define DMAE_COMMAND_ENDIANITY_SHIFT 9
+#define DMAE_COMMAND_PORT (0x1<<11) /* BitField opcode Which network port ID to present to the PCI request interface */
+#define DMAE_COMMAND_PORT_SHIFT 11
+#define DMAE_COMMAND_CRC_RESET (0x1<<12) /* BitField opcode reset crc result */
+#define DMAE_COMMAND_CRC_RESET_SHIFT 12
+#define DMAE_COMMAND_SRC_RESET (0x1<<13) /* BitField opcode reset source address in next go */
+#define DMAE_COMMAND_SRC_RESET_SHIFT 13
+#define DMAE_COMMAND_DST_RESET (0x1<<14) /* BitField opcode reset dest address in next go */
+#define DMAE_COMMAND_DST_RESET_SHIFT 14
+#define DMAE_COMMAND_E1HVN (0x3<<15) /* BitField opcode vnic number E2 and onwards source vnic */
+#define DMAE_COMMAND_E1HVN_SHIFT 15
+#define DMAE_COMMAND_DST_VN (0x3<<17) /* BitField opcode E2 and onwards dest vnic */
+#define DMAE_COMMAND_DST_VN_SHIFT 17
+#define DMAE_COMMAND_C_FUNC (0x1<<19) /* BitField opcode E2 and onwards which function gets the completion src_vn(e1hvn)-0 dst_vn-1 */
+#define DMAE_COMMAND_C_FUNC_SHIFT 19
+#define DMAE_COMMAND_ERR_POLICY (0x3<<20) /* BitField opcode E2 and onwards what to do when there's a completion and a PCI error regular-0 error indication-1 no completion-2 */
+#define DMAE_COMMAND_ERR_POLICY_SHIFT 20
+#define DMAE_COMMAND_RESERVED0 (0x3FF<<22) /* BitField opcode */
+#define DMAE_COMMAND_RESERVED0_SHIFT 22
+ uint32_t src_addr_lo /* source address low/grc address */;
+ uint32_t src_addr_hi /* source address hi */;
+ uint32_t dst_addr_lo /* dest address low/grc address */;
+ uint32_t dst_addr_hi /* dest address hi */;
+#if defined(__BIG_ENDIAN)
+ uint16_t opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility source VF id */
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility selects the source function PF-0, VF-1 */
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility */
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility destination VF id */
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility selects the destination function PF-0, VF-1 */
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility */
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+ uint16_t len /* copy length */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t len /* copy length */;
+ uint16_t opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility source VF id */
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility selects the source function PF-0, VF-1 */
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility */
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility destination VF id */
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility selects the destination function PF-0, VF-1 */
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility */
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+#endif
+ uint32_t comp_addr_lo /* completion address low/grc address */;
+ uint32_t comp_addr_hi /* completion address hi */;
+ uint32_t comp_val /* value to write to completion address */;
+ uint32_t crc32 /* crc32 result */;
+ uint32_t crc32_c /* crc32_c result */;
+#if defined(__BIG_ENDIAN)
+ uint16_t crc16_c /* crc16_c result */;
+ uint16_t crc16 /* crc16 result */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t crc16 /* crc16 result */;
+ uint16_t crc16_c /* crc16_c result */;
+#endif
+#if defined(__BIG_ENDIAN)
+ uint16_t reserved3;
+ uint16_t crc_t10 /* crc_t10 result */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t crc_t10 /* crc_t10 result */;
+ uint16_t reserved3;
+#endif
+#if defined(__BIG_ENDIAN)
+ uint16_t xsum8 /* checksum8 result */;
+ uint16_t xsum16 /* checksum16 result */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t xsum16 /* checksum16 result */;
+ uint16_t xsum8 /* checksum8 result */;
+#endif
+};
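+
+/*
+ * Opcode-construction sketch (illustrative, using the bitfields above): a
+ * host(PCIe)-to-GRC copy with a completion word written to GRC would set
+ * SRC=0 (PCIe), DST=2 (GRC), C_DST=1 (GRC) and the completion enable bit:
+ *
+ *	opcode = (0 << DMAE_COMMAND_SRC_SHIFT) |
+ *		 (2 << DMAE_COMMAND_DST_SHIFT) |
+ *		 (1 << DMAE_COMMAND_C_DST_SHIFT) |
+ *		 DMAE_COMMAND_C_TYPE_ENABLE;
+ */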
+
+
+/*
+ * common data for all protocols
+ */
+struct doorbell_hdr
+{
+ uint8_t header;
+#define DOORBELL_HDR_RX (0x1<<0) /* BitField header 1 for rx doorbell, 0 for tx doorbell */
+#define DOORBELL_HDR_RX_SHIFT 0
+#define DOORBELL_HDR_DB_TYPE (0x1<<1) /* BitField header 0 for normal doorbell, 1 for advertise wnd doorbell */
+#define DOORBELL_HDR_DB_TYPE_SHIFT 1
+#define DOORBELL_HDR_DPM_SIZE (0x3<<2) /* BitField header rdma tx only: DPM transaction size specifier (64/128/256/512 bytes) */
+#define DOORBELL_HDR_DPM_SIZE_SHIFT 2
+#define DOORBELL_HDR_CONN_TYPE (0xF<<4) /* BitField header connection type */
+#define DOORBELL_HDR_CONN_TYPE_SHIFT 4
+};
+
+/*
+ * Ethernet doorbell
+ */
+struct eth_tx_doorbell
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t npackets /* number of packets that were added in the doorbell */;
+ uint8_t params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) /* BitField params number of buffer descriptors that were added in the doorbell */
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) /* BitField params tx fin command flag */
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7) /* BitField params doorbell queue spare flag */
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+ struct doorbell_hdr hdr;
+#elif defined(__LITTLE_ENDIAN)
+ struct doorbell_hdr hdr;
+ uint8_t params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) /* BitField params number of buffer descriptors that were added in the doorbell */
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) /* BitField params tx fin command flag */
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7) /* BitField params doorbell queue spare flag */
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+ uint16_t npackets /* number of packets that were added in the doorbell */;
+#endif
+};
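+
+/*
+ * Fill sketch (illustrative; assumes an Ethernet connection-type constant,
+ * conn_type, defined elsewhere in this file): ringing the tx doorbell for
+ * n packets spanning nbds buffer descriptors:
+ *
+ *	db.hdr.header = conn_type << DOORBELL_HDR_CONN_TYPE_SHIFT;
+ *	db.params = nbds << ETH_TX_DOORBELL_NUM_BDS_SHIFT;
+ *	db.npackets = n;
+ */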
+
+
+/*
+ * 3 lines. status block $$KEEP_ENDIANNESS$$
+ */
+struct hc_status_block_e1x
+{
+ uint16_t index_values[HC_SB_MAX_INDICES_E1X] /* indices reported by cstorm */;
+ uint16_t running_index[HC_SB_MAX_SM] /* Status Block running indices */;
+ uint32_t rsrv[11];
+};
+
+/*
+ * host status block
+ */
+struct host_hc_status_block_e1x
+{
+ struct hc_status_block_e1x sb /* fast path indices */;
+};
+
+
+/*
+ * 3 lines. status block $$KEEP_ENDIANNESS$$
+ */
+struct hc_status_block_e2
+{
+ uint16_t index_values[HC_SB_MAX_INDICES_E2] /* indices reported by cstorm */;
+ uint16_t running_index[HC_SB_MAX_SM] /* Status Block running indices */;
+ uint32_t reserved[11];
+};
+
+/*
+ * host status block
+ */
+struct host_hc_status_block_e2
+{
+ struct hc_status_block_e2 sb /* fast path indices */;
+};
+
+
+/*
+ * 5 lines. slow-path status block $$KEEP_ENDIANNESS$$
+ */
+struct hc_sp_status_block
+{
+ uint16_t index_values[HC_SP_SB_MAX_INDICES] /* indices reported by cstorm */;
+ uint16_t running_index /* Status Block running index */;
+ uint16_t rsrv;
+ uint32_t rsrv1;
+};
+
+/*
+ * host status block
+ */
+struct host_sp_status_block
+{
+ struct atten_sp_status_block atten_status_block /* attention bits section */;
+ struct hc_sp_status_block sp_sb /* slow path indices */;
+};
+
+
+/*
+ * IGU driver acknowledgment register
+ */
+union igu_ack_register
+{
+ struct {
+#if defined(__BIG_ENDIAN)
+ uint16_t sb_id_and_flags;
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0) /* BitField sb_id_and_flags 0-15: non default status blocks, 16: default status block */
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
+#define IGU_ACK_REGISTER_STORM_ID (0x7<<5) /* BitField sb_id_and_flags 0-3:storm id, 4: attn status block (valid in default sb only) */
+#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
+#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8) /* BitField sb_id_and_flags if set, acknowledges status block index */
+#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
+#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9) /* BitField sb_id_and_flags interrupt enable/disable/nop: use IGU_INT_xxx constants */
+#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
+#define IGU_ACK_REGISTER_RESERVED (0x1F<<11) /* BitField sb_id_and_flags */
+#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
+ uint16_t status_block_index /* status block index acknowledgement */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t status_block_index /* status block index acknowledgement */;
+ uint16_t sb_id_and_flags;
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0) /* BitField sb_id_and_flags 0-15: non default status blocks, 16: default status block */
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
+#define IGU_ACK_REGISTER_STORM_ID (0x7<<5) /* BitField sb_id_and_flags 0-3:storm id, 4: attn status block (valid in default sb only) */
+#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
+#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8) /* BitField sb_id_and_flags if set, acknowledges status block index */
+#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
+#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9) /* BitField sb_id_and_flags interrupt enable/disable/nop: use IGU_INT_xxx constants */
+#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
+#define IGU_ACK_REGISTER_RESERVED (0x1F<<11) /* BitField sb_id_and_flags */
+#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
+#endif
+ } sb;
+ uint32_t raw_data;
+};
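+
+/*
+ * Sketch (illustrative): acknowledging status block index idx on status
+ * block sb_id while re-enabling the interrupt line (IGU_INT_ENABLE comes
+ * from enum igu_int_cmd below):
+ *
+ *	union igu_ack_register ack = { { 0 } };
+ *	ack.sb.status_block_index = idx;
+ *	ack.sb.sb_id_and_flags =
+ *		(sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
+ *		(1 << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
+ *		(IGU_INT_ENABLE << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT);
+ */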
+
+
+/*
+ * IGU driver acknowledgement register
+ */
+struct igu_backward_compatible
+{
+ uint32_t sb_id_and_flags;
+#define IGU_BACKWARD_COMPATIBLE_SB_INDEX (0xFFFF<<0) /* BitField sb_id_and_flags */
+#define IGU_BACKWARD_COMPATIBLE_SB_INDEX_SHIFT 0
+#define IGU_BACKWARD_COMPATIBLE_SB_SELECT (0x1F<<16) /* BitField sb_id_and_flags */
+#define IGU_BACKWARD_COMPATIBLE_SB_SELECT_SHIFT 16
+#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS (0x7<<21) /* BitField sb_id_and_flags 0-3:storm id, 4: attn status block (valid in default sb only) */
+#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS_SHIFT 21
+#define IGU_BACKWARD_COMPATIBLE_BUPDATE (0x1<<24) /* BitField sb_id_and_flags if set, acknowledges status block index */
+#define IGU_BACKWARD_COMPATIBLE_BUPDATE_SHIFT 24
+#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT (0x3<<25) /* BitField sb_id_and_flags interrupt enable/disable/nop: use IGU_INT_xxx constants */
+#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT_SHIFT 25
+#define IGU_BACKWARD_COMPATIBLE_RESERVED_0 (0x1F<<27) /* BitField sb_id_and_flags */
+#define IGU_BACKWARD_COMPATIBLE_RESERVED_0_SHIFT 27
+ uint32_t reserved_2;
+};
+
+
+/*
+ * IGU driver acknowledgement register
+ */
+struct igu_regular
+{
+ uint32_t sb_id_and_flags;
+#define IGU_REGULAR_SB_INDEX (0xFFFFF<<0) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_SB_INDEX_SHIFT 0
+#define IGU_REGULAR_RESERVED0 (0x1<<20) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_RESERVED0_SHIFT 20
+#define IGU_REGULAR_SEGMENT_ACCESS (0x7<<21) /* BitField sb_id_and_flags 21-23 (use enum igu_seg_access) */
+#define IGU_REGULAR_SEGMENT_ACCESS_SHIFT 21
+#define IGU_REGULAR_BUPDATE (0x1<<24) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_BUPDATE_SHIFT 24
+#define IGU_REGULAR_ENABLE_INT (0x3<<25) /* BitField sb_id_and_flags interrupt enable/disable/nop (use enum igu_int_cmd) */
+#define IGU_REGULAR_ENABLE_INT_SHIFT 25
+#define IGU_REGULAR_RESERVED_1 (0x1<<27) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_RESERVED_1_SHIFT 27
+#define IGU_REGULAR_CLEANUP_TYPE (0x3<<28) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28
+#define IGU_REGULAR_CLEANUP_SET (0x1<<30) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_CLEANUP_SET_SHIFT 30
+#define IGU_REGULAR_BCLEANUP (0x1<<31) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_BCLEANUP_SHIFT 31
+ uint32_t reserved_2;
+};
+
+/*
+ * IGU driver acknowledgement register
+ */
+union igu_consprod_reg
+{
+ struct igu_regular regular;
+ struct igu_backward_compatible backward_compatible;
+};
+
+
+/*
+ * Igu control commands
+ */
+enum igu_ctrl_cmd
+{
+ IGU_CTRL_CMD_TYPE_RD,
+ IGU_CTRL_CMD_TYPE_WR,
+ MAX_IGU_CTRL_CMD};
+
+
+/*
+ * Control register for the IGU command register
+ */
+struct igu_ctrl_reg
+{
+ uint32_t ctrl_data;
+#define IGU_CTRL_REG_ADDRESS (0xFFF<<0) /* BitField ctrl_data */
+#define IGU_CTRL_REG_ADDRESS_SHIFT 0
+#define IGU_CTRL_REG_FID (0x7F<<12) /* BitField ctrl_data */
+#define IGU_CTRL_REG_FID_SHIFT 12
+#define IGU_CTRL_REG_RESERVED (0x1<<19) /* BitField ctrl_data */
+#define IGU_CTRL_REG_RESERVED_SHIFT 19
+#define IGU_CTRL_REG_TYPE (0x1<<20) /* BitField ctrl_data (use enum igu_ctrl_cmd) */
+#define IGU_CTRL_REG_TYPE_SHIFT 20
+#define IGU_CTRL_REG_UNUSED (0x7FF<<21) /* BitField ctrl_data */
+#define IGU_CTRL_REG_UNUSED_SHIFT 21
+};
+
+
+/*
+ * Igu interrupt command
+ */
+enum igu_int_cmd
+{
+ IGU_INT_ENABLE,
+ IGU_INT_DISABLE,
+ IGU_INT_NOP,
+ IGU_INT_NOP2,
+ MAX_IGU_INT_CMD};
+
+
+/*
+ * Igu segments
+ */
+enum igu_seg_access
+{
+ IGU_SEG_ACCESS_NORM,
+ IGU_SEG_ACCESS_DEF,
+ IGU_SEG_ACCESS_ATTN,
+ MAX_IGU_SEG_ACCESS};
+
+
+/*
+ * Parser parsing flags field
+ */
+struct parsing_flags
+{
+ uint16_t flags;
+#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0) /* BitField flagscontext flags 0=non-unicast, 1=unicast (use enum prs_flags_eth_addr_type) */
+#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0
+#define PARSING_FLAGS_VLAN (0x1<<1) /* BitField flagscontext flags 0 or 1 */
+#define PARSING_FLAGS_VLAN_SHIFT 1
+#define PARSING_FLAGS_EXTRA_VLAN (0x1<<2) /* BitField flagscontext flags 0 or 1 */
+#define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2
+#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3) /* BitField flagscontext flags 0=unknown, 1=IPv4, 2=IPv6, 3=LLC SNAP unknown. LLC SNAP here refers only to LLC/SNAP packets that do not have IPv4 or IPv6 above them. IPv4 and IPv6 indications are set even if they are over LLC/SNAP and not directly over Ethernet (use enum prs_flags_over_eth) */
+#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3
+#define PARSING_FLAGS_IP_OPTIONS (0x1<<5) /* BitField flagscontext flags 0=no IP options / extension headers. 1=IP options / extension headers exist */
+#define PARSING_FLAGS_IP_OPTIONS_SHIFT 5
+#define PARSING_FLAGS_FRAGMENTATION_STATUS (0x1<<6) /* BitField flagscontext flags 0=non-fragmented, 1=fragmented */
+#define PARSING_FLAGS_FRAGMENTATION_STATUS_SHIFT 6
+#define PARSING_FLAGS_OVER_IP_PROTOCOL (0x3<<7) /* BitField flagscontext flags 0=unknown, 1=TCP, 2=UDP (use enum prs_flags_over_ip) */
+#define PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT 7
+#define PARSING_FLAGS_PURE_ACK_INDICATION (0x1<<9) /* BitField flagscontext flags 0=packet with data, 1=pure-ACK (use enum prs_flags_ack_type) */
+#define PARSING_FLAGS_PURE_ACK_INDICATION_SHIFT 9
+#define PARSING_FLAGS_TCP_OPTIONS_EXIST (0x1<<10) /* BitField flagscontext flags 0=no TCP options. 1=TCP options */
+#define PARSING_FLAGS_TCP_OPTIONS_EXIST_SHIFT 10
+#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG (0x1<<11) /* BitField flagscontext flags According to the TCP header options parsing */
+#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG_SHIFT 11
+#define PARSING_FLAGS_CONNECTION_MATCH (0x1<<12) /* BitField flagscontext flags connection match in searcher indication */
+#define PARSING_FLAGS_CONNECTION_MATCH_SHIFT 12
+#define PARSING_FLAGS_LLC_SNAP (0x1<<13) /* BitField flagscontext flags LLC SNAP indication */
+#define PARSING_FLAGS_LLC_SNAP_SHIFT 13
+#define PARSING_FLAGS_RESERVED0 (0x3<<14) /* BitField flagscontext flags */
+#define PARSING_FLAGS_RESERVED0_SHIFT 14
+};
+
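+/*
+ * Illustrative sketch, not part of the generated HSI definitions:
+ * extracting the over-IP protocol from a parsing_flags word reported in
+ * an Rx CQE. The result is a value from enum prs_flags_over_ip further
+ * below; the helper name is hypothetical.
+ */
+static inline int parsing_flags_ip_proto(const struct parsing_flags *pf)
+{
+ return (pf->flags & PARSING_FLAGS_OVER_IP_PROTOCOL) >>
+        PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT;
+}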
+
+/*
+ * Parsing flags for TCP ACK type
+ */
+enum prs_flags_ack_type
+{
+ PRS_FLAG_PUREACK_PIGGY,
+ PRS_FLAG_PUREACK_PURE,
+ MAX_PRS_FLAGS_ACK_TYPE};
+
+
+/*
+ * Parsing flags for Ethernet address type
+ */
+enum prs_flags_eth_addr_type
+{
+ PRS_FLAG_ETHTYPE_NON_UNICAST,
+ PRS_FLAG_ETHTYPE_UNICAST,
+ MAX_PRS_FLAGS_ETH_ADDR_TYPE};
+
+
+/*
+ * Parsing flags for over-ethernet protocol
+ */
+enum prs_flags_over_eth
+{
+ PRS_FLAG_OVERETH_UNKNOWN,
+ PRS_FLAG_OVERETH_IPV4,
+ PRS_FLAG_OVERETH_IPV6,
+ PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN,
+ MAX_PRS_FLAGS_OVER_ETH};
+
+
+/*
+ * Parsing flags for over-IP protocol
+ */
+enum prs_flags_over_ip
+{
+ PRS_FLAG_OVERIP_UNKNOWN,
+ PRS_FLAG_OVERIP_TCP,
+ PRS_FLAG_OVERIP_UDP,
+ MAX_PRS_FLAGS_OVER_IP};
+
+
+/*
+ * SDM operation gen command (generate aggregative interrupt)
+ */
+struct sdm_op_gen
+{
+ uint32_t command;
+#define SDM_OP_GEN_COMP_PARAM (0x1F<<0) /* BitField commandcomp_param and comp_type thread ID/aggr interrupt number/counter depending on the completion type */
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE (0x7<<5) /* BitField commandcomp_param and comp_type Direct messages to CM / PCI switch are not supported in operation_gen completion */
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 5
+#define SDM_OP_GEN_AGG_VECT_IDX (0xFF<<8) /* BitField commandcomp_param and comp_type bit index in aggregated interrupt vector */
+#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8
+#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1<<16) /* BitField commandcomp_param and comp_type */
+#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16
+#define SDM_OP_GEN_RESERVED (0x7FFF<<17) /* BitField commandcomp_param and comp_type */
+#define SDM_OP_GEN_RESERVED_SHIFT 17
+};
+
+
+/*
+ * Timers connection context
+ */
+struct timers_block_context
+{
+ uint32_t __reserved_0 /* data of client 0 of the timers block */;
+ uint32_t __reserved_1 /* data of client 1 of the timers block */;
+ uint32_t __reserved_2 /* data of client 2 of the timers block */;
+ uint32_t flags;
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0) /* BitField flagscontext flags number of active timers running */
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2) /* BitField flagscontext flags flag: is connection valid (should be set by driver to 1 in toe/iscsi connections) */
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3) /* BitField flagscontext flags */
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
+};
+
+
+/*
+ * The eth aggregative context of Tstorm
+ */
+struct tstorm_eth_ag_context
+{
+ uint32_t __reserved0[14];
+};
+
+
+/*
+ * The eth aggregative context of Ustorm
+ */
+struct ustorm_eth_ag_context
+{
+ uint32_t __reserved0;
+#if defined(__BIG_ENDIAN)
+ uint8_t cdu_usage /* Will be used by the CDU for validation of the CID/connection type on doorbells. */;
+ uint8_t __reserved2;
+ uint16_t __reserved1;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t __reserved1;
+ uint8_t __reserved2;
+ uint8_t cdu_usage /* Will be used by the CDU for validation of the CID/connection type on doorbells. */;
+#endif
+ uint32_t __reserved3[6];
+};
+
+
+/*
+ * The eth aggregative context of Xstorm
+ */
+struct xstorm_eth_ag_context
+{
+ uint32_t reserved0;
+#if defined(__BIG_ENDIAN)
+ uint8_t cdu_reserved /* Used by the CDU for validation and debugging */;
+ uint8_t reserved2;
+ uint16_t reserved1;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t reserved1;
+ uint8_t reserved2;
+ uint8_t cdu_reserved /* Used by the CDU for validation and debugging */;
+#endif
+ uint32_t reserved3[30];
+};
+
+
+/*
+ * doorbell message sent to the chip
+ */
+struct doorbell
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t zero_fill2 /* driver must zero this field! */;
+ uint8_t zero_fill1 /* driver must zero this field! */;
+ struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct doorbell_hdr header;
+ uint8_t zero_fill1 /* driver must zero this field! */;
+ uint16_t zero_fill2 /* driver must zero this field! */;
+#endif
+};
+
+
+/*
+ * doorbell message sent to the chip
+ */
+struct doorbell_set_prod
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t prod /* Producer index to be set */;
+ uint8_t zero_fill1 /* driver must zero this field! */;
+ struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct doorbell_hdr header;
+ uint8_t zero_fill1 /* driver must zero this field! */;
+ uint16_t prod /* Producer index to be set */;
+#endif
+};
+
+
+struct regpair
+{
+ uint32_t lo /* low word for reg-pair */;
+ uint32_t hi /* high word for reg-pair */;
+};
+
+
+struct regpair_native
+{
+ uint32_t lo /* low word for reg-pair */;
+ uint32_t hi /* high word for reg-pair */;
+};
+
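+/*
+ * Illustrative sketch, not part of the generated HSI definitions: regpair
+ * fields are filled by splitting a 64-bit host (DMA) address into 32-bit
+ * halves. The helper name is hypothetical.
+ */
+static inline void regpair_from_u64(struct regpair *rp, uint64_t addr)
+{
+ rp->lo = (uint32_t)addr /* low 32 bits */;
+ rp->hi = (uint32_t)(addr >> 32) /* high 32 bits */;
+}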
+
+/*
+ * Classify rule opcodes in E2/E3
+ */
+enum classify_rule
+{
+ CLASSIFY_RULE_OPCODE_MAC /* Add/remove a MAC address */,
+ CLASSIFY_RULE_OPCODE_VLAN /* Add/remove a VLAN */,
+ CLASSIFY_RULE_OPCODE_PAIR /* Add/remove a MAC-VLAN pair */,
+ MAX_CLASSIFY_RULE};
+
+
+/*
+ * Classify rule types in E2/E3
+ */
+enum classify_rule_action_type
+{
+ CLASSIFY_RULE_REMOVE,
+ CLASSIFY_RULE_ADD,
+ MAX_CLASSIFY_RULE_ACTION_TYPE};
+
+
+/*
+ * client init ramrod data $$KEEP_ENDIANNESS$$
+ */
+struct client_init_general_data
+{
+ uint8_t client_id /* client_id */;
+ uint8_t statistics_counter_id /* statistics counter id */;
+ uint8_t statistics_en_flg /* statistics en flg */;
+ uint8_t is_fcoe_flg /* is this an fcoe connection. (1 bit is used) */;
+ uint8_t activate_flg /* if 0, the client is deactivated; otherwise the client is activated (1 bit is used) */;
+ uint8_t sp_client_id /* the slow path rings client Id. */;
+ uint16_t mtu /* Host MTU from client config */;
+ uint8_t statistics_zero_flg /* if set FW will reset the statistic counter of this client */;
+ uint8_t func_id /* PCI function ID (0-71) */;
+ uint8_t cos /* The connection cos, if applicable */;
+ uint8_t traffic_type;
+ uint32_t reserved0;
+};
+
+
+/*
+ * client init rx data $$KEEP_ENDIANNESS$$
+ */
+struct client_init_rx_data
+{
+ uint8_t tpa_en;
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0) /* BitField tpa_entpa_enable tpa enable flg ipv4 */
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1) /* BitField tpa_entpa_enable tpa enable flg ipv6 */
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
+#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2) /* BitField tpa_entpa_enable tpa mode (LRO or GRO) (use enum tpa_mode) */
+#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2
+#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3) /* BitField tpa_entpa_enable */
+#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3
+ uint8_t vmqueue_mode_en_flg /* If set, working in VMQueue mode (always consume one sge) */;
+ uint8_t extra_data_over_sgl_en_flg /* if set, put over sgl data from end of input message */;
+ uint8_t cache_line_alignment_log_size /* The log size of cache line alignment in bytes. Must be a power of 2. */;
+ uint8_t enable_dynamic_hc /* If set, dynamic HC is enabled */;
+ uint8_t max_sges_for_packet /* The maximal number of SGEs that can be used for one packet. Depends on MTU and SGE size. Must be 0 if SGEs are disabled */;
+ uint8_t client_qzone_id /* used in E2 only, to specify the HW queue zone ID used for this client rx producers */;
+ uint8_t drop_ip_cs_err_flg /* If set, this client drops packets with IP checksum error */;
+ uint8_t drop_tcp_cs_err_flg /* If set, this client drops packets with TCP checksum error */;
+ uint8_t drop_ttl0_flg /* If set, this client drops packets with TTL=0 */;
+ uint8_t drop_udp_cs_err_flg /* If set, this client drops packets with UDP checksum error */;
+ uint8_t inner_vlan_removal_enable_flg /* If set, inner VLAN removal is enabled for this client */;
+ uint8_t outer_vlan_removal_enable_flg /* If set, outer VLAN removal is enabled for this client */;
+ uint8_t status_block_id /* rx status block id */;
+ uint8_t rx_sb_index_number /* status block indices */;
+ uint8_t dont_verify_rings_pause_thr_flg /* If set, the rings pause thresholds will not be verified by firmware. */;
+ uint8_t max_tpa_queues /* maximal TPA queues allowed for this client */;
+ uint8_t silent_vlan_removal_flg /* if set, and the vlan is equal to the requested vlan according to the mask, the vlan will be removed without notifying the driver */;
+ uint16_t max_bytes_on_bd /* Maximum bytes that can be placed on a BD. The BD allocated size should include 2 more bytes (ip alignment) and alignment size (in case the address is not aligned) */;
+ uint16_t sge_buff_size /* Size of the buffers pointed by SGEs */;
+ uint8_t approx_mcast_engine_id /* In Everest2, if is_approx_mcast is set, this field specifies which approximate multicast engine is associated with this client */;
+ uint8_t rss_engine_id /* In Everest2, if rss_mode is set, this field specifies which RSS engine is associated with this client */;
+ struct regpair bd_page_base /* BD page base address at the host */;
+ struct regpair sge_page_base /* SGE page base address at the host */;
+ struct regpair cqe_page_base /* Completion queue base address */;
+ uint8_t is_leading_rss;
+ uint8_t is_approx_mcast;
+ uint16_t max_agg_size /* maximal size for the aggregated TPA packets, reported by the host */;
+ uint16_t state;
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0) /* BitField staterx filters state drop all unicast packets */
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1) /* BitField staterx filters state accept all unicast packets (subject to vlan) */
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2) /* BitField staterx filters state accept all unmatched unicast packets (subject to vlan) */
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3) /* BitField staterx filters state drop all multicast packets */
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4) /* BitField staterx filters state accept all multicast packets (subject to vlan) */
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5) /* BitField staterx filters state accept all broadcast packets (subject to vlan) */
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6) /* BitField staterx filters state accept packets matched only by MAC (without checking vlan) */
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6
+#define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7) /* BitField staterx filters state */
+#define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7
+ uint16_t cqe_pause_thr_low /* number of remaining cqes below which we send a pause message */;
+ uint16_t cqe_pause_thr_high /* number of remaining cqes above which we send an un-pause message */;
+ uint16_t bd_pause_thr_low /* number of remaining bds below which we send a pause message */;
+ uint16_t bd_pause_thr_high /* number of remaining bds above which we send an un-pause message */;
+ uint16_t sge_pause_thr_low /* number of remaining sges below which we send a pause message */;
+ uint16_t sge_pause_thr_high /* number of remaining sges above which we send an un-pause message */;
+ uint16_t rx_cos_mask /* the bits that will be set in the PFC/SAFC packet that will be generated when this ring is full. For regular flow control set this to 1 */;
+ uint16_t silent_vlan_value /* The vlan to compare against, in case silent vlan is set */;
+ uint16_t silent_vlan_mask /* The vlan mask, in case silent vlan is set */;
+ uint32_t reserved6[2];
+};
+
+/*
+ * client init tx data $$KEEP_ENDIANNESS$$
+ */
+struct client_init_tx_data
+{
+ uint8_t enforce_security_flg /* if set, security checks will be made for this connection */;
+ uint8_t tx_status_block_id /* the number of status block to update */;
+ uint8_t tx_sb_index_number /* the index to use inside the status block */;
+ uint8_t tss_leading_client_id /* client ID of the leading TSS client, for TX classification source knock out */;
+ uint8_t tx_switching_flg /* if set, tx switching will be done to packets on this connection */;
+ uint8_t anti_spoofing_flg /* if set, anti spoofing check will be done to packets on this connection */;
+ uint16_t default_vlan /* default vlan tag (id+pri). (valid if default_vlan_flg is set) */;
+ struct regpair tx_bd_page_base /* BD page base address at the host for TxBdCons */;
+ uint16_t state;
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0) /* BitField statetx filters state accept all unicast packets (subject to vlan) */
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1) /* BitField statetx filters state accept all multicast packets (subject to vlan) */
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2) /* BitField statetx filters state accept all broadcast packets (subject to vlan) */
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3) /* BitField statetx filters state accept packets matched only by MAC (without checking vlan) */
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
+#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4) /* BitField statetx filters state */
+#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4
+ uint8_t default_vlan_flg /* is default vlan valid for this client. */;
+ uint8_t force_default_pri_flg /* if set, force default priority */;
+ uint8_t tunnel_lso_inc_ip_id /* In case of LSO over IPv4 tunnel, whether to increment IP ID on external IP header or internal IP header */;
+ uint8_t refuse_outband_vlan_flg /* if set, the FW will not add an outband vlan to the packet (even if one exists on the BD). */;
+ uint8_t tunnel_non_lso_pcsum_location /* In case of non-LSO encapsulated packets with L4 checksum offload, the pseudo checksum location - on packet or on BD. */;
+ uint8_t tunnel_non_lso_outer_ip_csum_location /* In case of non-LSO encapsulated packets with outer L3 IP checksum offload, the pseudo checksum location - on packet or on BD. */;
+};
+
+/*
+ * client init ramrod data $$KEEP_ENDIANNESS$$
+ */
+struct client_init_ramrod_data
+{
+ struct client_init_general_data general /* client init general data */;
+ struct client_init_rx_data rx /* client init rx data */;
+ struct client_init_tx_data tx /* client init tx data */;
+};
+
+
+/*
+ * client update ramrod data $$KEEP_ENDIANNESS$$
+ */
+struct client_update_ramrod_data
+{
+ uint8_t client_id /* the client to update */;
+ uint8_t func_id /* PCI function ID this client belongs to (0-71) */;
+ uint8_t inner_vlan_removal_enable_flg /* If set, inner VLAN removal is enabled for this client; will be changed according to the change flag */;
+ uint8_t inner_vlan_removal_change_flg /* If set, the inner VLAN removal flag will be set according to the enable flag */;
+ uint8_t outer_vlan_removal_enable_flg /* If set, outer VLAN removal is enabled for this client; will be changed according to the change flag */;
+ uint8_t outer_vlan_removal_change_flg /* If set, the outer VLAN removal flag will be set according to the enable flag */;
+ uint8_t anti_spoofing_enable_flg /* If set, anti-spoofing is enabled for this client; will be changed according to the change flag */;
+ uint8_t anti_spoofing_change_flg /* If set, the anti-spoofing flag will be set according to the anti-spoofing enable flag */;
+ uint8_t activate_flg /* if 0, the client is deactivated; otherwise the client is activated (1 bit is used) */;
+ uint8_t activate_change_flg /* If set, activate_flg will be checked */;
+ uint16_t default_vlan /* default vlan tag (id+pri). (valid if default_vlan_flg is set) */;
+ uint8_t default_vlan_enable_flg;
+ uint8_t default_vlan_change_flg;
+ uint16_t silent_vlan_value /* The vlan to compare against, in case silent vlan is set */;
+ uint16_t silent_vlan_mask /* The vlan mask, in case silent vlan is set */;
+ uint8_t silent_vlan_removal_flg /* if set, and the vlan is equal to the requested vlan according to the mask, the vlan will be removed without notifying the driver */;
+ uint8_t silent_vlan_change_flg;
+ uint8_t refuse_outband_vlan_flg /* If set, the FW will not add an outband vlan to the packet (even if one exists on the BD). */;
+ uint8_t refuse_outband_vlan_change_flg /* If set, refuse_outband_vlan_flg will be updated. */;
+ uint8_t tx_switching_flg /* If set, tx switching will be done to packets on this connection. */;
+ uint8_t tx_switching_change_flg /* If set, tx_switching_flg will be updated. */;
+ uint32_t reserved1;
+ uint32_t echo /* echo value to be sent to driver on event ring */;
+};
+
+
+/*
+ * The eth storm context of Cstorm
+ */
+struct cstorm_eth_st_context
+{
+ uint32_t __reserved0[4];
+};
+
+
+struct double_regpair
+{
+ uint32_t regpair0_lo /* low word for reg-pair0 */;
+ uint32_t regpair0_hi /* high word for reg-pair0 */;
+ uint32_t regpair1_lo /* low word for reg-pair1 */;
+ uint32_t regpair1_hi /* high word for reg-pair1 */;
+};
+
+
+/*
+ * Ethernet address types used in ethernet tx BDs
+ */
+enum eth_addr_type
+{
+ UNKNOWN_ADDRESS,
+ UNICAST_ADDRESS,
+ MULTICAST_ADDRESS,
+ BROADCAST_ADDRESS,
+ MAX_ETH_ADDR_TYPE
+};
+
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct eth_classify_cmd_header
+{
+ uint8_t cmd_general_data;
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0) /* BitField cmd_general_data should this cmd be applied for Rx */
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1) /* BitField cmd_general_data should this cmd be applied for Tx */
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2) /* BitField cmd_general_data command opcode for MAC/VLAN/PAIR (use enum classify_rule) */
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4) /* BitField cmd_general_data (use enum classify_rule_action_type) */
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5) /* BitField cmd_general_data */
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5
+ uint8_t func_id /* the function id */;
+ uint8_t client_id;
+ uint8_t reserved1;
+};
+
+
+/*
+ * header for eth classification config ramrod $$KEEP_ENDIANNESS$$
+ */
+struct eth_classify_header
+{
+ uint8_t rule_cnt /* number of rules in classification config ramrod */;
+ uint8_t reserved0;
+ uint16_t reserved1;
+ uint32_t echo /* echo value to be sent to driver on event ring */;
+};
+
+
+/*
+ * Command for adding/removing a MAC classification rule $$KEEP_ENDIANNESS$$
+ */
+struct eth_classify_mac_cmd
+{
+ struct eth_classify_cmd_header header;
+ uint16_t reserved0;
+ uint16_t inner_mac;
+ uint16_t mac_lsb;
+ uint16_t mac_mid;
+ uint16_t mac_msb;
+ uint16_t reserved1;
+};
+
+
+/*
+ * Command for adding/removing a MAC-VLAN pair classification rule $$KEEP_ENDIANNESS$$
+ */
+struct eth_classify_pair_cmd
+{
+ struct eth_classify_cmd_header header;
+ uint16_t reserved0;
+ uint16_t inner_mac;
+ uint16_t mac_lsb;
+ uint16_t mac_mid;
+ uint16_t mac_msb;
+ uint16_t vlan;
+};
+
+
+/*
+ * Command for adding/removing a VLAN classification rule $$KEEP_ENDIANNESS$$
+ */
+struct eth_classify_vlan_cmd
+{
+ struct eth_classify_cmd_header header;
+ uint32_t reserved0;
+ uint32_t reserved1;
+ uint16_t reserved2;
+ uint16_t vlan;
+};
+
+/*
+ * union for eth classification rule $$KEEP_ENDIANNESS$$
+ */
+union eth_classify_rule_cmd
+{
+ struct eth_classify_mac_cmd mac;
+ struct eth_classify_vlan_cmd vlan;
+ struct eth_classify_pair_cmd pair;
+};
+
+/*
+ * parameters for eth classification configuration ramrod $$KEEP_ENDIANNESS$$
+ */
+struct eth_classify_rules_ramrod_data
+{
+ struct eth_classify_header header;
+ union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
+};
+
+
+/*
+ * The data contains the client ID needed by the ramrod $$KEEP_ENDIANNESS$$
+ */
+struct eth_common_ramrod_data
+{
+ uint32_t client_id /* id of this client. (5 bits are used) */;
+ uint32_t reserved1;
+};
+
+
+/*
+ * The eth storm context of Ustorm
+ */
+struct ustorm_eth_st_context
+{
+ uint32_t reserved0[52];
+};
+
+/*
+ * The eth storm context of Tstorm
+ */
+struct tstorm_eth_st_context
+{
+ uint32_t __reserved0[28];
+};
+
+/*
+ * The eth storm context of Xstorm
+ */
+struct xstorm_eth_st_context
+{
+ uint32_t reserved0[60];
+};
+
+/*
+ * Ethernet connection context
+ */
+struct eth_context
+{
+ struct ustorm_eth_st_context ustorm_st_context /* Ustorm storm context */;
+ struct tstorm_eth_st_context tstorm_st_context /* Tstorm storm context */;
+ struct xstorm_eth_ag_context xstorm_ag_context /* Xstorm aggregative context */;
+ struct tstorm_eth_ag_context tstorm_ag_context /* Tstorm aggregative context */;
+ struct cstorm_eth_ag_context cstorm_ag_context /* Cstorm aggregative context */;
+ struct ustorm_eth_ag_context ustorm_ag_context /* Ustorm aggregative context */;
+ struct timers_block_context timers_context /* Timers block context */;
+ struct xstorm_eth_st_context xstorm_st_context /* Xstorm storm context */;
+ struct cstorm_eth_st_context cstorm_st_context /* Cstorm storm context */;
+};
+
+
+/*
+ * union for sgl and raw data.
+ */
+union eth_sgl_or_raw_data
+{
+ uint16_t sgl[8] /* Scatter-gather list of SGEs used by this packet. This list includes the indices of the SGEs. */;
+ uint32_t raw_data[4] /* raw data from Tstorm to the driver. */;
+};
+
+/*
+ * eth FP end aggregation CQE parameters struct $$KEEP_ENDIANNESS$$
+ */
+struct eth_end_agg_rx_cqe
+{
+ uint8_t type_error_flags;
+#define ETH_END_AGG_RX_CQE_TYPE (0x3<<0) /* BitField type_error_flags (use enum eth_rx_cqe_type) */
+#define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2) /* BitField type_error_flags (use enum eth_rx_fp_sel) */
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3) /* BitField type_error_flags */
+#define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3
+ uint8_t reserved1;
+ uint8_t queue_index /* The aggregation queue index of this packet */;
+ uint8_t reserved2;
+ uint32_t timestamp_delta /* timestamp delta between the first and last packet in the aggregation */;
+ uint16_t num_of_coalesced_segs /* Num of coalesced segments. */;
+ uint16_t pkt_len /* Packet length */;
+ uint8_t pure_ack_count /* Number of pure acks coalesced. */;
+ uint8_t reserved3;
+ uint16_t reserved4;
+ union eth_sgl_or_raw_data sgl_or_raw_data /* union for sgl and raw data. */;
+ uint32_t reserved5[8];
+};
+
+
+/*
+ * regular eth FP CQE parameters struct $$KEEP_ENDIANNESS$$
+ */
+struct eth_fast_path_rx_cqe
+{
+ uint8_t type_error_flags;
+#define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0) /* BitField type_error_flags (use enum eth_rx_cqe_type) */
+#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2) /* BitField type_error_flags (use enum eth_rx_fp_sel) */
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3) /* BitField type_error_flags Physical layer errors */
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4) /* BitField type_error_flags IP checksum error */
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5) /* BitField type_error_flags TCP/UDP checksum error */
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5
+#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) /* BitField type_error_flags */
+#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6
+ uint8_t status_flags;
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) /* BitField status_flags (use enum eth_rss_hash_type) */
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1<<3) /* BitField status_flags RSS hashing on/off */
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3
+#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1<<4) /* BitField status_flags if set to 1, this is a broadcast packet */
+#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4
+#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1<<5) /* BitField status_flags if set to 1, the MAC address was matched in the tstorm CAM search */
+#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5
+#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1<<6) /* BitField status_flags IP checksum validation was not performed (if packet is not IPv4) */
+#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7) /* BitField status_flags TCP/UDP checksum validation was not performed (if packet is not TCP/UDP or IPv6 extheaders exist) */
+#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7
+ uint8_t queue_index /* The aggregation queue index of this packet */;
+ uint8_t placement_offset /* Placement offset from the start of the BD, in bytes */;
+ uint32_t rss_hash_result /* RSS toeplitz hash result */;
+ uint16_t vlan_tag /* Ethernet VLAN tag field */;
+ uint16_t pkt_len_or_gro_seg_len /* Packet length (for non-TPA CQE) or GRO Segment Length (for TPA in GRO Mode) otherwise 0 */;
+ uint16_t len_on_bd /* Number of bytes placed on the BD */;
+ struct parsing_flags pars_flags;
+ union eth_sgl_or_raw_data sgl_or_raw_data /* union for sgl and raw data. */;
+ uint32_t reserved1[8];
+};
+
+
+/*
+ * Command for setting classification flags for a client $$KEEP_ENDIANNESS$$
+ */
+struct eth_filter_rules_cmd
+{
+ uint8_t cmd_general_data;
+#define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0) /* BitField cmd_general_data should this cmd be applied for Rx */
+#define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1) /* BitField cmd_general_data should this cmd be applied for Tx */
+#define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2) /* BitField cmd_general_data */
+#define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2
+ uint8_t func_id /* the function id */;
+ uint8_t client_id /* the client id */;
+ uint8_t reserved1;
+ uint16_t state;
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0) /* BitField state drop all unicast packets */
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1) /* BitField state accept all unicast packets (subject to vlan) */
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2) /* BitField state accept all unmatched unicast packets */
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3) /* BitField state drop all multicast packets */
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4) /* BitField state accept all multicast packets (subject to vlan) */
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5) /* BitField state accept all broadcast packets (subject to vlan) */
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6) /* BitField state accept packets matched only by MAC (without checking vlan) */
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6
+#define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7) /* BitField state */
+#define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7
+ uint16_t reserved3;
+ struct regpair reserved4;
+};
+
+
+/*
+ * parameters for eth classification filters ramrod $$KEEP_ENDIANNESS$$
+ */
+struct eth_filter_rules_ramrod_data
+{
+ struct eth_classify_header header;
+ struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT];
+};
+
+
+/*
+ * parameters for eth classification configuration ramrod $$KEEP_ENDIANNESS$$
+ */
+struct eth_general_rules_ramrod_data
+{
+ struct eth_classify_header header;
+ union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
+};
+
+
+/*
+ * The data for Halt ramrod
+ */
+struct eth_halt_ramrod_data
+{
+ uint32_t client_id /* id of this client. (5 bits are used) */;
+ uint32_t reserved0;
+};
+
+
+/*
+ * destination and source mac address.
+ */
+struct eth_mac_addresses
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t dst_mid /* destination mac address 16 middle bits */;
+ uint16_t dst_lo /* destination mac address 16 low bits */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t dst_lo /* destination mac address 16 low bits */;
+ uint16_t dst_mid /* destination mac address 16 middle bits */;
+#endif
+#if defined(__BIG_ENDIAN)
+ uint16_t src_lo /* source mac address 16 low bits */;
+ uint16_t dst_hi /* destination mac address 16 high bits */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t dst_hi /* destination mac address 16 high bits */;
+ uint16_t src_lo /* source mac address 16 low bits */;
+#endif
+#if defined(__BIG_ENDIAN)
+ uint16_t src_hi /* source mac address 16 high bits */;
+ uint16_t src_mid /* source mac address 16 middle bits */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t src_mid /* source mac address 16 middle bits */;
+ uint16_t src_hi /* source mac address 16 high bits */;
+#endif
+};
+
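+/*
+ * Illustrative sketch, not part of the generated HSI definitions: packing
+ * a 6-byte MAC address into the three 16-bit destination words above,
+ * assuming dst_hi carries the two most significant bytes as the field
+ * comments indicate. The helper name is hypothetical.
+ */
+static inline void eth_mac_addresses_set_dst(struct eth_mac_addresses *m,
+                                             const uint8_t mac[6])
+{
+ m->dst_hi = ((uint16_t)mac[0] << 8) | mac[1];
+ m->dst_mid = ((uint16_t)mac[2] << 8) | mac[3];
+ m->dst_lo = ((uint16_t)mac[4] << 8) | mac[5];
+}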
+
+/*
+ * tunneling related data.
+ */
+struct eth_tunnel_data
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t dst_mid /* destination mac address 16 middle bits */;
+ uint16_t dst_lo /* destination mac address 16 low bits */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t dst_lo /* destination mac address 16 low bits */;
+ uint16_t dst_mid /* destination mac address 16 middle bits */;
+#endif
+#if defined(__BIG_ENDIAN)
+ uint16_t fw_ip_hdr_csum /* Fw Ip header checksum (with ALL ip header fields) for the outer IP header */;
+ uint16_t dst_hi /* destination mac address 16 high bits */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t dst_hi /* destination mac address 16 high bits */;
+ uint16_t fw_ip_hdr_csum /* Fw Ip header checksum (with ALL ip header fields) for the outer IP header */;
+#endif
+#if defined(__BIG_ENDIAN)
+ uint8_t flags;
+#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0) /* BitField flags Set in case outer IP header is ipV6 */
+#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1) /* BitField flags Should be set with 0 */
+#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
+ uint8_t ip_hdr_start_inner_w /* Inner IP header offset in WORDs (16-bit) from start of packet */;
+ uint16_t pseudo_csum /* Pseudo checksum with length field=0 */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t pseudo_csum /* Pseudo checksum with length field=0 */;
+ uint8_t ip_hdr_start_inner_w /* Inner IP header offset in WORDs (16-bit) from start of packet */;
+ uint8_t flags;
+#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0) /* BitField flags Set in case outer IP header is ipV6 */
+#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1) /* BitField flags Should be set with 0 */
+#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
+#endif
+};
+
+/*
+ * union for mac addresses and for tunneling data. considered as tunneling data only if (tunnel_exist == 1).
+ */
+union eth_mac_addr_or_tunnel_data
+{
+ struct eth_mac_addresses mac_addr /* destination and source mac addresses. */;
+ struct eth_tunnel_data tunnel_data /* tunneling related data. */;
+};
+
+
+/*
+ * Command for setting multicast classification for a client $$KEEP_ENDIANNESS$$
+ */
+struct eth_multicast_rules_cmd
+{
+ uint8_t cmd_general_data;
+#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0) /* BitField cmd_general_data should this cmd be applied for Rx */
+#define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1) /* BitField cmd_general_data should this cmd be applied for Tx */
+#define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2) /* BitField cmd_general_data 1 for add rule, 0 for remove rule */
+#define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2
+#define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3) /* BitField cmd_general_data */
+#define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3
+ uint8_t func_id /* the function id */;
+ uint8_t bin_id /* the bin to add this function to (0-255) */;
+ uint8_t engine_id /* the approximate multicast engine id */;
+ uint32_t reserved2;
+ struct regpair reserved3;
+};
+
+
+/*
+ * parameters for multicast classification ramrod $$KEEP_ENDIANNESS$$
+ */
+struct eth_multicast_rules_ramrod_data
+{
+ struct eth_classify_header header;
+ struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
+};
+
+
+/*
+ * Place holder for ramrods protocol specific data
+ */
+struct ramrod_data
+{
+ uint32_t data_lo;
+ uint32_t data_hi;
+};
+
+/*
+ * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
+ */
+union eth_ramrod_data
+{
+ struct ramrod_data general;
+};
+
+
+/*
+ * RSS toeplitz hash type, as reported in CQE
+ */
+enum eth_rss_hash_type
+{
+ DEFAULT_HASH_TYPE,
+ IPV4_HASH_TYPE,
+ TCP_IPV4_HASH_TYPE,
+ IPV6_HASH_TYPE,
+ TCP_IPV6_HASH_TYPE,
+ VLAN_PRI_HASH_TYPE,
+ E1HOV_PRI_HASH_TYPE,
+ DSCP_HASH_TYPE,
+ MAX_ETH_RSS_HASH_TYPE};
+
+
+/*
+ * Ethernet RSS mode
+ */
+enum eth_rss_mode
+{
+ ETH_RSS_MODE_DISABLED,
+ ETH_RSS_MODE_ESX51 /* RSS mode for VMware ESX 5.1 (Only do RSS if packet is UDP with dst port that matches the UDP 4-tuple Destination Port mask and value) */,
+ ETH_RSS_MODE_REGULAR /* Regular (ndis-like) RSS */,
+ ETH_RSS_MODE_VLAN_PRI /* RSS based on inner-vlan priority field */,
+ ETH_RSS_MODE_E1HOV_PRI /* RSS based on outer-vlan priority field */,
+ ETH_RSS_MODE_IP_DSCP /* RSS based on IPv4 DSCP field */,
+ MAX_ETH_RSS_MODE};
+
+
+/*
+ * parameters for RSS update ramrod (E2) $$KEEP_ENDIANNESS$$
+ */
+struct eth_rss_update_ramrod_data
+{
+ uint8_t rss_engine_id;
+ uint8_t capabilities;
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV4 2-tuple capability */
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV4 4-tuple capability for TCP */
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV4 4-tuple capability for UDP */
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV6 2-tuple capability */
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV6 4-tuple capability for TCP */
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV6 4-tuple capability for UDP */
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6) /* BitField capabilitiesFunction RSS capabilities configuration of the 5-tuple capability */
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7) /* BitField capabilitiesFunction RSS capabilities if set update the rss keys */
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
+ uint8_t rss_result_mask /* The mask for the lower byte of the RSS result - defines which section of the indirection table will be used. To use the entire table, set this to 0x7F */;
+ uint8_t rss_mode /* The RSS mode for this function */;
+ uint16_t udp_4tuple_dst_port_mask /* If UDP 4-tuple enabled, packets that match the mask and value are 4-tupled, the rest are 2-tupled. (Set to 0 to match all) */;
+ uint16_t udp_4tuple_dst_port_value /* If UDP 4-tuple enabled, packets that match the mask and value are 4-tupled, the rest are 2-tupled. (Set to 0 to match all) */;
+ uint8_t indirection_table[T_ETH_INDIRECTION_TABLE_SIZE] /* RSS indirection table */;
+ uint32_t rss_key[T_ETH_RSS_KEY] /* RSS key supplied by the OS */;
+ uint32_t echo;
+ uint32_t reserved3;
+};
+
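+/*
+ * Illustrative sketch, not part of the generated HSI definitions: a driver
+ * enabling the whole indirection table would set rss_result_mask to 0x7F
+ * (per the field comment above) and spread its Rx queues across the table.
+ * T_ETH_INDIRECTION_TABLE_SIZE is defined elsewhere in this file; the
+ * helper name and the round-robin policy are hypothetical. num_queues must
+ * be non-zero.
+ */
+static inline void rss_fill_indirection(struct eth_rss_update_ramrod_data *d,
+                                        uint8_t num_queues)
+{
+ uint32_t i;
+
+ d->rss_result_mask = 0x7F /* use the entire indirection table */;
+ for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
+  d->indirection_table[i] = i % num_queues;
+}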
+
+/*
+ * The eth Rx Buffer Descriptor
+ */
+struct eth_rx_bd
+{
+ uint32_t addr_lo /* Single continuous buffer low pointer */;
+ uint32_t addr_hi /* Single continuous buffer high pointer */;
+};
+
+
+/*
+ * Eth Rx Cqe structure- general structure for ramrods $$KEEP_ENDIANNESS$$
+ */
+struct common_ramrod_eth_rx_cqe
+{
+ uint8_t ramrod_type;
+#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0) /* BitField ramrod_type (use enum eth_rx_cqe_type) */
+#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2) /* BitField ramrod_type */
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3) /* BitField ramrod_type */
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3
+ uint8_t conn_type /* only 3 bits are used */;
+ uint16_t reserved1 /* protocol specific data */;
+ uint32_t conn_and_cmd_data;
+#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0) /* BitField conn_and_cmd_data */
+#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0
+#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24) /* BitField conn_and_cmd_data command id of the ramrod- use RamrodCommandIdEnum */
+#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24
+ struct ramrod_data protocol_data /* protocol specific data */;
+ uint32_t echo;
+ uint32_t reserved2[11];
+};
+
+/*
+ * Rx Last CQE in page (in ETH)
+ */
+struct eth_rx_cqe_next_page
+{
+ uint32_t addr_lo /* Next page low pointer */;
+ uint32_t addr_hi /* Next page high pointer */;
+ uint32_t reserved[14];
+};
+
+/*
+ * union for all eth rx cqe types (fix their sizes)
+ */
+union eth_rx_cqe
+{
+ struct eth_fast_path_rx_cqe fast_path_cqe;
+ struct common_ramrod_eth_rx_cqe ramrod_cqe;
+ struct eth_rx_cqe_next_page next_page_cqe;
+ struct eth_end_agg_rx_cqe end_agg_cqe;
+};
+
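+/*
+ * Illustrative sketch, not part of the generated HSI definitions: the
+ * 2-bit type field sits at the same offset in every CQE flavor, so an Rx
+ * loop can read it through the fast path view before picking which union
+ * member to use. Values come from enum eth_rx_cqe_type below; the helper
+ * name is hypothetical.
+ */
+static inline int eth_rx_cqe_get_type(const union eth_rx_cqe *cqe)
+{
+ return (cqe->fast_path_cqe.type_error_flags & ETH_FAST_PATH_RX_CQE_TYPE) >>
+        ETH_FAST_PATH_RX_CQE_TYPE_SHIFT;
+}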
+
+/*
+ * Values for RX ETH CQE type field
+ */
+enum eth_rx_cqe_type
+{
+ RX_ETH_CQE_TYPE_ETH_FASTPATH /* Fast path CQE */,
+ RX_ETH_CQE_TYPE_ETH_RAMROD /* Slow path CQE */,
+ RX_ETH_CQE_TYPE_ETH_START_AGG /* Fast path CQE */,
+ RX_ETH_CQE_TYPE_ETH_STOP_AGG /* Slow path CQE */,
+ MAX_ETH_RX_CQE_TYPE};
+
+
+/*
+ * Type of SGL/Raw field in ETH RX fast path CQE
+ */
+enum eth_rx_fp_sel
+{
+ ETH_FP_CQE_REGULAR /* Regular CQE- no extra data */,
+ ETH_FP_CQE_RAW /* Extra data is raw data- iscsi OOO */,
+ MAX_ETH_RX_FP_SEL};
+
+
+/*
+ * The eth Rx SGE Descriptor
+ */
+struct eth_rx_sge
+{
+ uint32_t addr_lo /* Single continuous buffer low pointer */;
+ uint32_t addr_hi /* Single continuous buffer high pointer */;
+};
+
+
+/*
+ * common data for all protocols $$KEEP_ENDIANNESS$$
+ */
+struct spe_hdr
+{
+ uint32_t conn_and_cmd_data;
+#define SPE_HDR_CID (0xFFFFFF<<0) /* BitField conn_and_cmd_data */
+#define SPE_HDR_CID_SHIFT 0
+#define SPE_HDR_CMD_ID (0xFF<<24) /* BitField conn_and_cmd_data command id of the ramrod- use enum common_spqe_cmd_id/eth_spqe_cmd_id/toe_spqe_cmd_id */
+#define SPE_HDR_CMD_ID_SHIFT 24
+ uint16_t type;
+#define SPE_HDR_CONN_TYPE (0xFF<<0) /* BitField type connection type. (3 bits are used) (use enum connection_type) */
+#define SPE_HDR_CONN_TYPE_SHIFT 0
+#define SPE_HDR_FUNCTION_ID (0xFF<<8) /* BitField type */
+#define SPE_HDR_FUNCTION_ID_SHIFT 8
+ uint16_t reserved1;
+};
+
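+/*
+ * Illustrative sketch, not part of the generated HSI definitions: packing
+ * the connection id and ramrod command id into spe_hdr.conn_and_cmd_data.
+ * cmd_id would come from enum eth_spqe_cmd_id further below; the helper
+ * name is hypothetical.
+ */
+static inline void spe_hdr_set_cmd(struct spe_hdr *hdr, uint32_t cid,
+                                   uint8_t cmd_id)
+{
+ hdr->conn_and_cmd_data =
+  ((cid << SPE_HDR_CID_SHIFT) & SPE_HDR_CID) |
+  ((uint32_t)cmd_id << SPE_HDR_CMD_ID_SHIFT);
+}
+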
+/*
+ * specific data for ethernet slow path element
+ */
+union eth_specific_data
+{
+ uint8_t protocol_data[8] /* to fix this structure size to 8 bytes */;
+ struct regpair client_update_ramrod_data /* The address of the data for client update ramrod */;
+ struct regpair client_init_ramrod_init_data /* The data for client setup ramrod */;
+ struct eth_halt_ramrod_data halt_ramrod_data /* Includes the client id to be deleted */;
+ struct regpair update_data_addr /* physical address of the eth_rss_update_ramrod_data struct, as allocated by the driver */;
+ struct eth_common_ramrod_data common_ramrod_data /* The data contains the client ID needed by the ramrod */;
+ struct regpair classify_cfg_addr /* physical address of the eth_classify_rules_ramrod_data struct, as allocated by the driver */;
+ struct regpair filter_cfg_addr /* physical address of the eth_filter_cfg_ramrod_data struct, as allocated by the driver */;
+ struct regpair mcast_cfg_addr /* physical address of the eth_mcast_cfg_ramrod_data struct, as allocated by the driver */;
+};
+
+/*
+ * Ethernet slow path element
+ */
+struct eth_spe
+{
+ struct spe_hdr hdr /* common data for all protocols */;
+ union eth_specific_data data /* data specific to ethernet protocol */;
+};
+
+
+/*
+ * Ethernet command ID for slow path elements
+ */
+enum eth_spqe_cmd_id
+{
+ RAMROD_CMD_ID_ETH_UNUSED,
+ RAMROD_CMD_ID_ETH_CLIENT_SETUP /* Setup a new L2 client */,
+ RAMROD_CMD_ID_ETH_HALT /* Halt an L2 client */,
+ RAMROD_CMD_ID_ETH_FORWARD_SETUP /* Setup a new FW channel */,
+ RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP /* Setup a new Tx only queue */,
+ RAMROD_CMD_ID_ETH_CLIENT_UPDATE /* Update an L2 client configuration */,
+ RAMROD_CMD_ID_ETH_EMPTY /* Empty ramrod - used to synchronize iSCSI OOO */,
+ RAMROD_CMD_ID_ETH_TERMINATE /* Terminate an L2 client */,
+ RAMROD_CMD_ID_ETH_TPA_UPDATE /* update the tpa roles in L2 client */,
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES /* Add/remove classification filters for L2 client (in E2/E3 only) */,
+ RAMROD_CMD_ID_ETH_FILTER_RULES /* Add/remove classification filters for L2 client (in E2/E3 only) */,
+ RAMROD_CMD_ID_ETH_MULTICAST_RULES /* Add/remove multicast classification bin (in E2/E3 only) */,
+ RAMROD_CMD_ID_ETH_RSS_UPDATE /* Update RSS configuration */,
+ RAMROD_CMD_ID_ETH_SET_MAC /* Set a MAC address */,
+ MAX_ETH_SPQE_CMD_ID};
+
+
+/*
+ * eth tpa update command
+ */
+enum eth_tpa_update_command
+{
+ TPA_UPDATE_NONE_COMMAND /* nop command */,
+ TPA_UPDATE_ENABLE_COMMAND /* enable command */,
+ TPA_UPDATE_DISABLE_COMMAND /* disable command */,
+ MAX_ETH_TPA_UPDATE_COMMAND};
+
+
+/*
+ * In case of LSO over IPv4 tunnel, whether to increment IP ID on external IP header or internal IP header
+ */
+enum eth_tunnel_lso_inc_ip_id
+{
+ EXT_HEADER /* Increment IP ID of external header (HW works on external, FW works on internal) */,
+ INT_HEADER /* Increment IP ID of internal header (HW works on internal, FW works on external) */,
+ MAX_ETH_TUNNEL_LSO_INC_IP_ID};
+
+
+/*
+ * In case tunnel exist and L4 checksum offload (or outer ip header checksum), the pseudo checksum location, on packet or on BD.
+ */
+enum eth_tunnel_non_lso_csum_location
+{
+ CSUM_ON_PKT /* checksum is on the packet. */,
+ CSUM_ON_BD /* checksum is on the BD. */,
+ MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION};
+
+
+/*
+ * Tx regular BD structure $$KEEP_ENDIANNESS$$
+ */
+struct eth_tx_bd
+{
+ uint32_t addr_lo /* Single continuous buffer low pointer */;
+ uint32_t addr_hi /* Single continuous buffer high pointer */;
+ uint16_t total_pkt_bytes /* Size of the entire packet, valid for non-LSO packets */;
+ uint16_t nbytes /* Size of the data represented by the BD */;
+ uint8_t reserved[4] /* keeps same size as other eth tx bd types */;
+};
+
+
+/*
+ * structure for easy accessibility to assembler
+ */
+struct eth_tx_bd_flags
+{
+ uint8_t as_bitfield;
+#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0) /* BitField as_bitfield IP CKSUM flag,Relevant in START */
+#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0
+#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1) /* BitField as_bitfield L4 CKSUM flag,Relevant in START */
+#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1
+#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2) /* BitField as_bitfield 00 - no vlan; 01 - inband Vlan; 10 outband Vlan (use enum eth_tx_vlan_type) */
+#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2
+#define ETH_TX_BD_FLAGS_START_BD (0x1<<4) /* BitField as_bitfield Start of packet BD */
+#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4
+#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5) /* BitField as_bitfield flag that indicates that the current packet is a udp packet */
+#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5
+#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6) /* BitField as_bitfield LSO flag, Relevant in START */
+#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6
+#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7) /* BitField as_bitfield set in case ipV6 packet, Relevant in START */
+#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7
+};
+
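+/*
+ * Illustrative sketch, not part of the generated HSI definitions: the
+ * bd_flags byte of a start BD for an IPv4/TCP packet with both checksum
+ * offloads requested would combine the masks above; the helper name is
+ * hypothetical.
+ */
+static inline uint8_t eth_tx_csum_offload_flags(void)
+{
+ return ETH_TX_BD_FLAGS_START_BD /* first BD of the packet */ |
+        ETH_TX_BD_FLAGS_IP_CSUM /* offload IP header checksum */ |
+        ETH_TX_BD_FLAGS_L4_CSUM /* offload TCP/UDP checksum */;
+}
+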
+/*
+ * The eth Tx Buffer Descriptor $$KEEP_ENDIANNESS$$
+ */
+struct eth_tx_start_bd
+{
+ uint64_t addr;
+ uint16_t nbd /* Num of BDs in packet: includes parsInfoBD. Relevant in START (only in Everest) */;
+ uint16_t nbytes /* Size of the data represented by the BD */;
+ uint16_t vlan_or_ethertype /* Vlan structure: vlan_id 12 bits (lsb), then cfi 1 bit, then priority 3 bits. In E2, this field should be set with the etherType for VFs with no vlan */;
+ struct eth_tx_bd_flags bd_flags;
+ uint8_t general_data;
+#define ETH_TX_START_BD_HDR_NBDS (0xF<<0) /* BitField general_data contains the number of BDs that contain Ethernet/IP/TCP headers, for full/partial LSO modes */
+#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
+#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) /* BitField general_data force vlan mode according to bds (vlan mode can change according to global configuration) */
+#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
+#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5) /* BitField general_data Determines the number of parsing BDs in packet. Number of parsing BDs in packet is (parse_nbds+1). */
+#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
+#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7) /* BitField general_data set in case of tunneling encapsulated packet */
+#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7
+};
+
+/*
+ * Tx parsing BD structure for ETH E1h $$KEEP_ENDIANNESS$$
+ */
+struct eth_tx_parse_bd_e1x
+{
+ uint16_t global_data;
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0) /* BitField global_data IP header Offset in WORDs from start of packet */
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE (0x3<<4) /* BitField global_data marks ethernet address type (use enum eth_addr_type) */
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<6) /* BitField global_data */
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<7) /* BitField global_data */
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 7
+#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<8) /* BitField global_data an optional addition to ECN that protects against accidental or malicious concealment of marked packets from the TCP sender. */
+#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 8
+#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x7F<<9) /* BitField global_data reserved bit, should be set with 0 */
+#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 9
+ uint8_t tcp_flags;
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0) /* BitField tcp_flagsState flags End of data flag */
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1) /* BitField tcp_flagsState flags Synchronize sequence numbers flag */
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2) /* BitField tcp_flagsState flags Reset connection flag */
+#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3) /* BitField tcp_flagsState flags Push flag */
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4) /* BitField tcp_flagsState flags Acknowledgment number valid flag */
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5) /* BitField tcp_flagsState flags Urgent pointer valid flag */
+#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6) /* BitField tcp_flagsState flags ECN-Echo */
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7) /* BitField tcp_flagsState flags Congestion Window Reduced */
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
+ uint8_t ip_hlen_w /* IP header length in WORDs */;
+ uint16_t total_hlen_w /* IP+TCP+ETH */;
+ uint16_t tcp_pseudo_csum /* Checksum of pseudo header with length field=0 */;
+ uint16_t lso_mss /* for LSO mode */;
+ uint16_t ip_id /* for LSO mode */;
+ uint32_t tcp_send_seq /* for LSO mode */;
+};
+
+/*
+ * Tx parsing BD structure for ETH E2 $$KEEP_ENDIANNESS$$
+ */
+struct eth_tx_parse_bd_e2
+{
+ union eth_mac_addr_or_tunnel_data data /* union for mac addresses and for tunneling data. considered as tunneling data only if (tunnel_exist == 1). */;
+ uint32_t parsing_data;
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0) /* BitField parsing_data TCP/UDP header Offset in WORDs from start of packet */
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11) /* BitField parsing_data TCP header size in DOUBLE WORDS */
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15) /* BitField parsing_data a flag to indicate an ipv6 packet with extension headers. If set on LSO packet, pseudo CS should be placed in TCP CS field without length field */
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 15
+#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<16) /* BitField parsing_data for LSO mode */
+#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 16
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE (0x3<<30) /* BitField parsing_data marks ethernet address type (use enum eth_addr_type) */
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT 30
+};
+
+/*
+ * Tx 2nd parsing BD structure for ETH packet $$KEEP_ENDIANNESS$$
+ */
+struct eth_tx_parse_2nd_bd
+{
+ uint16_t global_data;
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0) /* BitField global_data Outer IP header offset in WORDs (16-bit) from start of packet */
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x1<<4) /* BitField global_data should be set with 0 */
+#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5) /* BitField global_data */
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6) /* BitField global_data an optional addition to ECN that protects against accidental or malicious concealment of marked packets from the TCP sender. */
+#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7) /* BitField global_data Set in case a UDP header exists in the tunnel outer headers. */
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8) /* BitField global_data Outer IP header length in WORDs (16-bit). Valid only for IpV4. */
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
+#define ETH_TX_PARSE_2ND_BD_RESERVED1 (0x7<<13) /* BitField global_data should be set with 0 */
+#define ETH_TX_PARSE_2ND_BD_RESERVED1_SHIFT 13
+ uint16_t reserved2;
+ uint8_t tcp_flags;
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0) /* BitField tcp_flagsState flags End of data flag */
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1) /* BitField tcp_flagsState flags Synchronize sequence numbers flag */
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2) /* BitField tcp_flagsState flags Reset connection flag */
+#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3) /* BitField tcp_flagsState flags Push flag */
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4) /* BitField tcp_flagsState flags Acknowledgment number valid flag */
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5) /* BitField tcp_flagsState flags Urgent pointer valid flag */
+#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6) /* BitField tcp_flagsState flags ECN-Echo */
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7) /* BitField tcp_flagsState flags Congestion Window Reduced */
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
+ uint8_t reserved3;
+ uint8_t tunnel_udp_hdr_start_w /* Offset (in WORDs) from start of packet to tunnel UDP header. (if exist) */;
+ uint8_t fw_ip_hdr_to_payload_w /* In IpV4, the length (in WORDs) from the FW IpV4 header start to the payload start. In IpV6, the length (in WORDs) from the FW IpV6 header end to the payload start. However, if extension headers are included, their length is counted here as well. */;
+ uint16_t fw_ip_csum_wo_len_flags_frag /* For the IP header which is set by the FW, the IP checksum without length, flags and fragment offset. */;
+ uint16_t hw_ip_id /* The IP ID to be set by HW for LSO packets in tunnel mode. */;
+ uint32_t tcp_send_seq /* The TCP sequence number for LSO packets. */;
+};
+
+/*
+ * The last BD in the BD memory will hold a pointer to the next BD memory
+ */
+struct eth_tx_next_bd
+{
+ uint32_t addr_lo /* Single continuous buffer low pointer */;
+ uint32_t addr_hi /* Single continuous buffer high pointer */;
+ uint8_t reserved[8] /* keeps same size as other eth tx bd types */;
+};
+
+/*
+ * union for 4 Bd types
+ */
+union eth_tx_bd_types
+{
+ struct eth_tx_start_bd start_bd /* the first bd in a packet */;
+ struct eth_tx_bd reg_bd /* the common bd */;
+ struct eth_tx_parse_bd_e1x parse_bd_e1x /* parsing info BD for e1/e1h */;
+ struct eth_tx_parse_bd_e2 parse_bd_e2 /* parsing info BD for e2 */;
+ struct eth_tx_parse_2nd_bd parse_2nd_bd /* 2nd parsing info BD */;
+ struct eth_tx_next_bd next_bd /* Bd that contains the address of the next page */;
+};
+
+/*
+ * array of 13 bds as appears in the eth xstorm context
+ */
+struct eth_tx_bds_array
+{
+ union eth_tx_bd_types bds[13];
+};
+
+
+/*
+ * VLAN mode on TX BDs
+ */
+enum eth_tx_vlan_type
+{
+ X_ETH_NO_VLAN,
+ X_ETH_OUTBAND_VLAN,
+ X_ETH_INBAND_VLAN,
+ X_ETH_FW_ADDED_VLAN /* Driver should not use this! */,
+ MAX_ETH_TX_VLAN_TYPE};
+
+
+/*
+ * Ethernet VLAN filtering mode in E1x
+ */
+enum eth_vlan_filter_mode
+{
+ ETH_VLAN_FILTER_ANY_VLAN /* Don't filter by vlan */,
+ ETH_VLAN_FILTER_SPECIFIC_VLAN /* Only the vlan_id is allowed */,
+ ETH_VLAN_FILTER_CLASSIFY /* Vlan will be added to CAM for classification */,
+ MAX_ETH_VLAN_FILTER_MODE};
+
+
+/*
+ * MAC filtering configuration command header $$KEEP_ENDIANNESS$$
+ */
+struct mac_configuration_hdr
+{
+ uint8_t length /* number of entries valid in this command (6 bits) */;
+ uint8_t offset /* offset of the first entry in the list */;
+ uint16_t client_id /* the client id which this ramrod is sent on. 5b is used. */;
+ uint32_t echo /* echo value to be sent to driver on event ring */;
+};
+
+/*
+ * MAC address in list for ramrod $$KEEP_ENDIANNESS$$
+ */
+struct mac_configuration_entry
+{
+ uint16_t lsb_mac_addr /* 2 LSB of MAC address (should be given in big endian - driver should do hton to this number!!!) */;
+ uint16_t middle_mac_addr /* 2 middle bytes of MAC address (should be given in big endian - driver should do hton to this number!!!) */;
+ uint16_t msb_mac_addr /* 2 MSB of MAC address (should be given in big endian - driver should do hton to this number!!!) */;
+ uint16_t vlan_id /* The inner vlan id (12b). Used either in vlan_in_cam for mac_valn pair or for vlan filtering */;
+ uint8_t pf_id /* The pf id, for multi function mode */;
+ uint8_t flags;
+#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1<<0) /* BitField flags configures the action to be done in cam (used only in slow path handlers) (use enum set_mac_action_type) */
+#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0
+#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1<<1) /* BitField flags If set, this MAC also belongs to RDMA client */
+#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1
+#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2) /* BitField flags (use enum eth_vlan_filter_mode) */
+#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4) /* BitField flags 0 - can't remove vlan, 1 - can remove vlan. relevant only to everest1 */
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4
+#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5) /* BitField flags 0 - not broadcast, 1 - broadcast. relevant only to everest1 */
+#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5
+#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6) /* BitField flags */
+#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6
+ uint16_t reserved0;
+ uint32_t clients_bit_vector /* Bit vector for the clients which should receive this MAC. */;
+};
+
+/*
+ * MAC filtering configuration command
+ */
+struct mac_configuration_cmd
+{
+ struct mac_configuration_hdr hdr /* header */;
+ struct mac_configuration_entry config_table[64] /* table of 64 MAC configuration entries: addresses and target table entries */;
+};
+
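+/*
+ * Illustrative sketch of filling the address words of one entry, under
+ * one plausible reading of the hton remarks above; assumes DPDK's
+ * <rte_byteorder.h> for the byte-order helper. The function name,
+ * entry, and client_id are hypothetical.
+ */
+static inline void example_fill_mac_entry(struct mac_configuration_entry *entry,
+                                          const uint8_t *mac, uint8_t client_id)
+{
+ /* each 16-bit word must reach the chip big-endian, msb word first */
+ entry->msb_mac_addr = rte_cpu_to_be_16((mac[0] << 8) | mac[1]);
+ entry->middle_mac_addr = rte_cpu_to_be_16((mac[2] << 8) | mac[3]);
+ entry->lsb_mac_addr = rte_cpu_to_be_16((mac[4] << 8) | mac[5]);
+ /* deliver packets matching this MAC to the given client */
+ entry->clients_bit_vector = 1U << client_id;
+}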
+
+/*
+ * Set-MAC command type (in E1x)
+ */
+enum set_mac_action_type
+{
+ T_ETH_MAC_COMMAND_INVALIDATE,
+ T_ETH_MAC_COMMAND_SET,
+ MAX_SET_MAC_ACTION_TYPE};
+
+
+/*
+ * Ethernet TPA Modes
+ */
+enum tpa_mode
+{
+ TPA_LRO /* LRO mode TPA */,
+ TPA_GRO /* GRO mode TPA */,
+ MAX_TPA_MODE};
+
+
+/*
+ * tpa update ramrod data $$KEEP_ENDIANNESS$$
+ */
+struct tpa_update_ramrod_data
+{
+ uint8_t update_ipv4 /* none, enable or disable */;
+ uint8_t update_ipv6 /* none, enable or disable */;
+ uint8_t client_id /* client init flow control data */;
+ uint8_t max_tpa_queues /* maximal TPA queues allowed for this client */;
+ uint8_t max_sges_for_packet /* The maximal number of SGEs that can be used for one packet. depends on MTU and SGE size. must be 0 if SGEs are disabled */;
+ uint8_t complete_on_both_clients /* If set and the client has different sp_client, completion will be sent to both rings */;
+ uint8_t dont_verify_rings_pause_thr_flg /* If set, the rings pause thresholds will not be verified by firmware. */;
+ uint8_t tpa_mode /* TPA mode to use (LRO or GRO) */;
+ uint16_t sge_buff_size /* Size of the buffers pointed by SGEs */;
+ uint16_t max_agg_size /* maximal size for the aggregated TPA packets, reported by the host */;
+ uint32_t sge_page_base_lo /* The address to fetch the next sges from (low) */;
+ uint32_t sge_page_base_hi /* The address to fetch the next sges from (high) */;
+ uint16_t sge_pause_thr_low /* number of remaining sges under which we send a pause message */;
+ uint16_t sge_pause_thr_high /* number of remaining sges above which we send an un-pause message */;
+};
+
+
+/*
+ * approximate-match multicast filtering for E1H per function in Tstorm
+ */
+struct tstorm_eth_approximate_match_multicast_filtering
+{
+ uint32_t mcast_add_hash_bit_array[8] /* Bit array for multicast hash filtering. Each bit holds a hash function result that decides whether to accept this multicast dst address. */;
+};
+
+
+/*
+ * Common configuration parameters per function in Tstorm $$KEEP_ENDIANNESS$$
+ */
+struct tstorm_eth_function_common_config
+{
+ uint16_t config_flags;
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) /* BitField config_flagsGeneral configuration flags configuration of the port RSS IpV4 2-tuple capability */
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) /* BitField config_flagsGeneral configuration flags configuration of the port RSS IpV4 4-tuple capability */
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) /* BitField config_flagsGeneral configuration flags configuration of the port RSS IpV6 2-tuple capability */
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) /* BitField config_flagsGeneral configuration flags configuration of the port RSS IpV6 4-tuple capability */
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) /* BitField config_flagsGeneral configuration flags RSS mode of operation (use enum eth_rss_mode) */
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7) /* BitField config_flagsGeneral configuration flags 0 - Don't filter by vlan, 1 - Filter according to the vlans specified in mac_filter_config */
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8) /* BitField config_flagsGeneral configuration flags */
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8
+ uint8_t rss_result_mask /* The mask for the lower byte of RSS result - defines which section of the indirection table will be used. To enable all table put here 0x7F */;
+ uint8_t reserved1;
+ uint16_t vlan_id[2] /* VLANs of this function. VLAN filtering is determined according to vlan_filtering_enable. */;
+};
+
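+/*
+ * Illustrative sketch: enable 2- and 4-tuple RSS for IPv4 and use the
+ * whole 128-entry indirection table (mask 0x7F per the comment above).
+ * The function name is hypothetical; ETH_RSS_MODE_REGULAR is assumed
+ * from the eth_rss_mode enum that the RSS_MODE field references.
+ */
+static inline void example_tstorm_rss_config(struct tstorm_eth_function_common_config *cfg)
+{
+ cfg->config_flags =
+  TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY |
+  TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY |
+  (ETH_RSS_MODE_REGULAR << TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
+ cfg->rss_result_mask = 0x7F;
+}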
+
+/*
+ * MAC filtering configuration parameters per port in Tstorm $$KEEP_ENDIANNESS$$
+ */
+struct tstorm_eth_mac_filter_config
+{
+ uint32_t ucast_drop_all /* bit vector in which the clients which drop all unicast packets are set */;
+ uint32_t ucast_accept_all /* bit vector in which clients that accept all unicast packets are set */;
+ uint32_t mcast_drop_all /* bit vector in which the clients which drop all multicast packets are set */;
+ uint32_t mcast_accept_all /* bit vector in which clients that accept all multicast packets are set */;
+ uint32_t bcast_accept_all /* bit vector in which clients that accept all broadcast packets are set */;
+ uint32_t vlan_filter[2] /* bit vector for VLAN filtering. Clients which enforce filtering of vlan[x] should be marked in vlan_filter[x]. The primary vlan is taken from the CAM target table. */;
+ uint32_t unmatched_unicast /* bit vector in which clients that accept unmatched unicast packets are set */;
+};
+
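+/*
+ * Illustrative sketch: the fields above are per-client bit vectors, so
+ * accepting all multicast traffic for one client is a single bit set
+ * (the function name, f, and client_id are hypothetical).
+ */
+static inline void example_accept_all_mcast(struct tstorm_eth_mac_filter_config *f,
+                                            uint8_t client_id)
+{
+ f->mcast_accept_all |= 1U << client_id;
+}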
+
+/*
+ * tx only queue init ramrod data $$KEEP_ENDIANNESS$$
+ */
+struct tx_queue_init_ramrod_data
+{
+ struct client_init_general_data general /* client init general data */;
+ struct client_init_tx_data tx /* client init tx data */;
+};
+
+
+/*
+ * Three RX producers for ETH
+ */
+union ustorm_eth_rx_producers
+{
+ struct {
+#if defined(__BIG_ENDIAN)
+ uint16_t bd_prod /* Producer of the RX BD ring */;
+ uint16_t cqe_prod /* Producer of the RX CQE ring */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t cqe_prod /* Producer of the RX CQE ring */;
+ uint16_t bd_prod /* Producer of the RX BD ring */;
+#endif
+#if defined(__BIG_ENDIAN)
+ uint16_t reserved;
+ uint16_t sge_prod /* Producer of the RX SGE ring */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t sge_prod /* Producer of the RX SGE ring */;
+ uint16_t reserved;
+#endif
+ } prod;
+ uint32_t raw_data[2];
+};
+
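+/*
+ * The union above lets a driver publish all three producers with two
+ * aligned 32-bit stores through raw_data instead of four 16-bit writes;
+ * the #ifdefs keep the halves in place on either host endianness.
+ * Illustrative sketch - dst stands in for the mapped ustorm producers
+ * location and the function name is hypothetical.
+ */
+static inline void example_update_rx_prods(volatile uint32_t *dst, uint16_t bd,
+                                           uint16_t cqe, uint16_t sge)
+{
+ union ustorm_eth_rx_producers p = { .raw_data = { 0, 0 } };
+
+ p.prod.bd_prod = bd;
+ p.prod.cqe_prod = cqe;
+ p.prod.sge_prod = sge;
+ dst[0] = p.raw_data[0];
+ dst[1] = p.raw_data[1];
+}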
+
+/*
+ * The data afex vif list ramrod need $$KEEP_ENDIANNESS$$
+ */
+struct afex_vif_list_ramrod_data
+{
+ uint8_t afex_vif_list_command /* set, get, or clear a VIF list; the command is defined by enum vif_list_rule_kind */;
+ uint8_t func_bit_map /* the function bit map to set */;
+ uint16_t vif_list_index /* the VIF list, in a per pf vector to add this function to */;
+ uint8_t func_to_clear /* the func id to clear in case of clear func mode */;
+ uint8_t echo;
+ uint16_t reserved1;
+};
+
+
+/*
+ * cfc delete event data $$KEEP_ENDIANNESS$$
+ */
+struct cfc_del_event_data
+{
+ uint32_t cid /* cid of deleted connection */;
+ uint32_t reserved0;
+ uint32_t reserved1;
+};
+
+
+/*
+ * per-port SAFC demo variables
+ */
+struct cmng_flags_per_port
+{
+ uint32_t cmng_enables;
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0) /* BitField cmng_enablesenables flag for fairness and rate shaping between protocols, vnics and COSes if set, enable fairness between vnics */
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0
+#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1) /* BitField cmng_enablesenables flag for fairness and rate shaping between protocols, vnics and COSes if set, enable rate shaping between vnics */
+#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2) /* BitField cmng_enablesenables flag for fairness and rate shaping between protocols, vnics and COSes if set, enable fairness between COSes */
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3) /* BitField cmng_enablesenables flag for fairness and rate shaping between protocols, vnics and COSes (use enum fairness_mode) */
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3
+#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4) /* BitField cmng_enablesenables flag for fairness and rate shaping between protocols, vnics and COSes reserved */
+#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4
+ uint32_t __reserved1;
+};
+
+
+/*
+ * per-port rate shaping variables
+ */
+struct rate_shaping_vars_per_port
+{
+ uint32_t rs_periodic_timeout /* timeout of periodic timer */;
+ uint32_t rs_threshold /* threshold, below which we start to stop queues */;
+};
+
+/*
+ * per-port fairness variables
+ */
+struct fairness_vars_per_port
+{
+ uint32_t upper_bound /* Quota for a protocol/vnic */;
+ uint32_t fair_threshold /* almost-empty threshold */;
+ uint32_t fairness_timeout /* timeout of fairness timer */;
+ uint32_t reserved0;
+};
+
+/*
+ * per-port SAFC variables
+ */
+struct safc_struct_per_port
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t __reserved1;
+ uint8_t __reserved0;
+ uint8_t safc_timeout_usec /* timeout to stop queues on SAFC pause command */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t safc_timeout_usec /* timeout to stop queues on SAFC pause command */;
+ uint8_t __reserved0;
+ uint16_t __reserved1;
+#endif
+ uint8_t cos_to_traffic_types[MAX_COS_NUMBER] /* translate cos to service traffic types */;
+ uint16_t cos_to_pause_mask[NUM_OF_SAFC_BITS] /* QM pause mask for each class of service in the SAFC frame */;
+};
+
+/*
+ * Per-port congestion management variables
+ */
+struct cmng_struct_per_port
+{
+ struct rate_shaping_vars_per_port rs_vars;
+ struct fairness_vars_per_port fair_vars;
+ struct safc_struct_per_port safc_vars;
+ struct cmng_flags_per_port flags;
+};
+
+/*
+ * a single rate shaping counter. can be used as protocol or vnic counter
+ */
+struct rate_shaping_counter
+{
+ uint32_t quota /* Quota for a protocol/vnic */;
+#if defined(__BIG_ENDIAN)
+ uint16_t __reserved0;
+ uint16_t rate /* Vnic/Protocol rate in units of Mega-bits/sec */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t rate /* Vnic/Protocol rate in units of Mega-bits/sec */;
+ uint16_t __reserved0;
+#endif
+};
+
+/*
+ * per-vnic rate shaping variables
+ */
+struct rate_shaping_vars_per_vn
+{
+ struct rate_shaping_counter vn_counter /* per-vnic counter */;
+};
+
+/*
+ * per-vnic fairness variables
+ */
+struct fairness_vars_per_vn
+{
+ uint32_t cos_credit_delta[MAX_COS_NUMBER] /* used for incrementing the credit */;
+ uint32_t vn_credit_delta /* used for incrementing the credit */;
+ uint32_t __reserved0;
+};
+
+/*
+ * cmng port init state
+ */
+struct cmng_vnic
+{
+ struct rate_shaping_vars_per_vn vnic_max_rate[4];
+ struct fairness_vars_per_vn vnic_min_rate[4];
+};
+
+/*
+ * cmng port init state
+ */
+struct cmng_init
+{
+ struct cmng_struct_per_port port;
+ struct cmng_vnic vnic;
+};
+
+
+/*
+ * driver parameters for congestion management init, all rates are in Mbps
+ */
+struct cmng_init_input
+{
+ uint32_t port_rate;
+ uint16_t vnic_min_rate[4] /* rates are in Mbps */;
+ uint16_t vnic_max_rate[4] /* rates are in Mbps */;
+ uint16_t cos_min_rate[MAX_COS_NUMBER] /* rates are in Mbps */;
+ uint16_t cos_to_pause_mask[MAX_COS_NUMBER];
+ struct cmng_flags_per_port flags;
+};
+
+
+/*
+ * Protocol-common command ID for slow path elements
+ */
+enum common_spqe_cmd_id
+{
+ RAMROD_CMD_ID_COMMON_UNUSED,
+ RAMROD_CMD_ID_COMMON_FUNCTION_START /* Start a function (for PFs only) */,
+ RAMROD_CMD_ID_COMMON_FUNCTION_STOP /* Stop a function (for PFs only) */,
+ RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE /* niv update function */,
+ RAMROD_CMD_ID_COMMON_CFC_DEL /* Delete a connection from CFC */,
+ RAMROD_CMD_ID_COMMON_CFC_DEL_WB /* Delete a connection from CFC (with write back) */,
+ RAMROD_CMD_ID_COMMON_STAT_QUERY /* Collect statistics counters */,
+ RAMROD_CMD_ID_COMMON_STOP_TRAFFIC /* Stop Tx traffic (before DCB updates) */,
+ RAMROD_CMD_ID_COMMON_START_TRAFFIC /* Start Tx traffic (after DCB updates) */,
+ RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS /* niv vif lists */,
+ RAMROD_CMD_ID_COMMON_SET_TIMESYNC /* Set Timesync Parameters (E3 Only) */,
+ MAX_COMMON_SPQE_CMD_ID};
+
+
+/*
+ * Per-protocol connection types
+ */
+enum connection_type
+{
+ ETH_CONNECTION_TYPE /* Ethernet */,
+ TOE_CONNECTION_TYPE /* TOE */,
+ RDMA_CONNECTION_TYPE /* RDMA */,
+ ISCSI_CONNECTION_TYPE /* iSCSI */,
+ FCOE_CONNECTION_TYPE /* FCoE */,
+ RESERVED_CONNECTION_TYPE_0,
+ RESERVED_CONNECTION_TYPE_1,
+ RESERVED_CONNECTION_TYPE_2,
+ NONE_CONNECTION_TYPE /* General- used for common slow path */,
+ MAX_CONNECTION_TYPE};
+
+
+/*
+ * Cos modes
+ */
+enum cos_mode
+{
+ OVERRIDE_COS /* Firmware deduce cos according to DCB */,
+ STATIC_COS /* Firmware has constant queues per CoS */,
+ FW_WRR /* Firmware keep fairness between different CoSes */,
+ MAX_COS_MODE};
+
+
+/*
+ * Dynamic HC counters set by the driver
+ */
+struct hc_dynamic_drv_counter
+{
+ uint32_t val[HC_SB_MAX_DYNAMIC_INDICES] /* 4 bytes * 4 indices = 2 lines */;
+};
+
+/*
+ * zone A per-queue data
+ */
+struct cstorm_queue_zone_data
+{
+ struct hc_dynamic_drv_counter hc_dyn_drv_cnt /* 4 bytes * 4 indices = 2 lines */;
+ struct regpair reserved[2];
+};
+
+
+/*
+ * Vf-PF channel data in cstorm ram (non-triggered zone)
+ */
+struct vf_pf_channel_zone_data
+{
+ uint32_t msg_addr_lo /* the message address on VF memory */;
+ uint32_t msg_addr_hi /* the message address on VF memory */;
+};
+
+/*
+ * zone for VF non-triggered data
+ */
+struct non_trigger_vf_zone
+{
+ struct vf_pf_channel_zone_data vf_pf_channel /* vf-pf channel zone data */;
+};
+
+/*
+ * Vf-PF channel trigger zone in cstorm ram
+ */
+struct vf_pf_channel_zone_trigger
+{
+ uint8_t addr_valid /* indicates that a vf-pf message is pending. MUST be set AFTER the message address. */;
+};
+
+/*
+ * zone that triggers the in-bound interrupt
+ */
+struct trigger_vf_zone
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t reserved1;
+ uint8_t reserved0;
+ struct vf_pf_channel_zone_trigger vf_pf_channel;
+#elif defined(__LITTLE_ENDIAN)
+ struct vf_pf_channel_zone_trigger vf_pf_channel;
+ uint8_t reserved0;
+ uint16_t reserved1;
+#endif
+ uint32_t reserved2;
+};
+
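+/*
+ * Per the addr_valid comment above, a VF must publish the message
+ * address in the non-triggered zone before raising the trigger, with a
+ * store barrier in between so the PF never observes the trigger ahead
+ * of the address. Illustrative sketch using DPDK's rte_wmb(); the
+ * function name and zone pointers are hypothetical.
+ */
+static inline void example_vf_pf_trigger(struct non_trigger_vf_zone *nt,
+                                         struct trigger_vf_zone *t,
+                                         uint32_t addr_lo, uint32_t addr_hi)
+{
+ nt->vf_pf_channel.msg_addr_lo = addr_lo;
+ nt->vf_pf_channel.msg_addr_hi = addr_hi;
+ rte_wmb(); /* order the address stores before the trigger */
+ t->vf_pf_channel.addr_valid = 1;
+}
+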
+/*
+ * zone B per-VF data
+ */
+struct cstorm_vf_zone_data
+{
+ struct non_trigger_vf_zone non_trigger /* zone for VF non-triggered data */;
+ struct trigger_vf_zone trigger /* zone that triggers the in-bound interrupt */;
+};
+
+
+/*
+ * Dynamic host coalescing init parameters, per state machine
+ */
+struct dynamic_hc_sm_config
+{
+ uint32_t threshold[3] /* thresholds of number of outstanding bytes */;
+ uint8_t shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES] /* bytes difference of each protocol is shifted right by this value */;
+ uint8_t hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES] /* timeout for level 0 for each protocol, in units of usec */;
+ uint8_t hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES] /* timeout for level 1 for each protocol, in units of usec */;
+ uint8_t hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES] /* timeout for level 2 for each protocol, in units of usec */;
+ uint8_t hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES] /* timeout for level 3 for each protocol, in units of usec */;
+};
+
+/*
+ * Dynamic host coalescing init parameters
+ */
+struct dynamic_hc_config
+{
+ struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM] /* Configuration per state machine */;
+};
+
+
+struct e2_integ_data
+{
+#if defined(__BIG_ENDIAN)
+ uint8_t flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0) /* BitField flags integration testing enabled */
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1) /* BitField flags flag indicating this connection will transmit on loopback */
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2) /* BitField flags flag indicating this connection will transmit according to cos field */
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) /* BitField flags flag indicating this connection will activate the opportunistic QM credit flow */
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) /* BitField flags flag indicating this connection will release the door bell queue (DQ) */
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5) /* BitField flags */
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+ uint8_t cos /* cos of the connection (relevant only in cos transmitting connections, when cosTx is set) */;
+ uint8_t voq /* voq to return credit on. Normally equal to port (i.e. always 0 in E2 operational connections). in cos tests equal to cos. in loopback tests equal to LB_PORT (=4) */;
+ uint8_t pbf_queue /* pbf queue to transmit on. Normally equal to port (i.e. always 0 in E2 operational connections). in cos tests equal to cos. in loopback tests equal to LB_PORT (=4) */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t pbf_queue /* pbf queue to transmit on. Normally equal to port (i.e. always 0 in E2 operational connections). in cos tests equal to cos. in loopback tests equal to LB_PORT (=4) */;
+ uint8_t voq /* voq to return credit on. Normally equal to port (i.e. always 0 in E2 operational connections). in cos tests equal to cos. in loopback tests equal to LB_PORT (=4) */;
+ uint8_t cos /* cos of the connection (relevant only in cos transmitting connections, when cosTx is set) */;
+ uint8_t flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0) /* BitField flags integration testing enabled */
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1) /* BitField flags flag indicating this connection will transmit on loopback */
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2) /* BitField flags flag indicating this connection will transmit according to cos field */
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) /* BitField flags flag indicating this connection will activate the opportunistic QM credit flow */
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) /* BitField flags flag indicating this connection will release the door bell queue (DQ) */
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5) /* BitField flags */
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+#endif
+#if defined(__BIG_ENDIAN)
+ uint16_t reserved3;
+ uint8_t reserved2;
+ uint8_t ramEn /* context area reserved for reading enable bit from ram */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t ramEn /* context area reserved for reading enable bit from ram */;
+ uint8_t reserved2;
+ uint16_t reserved3;
+#endif
+};
+
+
+/*
+ * set mac event data $$KEEP_ENDIANNESS$$
+ */
+struct eth_event_data
+{
+ uint32_t echo /* set mac echo data to return to driver */;
+ uint32_t reserved0;
+ uint32_t reserved1;
+};
+
+
+/*
+ * pf-vf event data $$KEEP_ENDIANNESS$$
+ */
+struct vf_pf_event_data
+{
+ uint8_t vf_id /* VF ID (0-63) */;
+ uint8_t reserved0;
+ uint16_t reserved1;
+ uint32_t msg_addr_lo /* message address on Vf (low 32 bits) */;
+ uint32_t msg_addr_hi /* message address on Vf (high 32 bits) */;
+};
+
+/*
+ * VF FLR event data $$KEEP_ENDIANNESS$$
+ */
+struct vf_flr_event_data
+{
+ uint8_t vf_id /* VF ID (0-63) */;
+ uint8_t reserved0;
+ uint16_t reserved1;
+ uint32_t reserved2;
+ uint32_t reserved3;
+};
+
+/*
+ * malicious VF event data $$KEEP_ENDIANNESS$$
+ */
+struct malicious_vf_event_data
+{
+ uint8_t vf_id /* VF ID (0-63) */;
+ uint8_t err_id /* reason for malicious notification */;
+ uint16_t reserved1;
+ uint32_t reserved2;
+ uint32_t reserved3;
+};
+
+/*
+ * vif list event data $$KEEP_ENDIANNESS$$
+ */
+struct vif_list_event_data
+{
+ uint8_t func_bit_map /* bit map of pf indices */;
+ uint8_t echo;
+ uint16_t reserved0;
+ uint32_t reserved1;
+ uint32_t reserved2;
+};
+
+/*
+ * function update event data $$KEEP_ENDIANNESS$$
+ */
+struct function_update_event_data
+{
+ uint8_t echo;
+ uint8_t reserved;
+ uint16_t reserved0;
+ uint32_t reserved1;
+ uint32_t reserved2;
+};
+
+/*
+ * union for all event ring message types
+ */
+union event_data
+{
+ struct vf_pf_event_data vf_pf_event /* vf-pf event data */;
+ struct eth_event_data eth_event /* set mac event data */;
+ struct cfc_del_event_data cfc_del_event /* cfc delete event data */;
+ struct vf_flr_event_data vf_flr_event /* vf flr event data */;
+ struct malicious_vf_event_data malicious_vf_event /* malicious vf event data */;
+ struct vif_list_event_data vif_list_event /* vif list event data */;
+ struct function_update_event_data function_update_event /* function update event data */;
+};
+
+
+/*
+ * per PF event ring data
+ */
+struct event_ring_data
+{
+ struct regpair_native base_addr /* ring base address */;
+#if defined(__BIG_ENDIAN)
+ uint8_t index_id /* index ID within the status block */;
+ uint8_t sb_id /* status block ID */;
+ uint16_t producer /* event ring producer */;
+#elif defined(__LITTLE_ENDIAN)
+ uint16_t producer /* event ring producer */;
+ uint8_t sb_id /* status block ID */;
+ uint8_t index_id /* index ID within the status block */;
+#endif
+ uint32_t reserved0;
+};
+
+
+/*
+ * event ring message element (each element is 128 bits) $$KEEP_ENDIANNESS$$
+ */
+struct event_ring_msg
+{
+ uint8_t opcode;
+ uint8_t error /* error on the message */;
+ uint16_t reserved1;
+ union event_data data /* message data (96 bits data) */;
+};
+
+/*
+ * event ring next page element (128 bits)
+ */
+struct event_ring_next
+{
+ struct regpair addr /* Address of the next page of the ring */;
+ uint32_t reserved[2];
+};
+
+/*
+ * union for event ring element types (each element is 128 bits)
+ */
+union event_ring_elem
+{
+ struct event_ring_msg message /* event ring message */;
+ struct event_ring_next next_page /* event ring next page */;
+};
+
+
+/*
+ * Common event ring opcodes
+ */
+enum event_ring_opcode
+{
+ EVENT_RING_OPCODE_VF_PF_CHANNEL,
+ EVENT_RING_OPCODE_FUNCTION_START /* Start a function (for PFs only) */,
+ EVENT_RING_OPCODE_FUNCTION_STOP /* Stop a function (for PFs only) */,
+ EVENT_RING_OPCODE_CFC_DEL /* Delete a connection from CFC */,
+ EVENT_RING_OPCODE_CFC_DEL_WB /* Delete a connection from CFC (with write back) */,
+ EVENT_RING_OPCODE_STAT_QUERY /* Collect statistics counters */,
+ EVENT_RING_OPCODE_STOP_TRAFFIC /* Stop Tx traffic (before DCB updates) */,
+ EVENT_RING_OPCODE_START_TRAFFIC /* Start Tx traffic (after DCB updates) */,
+ EVENT_RING_OPCODE_VF_FLR /* VF FLR indication for PF */,
+ EVENT_RING_OPCODE_MALICIOUS_VF /* Malicious VF operation detected */,
+ EVENT_RING_OPCODE_FORWARD_SETUP /* Initialize forward channel */,
+ EVENT_RING_OPCODE_RSS_UPDATE_RULES /* Update RSS configuration */,
+ EVENT_RING_OPCODE_FUNCTION_UPDATE /* function update */,
+ EVENT_RING_OPCODE_AFEX_VIF_LISTS /* event ring opcode niv vif lists */,
+ EVENT_RING_OPCODE_SET_MAC /* Add/remove MAC (in E1x only) */,
+ EVENT_RING_OPCODE_CLASSIFICATION_RULES /* Add/remove MAC or VLAN (in E2/E3 only) */,
+ EVENT_RING_OPCODE_FILTERS_RULES /* Add/remove classification filters for L2 client (in E2/E3 only) */,
+ EVENT_RING_OPCODE_MULTICAST_RULES /* Add/remove multicast classification bin (in E2/E3 only) */,
+ EVENT_RING_OPCODE_SET_TIMESYNC /* Set Timesync Parameters (E3 Only) */,
+ MAX_EVENT_RING_OPCODE};
+
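+/*
+ * Illustrative consumer sketch for union event_ring_elem above: each
+ * 128-bit element is either a message or a next-page pointer, and a
+ * driver dispatches on the message opcode. The function name and
+ * handler body are placeholders; real ring/index bookkeeping is omitted.
+ */
+static inline void example_handle_event(union event_ring_elem *elem)
+{
+ switch (elem->message.opcode) {
+ case EVENT_RING_OPCODE_CFC_DEL:
+  /* elem->message.data.cfc_del_event.cid holds the deleted cid */
+  break;
+ default:
+  break;
+ }
+}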
+
+/*
+ * Modes for fairness algorithm
+ */
+enum fairness_mode
+{
+ FAIRNESS_COS_WRR_MODE /* Weighted round robin mode (used in Google) */,
+ FAIRNESS_COS_ETS_MODE /* ETS mode (used in FCoE) */,
+ MAX_FAIRNESS_MODE};
+
+
+/*
+ * Priority and cos $$KEEP_ENDIANNESS$$
+ */
+struct priority_cos
+{
+ uint8_t priority /* Priority */;
+ uint8_t cos /* Cos */;
+ uint16_t reserved1;
+};
+
+/*
+ * The data for flow control configuration $$KEEP_ENDIANNESS$$
+ */
+struct flow_control_configuration
+{
+ struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES] /* traffic_type to priority cos */;
+ uint8_t dcb_enabled /* If DCB mode is enabled, the traffic class to priority array is fully initialized and there must be an inner VLAN */;
+ uint8_t dcb_version /* DCB version. Increased by one on each DCB update */;
+ uint8_t dont_add_pri_0 /* In case the priority is 0 and the packet has no vlan, the firmware won't add a vlan */;
+ uint8_t reserved1;
+ uint32_t reserved2;
+};
+
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct function_start_data
+{
+ uint8_t function_mode /* the function mode */;
+ uint8_t allow_npar_tx_switching /* If set, inter-pf tx switching is allowed in Switch Independent function mode. (E2/E3 Only) */;
+ uint16_t sd_vlan_tag /* value of Vlan in case of switch depended multi-function mode */;
+ uint16_t vif_id /* value of VIF id in case of NIV multi-function mode */;
+ uint8_t path_id;
+ uint8_t network_cos_mode /* The cos mode for network traffic. */;
+ uint8_t dmae_cmd_id /* The DMAE command id to use for FW DMAE transactions */;
+ uint8_t gre_tunnel_mode /* GRE Tunnel Mode to enable on the Function (E2/E3 Only) */;
+ uint8_t gre_tunnel_rss /* Type of RSS to perform on GRE Tunneled packets */;
+ uint8_t nvgre_clss_en /* If set, NVGRE tunneled packets are classified according to their inner MAC (gre_mode must be NVGRE_TUNNEL) */;
+ uint16_t reserved1[2];
+};
+
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct function_update_data
+{
+ uint8_t vif_id_change_flg /* If set, vif_id will be checked */;
+ uint8_t afex_default_vlan_change_flg /* If set, afex_default_vlan will be checked */;
+ uint8_t allowed_priorities_change_flg /* If set, allowed_priorities will be checked */;
+ uint8_t network_cos_mode_change_flg /* If set, network_cos_mode will be checked */;
+ uint16_t vif_id /* value of VIF id in case of NIV multi-function mode */;
+ uint16_t afex_default_vlan /* value of default Vlan in case of NIV mf */;
+ uint8_t allowed_priorities /* bit vector of allowed Vlan priorities for this VIF */;
+ uint8_t network_cos_mode /* The cos mode for network traffic. */;
+ uint8_t lb_mode_en_change_flg /* If set, lb_mode_en will be checked */;
+ uint8_t lb_mode_en /* If set, niv loopback mode will be enabled */;
+ uint8_t tx_switch_suspend_change_flg /* If set, tx_switch_suspend will be checked */;
+ uint8_t tx_switch_suspend /* If set, TX switching TO this function will be disabled and packets will be dropped */;
+ uint8_t echo;
+ uint8_t reserved1;
+ uint8_t update_gre_cfg_flg /* If set, GRE config for the function will be updated according to the gre_tunnel_rss and nvgre_clss_en fields */;
+ uint8_t gre_tunnel_mode /* GRE Tunnel Mode to enable on the Function (E2/E3 Only) */;
+ uint8_t gre_tunnel_rss /* Type of RSS to perform on GRE Tunneled packets */;
+ uint8_t nvgre_clss_en /* If set, NVGRE tunneled packets are classified according to their inner MAC (gre_mode must be NVGRE_TUNNEL) */;
+ uint32_t reserved3;
+};
+
+
+/*
+ * FW version stored in the Xstorm RAM
+ */
+struct fw_version
+{
+#if defined(__BIG_ENDIAN)
+ uint8_t engineering /* firmware current engineering version */;
+ uint8_t revision /* firmware current revision version */;
+ uint8_t minor /* firmware current minor version */;
+ uint8_t major /* firmware current major version */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t major /* firmware current major version */;
+ uint8_t minor /* firmware current minor version */;
+ uint8_t revision /* firmware current revision version */;
+ uint8_t engineering /* firmware current engineering version */;
+#endif
+ uint32_t flags;
+#define FW_VERSION_OPTIMIZED (0x1<<0) /* BitField flags if set, this is optimized ASM */
+#define FW_VERSION_OPTIMIZED_SHIFT 0
+#define FW_VERSION_BIG_ENDIEN (0x1<<1) /* BitField flags if set, this is big-endian ASM */
+#define FW_VERSION_BIG_ENDIEN_SHIFT 1
+#define FW_VERSION_CHIP_VERSION (0x3<<2) /* BitField flags 1 - E1H */
+#define FW_VERSION_CHIP_VERSION_SHIFT 2
+#define __FW_VERSION_RESERVED (0xFFFFFFF<<4) /* BitField flags */
+#define __FW_VERSION_RESERVED_SHIFT 4
+};
+
+
+/*
+ * GRE RSS Mode
+ */
+enum gre_rss_mode
+{
+ GRE_OUTER_HEADERS_RSS /* RSS for GRE Packets is performed on the outer headers */,
+ GRE_INNER_HEADERS_RSS /* RSS for GRE Packets is performed on the inner headers */,
+ NVGRE_KEY_ENTROPY_RSS /* RSS for NVGRE Packets is done based on a hash containing the entropy bits from the GRE Key Field (gre_tunnel must be NVGRE_TUNNEL) */,
+ MAX_GRE_RSS_MODE};
+
+
+/*
+ * GRE Tunnel Mode
+ */
+enum gre_tunnel_type
+{
+ NO_GRE_TUNNEL,
+ NVGRE_TUNNEL /* NV-GRE Tunneling Microsoft L2 over GRE. GRE header contains mandatory Key Field. */,
+ L2GRE_TUNNEL /* L2-GRE Tunneling General L2 over GRE. GRE can contain Key field with Tenant ID and Sequence Field */,
+ IPGRE_TUNNEL /* IP-GRE Tunneling IP over GRE. GRE may contain Key field with Tenant ID, Sequence Field and/or Checksum Field */,
+ MAX_GRE_TUNNEL_TYPE};
+
+
+/*
+ * Dynamic Host-Coalescing - Driver(host) counters
+ */
+struct hc_dynamic_sb_drv_counters
+{
+ uint32_t dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES] /* Dynamic HC counters written by drivers */;
+};
+
+
+/*
+ * 2 bytes. configuration/state parameters for a single protocol index
+ */
+struct hc_index_data
+{
+#if defined(__BIG_ENDIAN)
+ uint8_t flags;
+#define HC_INDEX_DATA_SM_ID (0x1<<0) /* BitField flags Index to a state machine. Can be 0 or 1 */
+#define HC_INDEX_DATA_SM_ID_SHIFT 0
+#define HC_INDEX_DATA_HC_ENABLED (0x1<<1) /* BitField flags if set, host coalescing would be done for this index */
+#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2) /* BitField flags if set, dynamic HC will be done for this index */
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
+#define HC_INDEX_DATA_RESERVE (0x1F<<3) /* BitField flags */
+#define HC_INDEX_DATA_RESERVE_SHIFT 3
+ uint8_t timeout /* the timeout values for this index. Units are 4 usec */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t timeout /* the timeout values for this index. Units are 4 usec */;
+ uint8_t flags;
+#define HC_INDEX_DATA_SM_ID (0x1<<0) /* BitField flags Index to a state machine. Can be 0 or 1 */
+#define HC_INDEX_DATA_SM_ID_SHIFT 0
+#define HC_INDEX_DATA_HC_ENABLED (0x1<<1) /* BitField flags if set, host coalescing would be done for this index */
+#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2) /* BitField flags if set, dynamic HC will be done for this index */
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
+#define HC_INDEX_DATA_RESERVE (0x1F<<3) /* BitField flags */
+#define HC_INDEX_DATA_RESERVE_SHIFT 3
+#endif
+};
+
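+/*
+ * Illustrative sketch: enable plain host coalescing on state machine 0
+ * with a 100 usec timeout. Timeout units are 4 usec per the field
+ * comment, so 100 usec is 25 ticks (the function name and idx are
+ * hypothetical).
+ */
+static inline void example_hc_index(struct hc_index_data *idx)
+{
+ idx->flags = (0 << HC_INDEX_DATA_SM_ID_SHIFT) | HC_INDEX_DATA_HC_ENABLED;
+ idx->timeout = 100 / 4;
+}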
+
+/*
+ * HC state-machine
+ */
+struct hc_status_block_sm
+{
+#if defined(__BIG_ENDIAN)
+ uint8_t igu_seg_id;
+ uint8_t igu_sb_id /* sb_id within the IGU */;
+ uint8_t timer_value /* Determines the time_to_expire */;
+ uint8_t __flags;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t __flags;
+ uint8_t timer_value /* Determines the time_to_expire */;
+ uint8_t igu_sb_id /* sb_id within the IGU */;
+ uint8_t igu_seg_id;
+#endif
+ uint32_t time_to_expire /* The time in which it expects to wake up */;
+};
+
+/*
+ * hold PCI identification variables- used in various places in firmware
+ */
+struct pci_entity
+{
+#if defined(__BIG_ENDIAN)
+ uint8_t vf_valid /* If set, this is a VF, otherwise it is PF */;
+ uint8_t vf_id /* VF ID (0-63). Value of 0xFF means VF not valid */;
+ uint8_t vnic_id /* Virtual NIC ID (0-3) */;
+ uint8_t pf_id /* PCI physical function number (0-7). The LSB of this field is the port ID */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t pf_id /* PCI physical function number (0-7). The LSB of this field is the port ID */;
+ uint8_t vnic_id /* Virtual NIC ID (0-3) */;
+ uint8_t vf_id /* VF ID (0-63). Value of 0xFF means VF not valid */;
+ uint8_t vf_valid /* If set, this is a VF, otherwise it is PF */;
+#endif
+};
+
+/*
+ * The fast-path status block meta-data, common to all chips
+ */
+struct hc_sb_data
+{
+ struct regpair_native host_sb_addr /* Host status block address */;
+ struct hc_status_block_sm state_machine[HC_SB_MAX_SM] /* Holds the state machines of the status block */;
+ struct pci_entity p_func /* vnic / port of the status block to be set by the driver */;
+#if defined(__BIG_ENDIAN)
+ uint8_t rsrv0;
+ uint8_t state;
+ uint8_t dhc_qzone_id /* used in E2 only, to specify the HW queue zone ID used for this status block dynamic HC counters */;
+ uint8_t same_igu_sb_1b /* Indicates that both state machines act like a single sm */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t same_igu_sb_1b /* Indicates that both state machines act like a single sm */;
+ uint8_t dhc_qzone_id /* used in E2 only, to specify the HW queue zone ID used for this status block dynamic HC counters */;
+ uint8_t state;
+ uint8_t rsrv0;
+#endif
+ struct regpair_native rsrv1[2];
+};
+
+
+/*
+ * Segment types for host coalescing
+ */
+enum hc_segment
+{
+ HC_REGULAR_SEGMENT,
+ HC_DEFAULT_SEGMENT,
+ MAX_HC_SEGMENT};
+
+
+/*
+ * The fast-path status block meta-data
+ */
+struct hc_sp_status_block_data
+{
+ struct regpair_native host_sb_addr /* Host status block address */;
+#if defined(__BIG_ENDIAN)
+ uint8_t rsrv1;
+ uint8_t state;
+ uint8_t igu_seg_id /* segment id of the IGU */;
+ uint8_t igu_sb_id /* sb_id within the IGU */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t igu_sb_id /* sb_id within the IGU */;
+ uint8_t igu_seg_id /* segment id of the IGU */;
+ uint8_t state;
+ uint8_t rsrv1;
+#endif
+ struct pci_entity p_func /* vnic / port of the status block to be set by the driver */;
+};
+
+
+/*
+ * The fast-path status block meta-data
+ */
+struct hc_status_block_data_e1x
+{
+ struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X] /* configuration/state parameters for a single protocol index */;
+ struct hc_sb_data common /* The fast-path status block meta-data, common to all chips */;
+};
+
+
+/*
+ * The fast-path status block meta-data
+ */
+struct hc_status_block_data_e2
+{
+ struct hc_index_data index_data[HC_SB_MAX_INDICES_E2] /* configuration/state parameters for a single protocol index */;
+ struct hc_sb_data common /* The fast-path status block meta-data, common to all chips */;
+};
+
+
+/*
+ * IGU block operation modes (in Everest2)
+ */
+enum igu_mode
+{
+ HC_IGU_BC_MODE /* Backward compatible mode */,
+ HC_IGU_NBC_MODE /* Non-backward compatible mode */,
+ MAX_IGU_MODE};
+
+
+/*
+ * IP versions
+ */
+enum ip_ver
+{
+ IP_V4,
+ IP_V6,
+ MAX_IP_VER};
+
+
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id
+{
+ VF_PF_CHANNEL_NOT_READY /* Writing to VF/PF channel when it is not ready */,
+ ETH_ILLEGAL_BD_LENGTHS /* TX BD lengths error was detected */,
+ ETH_PACKET_TOO_SHORT /* TX packet is shorter than reported on BDs */,
+ ETH_PAYLOAD_TOO_BIG /* TX packet is greater than MTU */,
+ ETH_ILLEGAL_ETH_TYPE /* TX packet reported without VLAN but eth type is 0x8100 */,
+ ETH_ILLEGAL_LSO_HDR_LEN /* LSO header length on BDs and on hdr_nbd do not match */,
+ ETH_TOO_MANY_BDS /* Tx packet has too many BDs */,
+ ETH_ZERO_HDR_NBDS /* hdr_nbds field is zero */,
+ ETH_START_BD_NOT_SET /* start_bd should be set on first TX BD in packet */,
+ ETH_ILLEGAL_PARSE_NBDS /* Tx packet with parse_nbds field which is not legal */,
+ ETH_IPV6_AND_CHECKSUM /* Tx packet with IP checksum on IPv6 */,
+ ETH_VLAN_FLG_INCORRECT /* Tx packet with incorrect VLAN flag */,
+ ETH_ILLEGAL_LSO_MSS /* Tx LSO packet with illegal MSS value */,
+ ETH_TUNNEL_NOT_SUPPORTED /* Tunneling packets are not supported in current connection */,
+ MAX_MALICIOUS_VF_ERROR_ID};
+
+
+/*
+ * Multi-function modes
+ */
+enum mf_mode
+{
+ SINGLE_FUNCTION,
+ MULTI_FUNCTION_SD /* Switch dependent (vlan based) */,
+ MULTI_FUNCTION_SI /* Switch independent (mac based) */,
+ MULTI_FUNCTION_AFEX /* Switch dependent (niv based) */,
+ MAX_MF_MODE};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per pf) $$KEEP_ENDIANNESS$$
+ */
+struct tstorm_per_pf_stats
+{
+ struct regpair rcv_error_bytes /* number of bytes received with errors */;
+};
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct per_pf_stats
+{
+ struct tstorm_per_pf_stats tstorm_pf_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per port) $$KEEP_ENDIANNESS$$
+ */
+struct tstorm_per_port_stats
+{
+ uint32_t mac_discard /* number of packets with mac errors */;
+ uint32_t mac_filter_discard /* the number of good frames dropped because of no perfect match to MAC/VLAN address */;
+ uint32_t brb_truncate_discard /* the number of packets that were dropped because they were truncated in BRB */;
+ uint32_t mf_tag_discard /* the number of good frames dropped because of no match to the outer vlan/VNtag */;
+ uint32_t packet_drop /* general packet drop counter - incremented for every packet drop */;
+ uint32_t reserved;
+};
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct per_port_stats
+{
+ struct tstorm_per_port_stats tstorm_port_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per client) $$KEEP_ENDIANNESS$$
+ */
+struct tstorm_per_queue_stats
+{
+ struct regpair rcv_ucast_bytes /* number of bytes in unicast packets received without errors and pass the filter */;
+ uint32_t rcv_ucast_pkts /* number of unicast packets received without errors and pass the filter */;
+ uint32_t checksum_discard /* number of total packets received with checksum error */;
+ struct regpair rcv_bcast_bytes /* number of bytes in broadcast packets received without errors and pass the filter */;
+ uint32_t rcv_bcast_pkts /* number of broadcast packets received without errors and pass the filter */;
+ uint32_t pkts_too_big_discard /* number of too long packets received */;
+ struct regpair rcv_mcast_bytes /* number of bytes in multicast packets received without errors and pass the filter */;
+ uint32_t rcv_mcast_pkts /* number of multicast packets received without errors and pass the filter */;
+ uint32_t ttl0_discard /* the number of good frames dropped because of TTL=0 */;
+ uint16_t no_buff_discard;
+ uint16_t reserved0;
+ uint32_t reserved1;
+};
+
+/*
+ * Protocol-common statistics collected by the Ustorm (per client) $$KEEP_ENDIANNESS$$
+ */
+struct ustorm_per_queue_stats
+{
+ struct regpair ucast_no_buff_bytes /* the number of unicast bytes received from network dropped because of no buffer at host */;
+ struct regpair mcast_no_buff_bytes /* the number of multicast bytes received from network dropped because of no buffer at host */;
+ struct regpair bcast_no_buff_bytes /* the number of broadcast bytes received from network dropped because of no buffer at host */;
+ uint32_t ucast_no_buff_pkts /* the number of unicast frames received from network dropped because of no buffer at host */;
+ uint32_t mcast_no_buff_pkts /* the number of multicast frames received from network dropped because of no buffer at host */;
+ uint32_t bcast_no_buff_pkts /* the number of broadcast frames received from network dropped because of no buffer at host */;
+ uint32_t coalesced_pkts /* the number of packets coalesced in all aggregations */;
+ struct regpair coalesced_bytes /* the number of bytes coalesced in all aggregations */;
+ uint32_t coalesced_events /* the number of aggregations */;
+ uint32_t coalesced_aborts /* the number of exception which avoid aggregation */;
+};
+
+/*
+ * Protocol-common statistics collected by the Xstorm (per client) $$KEEP_ENDIANNESS$$
+ */
+struct xstorm_per_queue_stats
+{
+ struct regpair ucast_bytes_sent /* number of total bytes sent without errors */;
+ struct regpair mcast_bytes_sent /* number of total bytes sent without errors */;
+ struct regpair bcast_bytes_sent /* number of total bytes sent without errors */;
+ uint32_t ucast_pkts_sent /* number of total packets sent without errors */;
+ uint32_t mcast_pkts_sent /* number of total packets sent without errors */;
+ uint32_t bcast_pkts_sent /* number of total packets sent without errors */;
+ uint32_t error_drop_pkts /* number of total packets dropped due to errors */;
+};
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct per_queue_stats
+{
+ struct tstorm_per_queue_stats tstorm_queue_statistics;
+ struct ustorm_per_queue_stats ustorm_queue_statistics;
+ struct xstorm_per_queue_stats xstorm_queue_statistics;
+};
+
+
+/*
+ * FW version stored in first line of pram $$KEEP_ENDIANNESS$$
+ */
+struct pram_fw_version
+{
+ uint8_t major /* firmware current major version */;
+ uint8_t minor /* firmware current minor version */;
+ uint8_t revision /* firmware current revision version */;
+ uint8_t engineering /* firmware current engineering version */;
+ uint8_t flags;
+#define PRAM_FW_VERSION_OPTIMIZED (0x1<<0) /* BitField flags if set, this is optimized ASM */
+#define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0
+#define PRAM_FW_VERSION_STORM_ID (0x3<<1) /* BitField flags storm_id identification */
+#define PRAM_FW_VERSION_STORM_ID_SHIFT 1
+#define PRAM_FW_VERSION_BIG_ENDIEN (0x1<<3) /* BitField flags if set, this is big-endian ASM */
+#define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3
+#define PRAM_FW_VERSION_CHIP_VERSION (0x3<<4) /* BitField flags 1 - E1H */
+#define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4
+#define __PRAM_FW_VERSION_RESERVED0 (0x3<<6) /* BitField flags */
+#define __PRAM_FW_VERSION_RESERVED0_SHIFT 6
+};
+
+
+/*
+ * Ethernet slow path element
+ */
+union protocol_common_specific_data
+{
+ uint8_t protocol_data[8] /* to fix this structure size to 8 bytes */;
+ struct regpair phy_address /* SPE physical address */;
+ struct regpair mac_config_addr /* physical address of the MAC configuration command, as allocated by the driver */;
+ struct afex_vif_list_ramrod_data afex_vif_list_data /* The data afex vif list ramrod need */;
+};
+
+/*
+ * The send queue element
+ */
+struct protocol_common_spe
+{
+ struct spe_hdr hdr /* SPE header */;
+ union protocol_common_specific_data data /* data specific to common protocol */;
+};
+
+
+/*
+ * The data for the Set Timesync Ramrod $$KEEP_ENDIANNESS$$
+ */
+struct set_timesync_ramrod_data
+{
+ uint8_t drift_adjust_cmd /* Timesync Drift Adjust Command */;
+ uint8_t offset_cmd /* Timesync Offset Command */;
+ uint8_t add_sub_drift_adjust_value /* Whether to add(1)/subtract(0) Drift Adjust Value from the Offset */;
+ uint8_t drift_adjust_value /* Drift Adjust Value (in ns) */;
+ uint32_t drift_adjust_period /* Drift Adjust Period (in us) */;
+ struct regpair offset_delta /* Timesync Offset Delta (in ns) */;
+};
+
+
+/*
+ * The send queue element
+ */
+struct slow_path_element
+{
+ struct spe_hdr hdr /* common data for all protocols */;
+ struct regpair protocol_data /* additional data specific to the protocol */;
+};
+
+
+/*
+ * Protocol-common statistics counter $$KEEP_ENDIANNESS$$
+ */
+struct stats_counter
+{
+ uint16_t xstats_counter /* xstorm statistics counter */;
+ uint16_t reserved0;
+ uint32_t reserved1;
+ uint16_t tstats_counter /* tstorm statistics counter */;
+ uint16_t reserved2;
+ uint32_t reserved3;
+ uint16_t ustats_counter /* ustorm statistics counter */;
+ uint16_t reserved4;
+ uint32_t reserved5;
+ uint16_t cstats_counter /* cstorm statistics counter */;
+ uint16_t reserved6;
+ uint32_t reserved7;
+};
+
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct stats_query_entry
+{
+ uint8_t kind;
+ uint8_t index /* queue index */;
+ uint16_t funcID /* the function the statistics will be sent to */;
+ uint32_t reserved;
+ struct regpair address /* pxp address */;
+};
+
+/*
+ * statistic command $$KEEP_ENDIANNESS$$
+ */
+struct stats_query_cmd_group
+{
+ struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+};
+
+
+/*
+ * statistic command header $$KEEP_ENDIANNESS$$
+ */
+struct stats_query_header
+{
+ uint8_t cmd_num /* command number */;
+ uint8_t reserved0;
+ uint16_t drv_stats_counter;
+ uint32_t reserved1;
+ struct regpair stats_counters_addrs /* stats counter */;
+};
+
+
+/*
+ * Types of statistics query entry
+ */
+enum stats_query_type
+{
+ STATS_TYPE_QUEUE,
+ STATS_TYPE_PORT,
+ STATS_TYPE_PF,
+ STATS_TYPE_TOE,
+ STATS_TYPE_FCOE,
+ MAX_STATS_QUERY_TYPE};
+
+
+/*
+ * Indicates the function status block state
+ */
+enum status_block_state
+{
+ SB_DISABLED,
+ SB_ENABLED,
+ SB_CLEANED,
+ MAX_STATUS_BLOCK_STATE};
+
+
+/*
+ * Storm IDs (including attentions for IGU related enums)
+ */
+enum storm_id
+{
+ USTORM_ID,
+ CSTORM_ID,
+ XSTORM_ID,
+ TSTORM_ID,
+ ATTENTION_ID,
+ MAX_STORM_ID};
+
+
+/*
+ * Traffic types used in ETS and flow control algorithms
+ */
+enum traffic_type
+{
+ LLFC_TRAFFIC_TYPE_NW /* Networking */,
+ LLFC_TRAFFIC_TYPE_FCOE /* FCoE */,
+ LLFC_TRAFFIC_TYPE_ISCSI /* iSCSI */,
+ MAX_TRAFFIC_TYPE};
+
+
+/*
+ * zone A per-queue data
+ */
+struct tstorm_queue_zone_data
+{
+ struct regpair reserved[4];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct tstorm_vf_zone_data
+{
+ struct regpair reserved;
+};
+
+
+/*
+ * Add or Subtract Value for Set Timesync Ramrod
+ */
+enum ts_add_sub_value
+{
+ TS_SUB_VALUE /* Subtract Value */,
+ TS_ADD_VALUE /* Add Value */,
+ MAX_TS_ADD_SUB_VALUE};
+
+
+/*
+ * Drift-Adjust Commands for Set Timesync Ramrod
+ */
+enum ts_drift_adjust_cmd
+{
+ TS_DRIFT_ADJUST_KEEP /* Keep Drift-Adjust at current values */,
+ TS_DRIFT_ADJUST_SET /* Set Drift-Adjust */,
+ TS_DRIFT_ADJUST_RESET /* Reset Drift-Adjust */,
+ MAX_TS_DRIFT_ADJUST_CMD};
+
+
+/*
+ * Offset Commands for Set Timesync Ramrod
+ */
+enum ts_offset_cmd
+{
+ TS_OFFSET_KEEP /* Keep Offset at current values */,
+ TS_OFFSET_INC /* Increase Offset by Offset Delta */,
+ TS_OFFSET_DEC /* Decrease Offset by Offset Delta */,
+ MAX_TS_OFFSET_CMD};
+
+
+/*
+ * zone A per-queue data
+ */
+struct ustorm_queue_zone_data
+{
+ union ustorm_eth_rx_producers eth_rx_producers /* ETH RX rings producers */;
+ struct regpair reserved[3];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct ustorm_vf_zone_data
+{
+ struct regpair reserved;
+};
+
+
+/*
+ * data per VF-PF channel
+ */
+struct vf_pf_channel_data
+{
+#if defined(__BIG_ENDIAN)
+ uint16_t reserved0;
+ uint8_t valid /* flag for channel validity. (cleared when a VF is identified as malicious) */;
+ uint8_t state /* channel state (ready / waiting for ack) */;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t state /* channel state (ready / waiting for ack) */;
+ uint8_t valid /* flag for channel validity. (cleared when a VF is identified as malicious) */;
+ uint16_t reserved0;
+#endif
+ uint32_t reserved1;
+};
+
+
+/*
+ * State of VF-PF channel
+ */
+enum vf_pf_channel_state
+{
+ VF_PF_CHANNEL_STATE_READY /* Channel is ready to accept a message from VF */,
+ VF_PF_CHANNEL_STATE_WAITING_FOR_ACK /* Channel waits for an ACK from PF */,
+ MAX_VF_PF_CHANNEL_STATE};
+
+
+/*
+ * vif_list_rule_kind
+ */
+enum vif_list_rule_kind
+{
+ VIF_LIST_RULE_SET,
+ VIF_LIST_RULE_GET,
+ VIF_LIST_RULE_CLEAR_ALL,
+ VIF_LIST_RULE_CLEAR_FUNC,
+ MAX_VIF_LIST_RULE_KIND};
+
+
+/*
+ * zone A per-queue data
+ */
+struct xstorm_queue_zone_data
+{
+ struct regpair reserved[4];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct xstorm_vf_zone_data
+{
+ struct regpair reserved;
+};
+
+
+#endif /* ECORE_HSI_H */
diff --git a/src/dpdk22/drivers/net/bnx2x/ecore_init.h b/src/dpdk22/drivers/net/bnx2x/ecore_init.h
new file mode 100644
index 00000000..d25e2803
--- /dev/null
+++ b/src/dpdk22/drivers/net/bnx2x/ecore_init.h
@@ -0,0 +1,819 @@
+/*-
+ * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ */
+
+#ifndef ECORE_INIT_H
+#define ECORE_INIT_H
+
+/* Init operation types and structures */
+enum {
+ OP_RD = 0x1, /* read a single register */
+ OP_WR, /* write a single register */
+ OP_SW, /* copy a string to the device */
+ OP_ZR, /* clear memory */
+ OP_ZP, /* unzip then copy with DMAE */
+ OP_WR_64, /* write 64 bit pattern */
+ OP_WB, /* copy a string using DMAE */
+ OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */
+ OP_IF_MODE_OR, /* Skip the following ops if all init modes don't match */
+ OP_IF_MODE_AND, /* Skip the following ops if any init modes don't match */
+ OP_IF_PHASE,
+ OP_RT,
+ OP_DELAY,
+ OP_VERIFY,
+ OP_MAX
+};
+
+enum {
+ STAGE_START,
+ STAGE_END,
+};
+
+/* Returns the index of the start or end of a specific block stage in the ops array */
+#define BLOCK_OPS_IDX(block, stage, end) \
+ (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
+
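+/*
+ * Example: with NUM_OF_INIT_PHASES == 11 (see the phases enum below),
+ * the end index of the BRB1 ops for the PORT0 phase is
+ * BLOCK_OPS_IDX(BLOCK_BRB1, PHASE_PORT0, STAGE_END)
+ *   = 2 * (1 * 11 + 1) + 1 = 25.
+ */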
+
+/* structs for the various opcodes */
+struct raw_op {
+ uint32_t op:8;
+ uint32_t offset:24;
+ uint32_t raw_data;
+};
+
+struct op_read {
+ uint32_t op:8;
+ uint32_t offset:24;
+ uint32_t val;
+};
+
+struct op_write {
+ uint32_t op:8;
+ uint32_t offset:24;
+ uint32_t val;
+};
+
+struct op_arr_write {
+ uint32_t op:8;
+ uint32_t offset:24;
+#ifdef __BIG_ENDIAN
+ uint16_t data_len;
+ uint16_t data_off;
+#else /* __LITTLE_ENDIAN */
+ uint16_t data_off;
+ uint16_t data_len;
+#endif
+};
+
+struct op_zero {
+ uint32_t op:8;
+ uint32_t offset:24;
+ uint32_t len;
+};
+
+struct op_if_mode {
+ uint32_t op:8;
+ uint32_t cmd_offset:24;
+ uint32_t mode_bit_map;
+};
+
+struct op_if_phase {
+ uint32_t op:8;
+ uint32_t cmd_offset:24;
+ uint32_t phase_bit_map;
+};
+
+struct op_delay {
+ uint32_t op:8;
+ uint32_t reserved:24;
+ uint32_t delay;
+};
+
+union init_op {
+ struct op_read read;
+ struct op_write write;
+ struct op_arr_write arr_wr;
+ struct op_zero zero;
+ struct raw_op raw;
+ struct op_if_mode if_mode;
+ struct op_if_phase if_phase;
+ struct op_delay delay;
+};
+
+
+/* Init Phases */
+enum {
+ PHASE_COMMON,
+ PHASE_PORT0,
+ PHASE_PORT1,
+ PHASE_PF0,
+ PHASE_PF1,
+ PHASE_PF2,
+ PHASE_PF3,
+ PHASE_PF4,
+ PHASE_PF5,
+ PHASE_PF6,
+ PHASE_PF7,
+ NUM_OF_INIT_PHASES
+};
+
+/* Init Modes */
+enum {
+ MODE_ASIC = 0x00000001,
+ MODE_FPGA = 0x00000002,
+ MODE_EMUL = 0x00000004,
+ MODE_E2 = 0x00000008,
+ MODE_E3 = 0x00000010,
+ MODE_PORT2 = 0x00000020,
+ MODE_PORT4 = 0x00000040,
+ MODE_SF = 0x00000080,
+ MODE_MF = 0x00000100,
+ MODE_MF_SD = 0x00000200,
+ MODE_MF_SI = 0x00000400,
+ MODE_MF_AFEX = 0x00000800,
+ MODE_E3_A0 = 0x00001000,
+ MODE_E3_B0 = 0x00002000,
+ MODE_COS3 = 0x00004000,
+ MODE_COS6 = 0x00008000,
+ MODE_LITTLE_ENDIAN = 0x00010000,
+ MODE_BIG_ENDIAN = 0x00020000,
+};
+
+/* Init Blocks */
+enum {
+ BLOCK_ATC,
+ BLOCK_BRB1,
+ BLOCK_CCM,
+ BLOCK_CDU,
+ BLOCK_CFC,
+ BLOCK_CSDM,
+ BLOCK_CSEM,
+ BLOCK_DBG,
+ BLOCK_DMAE,
+ BLOCK_DORQ,
+ BLOCK_HC,
+ BLOCK_IGU,
+ BLOCK_MISC,
+ BLOCK_NIG,
+ BLOCK_PBF,
+ BLOCK_PGLUE_B,
+ BLOCK_PRS,
+ BLOCK_PXP2,
+ BLOCK_PXP,
+ BLOCK_QM,
+ BLOCK_SRC,
+ BLOCK_TCM,
+ BLOCK_TM,
+ BLOCK_TSDM,
+ BLOCK_TSEM,
+ BLOCK_UCM,
+ BLOCK_UPB,
+ BLOCK_USDM,
+ BLOCK_USEM,
+ BLOCK_XCM,
+ BLOCK_XPB,
+ BLOCK_XSDM,
+ BLOCK_XSEM,
+ BLOCK_MISC_AEU,
+ NUM_OF_INIT_BLOCKS
+};
+
+
+
+
+
+
+
+
+/* Vnics per mode */
+#define ECORE_PORT2_MODE_NUM_VNICS 4
+
+
+/* QM queue numbers */
+#define ECORE_ETH_Q 0
+#define ECORE_TOE_Q 3
+#define ECORE_TOE_ACK_Q 6
+#define ECORE_ISCSI_Q 9
+#define ECORE_ISCSI_ACK_Q 11
+#define ECORE_FCOE_Q 10
+
+/* Vnics per mode */
+#define ECORE_PORT4_MODE_NUM_VNICS 2
+
+/* COS offset for port1 in E3 B0 4port mode */
+#define ECORE_E3B0_PORT1_COS_OFFSET 3
+
+/* QM Register addresses */
+#define ECORE_Q_VOQ_REG_ADDR(pf_q_num)\
+ (QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
+#define ECORE_VOQ_Q_REG_ADDR(cos, pf_q_num)\
+ (QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
+#define ECORE_Q_CMDQ_REG_ADDR(pf_q_num)\
+ (QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))
+
+/* extracts the QM queue number for the specified port and vnic */
+#define ECORE_PF_Q_NUM(q_num, port, vnic)\
+ ((((port) << 1) | (vnic)) * 16 + (q_num))
+
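+/*
+ * Example: the FCoE queue (ECORE_FCOE_Q == 10) of port 1, vnic 0 maps to
+ * ECORE_PF_Q_NUM(10, 1, 0) = ((1 << 1) | 0) * 16 + 10 = 42.
+ */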
+
+/* Maps the specified queue to the specified COS */
+static inline void ecore_map_q_cos(struct bnx2x_softc *sc, uint32_t q_num, uint32_t new_cos)
+{
+ /* find current COS mapping */
+ uint32_t curr_cos = REG_RD(sc, QM_REG_QVOQIDX_0 + q_num * 4);
+
+ /* check if queue->COS mapping has changed */
+ if (curr_cos != new_cos) {
+ uint32_t num_vnics = ECORE_PORT2_MODE_NUM_VNICS;
+ uint32_t reg_addr, reg_bit_map, vnic;
+
+ /* update parameters for 4port mode */
+ if (INIT_MODE_FLAGS(sc) & MODE_PORT4) {
+ num_vnics = ECORE_PORT4_MODE_NUM_VNICS;
+ if (PORT_ID(sc)) {
+ curr_cos += ECORE_E3B0_PORT1_COS_OFFSET;
+ new_cos += ECORE_E3B0_PORT1_COS_OFFSET;
+ }
+ }
+
+ /* change queue mapping for each VNIC */
+ for (vnic = 0; vnic < num_vnics; vnic++) {
+ uint32_t pf_q_num =
+ ECORE_PF_Q_NUM(q_num, PORT_ID(sc), vnic);
+ uint32_t q_bit_map = 1 << (pf_q_num & 0x1f);
+
+ /* overwrite queue->VOQ mapping */
+ REG_WR(sc, ECORE_Q_VOQ_REG_ADDR(pf_q_num), new_cos);
+
+ /* clear queue bit from current COS bit map */
+ reg_addr = ECORE_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
+ reg_bit_map = REG_RD(sc, reg_addr);
+ REG_WR(sc, reg_addr, reg_bit_map & (~q_bit_map));
+
+ /* set queue bit in new COS bit map */
+ reg_addr = ECORE_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
+ reg_bit_map = REG_RD(sc, reg_addr);
+ REG_WR(sc, reg_addr, reg_bit_map | q_bit_map);
+
+ /* set/clear queue bit in command-queue bit map
+ (E2/E3A0 only, valid COS values are 0/1) */
+ if (!(INIT_MODE_FLAGS(sc) & MODE_E3_B0)) {
+ reg_addr = ECORE_Q_CMDQ_REG_ADDR(pf_q_num);
+ reg_bit_map = REG_RD(sc, reg_addr);
+ q_bit_map = 1 << (2 * (pf_q_num & 0xf));
+ reg_bit_map = new_cos ?
+ (reg_bit_map | q_bit_map) :
+ (reg_bit_map & (~q_bit_map));
+ REG_WR(sc, reg_addr, reg_bit_map);
+ }
+ }
+ }
+}
+
+/* Configures the QM according to the specified per-traffic-type COSes */
+static inline void ecore_dcb_config_qm(struct bnx2x_softc *sc, enum cos_mode mode,
+ struct priority_cos *traffic_cos)
+{
+ ecore_map_q_cos(sc, ECORE_FCOE_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
+ ecore_map_q_cos(sc, ECORE_ISCSI_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+ ecore_map_q_cos(sc, ECORE_ISCSI_ACK_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+ if (mode != STATIC_COS) {
+ /* required only in OVERRIDE_COS mode */
+ ecore_map_q_cos(sc, ECORE_ETH_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+ ecore_map_q_cos(sc, ECORE_TOE_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+ ecore_map_q_cos(sc, ECORE_TOE_ACK_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+ }
+}
+
+
+/*
+ * congestion management port init api description
+ * the api works as follows:
+ * the driver should pass the cmng_init_input struct, the port_init function
+ * will prepare the required internal ram structure which will be passed back
+ * to the driver (cmng_init) that will write it into the internal ram.
+ *
+ * IMPORTANT REMARKS:
+ * 1. the cmng_init struct does not represent the contiguous internal ram
+ * structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET
+ * offset in order to write the port sub struct and the
+ * PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other
+ * words - don't use memcpy!).
+ * 2. although the cmng_init struct is filled for the maximal vnic number
+ * possible, the driver should only write the valid vnics into the internal
+ * ram according to the appropriate port mode.
+ */
+#define BITS_TO_BYTES(x) ((x)/8)
+
+/* CMNG constants, as derived from system spec calculations */
+
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE 100
+
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC 400
+
+/*
+ * number of bytes in single QM arbitration cycle -
+ * coefficient for calculating the fairness timer
+ */
+#define QM_ARB_BYTES 160000
+
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+
+/*
+ * how many bytes above threshold for
+ * the minimal credit of Min algorithm
+ */
+#define MIN_ABOVE_THRESH 32768
+
+/*
+ * Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair
+ */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+
+/* Memory of fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
+#define SAFC_TIMEOUT_USEC 52
+
+#define SDM_TICKS 4
+
+
+static inline void ecore_init_max(const struct cmng_init_input *input_data,
+ uint32_t r_param, struct cmng_init *ram_data)
+{
+ uint32_t vnic;
+ struct cmng_vnic *vdata = &ram_data->vnic;
+ struct cmng_struct_per_port *pdata = &ram_data->port;
+ /*
+ * rate shaping per-port variables
+ * the periodic timeout is converted from microseconds to SDM ticks
+ * (each tick is 4 microseconds, so e.g. 100 usec = 25 ticks)
+ */
+
+ pdata->rs_vars.rs_periodic_timeout =
+ RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS;
+
+ /* this is the threshold below which no timer arming will occur.
+ * 1.25 coefficient is for the threshold to be a little bigger
+ * than the real time, to compensate for timer inaccuracy
+ */
+ pdata->rs_vars.rs_threshold =
+ (5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4;
+
+ /* rate shaping per-vnic variables */
+ for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) {
+ /* global vnic counter */
+ vdata->vnic_max_rate[vnic].vn_counter.rate =
+ input_data->vnic_max_rate[vnic];
+ /*
+ * maximal Mbps for this vnic
+ * the quota in each timer period - number of bytes
+ * transmitted in this period
+ */
+ vdata->vnic_max_rate[vnic].vn_counter.quota =
+ RS_PERIODIC_TIMEOUT_USEC *
+ (uint32_t)vdata->vnic_max_rate[vnic].vn_counter.rate / 8;
+ }
+
+}
+
+static inline void ecore_init_max_per_vn(uint16_t vnic_max_rate,
+ struct rate_shaping_vars_per_vn *ram_data)
+{
+ /* global vnic counter */
+ ram_data->vn_counter.rate = vnic_max_rate;
+
+ /*
+ * maximal Mbps for this vnic
+ * the quota in each timer period - number of bytes
+ * transmitted in this period
+ */
+ ram_data->vn_counter.quota =
+ RS_PERIODIC_TIMEOUT_USEC * (uint32_t)vnic_max_rate / 8;
+}
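+
+/*
+ * Worked example of the quota formula above: for a 10G vnic
+ * (vnic_max_rate = 10000 Mbps = 1250 bytes/usec), the quota is
+ * 400 * 10000 / 8 = 500000 bytes per 400 usec timer period.
+ */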
+
+static inline void ecore_init_min(const struct cmng_init_input *input_data,
+ uint32_t r_param, struct cmng_init *ram_data)
+{
+ uint32_t vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair;
+ struct cmng_vnic *vdata = &ram_data->vnic;
+ struct cmng_struct_per_port *pdata = &ram_data->port;
+
+ /* this is the resolution of the fairness timer */
+ fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
+
+ /*
+ * fairness per-port variables
+ * for 10G it is 1000usec. for 1G it is 10000usec.
+ */
+ tFair = T_FAIR_COEF / input_data->port_rate;
+
+ /* this is the threshold below which we won't arm the timer anymore */
+ pdata->fair_vars.fair_threshold = QM_ARB_BYTES;
+
+ /*
+ * we multiply by 1e3/8 to get bytes/msec. We don't want the credits
+ * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution)
+ */
+ pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM;
+
+ /* since each tick is 4 microSeconds */
+ pdata->fair_vars.fairness_timeout =
+ fair_periodic_timeout_usec / SDM_TICKS;
+
+ /* calculate sum of weights */
+ vnicWeightSum = 0;
+
+ for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++)
+ vnicWeightSum += input_data->vnic_min_rate[vnic];
+
+ /* global vnic counter */
+ if (vnicWeightSum > 0) {
+ /* fairness per-vnic variables */
+ for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) {
+ /*
+ * this is the credit for each period of the fairness
+ * algorithm - number of bytes in T_FAIR (this vnic
+ * share of the port rate)
+ */
+ vdata->vnic_min_rate[vnic].vn_credit_delta =
+ ((uint32_t)(input_data->vnic_min_rate[vnic]) * 100 *
+ (T_FAIR_COEF / (8 * 100 * vnicWeightSum)));
+ if (vdata->vnic_min_rate[vnic].vn_credit_delta <
+ pdata->fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH) {
+ vdata->vnic_min_rate[vnic].vn_credit_delta =
+ pdata->fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH;
+ }
+ }
+ }
+}
+
+static inline void ecore_init_fw_wrr(const struct cmng_init_input *input_data,
+ struct cmng_init *ram_data)
+{
+ uint32_t vnic, cos;
+ uint32_t cosWeightSum = 0;
+ struct cmng_vnic *vdata = &ram_data->vnic;
+ struct cmng_struct_per_port *pdata = &ram_data->port;
+
+ for (cos = 0; cos < MAX_COS_NUMBER; cos++)
+ cosWeightSum += input_data->cos_min_rate[cos];
+
+ if (cosWeightSum > 0) {
+
+ for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) {
+ /*
+ * Since cos and vnic fairness shouldn't be active together, the
+ * rate to divide between the coses is the port rate.
+ */
+ uint32_t *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta;
+ for (cos = 0; cos < MAX_COS_NUMBER; cos++) {
+ /*
+ * this is the credit for each period of
+ * the fairness algorithm - number of bytes
+ * in T_FAIR (this cos share of the vnic rate)
+ */
+ ccd[cos] =
+ ((uint32_t)input_data->cos_min_rate[cos] * 100 *
+ (T_FAIR_COEF / (8 * 100 * cosWeightSum)));
+ if (ccd[cos] < pdata->fair_vars.fair_threshold
+ + MIN_ABOVE_THRESH) {
+ ccd[cos] =
+ pdata->fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH;
+ }
+ }
+ }
+ }
+}
+
+static inline void ecore_init_safc(struct cmng_init *ram_data)
+{
+ /* in microSeconds */
+ ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
+}
+
+/* Congestion management port init */
+static inline void ecore_init_cmng(const struct cmng_init_input *input_data,
+ struct cmng_init *ram_data)
+{
+ uint32_t r_param;
+ ECORE_MEMSET(ram_data, 0, sizeof(struct cmng_init));
+
+ ram_data->port.flags = input_data->flags;
+
+ /*
+ * number of bytes transmitted in a rate of 10Gbps
+ * in one usec = 1.25KB.
+ */
+ r_param = BITS_TO_BYTES(input_data->port_rate);
+ ecore_init_max(input_data, r_param, ram_data);
+ ecore_init_min(input_data, r_param, ram_data);
+ ecore_init_fw_wrr(input_data, ram_data);
+ ecore_init_safc(ram_data);
+}
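+
+/*
+ * Illustrative usage of the API described above (a sketch only -
+ * example_init_cmng is a hypothetical name, not part of the original
+ * file). It covers the prepare step; writing ram_data into internal
+ * RAM is driver specific and, per remark 1 of the API description,
+ * must write the port and vnic sub-structs at their own offsets
+ * rather than with a single memcpy.
+ */
+static inline void example_init_cmng(struct cmng_init *ram_data)
+{
+ struct cmng_init_input input;
+
+ ECORE_MEMSET(&input, 0, sizeof(struct cmng_init_input));
+ input.port_rate = 10000; /* port speed in Mbps (10G here) */
+ ecore_init_cmng(&input, ram_data);
+}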
+
+
+
+
+/* Returns the index of start or end of a specific block stage in ops array */
+#define BLOCK_OPS_IDX(block, stage, end) \
+ (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
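+
+/*
+ * Example (assuming STAGE_END == 1 as the 'end' selector): with
+ * NUM_OF_INIT_PHASES = 11 and BLOCK_DMAE = 8 in the enums above, the
+ * end index of the DMAE common-phase ops is
+ * BLOCK_OPS_IDX(BLOCK_DMAE, PHASE_COMMON, 1) = 2*((8*11) + 0) + 1 = 177.
+ */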
+
+
+#define INITOP_SET 0 /* set the HW directly */
+#define INITOP_CLEAR 1 /* clear the HW directly */
+#define INITOP_INIT 2 /* set the init-value array */
+
+/****************************************************************************
+* ILT management
+****************************************************************************/
+struct ilt_line {
+ ecore_dma_addr_t page_mapping;
+ void *page;
+ uint32_t size;
+};
+
+struct ilt_client_info {
+ uint32_t page_size;
+ uint16_t start;
+ uint16_t end;
+ uint16_t client_num;
+ uint16_t flags;
+#define ILT_CLIENT_SKIP_INIT 0x1
+#define ILT_CLIENT_SKIP_MEM 0x2
+};
+
+struct ecore_ilt {
+ uint32_t start_line;
+ struct ilt_line *lines;
+ struct ilt_client_info clients[4];
+#define ILT_CLIENT_CDU 0
+#define ILT_CLIENT_QM 1
+#define ILT_CLIENT_SRC 2
+#define ILT_CLIENT_TM 3
+};
+
+/****************************************************************************
+* SRC configuration
+****************************************************************************/
+struct src_ent {
+ uint8_t opaque[56];
+ uint64_t next;
+};
+
+/****************************************************************************
+* Parity configuration
+****************************************************************************/
+#define BLOCK_PRTY_INFO(block, en_mask, m1h, m2, m3) \
+{ \
+ block##_REG_##block##_PRTY_MASK, \
+ block##_REG_##block##_PRTY_STS_CLR, \
+ en_mask, {m1h, m2, m3}, #block \
+}
+
+#define BLOCK_PRTY_INFO_0(block, en_mask, m1h, m2, m3) \
+{ \
+ block##_REG_##block##_PRTY_MASK_0, \
+ block##_REG_##block##_PRTY_STS_CLR_0, \
+ en_mask, {m1h, m2, m3}, #block"_0" \
+}
+
+#define BLOCK_PRTY_INFO_1(block, en_mask, m1h, m2, m3) \
+{ \
+ block##_REG_##block##_PRTY_MASK_1, \
+ block##_REG_##block##_PRTY_STS_CLR_1, \
+ en_mask, {m1h, m2, m3}, #block"_1" \
+}
+
+static const struct {
+ uint32_t mask_addr;
+ uint32_t sts_clr_addr;
+ uint32_t en_mask; /* Mask to enable parity attentions */
+ struct {
+ uint32_t e1h; /* 57711 */
+ uint32_t e2; /* 57712 */
+ uint32_t e3; /* 578xx */
+ } reg_mask; /* Register mask (all valid bits) */
+ char name[8]; /* Block's longest name is 7 characters long
+ * (name + suffix)
+ */
+} ecore_blocks_parity_data[] = {
+ /* bit 19 masked */
+ /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
+ /* bit 5,18,20-31 */
+ /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
+ /* bit 5 */
+ /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
+ /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
+ /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
+
+ /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
+ * want to handle "system kill" flow at the moment.
+ */
+ BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x7ffffff,
+ 0x7ffffff),
+ BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(PXP2, 0x1ffffff, 0x7f, 0x7ff, 0x1ffffff),
+ BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0, 0),
+ BLOCK_PRTY_INFO(NIG, 0xffffffff, 0xffffffff, 0, 0),
+ BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0xff, 0xffff),
+ BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(QM, 0, 0xfff, 0xfff, 0xfff),
+ BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0x3, 0x3),
+ BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
+ {GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
+ GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
+ {0xf, 0xf, 0xf}, "UPB"},
+ {GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
+ GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
+ {0xf, 0xf, 0xf}, "XPB"},
+ BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7),
+ BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0x3f),
+ BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
+ BLOCK_PRTY_INFO(PBF, 0, 0x3ffff, 0xfffff, 0xfffffff),
+ BLOCK_PRTY_INFO(TM, 0, 0x7f, 0x7f, 0x7f),
+ BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(TCM, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO(CCM, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO(UCM, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO(XCM, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
+ BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(TSEM, 0, 0x1f, 0x3f, 0x3f),
+ BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(USEM, 0, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(CSEM, 0, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(XSEM, 0, 0x1f, 0x3f, 0x3f),
+};
+
+
+/* [28] MCP Latched rom_parity
+ * [29] MCP Latched ump_rx_parity
+ * [30] MCP Latched ump_tx_parity
+ * [31] MCP Latched scpad_parity
+ */
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+ (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+/* Below registers control the MCP parity attention output. When
+ * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
+ * enabled, when cleared - disabled.
+ */
+static const uint32_t mcp_attn_ctl_regs[] = {
+ MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+ MISC_REG_AEU_ENABLE4_NIG_0,
+ MISC_REG_AEU_ENABLE4_PXP_0,
+ MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+ MISC_REG_AEU_ENABLE4_NIG_1,
+ MISC_REG_AEU_ENABLE4_PXP_1
+};
+
+static inline void ecore_set_mcp_parity(struct bnx2x_softc *sc, uint8_t enable)
+{
+ uint32_t i;
+ uint32_t reg_val;
+
+ for (i = 0; i < ARRSIZE(mcp_attn_ctl_regs); i++) {
+ reg_val = REG_RD(sc, mcp_attn_ctl_regs[i]);
+
+ if (enable)
+ reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+ else
+ reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+
+ REG_WR(sc, mcp_attn_ctl_regs[i], reg_val);
+ }
+}
+
+static inline uint32_t ecore_parity_reg_mask(struct bnx2x_softc *sc, int idx)
+{
+ if (CHIP_IS_E1H(sc))
+ return ecore_blocks_parity_data[idx].reg_mask.e1h;
+ else if (CHIP_IS_E2(sc))
+ return ecore_blocks_parity_data[idx].reg_mask.e2;
+ else /* CHIP_IS_E3 */
+ return ecore_blocks_parity_data[idx].reg_mask.e3;
+}
+
+static inline void ecore_disable_blocks_parity(struct bnx2x_softc *sc)
+{
+ uint32_t i;
+
+ for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
+ uint32_t dis_mask = ecore_parity_reg_mask(sc, i);
+
+ if (dis_mask) {
+ REG_WR(sc, ecore_blocks_parity_data[i].mask_addr,
+ dis_mask);
+ ECORE_MSG("Setting parity mask "
+ "for %s to\t\t0x%x",
+ ecore_blocks_parity_data[i].name, dis_mask);
+ }
+ }
+
+ /* Disable MCP parity attentions */
+ ecore_set_mcp_parity(sc, FALSE);
+}
+
+/**
+ * Clear the parity error status registers.
+ */
+static inline void ecore_clear_blocks_parity(struct bnx2x_softc *sc)
+{
+ uint32_t i;
+ uint32_t reg_val, mcp_aeu_bits =
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
+
+ /* Clear SEM_FAST parities */
+ REG_WR(sc, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(sc, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(sc, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(sc, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+
+ for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
+ uint32_t reg_mask = ecore_parity_reg_mask(sc, i);
+
+ if (reg_mask) {
+ reg_val = REG_RD(sc, ecore_blocks_parity_data[i].
+ sts_clr_addr);
+ if (reg_val & reg_mask)
+ ECORE_MSG("Parity errors in %s: 0x%x",
+ ecore_blocks_parity_data[i].name,
+ reg_val & reg_mask);
+ }
+ }
+
+ /* Check if there were parity attentions in MCP */
+ reg_val = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_MCP);
+ if (reg_val & mcp_aeu_bits)
+ ECORE_MSG("Parity error in MCP: 0x%x",
+ reg_val & mcp_aeu_bits);
+
+ /* Clear parity attentions in MCP:
+ * [7] clears Latched rom_parity
+ * [8] clears Latched ump_rx_parity
+ * [9] clears Latched ump_tx_parity
+ * [10] clears Latched scpad_parity (both ports)
+ */
+ REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
+}
+
+static inline void ecore_enable_blocks_parity(struct bnx2x_softc *sc)
+{
+ uint32_t i;
+
+ for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
+ uint32_t reg_mask = ecore_parity_reg_mask(sc, i);
+
+ if (reg_mask)
+ REG_WR(sc, ecore_blocks_parity_data[i].mask_addr,
+ ecore_blocks_parity_data[i].en_mask & reg_mask);
+ }
+
+ /* Enable MCP parity attentions */
+ ecore_set_mcp_parity(sc, TRUE);
+}
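+
+/*
+ * Illustrative ordering of the three parity helpers above (a sketch of
+ * the usual driver flow, not mandated by this file): attentions are
+ * disabled before the HW init sequence, the latched status is cleared
+ * once init is done, and attentions are then re-enabled:
+ *
+ * ecore_disable_blocks_parity(sc);
+ * ... run the init sequence ...
+ * ecore_clear_blocks_parity(sc);
+ * ecore_enable_blocks_parity(sc);
+ */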
+
+
+#endif /* ECORE_INIT_H */
diff --git a/src/dpdk22/drivers/net/bnx2x/ecore_init_ops.h b/src/dpdk22/drivers/net/bnx2x/ecore_init_ops.h
new file mode 100644
index 00000000..b6f98324
--- /dev/null
+++ b/src/dpdk22/drivers/net/bnx2x/ecore_init_ops.h
@@ -0,0 +1,865 @@
+/*-
+ * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ */
+
+#ifndef ECORE_INIT_OPS_H
+#define ECORE_INIT_OPS_H
+
+static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t *zbuf, int len);
+static void ecore_write_dmae_phys_len(struct bnx2x_softc *sc,
+ ecore_dma_addr_t phys_addr, uint32_t addr,
+ uint32_t len);
+
+static void ecore_init_str_wr(struct bnx2x_softc *sc, uint32_t addr,
+ const uint32_t *data, uint32_t len)
+{
+ uint32_t i;
+
+ for (i = 0; i < len; i++)
+ REG_WR(sc, addr + i*4, data[i]);
+}
+
+static void ecore_write_big_buf(struct bnx2x_softc *sc, uint32_t addr, uint32_t len)
+{
+ if (DMAE_READY(sc))
+ ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);
+ else
+ ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
+}
+
+static void ecore_init_fill(struct bnx2x_softc *sc, uint32_t addr, int fill,
+ uint32_t len)
+{
+ uint32_t buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
+ uint32_t buf_len32 = buf_len/4;
+ uint32_t i;
+
+ ECORE_MEMSET(GUNZIP_BUF(sc), (uint8_t)fill, buf_len);
+
+ for (i = 0; i < len; i += buf_len32) {
+ uint32_t cur_len = min(buf_len32, len - i);
+
+ ecore_write_big_buf(sc, addr + i*4, cur_len);
+ }
+}
+
+static void ecore_write_big_buf_wb(struct bnx2x_softc *sc, uint32_t addr, uint32_t len)
+{
+ if (DMAE_READY(sc))
+ ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);
+ else
+ ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
+}
+
+static void ecore_init_wr_64(struct bnx2x_softc *sc, uint32_t addr,
+ const uint32_t *data, uint32_t len64)
+{
+ uint32_t buf_len32 = FW_BUF_SIZE/4;
+ uint32_t len = len64*2;
+ uint64_t data64 = 0;
+ uint32_t i;
+
+ /* 64 bit value is in a blob: first low DWORD, then high DWORD */
+ data64 = HILO_U64((*(data + 1)), (*data));
+
+ len64 = min((uint32_t)(FW_BUF_SIZE/8), len64);
+ for (i = 0; i < len64; i++) {
+ uint64_t *pdata = ((uint64_t *)(GUNZIP_BUF(sc))) + i;
+
+ *pdata = data64;
+ }
+
+ for (i = 0; i < len; i += buf_len32) {
+ uint32_t cur_len = min(buf_len32, len - i);
+
+ ecore_write_big_buf_wb(sc, addr + i*4, cur_len);
+ }
+}
+
+/*********************************************************
+ There are different blobs for each PRAM section.
+ In addition, each blob write operation is divided into a few operations
+ in order to decrease the amount of phys. contiguous buffer needed.
+ Thus, when we select a blob, the address may have some offset
+ from the beginning of the PRAM section.
+ The same holds for the INT_TABLE sections.
+**********************************************************/
+#define IF_IS_INT_TABLE_ADDR(base, addr) \
+ if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
+
+#define IF_IS_PRAM_ADDR(base, addr) \
+ if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
+
+static const uint8_t *ecore_sel_blob(struct bnx2x_softc *sc, uint32_t addr,
+ const uint8_t *data)
+{
+ IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
+ data = INIT_TSEM_INT_TABLE_DATA(sc);
+ else
+ IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
+ data = INIT_CSEM_INT_TABLE_DATA(sc);
+ else
+ IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
+ data = INIT_USEM_INT_TABLE_DATA(sc);
+ else
+ IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
+ data = INIT_XSEM_INT_TABLE_DATA(sc);
+ else
+ IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
+ data = INIT_TSEM_PRAM_DATA(sc);
+ else
+ IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
+ data = INIT_CSEM_PRAM_DATA(sc);
+ else
+ IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
+ data = INIT_USEM_PRAM_DATA(sc);
+ else
+ IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
+ data = INIT_XSEM_PRAM_DATA(sc);
+
+ return data;
+}
+
+static void ecore_init_wr_wb(struct bnx2x_softc *sc, uint32_t addr,
+ const uint32_t *data, uint32_t len)
+{
+ if (DMAE_READY(sc))
+ VIRT_WR_DMAE_LEN(sc, data, addr, len, 0);
+ else
+ ecore_init_str_wr(sc, addr, data, len);
+}
+
+static void ecore_wr_64(struct bnx2x_softc *sc, uint32_t reg, uint32_t val_lo,
+ uint32_t val_hi)
+{
+ uint32_t wb_write[2];
+
+ wb_write[0] = val_lo;
+ wb_write[1] = val_hi;
+ REG_WR_DMAE_LEN(sc, reg, wb_write, 2);
+}
+
+static void ecore_init_wr_zp(struct bnx2x_softc *sc, uint32_t addr, uint32_t len,
+ uint32_t blob_off)
+{
+ const uint8_t *data = NULL;
+ int rc;
+ uint32_t i;
+
+ data = ecore_sel_blob(sc, addr, data) + blob_off*4;
+
+ rc = ecore_gunzip(sc, data, len);
+ if (rc)
+ return;
+
+ /* gunzip_outlen is in dwords */
+ len = GUNZIP_OUTLEN(sc);
+ for (i = 0; i < len; i++)
+ ((uint32_t *)GUNZIP_BUF(sc))[i] = (uint32_t)
+ ECORE_CPU_TO_LE32(((uint32_t *)GUNZIP_BUF(sc))[i]);
+
+ ecore_write_big_buf_wb(sc, addr, len);
+}
+
+static void ecore_init_block(struct bnx2x_softc *sc, uint32_t block, uint32_t stage)
+{
+ uint16_t op_start =
+ INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage,
+ STAGE_START)];
+ uint16_t op_end =
+ INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage,
+ STAGE_END)];
+ const union init_op *op;
+ uint32_t op_idx, op_type, addr, len;
+ const uint32_t *data, *data_base;
+
+ /* If empty block */
+ if (op_start == op_end)
+ return;
+
+ data_base = INIT_DATA(sc);
+
+ for (op_idx = op_start; op_idx < op_end; op_idx++) {
+
+ op = (const union init_op *)&(INIT_OPS(sc)[op_idx]);
+ /* Get generic data */
+ op_type = op->raw.op;
+ addr = op->raw.offset;
+ /* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and
+ * OP_WR64 (we assume that op_arr_write and op_write have the
+ * same structure).
+ */
+ len = op->arr_wr.data_len;
+ data = data_base + op->arr_wr.data_off;
+
+ switch (op_type) {
+ case OP_RD:
+ REG_RD(sc, addr);
+ break;
+ case OP_WR:
+ REG_WR(sc, addr, op->write.val);
+ break;
+ case OP_SW:
+ ecore_init_str_wr(sc, addr, data, len);
+ break;
+ case OP_WB:
+ ecore_init_wr_wb(sc, addr, data, len);
+ break;
+ case OP_ZR:
+ case OP_WB_ZR:
+ ecore_init_fill(sc, addr, 0, op->zero.len);
+ break;
+ case OP_ZP:
+ ecore_init_wr_zp(sc, addr, len, op->arr_wr.data_off);
+ break;
+ case OP_WR_64:
+ ecore_init_wr_64(sc, addr, data, len);
+ break;
+ case OP_IF_MODE_AND:
+ /* if any of the flags doesn't match, skip the
+ * conditional block.
+ */
+ if ((INIT_MODE_FLAGS(sc) &
+ op->if_mode.mode_bit_map) !=
+ op->if_mode.mode_bit_map)
+ op_idx += op->if_mode.cmd_offset;
+ break;
+ case OP_IF_MODE_OR:
+ /* if all the flags don't match, skip the conditional
+ * block.
+ */
+ if ((INIT_MODE_FLAGS(sc) &
+ op->if_mode.mode_bit_map) == 0)
+ op_idx += op->if_mode.cmd_offset;
+ break;
+ /* the following opcodes are unused at the moment. */
+ case OP_IF_PHASE:
+ case OP_RT:
+ case OP_DELAY:
+ case OP_VERIFY:
+ default:
+ /* Should never get here! */
+
+ break;
+ }
+ }
+}
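+
+/*
+ * Typical call pattern (illustrative, not mandated by this file): the
+ * driver replays the ops array per block and phase, e.g.
+ * ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON) during common init,
+ * followed by the port and function phases for each block.
+ */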
+
+
+/****************************************************************************
+* PXP Arbiter
+****************************************************************************/
+/*
+ * This code configures the PCI read/write arbiter
+ * which implements a weighted round robin
+ * between the virtual queues in the chip.
+ *
+ * The values were derived for each PCI max payload and max request size.
+ * Since max payload and max request size are only known at run time,
+ * this is done as a separate init stage.
+ */
+
+#define NUM_WR_Q 13
+#define NUM_RD_Q 29
+#define MAX_RD_ORD 3
+#define MAX_WR_ORD 2
+
+/* configuration for one arbiter queue */
+struct arb_line {
+ int l;
+ int add;
+ int ubound;
+};
+
+/* derived configuration for each read queue for each max request size */
+static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
+/* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
+ { {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} },
+ { {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} },
+ { {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
+ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
+};
+
+/* derived configuration for each write queue for each max request size */
+static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
+/* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} },
+ { {4, 2, 3}, {4, 2, 3}, {4, 2, 3} },
+ { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
+ { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
+ { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
+ { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
+ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
+ { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
+ { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
+/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} },
+ { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
+ { {8, 9, 6}, {16, 9, 11}, {16, 9, 11} },
+ { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
+};
+
+/* register addresses for read queues */
+static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
+/* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
+ PXP2_REG_RQ_BW_RD_UBOUND0},
+ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
+ PXP2_REG_PSWRQ_BW_UB1},
+ {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
+ PXP2_REG_PSWRQ_BW_UB2},
+ {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
+ PXP2_REG_PSWRQ_BW_UB3},
+ {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
+ PXP2_REG_RQ_BW_RD_UBOUND4},
+ {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
+ PXP2_REG_RQ_BW_RD_UBOUND5},
+ {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
+ PXP2_REG_PSWRQ_BW_UB6},
+ {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
+ PXP2_REG_PSWRQ_BW_UB7},
+ {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
+ PXP2_REG_PSWRQ_BW_UB8},
+/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
+ PXP2_REG_PSWRQ_BW_UB9},
+ {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
+ PXP2_REG_PSWRQ_BW_UB10},
+ {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
+ PXP2_REG_PSWRQ_BW_UB11},
+ {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
+ PXP2_REG_RQ_BW_RD_UBOUND12},
+ {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
+ PXP2_REG_RQ_BW_RD_UBOUND13},
+ {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
+ PXP2_REG_RQ_BW_RD_UBOUND14},
+ {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
+ PXP2_REG_RQ_BW_RD_UBOUND15},
+ {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
+ PXP2_REG_RQ_BW_RD_UBOUND16},
+ {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
+ PXP2_REG_RQ_BW_RD_UBOUND17},
+ {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
+ PXP2_REG_RQ_BW_RD_UBOUND18},
+/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
+ PXP2_REG_RQ_BW_RD_UBOUND19},
+ {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
+ PXP2_REG_RQ_BW_RD_UBOUND20},
+ {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
+ PXP2_REG_RQ_BW_RD_UBOUND22},
+ {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
+ PXP2_REG_RQ_BW_RD_UBOUND23},
+ {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
+ PXP2_REG_RQ_BW_RD_UBOUND24},
+ {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
+ PXP2_REG_RQ_BW_RD_UBOUND25},
+ {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
+ PXP2_REG_RQ_BW_RD_UBOUND26},
+ {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
+ PXP2_REG_RQ_BW_RD_UBOUND27},
+ {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
+ PXP2_REG_PSWRQ_BW_UB28}
+};
+
+/* register addresses for write queues */
+static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
+/* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
+ PXP2_REG_PSWRQ_BW_UB1},
+ {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
+ PXP2_REG_PSWRQ_BW_UB2},
+ {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
+ PXP2_REG_PSWRQ_BW_UB3},
+ {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
+ PXP2_REG_PSWRQ_BW_UB6},
+ {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
+ PXP2_REG_PSWRQ_BW_UB7},
+ {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
+ PXP2_REG_PSWRQ_BW_UB8},
+ {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
+ PXP2_REG_PSWRQ_BW_UB9},
+ {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
+ PXP2_REG_PSWRQ_BW_UB10},
+ {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
+ PXP2_REG_PSWRQ_BW_UB11},
+/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
+ PXP2_REG_PSWRQ_BW_UB28},
+ {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
+ PXP2_REG_RQ_BW_WR_UBOUND29},
+ {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
+ PXP2_REG_RQ_BW_WR_UBOUND30}
+};
+
+static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order,
+ int w_order)
+{
+ uint32_t val, i;
+
+ if (r_order > MAX_RD_ORD) {
+ ECORE_MSG("read order of %d adjusted to %d",
+ r_order, MAX_RD_ORD);
+ r_order = MAX_RD_ORD;
+ }
+ if (w_order > MAX_WR_ORD) {
+ ECORE_MSG("write order of %d adjusted to %d",
+ w_order, MAX_WR_ORD);
+ w_order = MAX_WR_ORD;
+ }
+ if (CHIP_REV_IS_FPGA(sc)) {
+ ECORE_MSG("write order adjusted to 1 for FPGA");
+ w_order = 0;
+ }
+ ECORE_MSG("read order %d write order %d", r_order, w_order);
+
+ for (i = 0; i < NUM_RD_Q-1; i++) {
+ REG_WR(sc, read_arb_addr[i].l, read_arb_data[i][r_order].l);
+ REG_WR(sc, read_arb_addr[i].add,
+ read_arb_data[i][r_order].add);
+ REG_WR(sc, read_arb_addr[i].ubound,
+ read_arb_data[i][r_order].ubound);
+ }
+
+ for (i = 0; i < NUM_WR_Q-1; i++) {
+ if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
+ (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
+
+ REG_WR(sc, write_arb_addr[i].l,
+ write_arb_data[i][w_order].l);
+
+ REG_WR(sc, write_arb_addr[i].add,
+ write_arb_data[i][w_order].add);
+
+ REG_WR(sc, write_arb_addr[i].ubound,
+ write_arb_data[i][w_order].ubound);
+ } else {
+
+ val = REG_RD(sc, write_arb_addr[i].l);
+ REG_WR(sc, write_arb_addr[i].l,
+ val | (write_arb_data[i][w_order].l << 10));
+
+ val = REG_RD(sc, write_arb_addr[i].add);
+ REG_WR(sc, write_arb_addr[i].add,
+ val | (write_arb_data[i][w_order].add << 10));
+
+ val = REG_RD(sc, write_arb_addr[i].ubound);
+ REG_WR(sc, write_arb_addr[i].ubound,
+ val | (write_arb_data[i][w_order].ubound << 7));
+ }
+ }
+
+ val = write_arb_data[NUM_WR_Q-1][w_order].add;
+ val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
+ val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
+ REG_WR(sc, PXP2_REG_PSWRQ_BW_RD, val);
+
+ val = read_arb_data[NUM_RD_Q-1][r_order].add;
+ val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
+ val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
+ REG_WR(sc, PXP2_REG_PSWRQ_BW_WR, val);
+
+ REG_WR(sc, PXP2_REG_RQ_WR_MBS0, w_order);
+ REG_WR(sc, PXP2_REG_RQ_WR_MBS1, w_order);
+ REG_WR(sc, PXP2_REG_RQ_RD_MBS0, r_order);
+ REG_WR(sc, PXP2_REG_RQ_RD_MBS1, r_order);
+
+ if (CHIP_IS_E1H(sc) && (r_order == MAX_RD_ORD))
+ REG_WR(sc, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
+
+ if (CHIP_IS_E3(sc))
+ REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
+ else if (CHIP_IS_E2(sc))
+ REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
+ else
+ REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
+
+ /* MPS      w_order    optimal TH    presently TH
+ * 128       0          0             2
+ * 256       1          1             3
+ * >=512     2          2             3
+ */
+ /* DMAE is special */
+ if (!CHIP_IS_E1H(sc)) {
+ /* E2 can use optimal TH */
+ val = w_order;
+ REG_WR(sc, PXP2_REG_WR_DMAE_MPS, val);
+ } else {
+ val = ((w_order == 0) ? 2 : 3);
+ REG_WR(sc, PXP2_REG_WR_DMAE_MPS, 2);
+ }
+
+ REG_WR(sc, PXP2_REG_WR_HC_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_USDM_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_CSDM_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_TSDM_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_XSDM_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_QM_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_TM_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_SRC_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_DBG_MPS, val);
+ REG_WR(sc, PXP2_REG_WR_CDU_MPS, val);
+
+ /* Validate number of tags supported by device */
+#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980
+ val = REG_RD(sc, PCIE_REG_PCIER_TL_HDR_FC_ST);
+ val &= 0xFF;
+ if (val <= 0x20)
+ REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
+}
+
+/****************************************************************************
+* ILT management
+****************************************************************************/
+/*
+ * This code hides the low level HW interaction for ILT management and
+ * configuration. The API consists of a shadow ILT table which is set by the
+ * driver and a set of routines to use it to configure the HW.
+ *
+ */
+
+/* ILT HW init operations */
+
+/* ILT memory management operations */
+#define ILT_MEMOP_ALLOC 0
+#define ILT_MEMOP_FREE 1
+
+/* the phys address is shifted right 12 bits and a 1=valid bit is
+ * added as the 53rd bit;
+ * then, since this is a wide register (TM),
+ * we split it into two 32 bit writes
+ */
+#define ILT_ADDR1(x) ((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF))
+#define ILT_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)x >> 44)))
+#define ILT_RANGE(f, l) (((l) << 10) | f)
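+
+/*
+ * Worked example of the address split above: for
+ * page_mapping = 0x0123456789000,
+ * ILT_ADDR1 = (0x0123456789000 >> 12) & 0xFFFFFFFF = 0x23456789 and
+ * ILT_ADDR2 = (1 << 20) | (0x0123456789000 >> 44) = 0x100001,
+ * i.e. the low and high dwords of the shifted address with the valid
+ * bit set in the high dword.
+ */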
+
+static int ecore_ilt_line_mem_op(struct bnx2x_softc *sc,
+ struct ilt_line *line, uint32_t size, uint8_t memop, int cli_num, int i)
+{
+#define ECORE_ILT_NAMESIZE 10
+ char str[ECORE_ILT_NAMESIZE];
+
+ if (memop == ILT_MEMOP_FREE) {
+ ECORE_ILT_FREE(line->page, line->page_mapping, line->size);
+ return 0;
+ }
+ snprintf(str, ECORE_ILT_NAMESIZE, "ILT_%d_%d", cli_num, i);
+ ECORE_ILT_ZALLOC(line->page, &line->page_mapping, size, str);
+ if (!line->page)
+ return -1;
+ line->size = size;
+ return 0;
+}
+
+
+static int ecore_ilt_client_mem_op(struct bnx2x_softc *sc, int cli_num,
+ uint8_t memop)
+{
+ int i, rc = 0;
+ struct ecore_ilt *ilt = SC_ILT(sc);
+ struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+ if (!ilt || !ilt->lines)
+ return -1;
+
+ if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
+ return 0;
+
+ for (i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
+ rc = ecore_ilt_line_mem_op(sc, &ilt->lines[i],
+ ilt_cli->page_size, memop, cli_num, i);
+ }
+ return rc;
+}
+
+static inline int ecore_ilt_mem_op_cnic(struct bnx2x_softc *sc, uint8_t memop)
+{
+ int rc = 0;
+
+ if (CONFIGURE_NIC_MODE(sc))
+ rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop);
+ if (!rc)
+ rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_TM, memop);
+
+ return rc;
+}
+
+static int ecore_ilt_mem_op(struct bnx2x_softc *sc, uint8_t memop)
+{
+ int rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_CDU, memop);
+ if (!rc)
+ rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_QM, memop);
+ if (!rc && CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc))
+ rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop);
+
+ return rc;
+}
+
+static void ecore_ilt_line_wr(struct bnx2x_softc *sc, int abs_idx,
+ ecore_dma_addr_t page_mapping)
+{
+ uint32_t reg;
+
+ reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
+
+ ecore_wr_64(sc, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
+}
+
+static void ecore_ilt_line_init_op(struct bnx2x_softc *sc,
+ struct ecore_ilt *ilt, int idx, uint8_t initop)
+{
+ ecore_dma_addr_t null_mapping;
+ int abs_idx = ilt->start_line + idx;
+
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ ecore_ilt_line_wr(sc, abs_idx, ilt->lines[idx].page_mapping);
+ break;
+ case INITOP_CLEAR:
+ null_mapping = 0;
+ ecore_ilt_line_wr(sc, abs_idx, null_mapping);
+ break;
+ }
+}
+
+static void ecore_ilt_boundry_init_op(struct bnx2x_softc *sc,
+ struct ilt_client_info *ilt_cli,
+ uint32_t ilt_start)
+{
+ uint32_t start_reg = 0;
+ uint32_t end_reg = 0;
+
+ /* The boundary is either SET or INIT,
+ CLEAR => SET and for now SET ~~ INIT */
+
+ /* find the appropriate regs */
+ switch (ilt_cli->client_num) {
+ case ILT_CLIENT_CDU:
+ start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
+ break;
+ case ILT_CLIENT_QM:
+ start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_QM_LAST_ILT;
+ break;
+ case ILT_CLIENT_SRC:
+ start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
+ break;
+ case ILT_CLIENT_TM:
+ start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_TM_LAST_ILT;
+ break;
+ }
+ REG_WR(sc, start_reg, (ilt_start + ilt_cli->start));
+ REG_WR(sc, end_reg, (ilt_start + ilt_cli->end));
+}
+
+static void ecore_ilt_client_init_op_ilt(struct bnx2x_softc *sc,
+ struct ecore_ilt *ilt,
+ struct ilt_client_info *ilt_cli,
+ uint8_t initop)
+{
+ int i;
+
+ if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
+ return;
+
+ for (i = ilt_cli->start; i <= ilt_cli->end; i++)
+ ecore_ilt_line_init_op(sc, ilt, i, initop);
+
+ /* init/clear the ILT boundaries */
+ ecore_ilt_boundry_init_op(sc, ilt_cli, ilt->start_line);
+}
+
+static void ecore_ilt_client_init_op(struct bnx2x_softc *sc,
+ struct ilt_client_info *ilt_cli, uint8_t initop)
+{
+ struct ecore_ilt *ilt = SC_ILT(sc);
+
+ ecore_ilt_client_init_op_ilt(sc, ilt, ilt_cli, initop);
+}
+
+static void ecore_ilt_client_id_init_op(struct bnx2x_softc *sc,
+ int cli_num, uint8_t initop)
+{
+ struct ecore_ilt *ilt = SC_ILT(sc);
+ struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+ ecore_ilt_client_init_op(sc, ilt_cli, initop);
+}
+
+static inline void ecore_ilt_init_op_cnic(struct bnx2x_softc *sc, uint8_t initop)
+{
+ if (CONFIGURE_NIC_MODE(sc))
+ ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop);
+ ecore_ilt_client_id_init_op(sc, ILT_CLIENT_TM, initop);
+}
+
+static void ecore_ilt_init_op(struct bnx2x_softc *sc, uint8_t initop)
+{
+ ecore_ilt_client_id_init_op(sc, ILT_CLIENT_CDU, initop);
+ ecore_ilt_client_id_init_op(sc, ILT_CLIENT_QM, initop);
+ if (CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc))
+ ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop);
+}
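+
+/*
+ * Illustrative sequence for the ILT helpers above (a sketch of the
+ * usual driver flow, not mandated by this file): allocate the shadow
+ * table backing pages, then program the HW from it:
+ *
+ * if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC))
+ * return -1; (allocation failed)
+ * ecore_ilt_init_op(sc, INITOP_SET);
+ *
+ * and on teardown, INITOP_CLEAR followed by ILT_MEMOP_FREE.
+ */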
+
+static void ecore_ilt_init_client_psz(struct bnx2x_softc *sc, int cli_num,
+ uint32_t psz_reg, uint8_t initop)
+{
+ struct ecore_ilt *ilt = SC_ILT(sc);
+ struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+ if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
+ return;
+
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ REG_WR(sc, psz_reg, ILOG2(ilt_cli->page_size >> 12));
+ break;
+ case INITOP_CLEAR:
+ break;
+ }
+}
+
+/*
+ * called during the init common stage; ILT clients should be
+ * initialized prior to calling this function
+ */
+static void ecore_ilt_init_page_size(struct bnx2x_softc *sc, uint8_t initop)
+{
+ ecore_ilt_init_client_psz(sc, ILT_CLIENT_CDU,
+ PXP2_REG_RQ_CDU_P_SIZE, initop);
+ ecore_ilt_init_client_psz(sc, ILT_CLIENT_QM,
+ PXP2_REG_RQ_QM_P_SIZE, initop);
+ ecore_ilt_init_client_psz(sc, ILT_CLIENT_SRC,
+ PXP2_REG_RQ_SRC_P_SIZE, initop);
+ ecore_ilt_init_client_psz(sc, ILT_CLIENT_TM,
+ PXP2_REG_RQ_TM_P_SIZE, initop);
+}
+
+/****************************************************************************
+* QM initializations
+****************************************************************************/
+#define QM_QUEUES_PER_FUNC 16
+#define QM_INIT_MIN_CID_COUNT 31
+#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
+
+/* called during init port stage */
+static void ecore_qm_init_cid_count(struct bnx2x_softc *sc, int qm_cid_count,
+ uint8_t initop)
+{
+ int port = SC_PORT(sc);
+
+ if (QM_INIT(qm_cid_count)) {
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ REG_WR(sc, QM_REG_CONNNUM_0 + port*4,
+ qm_cid_count/16 - 1);
+ break;
+ case INITOP_CLEAR:
+ break;
+ }
+ }
+}
+
+static void ecore_qm_set_ptr_table(struct bnx2x_softc *sc, int qm_cid_count,
+ uint32_t base_reg, uint32_t reg)
+{
+ int i;
+ uint32_t wb_data[2] = {0, 0};
+ for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
+ REG_WR(sc, base_reg + i*4,
+ qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
+ ecore_init_wr_wb(sc, reg + i*8,
+ wb_data, 2);
+ }
+}
+
+/* called during init common stage */
+static void ecore_qm_init_ptr_table(struct bnx2x_softc *sc, int qm_cid_count,
+ uint8_t initop)
+{
+ if (!QM_INIT(qm_cid_count))
+ return;
+
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ ecore_qm_set_ptr_table(sc, qm_cid_count,
+ QM_REG_BASEADDR, QM_REG_PTRTBL);
+ if (CHIP_IS_E1H(sc))
+ ecore_qm_set_ptr_table(sc, qm_cid_count,
+ QM_REG_BASEADDR_EXT_A,
+ QM_REG_PTRTBL_EXT_A);
+ break;
+ case INITOP_CLEAR:
+ break;
+ }
+}
+
+/****************************************************************************
+* SRC initializations
+****************************************************************************/
+#ifdef ECORE_L5
+/* called during init func stage */
+static void ecore_src_init_t2(struct bnx2x_softc *sc, struct src_ent *t2,
+ ecore_dma_addr_t t2_mapping, int src_cid_count)
+{
+ int i;
+ int port = SC_PORT(sc);
+
+ /* Initialize T2 */
+ for (i = 0; i < src_cid_count-1; i++)
+ t2[i].next = (uint64_t)(t2_mapping +
+ (i+1)*sizeof(struct src_ent));
+
+ /* tell the searcher where the T2 table is */
+ REG_WR(sc, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
+
+ ecore_wr_64(sc, SRC_REG_FIRSTFREE0 + port*16,
+ U64_LO(t2_mapping), U64_HI(t2_mapping));
+
+ ecore_wr_64(sc, SRC_REG_LASTFREE0 + port*16,
+ U64_LO((uint64_t)t2_mapping +
+ (src_cid_count-1) * sizeof(struct src_ent)),
+ U64_HI((uint64_t)t2_mapping +
+ (src_cid_count-1) * sizeof(struct src_ent)));
+}
+#endif
+#endif /* ECORE_INIT_OPS_H */
diff --git a/src/dpdk22/drivers/net/bnx2x/ecore_mfw_req.h b/src/dpdk22/drivers/net/bnx2x/ecore_mfw_req.h
new file mode 100644
index 00000000..57529097
--- /dev/null
+++ b/src/dpdk22/drivers/net/bnx2x/ecore_mfw_req.h
@@ -0,0 +1,187 @@
+/*-
+ * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2014-2015 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ */
+
+#ifndef ECORE_MFW_REQ_H
+#define ECORE_MFW_REQ_H
+
+
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_MAX 2
+#define NVM_PATH_MAX 2
+
+/* FCoE capabilities required from the driver */
+struct fcoe_capabilities {
+ uint32_t capability1;
+ /* Maximum number of I/Os per connection */
+ #define FCOE_IOS_PER_CONNECTION_MASK 0x0000ffff
+ #define FCOE_IOS_PER_CONNECTION_SHIFT 0
+ /* Maximum number of Logins per port */
+ #define FCOE_LOGINS_PER_PORT_MASK 0xffff0000
+ #define FCOE_LOGINS_PER_PORT_SHIFT 16
+
+ uint32_t capability2;
+ /* Maximum number of exchanges */
+ #define FCOE_NUMBER_OF_EXCHANGES_MASK 0x0000ffff
+ #define FCOE_NUMBER_OF_EXCHANGES_SHIFT 0
+ /* Maximum NPIV WWN per port */
+ #define FCOE_NPIV_WWN_PER_PORT_MASK 0xffff0000
+ #define FCOE_NPIV_WWN_PER_PORT_SHIFT 16
+
+ uint32_t capability3;
+ /* Maximum number of targets supported */
+ #define FCOE_TARGETS_SUPPORTED_MASK 0x0000ffff
+ #define FCOE_TARGETS_SUPPORTED_SHIFT 0
+ /* Maximum number of outstanding commands across all connections */
+ #define FCOE_OUTSTANDING_COMMANDS_MASK 0xffff0000
+ #define FCOE_OUTSTANDING_COMMANDS_SHIFT 16
+
+ uint32_t capability4;
+ #define FCOE_CAPABILITY4_STATEFUL 0x00000001
+ #define FCOE_CAPABILITY4_STATELESS 0x00000002
+ #define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID 0x00000004
+};
+
+struct glob_ncsi_oem_data
+{
+ uint32_t driver_version;
+ uint32_t unused[3];
+ struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX];
+};
+
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 2
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+ ETH_STATS_OPCODE,
+ FCOE_STATS_OPCODE,
+ ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN 12
+/* Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+ /* Function's Driver Version. padded to 12 */
+ char version[ETH_STAT_INFO_VERSION_LEN];
+ /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
+ uint8_t mac_local[8];
+ uint8_t mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ uint8_t mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ uint32_t mtu_size; /* MTU Size. Note : Negotiated MTU */
+ uint32_t feature_flags; /* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
+#define FEATURE_ETH_LSO_MASK 0x02
+#define FEATURE_ETH_BOOTMODE_MASK 0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT 2
+#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
+#define FEATURE_ETH_TOE_MASK 0x20
+ uint32_t lso_max_size; /* LSO MaxOffloadSize. */
+ uint32_t lso_min_seg_cnt; /* LSO MinSegmentCount. */
+ /* Num Offloaded Connections TCP_IPv4. */
+ uint32_t ipv4_ofld_cnt;
+ /* Num Offloaded Connections TCP_IPv6. */
+ uint32_t ipv6_ofld_cnt;
+ uint32_t promiscuous_mode; /* Promiscuous Mode. non-zero true */
+ uint32_t txq_size; /* TX Descriptors Queue Size */
+ uint32_t rxq_size; /* RX Descriptors Queue Size */
+ /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+ uint32_t txq_avg_depth;
+ /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+ uint32_t rxq_avg_depth;
+ /* IOV_Offload. 0=none; 1=MultiQueue; 2=VEB; 3=VEPA */
+ uint32_t iov_offload;
+ /* Number of NetQueue/VMQ Config'd. */
+ uint32_t netq_cnt;
+ uint32_t vf_cnt; /* Num VF assigned to this PF. */
+};
+
+/* Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+ uint8_t version[12]; /* Function's Driver Version. */
+ uint8_t mac_local[8]; /* Locally Admin Addr. */
+ uint8_t mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ uint8_t mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ uint32_t qos_priority;
+ uint32_t txq_size; /* FCoE TX Descriptors Queue Size. */
+ uint32_t rxq_size; /* FCoE RX Descriptors Queue Size. */
+ /* FCoE TX Descriptor Queue Avg Depth. */
+ uint32_t txq_avg_depth;
+ /* FCoE RX Descriptors Queue Avg Depth. */
+ uint32_t rxq_avg_depth;
+ uint32_t rx_frames_lo; /* FCoE RX Frames received. */
+ uint32_t rx_frames_hi; /* FCoE RX Frames received. */
+ uint32_t rx_bytes_lo; /* FCoE RX Bytes received. */
+ uint32_t rx_bytes_hi; /* FCoE RX Bytes received. */
+ uint32_t tx_frames_lo; /* FCoE TX Frames sent. */
+ uint32_t tx_frames_hi; /* FCoE TX Frames sent. */
+ uint32_t tx_bytes_lo; /* FCoE TX Bytes sent. */
+ uint32_t tx_bytes_hi; /* FCoE TX Bytes sent. */
+ uint32_t rx_fcs_errors; /* number of receive packets with FCS errors */
+ uint32_t rx_fc_crc_errors; /* number of FC frames with CRC errors*/
+ uint32_t fip_login_failures; /* number of FCoE/FIP Login failures */
+};
+
+/* Per PCI Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+ uint8_t version[12]; /* Function's Driver Version. */
+ uint8_t mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
+ uint8_t mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ uint32_t qos_priority;
+
+ uint8_t initiator_name[64]; /* iSCSI Boot Initiator Node name. */
+
+ uint8_t ww_port_name[64]; /* iSCSI World wide port name */
+
+ uint8_t boot_target_name[64];/* iSCSI Boot Target Name. */
+
+ uint8_t boot_target_ip[16]; /* iSCSI Boot Target IP. */
+ uint32_t boot_target_portal; /* iSCSI Boot Target Portal. */
+ uint8_t boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
+ uint32_t max_frame_size; /* Max Frame Size. bytes */
+ uint32_t txq_size; /* PDU TX Descriptors Queue Size. */
+ uint32_t rxq_size; /* PDU RX Descriptors Queue Size. */
+
+ uint32_t txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
+ uint32_t rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
+ uint32_t rx_pdus_lo; /* iSCSI PDUs received. */
+ uint32_t rx_pdus_hi; /* iSCSI PDUs received. */
+
+ uint32_t rx_bytes_lo; /* iSCSI RX Bytes received. */
+ uint32_t rx_bytes_hi; /* iSCSI RX Bytes received. */
+ uint32_t tx_pdus_lo; /* iSCSI PDUs sent. */
+ uint32_t tx_pdus_hi; /* iSCSI PDUs sent. */
+
+ uint32_t tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
+ uint32_t tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
+ uint32_t pcp_prior_map_tbl; /* C-PCP to S-PCP Priority Map Table.
+ 9 nibbles, the position of each nibble
+ represents the C-PCP value, the value
+ of the nibble = S-PCP value. */
+};
+
+union drv_info_to_mcp {
+ struct eth_stats_info ether_stat;
+ struct fcoe_stats_info fcoe_stat;
+ struct iscsi_stats_info iscsi_stat;
+};
+
+
+#endif /* ECORE_MFW_REQ_H */
diff --git a/src/dpdk22/drivers/net/bnx2x/ecore_reg.h b/src/dpdk22/drivers/net/bnx2x/ecore_reg.h
new file mode 100644
index 00000000..d8203b45
--- /dev/null
+++ b/src/dpdk22/drivers/net/bnx2x/ecore_reg.h
@@ -0,0 +1,3644 @@
+/*-
+ * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2014-2015 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ */
+
+#ifndef ECORE_REG_H
+#define ECORE_REG_H
+
+
+#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR \
+ (0x1<<0)
+#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS \
+ (0x1<<2)
+#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU \
+ (0x1<<5)
+#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT \
+ (0x1<<3)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR \
+ (0x1<<4)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND \
+ (0x1<<1)
+#define ATC_REG_ATC_INIT_DONE \
+ 0x1100bcUL
+#define ATC_REG_ATC_INT_STS_CLR \
+ 0x1101c0UL
+#define ATC_REG_ATC_PRTY_MASK \
+ 0x1101d8UL
+#define ATC_REG_ATC_PRTY_STS_CLR \
+ 0x1101d0UL
+#define BRB1_REG_BRB1_INT_MASK \
+ 0x60128UL
+#define BRB1_REG_BRB1_PRTY_MASK \
+ 0x60138UL
+#define BRB1_REG_BRB1_PRTY_STS_CLR \
+ 0x60130UL
+#define BRB1_REG_MAC_GUARANTIED_0 \
+ 0x601e8UL
+#define BRB1_REG_MAC_GUARANTIED_1 \
+ 0x60240UL
+#define BRB1_REG_NUM_OF_FULL_BLOCKS \
+ 0x60090UL
+#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 \
+ 0x60078UL
+#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 \
+ 0x60068UL
+#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 \
+ 0x60094UL
+#define CCM_REG_CCM_INT_MASK \
+ 0xd01e4UL
+#define CCM_REG_CCM_PRTY_MASK \
+ 0xd01f4UL
+#define CCM_REG_CCM_PRTY_STS_CLR \
+ 0xd01ecUL
+#define CDU_REG_CDU_GLOBAL_PARAMS \
+ 0x101020UL
+#define CDU_REG_CDU_INT_MASK \
+ 0x10103cUL
+#define CDU_REG_CDU_PRTY_MASK \
+ 0x10104cUL
+#define CDU_REG_CDU_PRTY_STS_CLR \
+ 0x101044UL
+#define CFC_REG_AC_INIT_DONE \
+ 0x104078UL
+#define CFC_REG_CAM_INIT_DONE \
+ 0x10407cUL
+#define CFC_REG_CFC_INT_MASK \
+ 0x104108UL
+#define CFC_REG_CFC_INT_STS_CLR \
+ 0x104100UL
+#define CFC_REG_CFC_PRTY_MASK \
+ 0x104118UL
+#define CFC_REG_CFC_PRTY_STS_CLR \
+ 0x104110UL
+#define CFC_REG_DEBUG0 \
+ 0x104050UL
+#define CFC_REG_INIT_REG \
+ 0x10404cUL
+#define CFC_REG_LL_INIT_DONE \
+ 0x104074UL
+#define CFC_REG_NUM_LCIDS_INSIDE_PF \
+ 0x104120UL
+#define CFC_REG_STRONG_ENABLE_PF \
+ 0x104128UL
+#define CFC_REG_WEAK_ENABLE_PF \
+ 0x104124UL
+#define CSDM_REG_CSDM_INT_MASK_0 \
+ 0xc229cUL
+#define CSDM_REG_CSDM_INT_MASK_1 \
+ 0xc22acUL
+#define CSDM_REG_CSDM_PRTY_MASK \
+ 0xc22bcUL
+#define CSDM_REG_CSDM_PRTY_STS_CLR \
+ 0xc22b4UL
+#define CSEM_REG_CSEM_INT_MASK_0 \
+ 0x200110UL
+#define CSEM_REG_CSEM_INT_MASK_1 \
+ 0x200120UL
+#define CSEM_REG_CSEM_PRTY_MASK_0 \
+ 0x200130UL
+#define CSEM_REG_CSEM_PRTY_MASK_1 \
+ 0x200140UL
+#define CSEM_REG_CSEM_PRTY_STS_CLR_0 \
+ 0x200128UL
+#define CSEM_REG_CSEM_PRTY_STS_CLR_1 \
+ 0x200138UL
+#define CSEM_REG_FAST_MEMORY \
+ 0x220000UL
+#define CSEM_REG_INT_TABLE \
+ 0x200400UL
+#define CSEM_REG_PASSIVE_BUFFER \
+ 0x202000UL
+#define CSEM_REG_PRAM \
+ 0x240000UL
+#define CSEM_REG_VFPF_ERR_NUM \
+ 0x200380UL
+#define DBG_REG_DBG_PRTY_MASK \
+ 0xc0a8UL
+#define DBG_REG_DBG_PRTY_STS_CLR \
+ 0xc0a0UL
+#define DMAE_REG_BACKWARD_COMP_EN \
+ 0x10207cUL
+#define DMAE_REG_CMD_MEM \
+ 0x102400UL
+#define DMAE_REG_DMAE_INT_MASK \
+ 0x102054UL
+#define DMAE_REG_DMAE_PRTY_MASK \
+ 0x102064UL
+#define DMAE_REG_DMAE_PRTY_STS_CLR \
+ 0x10205cUL
+#define DMAE_REG_GO_C0 \
+ 0x102080UL
+#define DMAE_REG_GO_C1 \
+ 0x102084UL
+#define DMAE_REG_GO_C10 \
+ 0x102088UL
+#define DMAE_REG_GO_C11 \
+ 0x10208cUL
+#define DMAE_REG_GO_C12 \
+ 0x102090UL
+#define DMAE_REG_GO_C13 \
+ 0x102094UL
+#define DMAE_REG_GO_C14 \
+ 0x102098UL
+#define DMAE_REG_GO_C15 \
+ 0x10209cUL
+#define DMAE_REG_GO_C2 \
+ 0x1020a0UL
+#define DMAE_REG_GO_C3 \
+ 0x1020a4UL
+#define DMAE_REG_GO_C4 \
+ 0x1020a8UL
+#define DMAE_REG_GO_C5 \
+ 0x1020acUL
+#define DMAE_REG_GO_C6 \
+ 0x1020b0UL
+#define DMAE_REG_GO_C7 \
+ 0x1020b4UL
+#define DMAE_REG_GO_C8 \
+ 0x1020b8UL
+#define DMAE_REG_GO_C9 \
+ 0x1020bcUL
+#define DORQ_REG_DORQ_INT_MASK \
+ 0x170180UL
+#define DORQ_REG_DORQ_INT_STS_CLR \
+ 0x170178UL
+#define DORQ_REG_DORQ_PRTY_MASK \
+ 0x170190UL
+#define DORQ_REG_DORQ_PRTY_STS_CLR \
+ 0x170188UL
+#define DORQ_REG_DPM_CID_OFST \
+ 0x170030UL
+#define DORQ_REG_MAX_RVFID_SIZE \
+ 0x1701ecUL
+#define DORQ_REG_NORM_CID_OFST \
+ 0x17002cUL
+#define DORQ_REG_PF_USAGE_CNT \
+ 0x1701d0UL
+#define DORQ_REG_VF_NORM_CID_BASE \
+ 0x1701a0UL
+#define DORQ_REG_VF_NORM_CID_OFST \
+ 0x1701f4UL
+#define DORQ_REG_VF_NORM_CID_WND_SIZE \
+ 0x1701a4UL
+#define DORQ_REG_VF_NORM_MAX_CID_COUNT \
+ 0x1701e4UL
+#define DORQ_REG_VF_NORM_VF_BASE \
+ 0x1701a8UL
+#define DORQ_REG_VF_TYPE_MASK_0 \
+ 0x170218UL
+#define DORQ_REG_VF_TYPE_MAX_MCID_0 \
+ 0x1702d8UL
+#define DORQ_REG_VF_TYPE_MIN_MCID_0 \
+ 0x170298UL
+#define DORQ_REG_VF_TYPE_VALUE_0 \
+ 0x170258UL
+#define DORQ_REG_VF_USAGE_CNT \
+ 0x170320UL
+#define DORQ_REG_VF_USAGE_CT_LIMIT \
+ 0x170340UL
+#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 \
+ (0x1<<4)
+#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 \
+ (0x1<<0)
+#define HC_CONFIG_0_REG_INT_LINE_EN_0 \
+ (0x1<<3)
+#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 \
+ (0x1<<7)
+#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 \
+ (0x1<<2)
+#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 \
+ (0x1<<1)
+#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 \
+ (0x1<<0)
+#define HC_REG_ATTN_MSG0_ADDR_L \
+ 0x108018UL
+#define HC_REG_ATTN_MSG1_ADDR_L \
+ 0x108020UL
+#define HC_REG_COMMAND_REG \
+ 0x108180UL
+#define HC_REG_CONFIG_0 \
+ 0x108000UL
+#define HC_REG_CONFIG_1 \
+ 0x108004UL
+#define HC_REG_HC_PRTY_MASK \
+ 0x1080a0UL
+#define HC_REG_HC_PRTY_STS_CLR \
+ 0x108098UL
+#define HC_REG_INT_MASK \
+ 0x108108UL
+#define HC_REG_LEADING_EDGE_0 \
+ 0x108040UL
+#define HC_REG_MAIN_MEMORY \
+ 0x108800UL
+#define HC_REG_MAIN_MEMORY_SIZE \
+ 152
+#define HC_REG_TRAILING_EDGE_0 \
+ 0x108044UL
+#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN \
+ (0x1<<1)
+#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE \
+ (0x1<<0)
+#define IGU_REG_ATTENTION_ACK_BITS \
+ 0x130108UL
+#define IGU_REG_ATTN_MSG_ADDR_H \
+ 0x13011cUL
+#define IGU_REG_ATTN_MSG_ADDR_L \
+ 0x130120UL
+#define IGU_REG_BLOCK_CONFIGURATION \
+ 0x130000UL
+#define IGU_REG_COMMAND_REG_32LSB_DATA \
+ 0x130124UL
+#define IGU_REG_COMMAND_REG_CTRL \
+ 0x13012cUL
+#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP \
+ 0x130200UL
+#define IGU_REG_IGU_PRTY_MASK \
+ 0x1300a8UL
+#define IGU_REG_IGU_PRTY_STS_CLR \
+ 0x1300a0UL
+#define IGU_REG_LEADING_EDGE_LATCH \
+ 0x130134UL
+#define IGU_REG_MAPPING_MEMORY \
+ 0x131000UL
+#define IGU_REG_MAPPING_MEMORY_SIZE \
+ 136
+#define IGU_REG_PBA_STATUS_LSB \
+ 0x130138UL
+#define IGU_REG_PBA_STATUS_MSB \
+ 0x13013cUL
+#define IGU_REG_PCI_PF_MSIX_EN \
+ 0x130144UL
+#define IGU_REG_PCI_PF_MSIX_FUNC_MASK \
+ 0x130148UL
+#define IGU_REG_PCI_PF_MSI_EN \
+ 0x130140UL
+#define IGU_REG_PENDING_BITS_STATUS \
+ 0x130300UL
+#define IGU_REG_PF_CONFIGURATION \
+ 0x130154UL
+#define IGU_REG_PROD_CONS_MEMORY \
+ 0x132000UL
+#define IGU_REG_RESET_MEMORIES \
+ 0x130158UL
+#define IGU_REG_SB_INT_BEFORE_MASK_LSB \
+ 0x13015cUL
+#define IGU_REG_SB_INT_BEFORE_MASK_MSB \
+ 0x130160UL
+#define IGU_REG_SB_MASK_LSB \
+ 0x130164UL
+#define IGU_REG_SB_MASK_MSB \
+ 0x130168UL
+#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT \
+ 0x130800UL
+#define IGU_REG_TRAILING_EDGE_LATCH \
+ 0x130104UL
+#define IGU_REG_VF_CONFIGURATION \
+ 0x130170UL
+#define MCP_REG_MCPR_ACCESS_LOCK \
+ 0x8009c
+#define MCP_REG_MCPR_GP_INPUTS \
+ 0x800c0
+#define MCP_REG_MCPR_GP_OENABLE \
+ 0x800c8
+#define MCP_REG_MCPR_GP_OUTPUTS \
+ 0x800c4
+#define MCP_REG_MCPR_IMC_COMMAND \
+ 0x85900
+#define MCP_REG_MCPR_IMC_DATAREG0 \
+ 0x85920
+#define MCP_REG_MCPR_IMC_SLAVE_CONTROL \
+ 0x85904
+#define MCP_REG_MCPR_NVM_ACCESS_ENABLE \
+ 0x86424
+#define MCP_REG_MCPR_NVM_ADDR \
+ 0x8640c
+#define MCP_REG_MCPR_NVM_CFG4 \
+ 0x8642c
+#define MCP_REG_MCPR_NVM_COMMAND \
+ 0x86400
+#define MCP_REG_MCPR_NVM_READ \
+ 0x86410
+#define MCP_REG_MCPR_NVM_SW_ARB \
+ 0x86420
+#define MCP_REG_MCPR_NVM_WRITE \
+ 0x86408
+#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK \
+ (0x1<<1)
+#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK \
+ (0x1<<0)
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 \
+ 0xa42cUL
+#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 \
+ 0xa438UL
+#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 \
+ 0xa444UL
+#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 \
+ 0xa450UL
+#define MISC_REG_AEU_AFTER_INVERT_4_MCP \
+ 0xa458UL
+#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 \
+ 0xa700UL
+#define MISC_REG_AEU_CLR_LATCH_SIGNAL \
+ 0xa45cUL
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 \
+ 0xa06cUL
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 \
+ 0xa07cUL
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 \
+ 0xa08cUL
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 \
+ 0xa10cUL
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 \
+ 0xa11cUL
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 \
+ 0xa12cUL
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 \
+ 0xa078UL
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 \
+ 0xa118UL
+#define MISC_REG_AEU_ENABLE4_NIG_0 \
+ 0xa0f8UL
+#define MISC_REG_AEU_ENABLE4_NIG_1 \
+ 0xa198UL
+#define MISC_REG_AEU_ENABLE4_PXP_0 \
+ 0xa108UL
+#define MISC_REG_AEU_ENABLE4_PXP_1 \
+ 0xa1a8UL
+#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 \
+ 0xa688UL
+#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 \
+ 0xa6b0UL
+#define MISC_REG_AEU_GENERAL_ATTN_0 \
+ 0xa000UL
+#define MISC_REG_AEU_GENERAL_ATTN_1 \
+ 0xa004UL
+#define MISC_REG_AEU_GENERAL_ATTN_10 \
+ 0xa028UL
+#define MISC_REG_AEU_GENERAL_ATTN_11 \
+ 0xa02cUL
+#define MISC_REG_AEU_GENERAL_ATTN_12 \
+ 0xa030UL
+#define MISC_REG_AEU_GENERAL_ATTN_2 \
+ 0xa008UL
+#define MISC_REG_AEU_GENERAL_ATTN_3 \
+ 0xa00cUL
+#define MISC_REG_AEU_GENERAL_ATTN_4 \
+ 0xa010UL
+#define MISC_REG_AEU_GENERAL_ATTN_5 \
+ 0xa014UL
+#define MISC_REG_AEU_GENERAL_ATTN_6 \
+ 0xa018UL
+#define MISC_REG_AEU_GENERAL_ATTN_7 \
+ 0xa01cUL
+#define MISC_REG_AEU_GENERAL_ATTN_8 \
+ 0xa020UL
+#define MISC_REG_AEU_GENERAL_ATTN_9 \
+ 0xa024UL
+#define MISC_REG_AEU_GENERAL_MASK \
+ 0xa61cUL
+#define MISC_REG_AEU_MASK_ATTN_FUNC_0 \
+ 0xa060UL
+#define MISC_REG_AEU_MASK_ATTN_FUNC_1 \
+ 0xa064UL
+#define MISC_REG_BOND_ID \
+ 0xa400UL
+#define MISC_REG_CHIP_NUM \
+ 0xa408UL
+#define MISC_REG_CHIP_REV \
+ 0xa40cUL
+#define MISC_REG_CHIP_TYPE \
+ 0xac60UL
+#define MISC_REG_CHIP_TYPE_57811_MASK \
+ (1<<1)
+#define MISC_REG_CPMU_LP_DR_ENABLE \
+ 0xa858UL
+#define MISC_REG_CPMU_LP_FW_ENABLE_P0 \
+ 0xa84cUL
+#define MISC_REG_CPMU_LP_IDLE_THR_P0 \
+ 0xa8a0UL
+#define MISC_REG_CPMU_LP_MASK_ENT_P0 \
+ 0xa880UL
+#define MISC_REG_CPMU_LP_MASK_EXT_P0 \
+ 0xa888UL
+#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0 \
+ 0xa8b8UL
+#define MISC_REG_CPMU_LP_SM_ENT_CNT_P1 \
+ 0xa8bcUL
+#define MISC_REG_DRIVER_CONTROL_1 \
+ 0xa510UL
+#define MISC_REG_DRIVER_CONTROL_7 \
+ 0xa3c8UL
+#define MISC_REG_FOUR_PORT_PATH_SWAP \
+ 0xa75cUL
+#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR \
+ 0xa738UL
+#define MISC_REG_FOUR_PORT_PORT_SWAP \
+ 0xa754UL
+#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR \
+ 0xa734UL
+#define MISC_REG_GENERIC_CR_0 \
+ 0xa460UL
+#define MISC_REG_GENERIC_CR_1 \
+ 0xa464UL
+#define MISC_REG_GENERIC_POR_1 \
+ 0xa474UL
+#define MISC_REG_GEN_PURP_HWG \
+ 0xa9a0UL
+#define MISC_REG_GPIO \
+ 0xa490UL
+#define MISC_REG_GPIO_EVENT_EN \
+ 0xa2bcUL
+#define MISC_REG_GPIO_INT \
+ 0xa494UL
+#define MISC_REG_GRC_RSV_ATTN \
+ 0xa3c0UL
+#define MISC_REG_GRC_TIMEOUT_ATTN \
+ 0xa3c4UL
+#define MISC_REG_LCPLL_E40_PWRDWN \
+ 0xaa74UL
+#define MISC_REG_LCPLL_E40_RESETB_ANA \
+ 0xaa78UL
+#define MISC_REG_LCPLL_E40_RESETB_DIG \
+ 0xaa7cUL
+#define MISC_REG_MISC_INT_MASK \
+ 0xa388UL
+#define MISC_REG_MISC_PRTY_MASK \
+ 0xa398UL
+#define MISC_REG_MISC_PRTY_STS_CLR \
+ 0xa390UL
+#define MISC_REG_PORT4MODE_EN \
+ 0xa750UL
+#define MISC_REG_PORT4MODE_EN_OVWR \
+ 0xa720UL
+#define MISC_REG_RESET_REG_1 \
+ 0xa580UL
+#define MISC_REG_RESET_REG_2 \
+ 0xa590UL
+#define MISC_REG_SHARED_MEM_ADDR \
+ 0xa2b4UL
+#define MISC_REG_SPIO \
+ 0xa4fcUL
+#define MISC_REG_SPIO_EVENT_EN \
+ 0xa2b8UL
+#define MISC_REG_SPIO_INT \
+ 0xa500UL
+#define MISC_REG_TWO_PORT_PATH_SWAP \
+ 0xa758UL
+#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR \
+ 0xa72cUL
+#define MISC_REG_UNPREPARED \
+ 0xa424UL
+#define MISC_REG_WC0_CTRL_PHY_ADDR \
+ 0xa9ccUL
+#define MISC_REG_WC0_RESET \
+ 0xac30UL
+#define MISC_REG_XMAC_CORE_PORT_MODE \
+ 0xa964UL
+#define MISC_REG_XMAC_PHY_PORT_MODE \
+ 0xa960UL
+#define MSTAT_REG_RX_STAT_GR64_LO \
+ 0x200UL
+#define MSTAT_REG_TX_STAT_GTXPOK_LO \
+ 0UL
+#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN \
+ (0x1<<0)
+#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN \
+ (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT \
+ (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS \
+ (0x1<<9)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G \
+ (0x1<<15)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS \
+ (0xf<<18)
+#define NIG_REG_BMAC0_IN_EN \
+ 0x100acUL
+#define NIG_REG_BMAC0_OUT_EN \
+ 0x100e0UL
+#define NIG_REG_BMAC0_PAUSE_OUT_EN \
+ 0x10110UL
+#define NIG_REG_BMAC0_REGS_OUT_EN \
+ 0x100e8UL
+#define NIG_REG_BRB0_PAUSE_IN_EN \
+ 0x100c4UL
+#define NIG_REG_BRB1_PAUSE_IN_EN \
+ 0x100c8UL
+#define NIG_REG_DEBUG_PACKET_LB \
+ 0x10800UL
+#define NIG_REG_EGRESS_DRAIN0_MODE \
+ 0x10060UL
+#define NIG_REG_EGRESS_EMAC0_OUT_EN \
+ 0x10120UL
+#define NIG_REG_EGRESS_EMAC0_PORT \
+ 0x10058UL
+#define NIG_REG_EMAC0_IN_EN \
+ 0x100a4UL
+#define NIG_REG_EMAC0_PAUSE_OUT_EN \
+ 0x10118UL
+#define NIG_REG_EMAC0_STATUS_MISC_MI_INT \
+ 0x10494UL
+#define NIG_REG_INGRESS_BMAC0_MEM \
+ 0x10c00UL
+#define NIG_REG_INGRESS_BMAC1_MEM \
+ 0x11000UL
+#define NIG_REG_INGRESS_EOP_LB_EMPTY \
+ 0x104e0UL
+#define NIG_REG_INGRESS_EOP_LB_FIFO \
+ 0x104e4UL
+#define NIG_REG_LATCH_BC_0 \
+ 0x16210UL
+#define NIG_REG_LATCH_STATUS_0 \
+ 0x18000UL
+#define NIG_REG_LED_10G_P0 \
+ 0x10320UL
+#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 \
+ 0x10318UL
+#define NIG_REG_LED_CONTROL_BLINK_RATE_P0 \
+ 0x10310UL
+#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 \
+ 0x10308UL
+#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 \
+ 0x102f8UL
+#define NIG_REG_LED_CONTROL_TRAFFIC_P0 \
+ 0x10300UL
+#define NIG_REG_LED_MODE_P0 \
+ 0x102f0UL
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 \
+ 0x16070UL
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 \
+ 0x16074UL
+#define NIG_REG_LLFC_ENABLE_0 \
+ 0x16208UL
+#define NIG_REG_LLFC_ENABLE_1 \
+ 0x1620cUL
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 \
+ 0x16058UL
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 \
+ 0x1605cUL
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 \
+ 0x16060UL
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 \
+ 0x16064UL
+#define NIG_REG_LLFC_OUT_EN_0 \
+ 0x160c8UL
+#define NIG_REG_LLFC_OUT_EN_1 \
+ 0x160ccUL
+#define NIG_REG_LLH0_BRB1_DRV_MASK \
+ 0x10244UL
+#define NIG_REG_LLH0_BRB1_DRV_MASK_MF \
+ 0x16048UL
+#define NIG_REG_LLH0_BRB1_NOT_MCP \
+ 0x1025cUL
+#define NIG_REG_LLH0_CLS_TYPE \
+ 0x16080UL
+#define NIG_REG_LLH0_FUNC_EN \
+ 0x160fcUL
+#define NIG_REG_LLH0_FUNC_MEM \
+ 0x16180UL
+#define NIG_REG_LLH0_FUNC_MEM_ENABLE \
+ 0x16140UL
+#define NIG_REG_LLH0_FUNC_VLAN_ID \
+ 0x16100UL
+#define NIG_REG_LLH0_XCM_MASK \
+ 0x10130UL
+#define NIG_REG_LLH1_BRB1_NOT_MCP \
+ 0x102dcUL
+#define NIG_REG_LLH1_CLS_TYPE \
+ 0x16084UL
+#define NIG_REG_LLH1_FUNC_MEM \
+ 0x161c0UL
+#define NIG_REG_LLH1_FUNC_MEM_ENABLE \
+ 0x16160UL
+#define NIG_REG_LLH1_FUNC_MEM_SIZE \
+ 16
+#define NIG_REG_LLH1_MF_MODE \
+ 0x18614UL
+#define NIG_REG_LLH1_XCM_MASK \
+ 0x10134UL
+#define NIG_REG_LLH_E1HOV_MODE \
+ 0x160d8UL
+#define NIG_REG_LLH_MF_MODE \
+ 0x16024UL
+#define NIG_REG_MASK_INTERRUPT_PORT0 \
+ 0x10330UL
+#define NIG_REG_MASK_INTERRUPT_PORT1 \
+ 0x10334UL
+#define NIG_REG_NIG_EMAC0_EN \
+ 0x1003cUL
+#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC \
+ 0x10044UL
+#define NIG_REG_NIG_INT_STS_CLR_0 \
+ 0x103b4UL
+#define NIG_REG_NIG_PRTY_MASK \
+ 0x103dcUL
+#define NIG_REG_NIG_PRTY_MASK_0 \
+ 0x183c8UL
+#define NIG_REG_NIG_PRTY_MASK_1 \
+ 0x183d8UL
+#define NIG_REG_NIG_PRTY_STS_CLR \
+ 0x103d4UL
+#define NIG_REG_NIG_PRTY_STS_CLR_0 \
+ 0x183c0UL
+#define NIG_REG_NIG_PRTY_STS_CLR_1 \
+ 0x183d0UL
+#define NIG_REG_P0_HDRS_AFTER_BASIC \
+ 0x18038UL
+#define NIG_REG_P0_HWPFC_ENABLE \
+ 0x18078UL
+#define NIG_REG_P0_LLH_FUNC_MEM2 \
+ 0x18480UL
+#define NIG_REG_P0_MAC_IN_EN \
+ 0x185acUL
+#define NIG_REG_P0_MAC_OUT_EN \
+ 0x185b0UL
+#define NIG_REG_P0_MAC_PAUSE_OUT_EN \
+ 0x185b4UL
+#define NIG_REG_P0_PKT_PRIORITY_TO_COS \
+ 0x18054UL
+#define NIG_REG_P0_RX_COS0_PRIORITY_MASK \
+ 0x18058UL
+#define NIG_REG_P0_RX_COS1_PRIORITY_MASK \
+ 0x1805cUL
+#define NIG_REG_P0_RX_COS2_PRIORITY_MASK \
+ 0x186b0UL
+#define NIG_REG_P0_RX_COS3_PRIORITY_MASK \
+ 0x186b4UL
+#define NIG_REG_P0_RX_COS4_PRIORITY_MASK \
+ 0x186b8UL
+#define NIG_REG_P0_RX_COS5_PRIORITY_MASK \
+ 0x186bcUL
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP \
+ 0x180f0UL
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB \
+ 0x18688UL
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB \
+ 0x1868cUL
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT \
+ 0x180e8UL
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ \
+ 0x180ecUL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 \
+ 0x1810cUL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 \
+ 0x18110UL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 \
+ 0x18114UL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 \
+ 0x18118UL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 \
+ 0x1811cUL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 \
+ 0x186a0UL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 \
+ 0x186a4UL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 \
+ 0x186a8UL
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 \
+ 0x186acUL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 \
+ 0x180f8UL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 \
+ 0x180fcUL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 \
+ 0x18100UL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 \
+ 0x18104UL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 \
+ 0x18108UL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 \
+ 0x18690UL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 \
+ 0x18694UL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 \
+ 0x18698UL
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 \
+ 0x1869cUL
+#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS \
+ 0x180f4UL
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT \
+ 0x180e4UL
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB \
+ 0x18680UL
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB \
+ 0x18684UL
+#define NIG_REG_P1_HDRS_AFTER_BASIC \
+ 0x1818cUL
+#define NIG_REG_P1_HWPFC_ENABLE \
+ 0x181d0UL
+#define NIG_REG_P1_LLH_FUNC_MEM2 \
+ 0x184c0UL
+#define NIG_REG_P1_MAC_IN_EN \
+ 0x185c0UL
+#define NIG_REG_P1_MAC_OUT_EN \
+ 0x185c4UL
+#define NIG_REG_P1_MAC_PAUSE_OUT_EN \
+ 0x185c8UL
+#define NIG_REG_P1_PKT_PRIORITY_TO_COS \
+ 0x181a8UL
+#define NIG_REG_P1_RX_COS0_PRIORITY_MASK \
+ 0x181acUL
+#define NIG_REG_P1_RX_COS1_PRIORITY_MASK \
+ 0x181b0UL
+#define NIG_REG_P1_RX_COS2_PRIORITY_MASK \
+ 0x186f8UL
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB \
+ 0x186e8UL
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB \
+ 0x186ecUL
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT \
+ 0x18234UL
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ \
+ 0x18238UL
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 \
+ 0x18258UL
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 \
+ 0x1825cUL
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 \
+ 0x18260UL
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 \
+ 0x18264UL
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 \
+ 0x18268UL
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 \
+ 0x186f4UL
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 \
+ 0x18244UL
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 \
+ 0x18248UL
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 \
+ 0x1824cUL
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 \
+ 0x18250UL
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 \
+ 0x18254UL
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 \
+ 0x186f0UL
+#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS \
+ 0x18240UL
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB \
+ 0x186e0UL
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB \
+ 0x186e4UL
+#define NIG_REG_PAUSE_ENABLE_0 \
+ 0x160c0UL
+#define NIG_REG_PAUSE_ENABLE_1 \
+ 0x160c4UL
+#define NIG_REG_PORT_SWAP \
+ 0x10394UL
+#define NIG_REG_PPP_ENABLE_0 \
+ 0x160b0UL
+#define NIG_REG_PPP_ENABLE_1 \
+ 0x160b4UL
+#define NIG_REG_PRS_REQ_IN_EN \
+ 0x100b8UL
+#define NIG_REG_SERDES0_CTRL_MD_DEVAD \
+ 0x10370UL
+#define NIG_REG_SERDES0_CTRL_MD_ST \
+ 0x1036cUL
+#define NIG_REG_SERDES0_CTRL_PHY_ADDR \
+ 0x10374UL
+#define NIG_REG_SERDES0_STATUS_LINK_STATUS \
+ 0x10578UL
+#define NIG_REG_STAT0_BRB_DISCARD \
+ 0x105f0UL
+#define NIG_REG_STAT0_BRB_TRUNCATE \
+ 0x105f8UL
+#define NIG_REG_STAT0_EGRESS_MAC_PKT0 \
+ 0x10750UL
+#define NIG_REG_STAT0_EGRESS_MAC_PKT1 \
+ 0x10760UL
+#define NIG_REG_STAT1_BRB_DISCARD \
+ 0x10628UL
+#define NIG_REG_STAT1_EGRESS_MAC_PKT0 \
+ 0x107a0UL
+#define NIG_REG_STAT1_EGRESS_MAC_PKT1 \
+ 0x107b0UL
+#define NIG_REG_STAT2_BRB_OCTET \
+ 0x107e0UL
+#define NIG_REG_STATUS_INTERRUPT_PORT0 \
+ 0x10328UL
+#define NIG_REG_STRAP_OVERRIDE \
+ 0x10398UL
+#define NIG_REG_XCM0_OUT_EN \
+ 0x100f0UL
+#define NIG_REG_XCM1_OUT_EN \
+ 0x100f4UL
+#define NIG_REG_XGXS0_CTRL_MD_DEVAD \
+ 0x1033cUL
+#define NIG_REG_XGXS0_CTRL_MD_ST \
+ 0x10338UL
+#define NIG_REG_XGXS0_CTRL_PHY_ADDR \
+ 0x10340UL
+#define NIG_REG_XGXS0_STATUS_LINK10G \
+ 0x10680UL
+#define NIG_REG_XGXS0_STATUS_LINK_STATUS \
+ 0x10684UL
+#define NIG_REG_XGXS_LANE_SEL_P0 \
+ 0x102e8UL
+#define NIG_REG_XGXS_SERDES0_MODE_SEL \
+ 0x102e0UL
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT \
+ (0x1<<0)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS \
+ (0x1<<9)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G \
+ (0x1<<15)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS \
+ (0xf<<18)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE \
+ 18
+#define PBF_REG_COS0_UPPER_BOUND \
+ 0x15c05cUL
+#define PBF_REG_COS0_UPPER_BOUND_P0 \
+ 0x15c2ccUL
+#define PBF_REG_COS0_UPPER_BOUND_P1 \
+ 0x15c2e4UL
+#define PBF_REG_COS0_WEIGHT \
+ 0x15c054UL
+#define PBF_REG_COS0_WEIGHT_P0 \
+ 0x15c2a8UL
+#define PBF_REG_COS0_WEIGHT_P1 \
+ 0x15c2c0UL
+#define PBF_REG_COS1_UPPER_BOUND \
+ 0x15c060UL
+#define PBF_REG_COS1_WEIGHT \
+ 0x15c058UL
+#define PBF_REG_COS1_WEIGHT_P0 \
+ 0x15c2acUL
+#define PBF_REG_COS1_WEIGHT_P1 \
+ 0x15c2c4UL
+#define PBF_REG_COS2_WEIGHT_P0 \
+ 0x15c2b0UL
+#define PBF_REG_COS2_WEIGHT_P1 \
+ 0x15c2c8UL
+#define PBF_REG_COS3_WEIGHT_P0 \
+ 0x15c2b4UL
+#define PBF_REG_COS4_WEIGHT_P0 \
+ 0x15c2b8UL
+#define PBF_REG_COS5_WEIGHT_P0 \
+ 0x15c2bcUL
+#define PBF_REG_CREDIT_LB_Q \
+ 0x140338UL
+#define PBF_REG_CREDIT_Q0 \
+ 0x14033cUL
+#define PBF_REG_CREDIT_Q1 \
+ 0x140340UL
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 \
+ 0x14005cUL
+#define PBF_REG_DISABLE_PF \
+ 0x1402e8UL
+#define PBF_REG_DISABLE_VF \
+ 0x1402ecUL
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 \
+ 0x15c288UL
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 \
+ 0x15c28cUL
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 \
+ 0x15c278UL
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 \
+ 0x15c27cUL
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 \
+ 0x15c280UL
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 \
+ 0x15c284UL
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 \
+ 0x15c2a0UL
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 \
+ 0x15c2a4UL
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 \
+ 0x15c270UL
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 \
+ 0x15c274UL
+#define PBF_REG_ETS_ENABLED \
+ 0x15c050UL
+#define PBF_REG_HDRS_AFTER_BASIC \
+ 0x15c0a8UL
+#define PBF_REG_HDRS_AFTER_TAG_0 \
+ 0x15c0b8UL
+#define PBF_REG_HIGH_PRIORITY_COS_NUM \
+ 0x15c04cUL
+#define PBF_REG_INIT_CRD_LB_Q \
+ 0x15c248UL
+#define PBF_REG_INIT_CRD_Q0 \
+ 0x15c230UL
+#define PBF_REG_INIT_CRD_Q1 \
+ 0x15c234UL
+#define PBF_REG_INIT_P0 \
+ 0x140004UL
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q \
+ 0x140354UL
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 \
+ 0x140358UL
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 \
+ 0x14035cUL
+#define PBF_REG_MUST_HAVE_HDRS \
+ 0x15c0c4UL
+#define PBF_REG_NUM_STRICT_ARB_SLOTS \
+ 0x15c064UL
+#define PBF_REG_P0_ARB_THRSH \
+ 0x1400e4UL
+#define PBF_REG_P0_CREDIT \
+ 0x140200UL
+#define PBF_REG_P0_INIT_CRD \
+ 0x1400d0UL
+#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT \
+ 0x140308UL
+#define PBF_REG_P0_PAUSE_ENABLE \
+ 0x140014UL
+#define PBF_REG_P0_TQ_LINES_FREED_CNT \
+ 0x1402f0UL
+#define PBF_REG_P0_TQ_OCCUPANCY \
+ 0x1402fcUL
+#define PBF_REG_P1_CREDIT \
+ 0x140208UL
+#define PBF_REG_P1_INIT_CRD \
+ 0x1400d4UL
+#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT \
+ 0x14030cUL
+#define PBF_REG_P1_TQ_LINES_FREED_CNT \
+ 0x1402f4UL
+#define PBF_REG_P1_TQ_OCCUPANCY \
+ 0x140300UL
+#define PBF_REG_P4_CREDIT \
+ 0x140210UL
+#define PBF_REG_P4_INIT_CRD \
+ 0x1400e0UL
+#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT \
+ 0x140310UL
+#define PBF_REG_P4_TQ_LINES_FREED_CNT \
+ 0x1402f8UL
+#define PBF_REG_P4_TQ_OCCUPANCY \
+ 0x140304UL
+#define PBF_REG_PBF_INT_MASK \
+ 0x1401d4UL
+#define PBF_REG_PBF_PRTY_MASK \
+ 0x1401e4UL
+#define PBF_REG_PBF_PRTY_STS_CLR \
+ 0x1401dcUL
+#define PBF_REG_TAG_ETHERTYPE_0 \
+ 0x15c090UL
+#define PBF_REG_TAG_LEN_0 \
+ 0x15c09cUL
+#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q \
+ 0x14038cUL
+#define PBF_REG_TQ_LINES_FREED_CNT_Q0 \
+ 0x140390UL
+#define PBF_REG_TQ_LINES_FREED_CNT_Q1 \
+ 0x140394UL
+#define PBF_REG_TQ_OCCUPANCY_LB_Q \
+ 0x1403a8UL
+#define PBF_REG_TQ_OCCUPANCY_Q0 \
+ 0x1403acUL
+#define PBF_REG_TQ_OCCUPANCY_Q1 \
+ 0x1403b0UL
+#define PB_REG_PB_INT_MASK \
+ 0x28UL
+#define PB_REG_PB_PRTY_MASK \
+ 0x38UL
+#define PB_REG_PB_PRTY_STS_CLR \
+ 0x30UL
+#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR \
+ (0x1<<0)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW \
+ (0x1<<8)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR \
+ (0x1<<1)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN \
+ (0x1<<6)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN \
+ (0x1<<7)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN \
+ (0x1<<4)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN \
+ (0x1<<3)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN \
+ (0x1<<5)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN \
+ (0x1<<2)
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR \
+ 0x9418UL
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \
+ 0x9478UL
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR \
+ 0x947cUL
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR \
+ 0x9480UL
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR \
+ 0x9474UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
+ 0x942cUL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
+ 0x9430UL
+#define PGLUE_B_REG_INTERNAL_VFID_ENABLE \
+ 0x9438UL
+#define PGLUE_B_REG_PGLUE_B_INT_STS \
+ 0x9298UL
+#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR \
+ 0x929cUL
+#define PGLUE_B_REG_PGLUE_B_PRTY_MASK \
+ 0x92b4UL
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR \
+ 0x92acUL
+#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR \
+ 0x9458UL
+#define PGLUE_B_REG_TAGS_63_32 \
+ 0x9244UL
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR \
+ 0x9470UL
+#define PRS_REG_A_PRSU_20 \
+ 0x40134UL
+#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT \
+ 0x4011cUL
+#define PRS_REG_E1HOV_MODE \
+ 0x401c8UL
+#define PRS_REG_HDRS_AFTER_BASIC \
+ 0x40238UL
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 \
+ 0x40270UL
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 \
+ 0x40290UL
+#define PRS_REG_HDRS_AFTER_TAG_0 \
+ 0x40248UL
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 \
+ 0x40280UL
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 \
+ 0x402a0UL
+#define PRS_REG_MUST_HAVE_HDRS \
+ 0x40254UL
+#define PRS_REG_MUST_HAVE_HDRS_PORT_0 \
+ 0x4028cUL
+#define PRS_REG_MUST_HAVE_HDRS_PORT_1 \
+ 0x402acUL
+#define PRS_REG_NIC_MODE \
+ 0x40138UL
+#define PRS_REG_NUM_OF_PACKETS \
+ 0x40124UL
+#define PRS_REG_PRS_PRTY_MASK \
+ 0x401a4UL
+#define PRS_REG_PRS_PRTY_STS_CLR \
+ 0x4019cUL
+#define PRS_REG_TAG_ETHERTYPE_0 \
+ 0x401d4UL
+#define PRS_REG_TAG_LEN_0 \
+ 0x4022cUL
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT \
+ (0x1<<19)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF \
+ (0x1<<20)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN \
+ (0x1<<22)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED \
+ (0x1<<23)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED \
+ (0x1<<24)
+#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR \
+ (0x1<<7)
+#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR \
+ (0x1<<7)
+#define PXP2_REG_PGL_ADDR_88_F0 \
+ 0x120534UL
+#define PXP2_REG_PGL_ADDR_88_F1 \
+ 0x120544UL
+#define PXP2_REG_PGL_ADDR_8C_F0 \
+ 0x120538UL
+#define PXP2_REG_PGL_ADDR_8C_F1 \
+ 0x120548UL
+#define PXP2_REG_PGL_ADDR_90_F0 \
+ 0x12053cUL
+#define PXP2_REG_PGL_ADDR_90_F1 \
+ 0x12054cUL
+#define PXP2_REG_PGL_ADDR_94_F0 \
+ 0x120540UL
+#define PXP2_REG_PGL_ADDR_94_F1 \
+ 0x120550UL
+#define PXP2_REG_PGL_EXP_ROM2 \
+ 0x120808UL
+#define PXP2_REG_PGL_PRETEND_FUNC_F0 \
+ 0x120674UL
+#define PXP2_REG_PGL_PRETEND_FUNC_F1 \
+ 0x120678UL
+#define PXP2_REG_PGL_TAGS_LIMIT \
+ 0x1205a8UL
+#define PXP2_REG_PSWRQ_BW_ADD1 \
+ 0x1201c0UL
+#define PXP2_REG_PSWRQ_BW_ADD10 \
+ 0x1201e4UL
+#define PXP2_REG_PSWRQ_BW_ADD11 \
+ 0x1201e8UL
+#define PXP2_REG_PSWRQ_BW_ADD2 \
+ 0x1201c4UL
+#define PXP2_REG_PSWRQ_BW_ADD28 \
+ 0x120228UL
+#define PXP2_REG_PSWRQ_BW_ADD3 \
+ 0x1201c8UL
+#define PXP2_REG_PSWRQ_BW_ADD6 \
+ 0x1201d4UL
+#define PXP2_REG_PSWRQ_BW_ADD7 \
+ 0x1201d8UL
+#define PXP2_REG_PSWRQ_BW_ADD8 \
+ 0x1201dcUL
+#define PXP2_REG_PSWRQ_BW_ADD9 \
+ 0x1201e0UL
+#define PXP2_REG_PSWRQ_BW_L1 \
+ 0x1202b0UL
+#define PXP2_REG_PSWRQ_BW_L10 \
+ 0x1202d4UL
+#define PXP2_REG_PSWRQ_BW_L11 \
+ 0x1202d8UL
+#define PXP2_REG_PSWRQ_BW_L2 \
+ 0x1202b4UL
+#define PXP2_REG_PSWRQ_BW_L28 \
+ 0x120318UL
+#define PXP2_REG_PSWRQ_BW_L3 \
+ 0x1202b8UL
+#define PXP2_REG_PSWRQ_BW_L6 \
+ 0x1202c4UL
+#define PXP2_REG_PSWRQ_BW_L7 \
+ 0x1202c8UL
+#define PXP2_REG_PSWRQ_BW_L8 \
+ 0x1202ccUL
+#define PXP2_REG_PSWRQ_BW_L9 \
+ 0x1202d0UL
+#define PXP2_REG_PSWRQ_BW_RD \
+ 0x120324UL
+#define PXP2_REG_PSWRQ_BW_UB1 \
+ 0x120238UL
+#define PXP2_REG_PSWRQ_BW_UB10 \
+ 0x12025cUL
+#define PXP2_REG_PSWRQ_BW_UB11 \
+ 0x120260UL
+#define PXP2_REG_PSWRQ_BW_UB2 \
+ 0x12023cUL
+#define PXP2_REG_PSWRQ_BW_UB28 \
+ 0x1202a0UL
+#define PXP2_REG_PSWRQ_BW_UB3 \
+ 0x120240UL
+#define PXP2_REG_PSWRQ_BW_UB6 \
+ 0x12024cUL
+#define PXP2_REG_PSWRQ_BW_UB7 \
+ 0x120250UL
+#define PXP2_REG_PSWRQ_BW_UB8 \
+ 0x120254UL
+#define PXP2_REG_PSWRQ_BW_UB9 \
+ 0x120258UL
+#define PXP2_REG_PSWRQ_BW_WR \
+ 0x120328UL
+#define PXP2_REG_PSWRQ_CDU0_L2P \
+ 0x120000UL
+#define PXP2_REG_PSWRQ_QM0_L2P \
+ 0x120038UL
+#define PXP2_REG_PSWRQ_SRC0_L2P \
+ 0x120054UL
+#define PXP2_REG_PSWRQ_TM0_L2P \
+ 0x12001cUL
+#define PXP2_REG_PXP2_INT_MASK_0 \
+ 0x120578UL
+#define PXP2_REG_PXP2_INT_MASK_1 \
+ 0x120614UL
+#define PXP2_REG_PXP2_INT_STS_0 \
+ 0x12056cUL
+#define PXP2_REG_PXP2_INT_STS_1 \
+ 0x120608UL
+#define PXP2_REG_PXP2_INT_STS_CLR_0 \
+ 0x120570UL
+#define PXP2_REG_PXP2_PRTY_MASK_0 \
+ 0x120588UL
+#define PXP2_REG_PXP2_PRTY_MASK_1 \
+ 0x120598UL
+#define PXP2_REG_PXP2_PRTY_STS_CLR_0 \
+ 0x120580UL
+#define PXP2_REG_PXP2_PRTY_STS_CLR_1 \
+ 0x120590UL
+#define PXP2_REG_RD_BLK_CNT \
+ 0x120418UL
+#define PXP2_REG_RD_CDURD_SWAP_MODE \
+ 0x120404UL
+#define PXP2_REG_RD_DISABLE_INPUTS \
+ 0x120374UL
+#define PXP2_REG_RD_INIT_DONE \
+ 0x120370UL
+#define PXP2_REG_RD_PBF_SWAP_MODE \
+ 0x1203f4UL
+#define PXP2_REG_RD_PORT_IS_IDLE_0 \
+ 0x12041cUL
+#define PXP2_REG_RD_PORT_IS_IDLE_1 \
+ 0x120420UL
+#define PXP2_REG_RD_QM_SWAP_MODE \
+ 0x1203f8UL
+#define PXP2_REG_RD_SRC_SWAP_MODE \
+ 0x120400UL
+#define PXP2_REG_RD_SR_CNT \
+ 0x120414UL
+#define PXP2_REG_RD_START_INIT \
+ 0x12036cUL
+#define PXP2_REG_RD_TM_SWAP_MODE \
+ 0x1203fcUL
+#define PXP2_REG_RQ_BW_RD_ADD0 \
+ 0x1201bcUL
+#define PXP2_REG_RQ_BW_RD_ADD12 \
+ 0x1201ecUL
+#define PXP2_REG_RQ_BW_RD_ADD13 \
+ 0x1201f0UL
+#define PXP2_REG_RQ_BW_RD_ADD14 \
+ 0x1201f4UL
+#define PXP2_REG_RQ_BW_RD_ADD15 \
+ 0x1201f8UL
+#define PXP2_REG_RQ_BW_RD_ADD16 \
+ 0x1201fcUL
+#define PXP2_REG_RQ_BW_RD_ADD17 \
+ 0x120200UL
+#define PXP2_REG_RQ_BW_RD_ADD18 \
+ 0x120204UL
+#define PXP2_REG_RQ_BW_RD_ADD19 \
+ 0x120208UL
+#define PXP2_REG_RQ_BW_RD_ADD20 \
+ 0x12020cUL
+#define PXP2_REG_RQ_BW_RD_ADD22 \
+ 0x120210UL
+#define PXP2_REG_RQ_BW_RD_ADD23 \
+ 0x120214UL
+#define PXP2_REG_RQ_BW_RD_ADD24 \
+ 0x120218UL
+#define PXP2_REG_RQ_BW_RD_ADD25 \
+ 0x12021cUL
+#define PXP2_REG_RQ_BW_RD_ADD26 \
+ 0x120220UL
+#define PXP2_REG_RQ_BW_RD_ADD27 \
+ 0x120224UL
+#define PXP2_REG_RQ_BW_RD_ADD4 \
+ 0x1201ccUL
+#define PXP2_REG_RQ_BW_RD_ADD5 \
+ 0x1201d0UL
+#define PXP2_REG_RQ_BW_RD_L0 \
+ 0x1202acUL
+#define PXP2_REG_RQ_BW_RD_L12 \
+ 0x1202dcUL
+#define PXP2_REG_RQ_BW_RD_L13 \
+ 0x1202e0UL
+#define PXP2_REG_RQ_BW_RD_L14 \
+ 0x1202e4UL
+#define PXP2_REG_RQ_BW_RD_L15 \
+ 0x1202e8UL
+#define PXP2_REG_RQ_BW_RD_L16 \
+ 0x1202ecUL
+#define PXP2_REG_RQ_BW_RD_L17 \
+ 0x1202f0UL
+#define PXP2_REG_RQ_BW_RD_L18 \
+ 0x1202f4UL
+#define PXP2_REG_RQ_BW_RD_L19 \
+ 0x1202f8UL
+#define PXP2_REG_RQ_BW_RD_L20 \
+ 0x1202fcUL
+#define PXP2_REG_RQ_BW_RD_L22 \
+ 0x120300UL
+#define PXP2_REG_RQ_BW_RD_L23 \
+ 0x120304UL
+#define PXP2_REG_RQ_BW_RD_L24 \
+ 0x120308UL
+#define PXP2_REG_RQ_BW_RD_L25 \
+ 0x12030cUL
+#define PXP2_REG_RQ_BW_RD_L26 \
+ 0x120310UL
+#define PXP2_REG_RQ_BW_RD_L27 \
+ 0x120314UL
+#define PXP2_REG_RQ_BW_RD_L4 \
+ 0x1202bcUL
+#define PXP2_REG_RQ_BW_RD_L5 \
+ 0x1202c0UL
+#define PXP2_REG_RQ_BW_RD_UBOUND0 \
+ 0x120234UL
+#define PXP2_REG_RQ_BW_RD_UBOUND12 \
+ 0x120264UL
+#define PXP2_REG_RQ_BW_RD_UBOUND13 \
+ 0x120268UL
+#define PXP2_REG_RQ_BW_RD_UBOUND14 \
+ 0x12026cUL
+#define PXP2_REG_RQ_BW_RD_UBOUND15 \
+ 0x120270UL
+#define PXP2_REG_RQ_BW_RD_UBOUND16 \
+ 0x120274UL
+#define PXP2_REG_RQ_BW_RD_UBOUND17 \
+ 0x120278UL
+#define PXP2_REG_RQ_BW_RD_UBOUND18 \
+ 0x12027cUL
+#define PXP2_REG_RQ_BW_RD_UBOUND19 \
+ 0x120280UL
+#define PXP2_REG_RQ_BW_RD_UBOUND20 \
+ 0x120284UL
+#define PXP2_REG_RQ_BW_RD_UBOUND22 \
+ 0x120288UL
+#define PXP2_REG_RQ_BW_RD_UBOUND23 \
+ 0x12028cUL
+#define PXP2_REG_RQ_BW_RD_UBOUND24 \
+ 0x120290UL
+#define PXP2_REG_RQ_BW_RD_UBOUND25 \
+ 0x120294UL
+#define PXP2_REG_RQ_BW_RD_UBOUND26 \
+ 0x120298UL
+#define PXP2_REG_RQ_BW_RD_UBOUND27 \
+ 0x12029cUL
+#define PXP2_REG_RQ_BW_RD_UBOUND4 \
+ 0x120244UL
+#define PXP2_REG_RQ_BW_RD_UBOUND5 \
+ 0x120248UL
+#define PXP2_REG_RQ_BW_WR_ADD29 \
+ 0x12022cUL
+#define PXP2_REG_RQ_BW_WR_ADD30 \
+ 0x120230UL
+#define PXP2_REG_RQ_BW_WR_L29 \
+ 0x12031cUL
+#define PXP2_REG_RQ_BW_WR_L30 \
+ 0x120320UL
+#define PXP2_REG_RQ_BW_WR_UBOUND29 \
+ 0x1202a4UL
+#define PXP2_REG_RQ_BW_WR_UBOUND30 \
+ 0x1202a8UL
+#define PXP2_REG_RQ_CDU_ENDIAN_M \
+ 0x1201a0UL
+#define PXP2_REG_RQ_CDU_FIRST_ILT \
+ 0x12061cUL
+#define PXP2_REG_RQ_CDU_LAST_ILT \
+ 0x120620UL
+#define PXP2_REG_RQ_CDU_P_SIZE \
+ 0x120018UL
+#define PXP2_REG_RQ_CFG_DONE \
+ 0x1201b4UL
+#define PXP2_REG_RQ_DBG_ENDIAN_M \
+ 0x1201a4UL
+#define PXP2_REG_RQ_DISABLE_INPUTS \
+ 0x120330UL
+#define PXP2_REG_RQ_DRAM_ALIGN \
+ 0x1205b0UL
+#define PXP2_REG_RQ_DRAM_ALIGN_RD \
+ 0x12092cUL
+#define PXP2_REG_RQ_DRAM_ALIGN_SEL \
+ 0x120930UL
+#define PXP2_REG_RQ_HC_ENDIAN_M \
+ 0x1201a8UL
+#define PXP2_REG_RQ_ONCHIP_AT \
+ 0x122000UL
+#define PXP2_REG_RQ_ONCHIP_AT_B0 \
+ 0x128000UL
+#define PXP2_REG_RQ_PDR_LIMIT \
+ 0x12033cUL
+#define PXP2_REG_RQ_QM_ENDIAN_M \
+ 0x120194UL
+#define PXP2_REG_RQ_QM_FIRST_ILT \
+ 0x120634UL
+#define PXP2_REG_RQ_QM_LAST_ILT \
+ 0x120638UL
+#define PXP2_REG_RQ_QM_P_SIZE \
+ 0x120050UL
+#define PXP2_REG_RQ_RBC_DONE \
+ 0x1201b0UL
+#define PXP2_REG_RQ_RD_MBS0 \
+ 0x120160UL
+#define PXP2_REG_RQ_RD_MBS1 \
+ 0x120168UL
+#define PXP2_REG_RQ_SRC_ENDIAN_M \
+ 0x12019cUL
+#define PXP2_REG_RQ_SRC_FIRST_ILT \
+ 0x12063cUL
+#define PXP2_REG_RQ_SRC_LAST_ILT \
+ 0x120640UL
+#define PXP2_REG_RQ_SRC_P_SIZE \
+ 0x12006cUL
+#define PXP2_REG_RQ_TM_ENDIAN_M \
+ 0x120198UL
+#define PXP2_REG_RQ_TM_FIRST_ILT \
+ 0x120644UL
+#define PXP2_REG_RQ_TM_LAST_ILT \
+ 0x120648UL
+#define PXP2_REG_RQ_TM_P_SIZE \
+ 0x120034UL
+#define PXP2_REG_RQ_WR_MBS0 \
+ 0x12015cUL
+#define PXP2_REG_RQ_WR_MBS1 \
+ 0x120164UL
+#define PXP2_REG_WR_CDU_MPS \
+ 0x1205f0UL
+#define PXP2_REG_WR_CSDM_MPS \
+ 0x1205d0UL
+#define PXP2_REG_WR_DBG_MPS \
+ 0x1205e8UL
+#define PXP2_REG_WR_DMAE_MPS \
+ 0x1205ecUL
+#define PXP2_REG_WR_HC_MPS \
+ 0x1205c8UL
+#define PXP2_REG_WR_QM_MPS \
+ 0x1205dcUL
+#define PXP2_REG_WR_SRC_MPS \
+ 0x1205e4UL
+#define PXP2_REG_WR_TM_MPS \
+ 0x1205e0UL
+#define PXP2_REG_WR_TSDM_MPS \
+ 0x1205d4UL
+#define PXP2_REG_WR_USDMDP_TH \
+ 0x120348UL
+#define PXP2_REG_WR_USDM_MPS \
+ 0x1205ccUL
+#define PXP2_REG_WR_XSDM_MPS \
+ 0x1205d8UL
+#define PXP_REG_HST_DISCARD_DOORBELLS \
+ 0x1030a4UL
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES \
+ 0x1030a8UL
+#define PXP_REG_HST_ZONE_PERMISSION_TABLE \
+ 0x103400UL
+#define PXP_REG_PXP_INT_MASK_0 \
+ 0x103074UL
+#define PXP_REG_PXP_INT_MASK_1 \
+ 0x103084UL
+#define PXP_REG_PXP_INT_STS_CLR_0 \
+ 0x10306cUL
+#define PXP_REG_PXP_INT_STS_CLR_1 \
+ 0x10307cUL
+#define PXP_REG_PXP_PRTY_MASK \
+ 0x103094UL
+#define PXP_REG_PXP_PRTY_STS_CLR \
+ 0x10308cUL
+#define QM_REG_BASEADDR \
+ 0x168900UL
+#define QM_REG_BASEADDR_EXT_A \
+ 0x16e100UL
+#define QM_REG_BYTECRDCMDQ_0 \
+ 0x16e6e8UL
+#define QM_REG_CONNNUM_0 \
+ 0x168020UL
+#define QM_REG_PF_EN \
+ 0x16e70cUL
+#define QM_REG_PF_USG_CNT_0 \
+ 0x16e040UL
+#define QM_REG_PTRTBL \
+ 0x168a00UL
+#define QM_REG_PTRTBL_EXT_A \
+ 0x16e200UL
+#define QM_REG_QM_INT_MASK \
+ 0x168444UL
+#define QM_REG_QM_PRTY_MASK \
+ 0x168454UL
+#define QM_REG_QM_PRTY_STS_CLR \
+ 0x16844cUL
+#define QM_REG_QVOQIDX_0 \
+ 0x1680f4UL
+#define QM_REG_SOFT_RESET \
+ 0x168428UL
+#define QM_REG_VOQQMASK_0_LSB \
+ 0x168240UL
+#define SEM_FAST_REG_PARITY_RST \
+ 0x18840UL
+#define SRC_REG_COUNTFREE0 \
+ 0x40500UL
+#define SRC_REG_FIRSTFREE0 \
+ 0x40510UL
+#define SRC_REG_KEYSEARCH_0 \
+ 0x40458UL
+#define SRC_REG_KEYSEARCH_1 \
+ 0x4045cUL
+#define SRC_REG_KEYSEARCH_2 \
+ 0x40460UL
+#define SRC_REG_KEYSEARCH_3 \
+ 0x40464UL
+#define SRC_REG_KEYSEARCH_4 \
+ 0x40468UL
+#define SRC_REG_KEYSEARCH_5 \
+ 0x4046cUL
+#define SRC_REG_KEYSEARCH_6 \
+ 0x40470UL
+#define SRC_REG_KEYSEARCH_7 \
+ 0x40474UL
+#define SRC_REG_KEYSEARCH_8 \
+ 0x40478UL
+#define SRC_REG_KEYSEARCH_9 \
+ 0x4047cUL
+#define SRC_REG_LASTFREE0 \
+ 0x40530UL
+#define SRC_REG_NUMBER_HASH_BITS0 \
+ 0x40400UL
+#define SRC_REG_SOFT_RST \
+ 0x4049cUL
+#define SRC_REG_SRC_PRTY_MASK \
+ 0x404c8UL
+#define SRC_REG_SRC_PRTY_STS_CLR \
+ 0x404c0UL
+#define TCM_REG_PRS_IFEN \
+ 0x50020UL
+#define TCM_REG_TCM_INT_MASK \
+ 0x501dcUL
+#define TCM_REG_TCM_PRTY_MASK \
+ 0x501ecUL
+#define TCM_REG_TCM_PRTY_STS_CLR \
+ 0x501e4UL
+#define TM_REG_EN_LINEAR0_TIMER \
+ 0x164014UL
+#define TM_REG_LIN0_MAX_ACTIVE_CID \
+ 0x164048UL
+#define TM_REG_LIN0_NUM_SCANS \
+ 0x1640a0UL
+#define TM_REG_LIN0_SCAN_ON \
+ 0x1640d0UL
+#define TM_REG_LIN0_SCAN_TIME \
+ 0x16403cUL
+#define TM_REG_LIN0_VNIC_UC \
+ 0x164128UL
+#define TM_REG_TM_INT_MASK \
+ 0x1640fcUL
+#define TM_REG_TM_PRTY_MASK \
+ 0x16410cUL
+#define TM_REG_TM_PRTY_STS_CLR \
+ 0x164104UL
+#define TSDM_REG_ENABLE_IN1 \
+ 0x42238UL
+#define TSDM_REG_TSDM_INT_MASK_0 \
+ 0x4229cUL
+#define TSDM_REG_TSDM_INT_MASK_1 \
+ 0x422acUL
+#define TSDM_REG_TSDM_PRTY_MASK \
+ 0x422bcUL
+#define TSDM_REG_TSDM_PRTY_STS_CLR \
+ 0x422b4UL
+#define TSEM_REG_FAST_MEMORY \
+ 0x1a0000UL
+#define TSEM_REG_INT_TABLE \
+ 0x180400UL
+#define TSEM_REG_PASSIVE_BUFFER \
+ 0x181000UL
+#define TSEM_REG_PRAM \
+ 0x1c0000UL
+#define TSEM_REG_TSEM_INT_MASK_0 \
+ 0x180100UL
+#define TSEM_REG_TSEM_INT_MASK_1 \
+ 0x180110UL
+#define TSEM_REG_TSEM_PRTY_MASK_0 \
+ 0x180120UL
+#define TSEM_REG_TSEM_PRTY_MASK_1 \
+ 0x180130UL
+#define TSEM_REG_TSEM_PRTY_STS_CLR_0 \
+ 0x180118UL
+#define TSEM_REG_TSEM_PRTY_STS_CLR_1 \
+ 0x180128UL
+#define TSEM_REG_VFPF_ERR_NUM \
+ 0x180380UL
+#define UCM_REG_UCM_INT_MASK \
+ 0xe01d4UL
+#define UCM_REG_UCM_PRTY_MASK \
+ 0xe01e4UL
+#define UCM_REG_UCM_PRTY_STS_CLR \
+ 0xe01dcUL
+#define UMAC_COMMAND_CONFIG_REG_HD_ENA \
+ (0x1<<10)
+#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE \
+ (0x1<<28)
+#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA \
+ (0x1<<15)
+#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK \
+ (0x1<<24)
+#define UMAC_COMMAND_CONFIG_REG_PAD_EN \
+ (0x1<<5)
+#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE \
+ (0x1<<8)
+#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN \
+ (0x1<<4)
+#define UMAC_COMMAND_CONFIG_REG_RX_ENA \
+ (0x1<<1)
+#define UMAC_COMMAND_CONFIG_REG_SW_RESET \
+ (0x1<<13)
+#define UMAC_COMMAND_CONFIG_REG_TX_ENA \
+ (0x1<<0)
+#define UMAC_REG_COMMAND_CONFIG \
+ 0x8UL
+#define UMAC_REG_EEE_WAKE_TIMER \
+ 0x6cUL
+#define UMAC_REG_MAC_ADDR0 \
+ 0xcUL
+#define UMAC_REG_MAC_ADDR1 \
+ 0x10UL
+#define UMAC_REG_MAXFR \
+ 0x14UL
+#define UMAC_REG_UMAC_EEE_CTRL \
+ 0x64UL
+#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN \
+ (0x1<<3)
+#define USDM_REG_USDM_INT_MASK_0 \
+ 0xc42a0UL
+#define USDM_REG_USDM_INT_MASK_1 \
+ 0xc42b0UL
+#define USDM_REG_USDM_PRTY_MASK \
+ 0xc42c0UL
+#define USDM_REG_USDM_PRTY_STS_CLR \
+ 0xc42b8UL
+#define USEM_REG_FAST_MEMORY \
+ 0x320000UL
+#define USEM_REG_INT_TABLE \
+ 0x300400UL
+#define USEM_REG_PASSIVE_BUFFER \
+ 0x302000UL
+#define USEM_REG_PRAM \
+ 0x340000UL
+#define USEM_REG_USEM_INT_MASK_0 \
+ 0x300110UL
+#define USEM_REG_USEM_INT_MASK_1 \
+ 0x300120UL
+#define USEM_REG_USEM_PRTY_MASK_0 \
+ 0x300130UL
+#define USEM_REG_USEM_PRTY_MASK_1 \
+ 0x300140UL
+#define USEM_REG_USEM_PRTY_STS_CLR_0 \
+ 0x300128UL
+#define USEM_REG_USEM_PRTY_STS_CLR_1 \
+ 0x300138UL
+#define USEM_REG_VFPF_ERR_NUM \
+ 0x300380UL
+#define VFC_MEMORIES_RST_REG_CAM_RST \
+ (0x1<<0)
+#define VFC_MEMORIES_RST_REG_RAM_RST \
+ (0x1<<1)
+#define VFC_REG_MEMORIES_RST \
+ 0x1943cUL
+#define XCM_REG_XCM_INT_MASK \
+ 0x202b4UL
+#define XCM_REG_XCM_PRTY_MASK \
+ 0x202c4UL
+#define XCM_REG_XCM_PRTY_STS_CLR \
+ 0x202bcUL
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS \
+ (0x1<<0)
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS \
+ (0x1<<1)
+#define XMAC_CTRL_REG_LINE_LOCAL_LPBK \
+ (0x1<<2)
+#define XMAC_CTRL_REG_RX_EN \
+ (0x1<<1)
+#define XMAC_CTRL_REG_SOFT_RESET \
+ (0x1<<6)
+#define XMAC_CTRL_REG_TX_EN \
+ (0x1<<0)
+#define XMAC_CTRL_REG_XLGMII_ALIGN_ENB \
+ (0x1<<7)
+#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN \
+ (0x1<<18)
+#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN \
+ (0x1<<17)
+#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON \
+ (0x1<<1)
+#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN \
+ (0x1<<0)
+#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN \
+ (0x1<<3)
+#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN \
+ (0x1<<4)
+#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN \
+ (0x1<<5)
+#define XMAC_REG_CLEAR_RX_LSS_STATUS \
+ 0x60UL
+#define XMAC_REG_CTRL \
+ 0UL
+#define XMAC_REG_CTRL_SA_HI \
+ 0x2cUL
+#define XMAC_REG_CTRL_SA_LO \
+ 0x28UL
+#define XMAC_REG_EEE_CTRL \
+ 0xd8UL
+#define XMAC_REG_EEE_TIMERS_HI \
+ 0xe4UL
+#define XMAC_REG_PAUSE_CTRL \
+ 0x68UL
+#define XMAC_REG_PFC_CTRL \
+ 0x70UL
+#define XMAC_REG_PFC_CTRL_HI \
+ 0x74UL
+#define XMAC_REG_RX_LSS_CTRL \
+ 0x50UL
+#define XMAC_REG_RX_LSS_STATUS \
+ 0x58UL
+#define XMAC_REG_RX_MAX_SIZE \
+ 0x40UL
+#define XMAC_REG_TX_CTRL \
+ 0x20UL
+#define XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE \
+ (0x1<<0)
+#define XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE \
+ (0x1<<1)
+#define XSDM_REG_OPERATION_GEN \
+ 0x1664c4UL
+#define XSDM_REG_XSDM_INT_MASK_0 \
+ 0x16629cUL
+#define XSDM_REG_XSDM_INT_MASK_1 \
+ 0x1662acUL
+#define XSDM_REG_XSDM_PRTY_MASK \
+ 0x1662bcUL
+#define XSDM_REG_XSDM_PRTY_STS_CLR \
+ 0x1662b4UL
+#define XSEM_REG_FAST_MEMORY \
+ 0x2a0000UL
+#define XSEM_REG_INT_TABLE \
+ 0x280400UL
+#define XSEM_REG_PASSIVE_BUFFER \
+ 0x282000UL
+#define XSEM_REG_PRAM \
+ 0x2c0000UL
+#define XSEM_REG_VFPF_ERR_NUM \
+ 0x280380UL
+#define XSEM_REG_XSEM_INT_MASK_0 \
+ 0x280110UL
+#define XSEM_REG_XSEM_INT_MASK_1 \
+ 0x280120UL
+#define XSEM_REG_XSEM_PRTY_MASK_0 \
+ 0x280130UL
+#define XSEM_REG_XSEM_PRTY_MASK_1 \
+ 0x280140UL
+#define XSEM_REG_XSEM_PRTY_STS_CLR_0 \
+ 0x280128UL
+#define XSEM_REG_XSEM_PRTY_STS_CLR_1 \
+ 0x280138UL
+#define MCPR_ACCESS_LOCK_LOCK (1L<<31)
+#define MCPR_IMC_COMMAND_ENABLE (1L<<31)
+#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16
+#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28
+#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8
+#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
+#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
+#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
+#define MCPR_NVM_CFG4_FLASH_SIZE (0x7L<<0)
+#define MCPR_NVM_COMMAND_DOIT (1L<<4)
+#define MCPR_NVM_COMMAND_DONE (1L<<3)
+#define MCPR_NVM_COMMAND_FIRST (1L<<7)
+#define MCPR_NVM_COMMAND_LAST (1L<<8)
+#define MCPR_NVM_COMMAND_WR (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_ARB1 (1L<<9)
+#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1)
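+/*
+ * Illustrative note (editorial assumption, not part of the original header):
+ * drivers that use these bits typically build an NVRAM access by OR-ing
+ * MCPR_NVM_COMMAND_DOIT with MCPR_NVM_COMMAND_FIRST/_LAST for the first/last
+ * dword of a burst (plus MCPR_NVM_COMMAND_WR for writes), then poll for
+ * MCPR_NVM_COMMAND_DONE.
+ */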
+
+
+#define BIGMAC_REGISTER_BMAC_CONTROL (0x00<<3)
+#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
+#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3)
+#define BIGMAC_REGISTER_RX_CONTROL (0x21<<3)
+#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3)
+#define BIGMAC_REGISTER_RX_LSS_STATUS (0x43<<3)
+#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3)
+#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3)
+#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3)
+#define BIGMAC_REGISTER_TX_CONTROL (0x07<<3)
+#define BIGMAC_REGISTER_TX_MAX_SIZE (0x09<<3)
+#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD (0x0A<<3)
+#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3)
+#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3)
+#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
+#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3)
+#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3)
+#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3)
+#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3)
+#define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E<<3)
+#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3)
+#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3)
+#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3)
+#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3)
+#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3)
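+/*
+ * Note (editorial, not from the original header): the BIGMAC/BIGMAC2
+ * registers are addressed in 8-byte strides, so each define is a register
+ * index shifted left by 3 to form a byte offset, e.g.
+ * BIGMAC_REGISTER_RX_CONTROL = (0x21<<3) = byte offset 0x108.
+ */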
+
+
+#define EMAC_LED_1000MB_OVERRIDE (1L<<1)
+#define EMAC_LED_100MB_OVERRIDE (1L<<2)
+#define EMAC_LED_10MB_OVERRIDE (1L<<3)
+#define EMAC_LED_OVERRIDE (1L<<0)
+#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26)
+#define EMAC_MDIO_COMM_DATA (0xffffL<<0)
+#define EMAC_MDIO_COMM_START_BUSY (1L<<29)
+#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
+#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31)
+#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16)
+#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16
+#define EMAC_MDIO_STATUS_10MB (1L<<1)
+#define EMAC_MODE_25G_MODE (1L<<5)
+#define EMAC_MODE_HALF_DUPLEX (1L<<1)
+#define EMAC_MODE_PORT_GMII (2L<<2)
+#define EMAC_MODE_PORT_MII (1L<<2)
+#define EMAC_MODE_PORT_MII_10M (3L<<2)
+#define EMAC_MODE_RESET (1L<<0)
+#define EMAC_REG_EMAC_LED 0xc
+#define EMAC_REG_EMAC_MAC_MATCH 0x10
+#define EMAC_REG_EMAC_MDIO_COMM 0xac
+#define EMAC_REG_EMAC_MDIO_MODE 0xb4
+#define EMAC_REG_EMAC_MDIO_STATUS 0xb0
+#define EMAC_REG_EMAC_MODE 0x0
+#define EMAC_REG_EMAC_RX_MODE 0xc8
+#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c
+#define EMAC_REG_EMAC_RX_STAT_AC 0x180
+#define EMAC_REG_EMAC_RX_STAT_AC_28 0x1f4
+#define EMAC_REG_EMAC_RX_STAT_AC_COUNT 23
+#define EMAC_REG_EMAC_TX_MODE 0xbc
+#define EMAC_REG_EMAC_TX_STAT_AC 0x280
+#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22
+#define EMAC_REG_RX_PFC_MODE 0x320
+#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2)
+#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1)
+#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0)
+#define EMAC_REG_RX_PFC_PARAM 0x324
+#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0
+#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334
+#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0)
+#define EMAC_RX_MODE_FLOW_EN (1L<<2)
+#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3)
+#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
+#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
+#define EMAC_RX_MODE_RESET (1L<<0)
+#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
+#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
+#define EMAC_TX_MODE_FLOW_EN (1L<<4)
+#define EMAC_TX_MODE_RESET (1L<<0)
+
+
+#define MISC_REGISTERS_GPIO_0 0
+#define MISC_REGISTERS_GPIO_1 1
+#define MISC_REGISTERS_GPIO_2 2
+#define MISC_REGISTERS_GPIO_3 3
+#define MISC_REGISTERS_GPIO_CLR_POS 16
+#define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24)
+#define MISC_REGISTERS_GPIO_FLOAT_POS 24
+#define MISC_REGISTERS_GPIO_HIGH 1
+#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2
+#define MISC_REGISTERS_GPIO_INT_CLR_POS 24
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR 0
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET 1
+#define MISC_REGISTERS_GPIO_INT_SET_POS 16
+#define MISC_REGISTERS_GPIO_LOW 0
+#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1
+#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0
+#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
+#define MISC_REGISTERS_GPIO_SET_POS 8
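+/*
+ * Illustrative sketch (an assumption about usage, not from the original
+ * header): the *_POS values appear to be bit positions of per-GPIO fields
+ * inside MISC_REG_GPIO; e.g. driving GPIO 0 high would typically be
+ * requested by writing (1 << MISC_REGISTERS_GPIO_0) <<
+ * MISC_REGISTERS_GPIO_SET_POS into that register.
+ */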
+#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_BRB1 (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_1_RST_DORQ \
+ (0x1<<19)
+#define MISC_REGISTERS_RESET_REG_1_RST_HC \
+ (0x1<<29)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXP \
+ (0x1<<26)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXPV \
+ (0x1<<27)
+#define MISC_REGISTERS_RESET_REG_1_RST_QM \
+ (0x1<<17)
+#define MISC_REGISTERS_RESET_REG_1_SET 0x584
+#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
+#define MISC_REGISTERS_RESET_REG_2_MSTAT0 \
+ (0x1<<24)
+#define MISC_REGISTERS_RESET_REG_2_MSTAT1 \
+ (0x1<<25)
+#define MISC_REGISTERS_RESET_REG_2_PGLC \
+ (0x1<<19)
+#define MISC_REGISTERS_RESET_REG_2_RST_ATC \
+ (0x1<<17)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1 (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0 (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE \
+ (0x1<<14)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1 (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE \
+ (0x1<<15)
+#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE \
+ (0x1<<11)
+#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO \
+ (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR \
+ (0x1<<16)
+#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
+#define MISC_REGISTERS_RESET_REG_2_SET 0x594
+#define MISC_REGISTERS_RESET_REG_2_UMAC0 \
+ (0x1<<20)
+#define MISC_REGISTERS_RESET_REG_2_UMAC1 \
+ (0x1<<21)
+#define MISC_REGISTERS_RESET_REG_2_XMAC \
+ (0x1<<22)
+#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT \
+ (0x1<<23)
+#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4
+#define MISC_SPIO_CLR_POS 16
+#define MISC_SPIO_FLOAT (0xffL<<24)
+#define MISC_SPIO_FLOAT_POS 24
+#define MISC_SPIO_INPUT_HI_Z 2
+#define MISC_SPIO_INT_OLD_SET_POS 16
+#define MISC_SPIO_OUTPUT_HIGH 1
+#define MISC_SPIO_OUTPUT_LOW 0
+#define MISC_SPIO_SET_POS 8
+#define MISC_SPIO_SPIO4 0x10
+#define MISC_SPIO_SPIO5 0x20
+#define HW_LOCK_MAX_RESOURCE_VALUE 31
+#define HW_LOCK_RESOURCE_DRV_FLAGS 10
+#define HW_LOCK_RESOURCE_GPIO 1
+#define HW_LOCK_RESOURCE_NVRAM 12
+#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
+#define HW_LOCK_RESOURCE_RECOVERY_REG 11
+#define HW_LOCK_RESOURCE_RESET 5
+#define HW_LOCK_RESOURCE_SPIO 2
+
+
+#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT (0x1<<19)
+#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1<<1)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1<<10)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1<<13)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1UL<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1UL<<31)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1<<23)
+#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1<<21)
+#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1<<16)
+#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1<<10)
+#define HW_PRTY_ASSERT_SET_0 \
+(AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
+#define HW_PRTY_ASSERT_SET_1 \
+(AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
+#define HW_PRTY_ASSERT_SET_2 \
+(AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
+#define HW_PRTY_ASSERT_SET_3 \
+(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+#define HW_PRTY_ASSERT_SET_4 \
+(AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
+#define HW_INTERRUT_ASSERT_SET_0 \
+(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
+#define HW_INTERRUT_ASSERT_SET_1 \
+(AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
+#define HW_INTERRUT_ASSERT_SET_2 \
+(AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
+
+
+#define RESERVED_GENERAL_ATTENTION_BIT_0 0
+
+#define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0
+#define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000
+
+#define RESERVED_GENERAL_ATTENTION_BIT_6 6
+#define RESERVED_GENERAL_ATTENTION_BIT_7 7
+#define RESERVED_GENERAL_ATTENTION_BIT_8 8
+#define RESERVED_GENERAL_ATTENTION_BIT_9 9
+#define RESERVED_GENERAL_ATTENTION_BIT_10 10
+#define RESERVED_GENERAL_ATTENTION_BIT_11 11
+#define RESERVED_GENERAL_ATTENTION_BIT_12 12
+#define RESERVED_GENERAL_ATTENTION_BIT_13 13
+#define RESERVED_GENERAL_ATTENTION_BIT_14 14
+#define RESERVED_GENERAL_ATTENTION_BIT_15 15
+#define RESERVED_GENERAL_ATTENTION_BIT_16 16
+#define RESERVED_GENERAL_ATTENTION_BIT_17 17
+#define RESERVED_GENERAL_ATTENTION_BIT_18 18
+#define RESERVED_GENERAL_ATTENTION_BIT_19 19
+#define RESERVED_GENERAL_ATTENTION_BIT_20 20
+#define RESERVED_GENERAL_ATTENTION_BIT_21 21
+
+/* storm asserts attention bits */
+#define TSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_7
+#define USTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_8
+#define CSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_9
+#define XSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_10
+
+/* mcp error attention bit */
+#define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11
+
+/* E1H NIG status sync attention mapped to groups 4-7 */
+#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12
+#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13
+#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14
+#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15
+#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16
+#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17
+#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18
+#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19
+
+/* Used for error recovery: changing this will require more changes in
+ * code that assumes error recovery uses general attn bit 20!
+ */
+#define ERROR_RECOVERY_ATTENTION_BIT \
+ RESERVED_GENERAL_ATTENTION_BIT_20
+#define RESERVED_ATTENTION_BIT \
+ RESERVED_GENERAL_ATTENTION_BIT_21
+
+#define LATCHED_ATTN_RBCR 23
+#define LATCHED_ATTN_RBCT 24
+#define LATCHED_ATTN_RBCN 25
+#define LATCHED_ATTN_RBCU 26
+#define LATCHED_ATTN_RBCP 27
+#define LATCHED_ATTN_TIMEOUT_GRC 28
+#define LATCHED_ATTN_RSVD_GRC 29
+#define LATCHED_ATTN_ROM_PARITY_MCP 30
+#define LATCHED_ATTN_UM_RX_PARITY_MCP 31
+#define LATCHED_ATTN_UM_TX_PARITY_MCP 32
+#define LATCHED_ATTN_SCPAD_PARITY_MCP 33
+
+#define GENERAL_ATTEN_WORD(atten_name) ((94 + atten_name) / 32)
+#define GENERAL_ATTEN_OFFSET(atten_name) (1UL << ((94 + atten_name) % 32))
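+/*
+ * Worked example (editorial): for LINK_SYNC_ATTENTION_BIT_FUNC_0 (= 12),
+ * GENERAL_ATTEN_WORD() yields (94 + 12) / 32 = 3 and
+ * GENERAL_ATTEN_OFFSET() yields 1UL << ((94 + 12) % 32) = 1UL << 10,
+ * i.e. the attention lives in word 3, bit 10.
+ */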
+
+
+/*
+ * This file defines the GRC base address for every block.
+ * This file is included by chipsim, asm microcode and cpp microcode.
+ * These values are used in Design.xml in the regBase attribute.
+ * Use the base with the generated offsets of specific registers
+ * (a worked example follows the base list below).
+ */
+
+#define GRCBASE_PXPCS 0x000000
+#define GRCBASE_PCICONFIG 0x002000
+#define GRCBASE_PCIREG 0x002400
+#define GRCBASE_EMAC0 0x008000
+#define GRCBASE_EMAC1 0x008400
+#define GRCBASE_DBU 0x008800
+#define GRCBASE_PGLUE_B 0x009000
+#define GRCBASE_MISC 0x00A000
+#define GRCBASE_DBG 0x00C000
+#define GRCBASE_NIG 0x010000
+#define GRCBASE_XCM 0x020000
+#define GRCBASE_PRS 0x040000
+#define GRCBASE_SRCH 0x040400
+#define GRCBASE_TSDM 0x042000
+#define GRCBASE_TCM 0x050000
+#define GRCBASE_BRB1 0x060000
+#define GRCBASE_MCP 0x080000
+#define GRCBASE_UPB 0x0C1000
+#define GRCBASE_CSDM 0x0C2000
+#define GRCBASE_USDM 0x0C4000
+#define GRCBASE_CCM 0x0D0000
+#define GRCBASE_UCM 0x0E0000
+#define GRCBASE_CDU 0x101000
+#define GRCBASE_DMAE 0x102000
+#define GRCBASE_PXP 0x103000
+#define GRCBASE_CFC 0x104000
+#define GRCBASE_HC 0x108000
+#define GRCBASE_ATC 0x110000
+#define GRCBASE_PXP2 0x120000
+#define GRCBASE_IGU 0x130000
+#define GRCBASE_PBF 0x140000
+#define GRCBASE_UMAC0 0x160000
+#define GRCBASE_UMAC1 0x160400
+#define GRCBASE_XPB 0x161000
+#define GRCBASE_MSTAT0 0x162000
+#define GRCBASE_MSTAT1 0x162800
+#define GRCBASE_XMAC0 0x163000
+#define GRCBASE_XMAC1 0x163800
+#define GRCBASE_TIMERS 0x164000
+#define GRCBASE_XSDM 0x166000
+#define GRCBASE_QM 0x168000
+#define GRCBASE_QM_4PORT 0x168000
+#define GRCBASE_DQ 0x170000
+#define GRCBASE_TSEM 0x180000
+#define GRCBASE_CSEM 0x200000
+#define GRCBASE_XSEM 0x280000
+#define GRCBASE_XSEM_4PORT 0x280000
+#define GRCBASE_USEM 0x300000
+#define GRCBASE_MCP_A 0x380000
+#define GRCBASE_MISC_AEU GRCBASE_MISC
+#define GRCBASE_Tstorm GRCBASE_TSEM
+#define GRCBASE_Cstorm GRCBASE_CSEM
+#define GRCBASE_Xstorm GRCBASE_XSEM
+#define GRCBASE_Ustorm GRCBASE_USEM
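+
+/*
+ * Usage sketch (editor's note): a block register's absolute GRC address
+ * is its GRCBASE_* value plus the generated per-register offset, e.g.,
+ * assuming the driver's REG_RD()-style 32-bit GRC read accessor and an
+ * illustrative offset name:
+ *
+ *   uint32_t val = REG_RD(sc, GRCBASE_NIG + nig_register_offset);
+ */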
+
+
+/* offset of configuration space in the pci core register */
+#define PCICFG_OFFSET 0x2000
+#define PCICFG_VENDOR_ID_OFFSET 0x00
+#define PCICFG_DEVICE_ID_OFFSET 0x02
+#define PCICFG_COMMAND_OFFSET 0x04
+#define PCICFG_COMMAND_IO_SPACE (1<<0)
+#define PCICFG_COMMAND_MEM_SPACE (1<<1)
+#define PCICFG_COMMAND_BUS_MASTER (1<<2)
+#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
+#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
+#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
+#define PCICFG_COMMAND_PERR_ENA (1<<6)
+#define PCICFG_COMMAND_STEPPING (1<<7)
+#define PCICFG_COMMAND_SERR_ENA (1<<8)
+#define PCICFG_COMMAND_FAST_B2B (1<<9)
+#define PCICFG_COMMAND_INT_DISABLE (1<<10)
+#define PCICFG_COMMAND_RESERVED (0x1f<<11)
+#define PCICFG_STATUS_OFFSET 0x06
+#define PCICFG_REVISION_ID_OFFSET 0x08
+#define PCICFG_REVESION_ID_MASK 0xff
+#define PCICFG_REVESION_ID_ERROR_VAL 0xff
+#define PCICFG_CACHE_LINE_SIZE 0x0c
+#define PCICFG_LATENCY_TIMER 0x0d
+#define PCICFG_HEADER_TYPE 0x0e
+#define PCICFG_HEADER_TYPE_NORMAL 0
+#define PCICFG_HEADER_TYPE_BRIDGE 1
+#define PCICFG_HEADER_TYPE_CARDBUS 2
+#define PCICFG_BAR_1_LOW 0x10
+#define PCICFG_BAR_1_HIGH 0x14
+#define PCICFG_BAR_2_LOW 0x18
+#define PCICFG_BAR_2_HIGH 0x1c
+#define PCICFG_BAR_3_LOW 0x20
+#define PCICFG_BAR_3_HIGH 0x24
+#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
+#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
+#define PCICFG_INT_LINE 0x3c
+#define PCICFG_INT_PIN 0x3d
+#define PCICFG_PM_CAPABILITY 0x48
+#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16)
+#define PCICFG_PM_CAPABILITY_CLOCK (1<<19)
+#define PCICFG_PM_CAPABILITY_RESERVED (1<<20)
+#define PCICFG_PM_CAPABILITY_DSI (1<<21)
+#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22)
+#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25)
+#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26)
+#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27)
+#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28)
+#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31)
+#define PCICFG_PM_CSR_OFFSET 0x4c
+#define PCICFG_PM_CSR_STATE (0x3<<0)
+#define PCICFG_PM_CSR_PME_ENABLE (1<<8)
+#define PCICFG_PM_CSR_PME_STATUS (1<<15)
+#define PCICFG_VPD_FLAG_ADDR_OFFSET 0x50
+#define PCICFG_VPD_DATA_OFFSET 0x54
+#define PCICFG_MSI_CAP_ID_OFFSET 0x58
+#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16)
+#define PCICFG_MSI_CONTROL_MCAP (0x7<<17)
+#define PCICFG_MSI_CONTROL_MENA (0x7<<20)
+#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23)
+#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24)
+#define PCICFG_MSI_ADDR_LOW_OFFSET 0x5c
+#define PCICFG_MSI_ADDR_HIGH_OFFSET 0x60
+#define PCICFG_MSI_DATA_OFFSET 0x64
+#define PCICFG_GRC_ADDRESS 0x78
+#define PCICFG_GRC_DATA 0x80
+#define PCICFG_ME_REGISTER 0x98
+#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0
+#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16)
+#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27)
+#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30)
+#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31)
+
+#define PCICFG_DEVICE_CONTROL 0xb4
+#define PCICFG_DEVICE_CONTROL_NP_TRANSACTION_PEND (1<<21)
+#define PCICFG_DEVICE_STATUS 0xb6
+#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0)
+#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1)
+#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2)
+#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3)
+#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4)
+#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5)
+#define PCICFG_LINK_CONTROL 0xbc
+
+
+/* config_2 offset */
+#define GRC_CONFIG_2_SIZE_REG 0x408
+#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
+#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
+#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
+#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
+#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
+#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8)
+#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
+#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
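+
+/*
+ * Decoding note (editor's sketch, derived from the encodings above): for
+ * a non-zero encoding N in PCI_CONFIG_2_BAR1_SIZE, the BAR size is
+ * 64K << (N - 1); e.g., PCI_CONFIG_2_BAR1_SIZE_1M (5) -> 64K << 4 = 1M,
+ * and PCI_CONFIG_2_BAR1_SIZE_1G (15) -> 64K << 14 = 1G.
+ */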
+
+/* config_3 offset */
+#define GRC_CONFIG_3_SIZE_REG 0x40c
+#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
+#define PCI_CONFIG_3_FORCE_PME (1L<<24)
+#define PCI_CONFIG_3_PME_STATUS (1L<<25)
+#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
+#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
+#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
+#define PCI_CONFIG_3_PCI_POWER (1L<<31)
+
+#define GRC_REG_DEVICE_CONTROL 0x4d8
+#define PCIE_SRIOV_DISABLE_IN_PROGRESS \
+ (1 << 29) /* When VF Enable is cleared (after it was previously set),
+ this register will read a value of 1, indicating that all the
+ VFs that belong to this PF should be flushed.
+ Software should clear this bit within 1 second of VF Enable
+ being set by writing a 1 to it, so that VFs are visible to the system
+ again. WC */
+#define PCIE_FLR_IN_PROGRESS \
+ (1 << 27) /* When FLR is initiated, this register will read a value of
+ 1, indicating that the function is in FLR state. The function can be
+ brought out of FLR state either by writing 1 to this register (at
+ least 50 ms after FLR was initiated), or it can be cleared
+ automatically after 55 ms if the auto_clear bit in private reg space
+ is set. This bit also exists in VF register space. WC */
+
+#define GRC_BAR2_CONFIG 0x4e0
+#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
+#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
+
+#define GRC_BAR3_CONFIG 0x4f4
+#define PCI_CONFIG_2_BAR3_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR3_SIZE_1G (15L<<0)
+#define PCI_CONFIG_2_BAR3_64ENA (1L<<4)
+
+#define PCI_PM_DATA_A 0x410
+#define PCI_PM_DATA_B 0x414
+#define PCI_ID_VAL1 0x434
+#define PCI_ID_VAL2 0x438
+#define PCI_ID_VAL3 0x43c
+#define PCI_ID_VAL3_REVISION_ID_ERROR (0xffL<<24)
+
+
+#define GRC_CONFIG_REG_VF_BAR_REG_1 0x608
+#define GRC_CONFIG_REG_VF_BAR_REG_BAR0_SIZE 0xf
+
+#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C
+#define GRC_CR_VF_MSIX_CTRL_VF_MSIX_TBL_SIZE_MASK \
+ 0x3F /* This field resides in the VF only and does not exist in the PF.
+ It controls the read value of the MSIX_CONTROL[10:0] register in the
+ VF configuration space. A value of "00000000011" indicates a table
+ size of 4. The value is controlled by the IOV_MSIX_TBL_SIZ define in
+ version.v. */
+
+#define GRC_CONFIG_REG_PF_INIT_VF 0x624
+#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK \
+ 0xf /* The first VF_NUM for the PF is encoded in this register.
+ The number of VFs assigned to a PF is assumed to be a multiple of 8.
+ Software should program these bits based on the total number of VFs
+ programmed for each PF.
+ Since registers 0x000-0x7ff are split across functions, each PF will
+ have the same location for the same 4 bits. */
+
+#define PXPCS_TL_CONTROL_5 0x814
+#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
+#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/
+#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/
+#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/
+#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/
+#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/
+#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/
+#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/
+
+
+#define PXPCS_TL_FUNC345_STAT 0x854
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 \
+ (1 << 28) /* Unsupported Request Error Status in function 4; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4 \
+ (1 << 27) /* ECRC Error TLP Status in function 4; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4 \
+ (1 << 26) /* Malformed TLP Status in function 4; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4 \
+ (1 << 25) /* Receiver Overflow Status in function 4; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4 \
+ (1 << 24) /* Unexpected Completion Status in function 4; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4 \
+ (1 << 23) /* Receive UR Status in function 4; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4 \
+ (1 << 22) /* Completer Timeout Status in function 4; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4 \
+ (1 << 21) /* Flow Control Protocol Error Status in function 4; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4 \
+ (1 << 20) /* Poisoned Error Status in function 4; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 \
+ (1 << 18) /* Unsupported Request Error Status in function 3; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3 \
+ (1 << 17) /* ECRC Error TLP Status in function 3; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3 \
+ (1 << 16) /* Malformed TLP Status in function 3; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3 \
+ (1 << 15) /* Receiver Overflow Status in function 3; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3 \
+ (1 << 14) /* Unexpected Completion Status in function 3; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3 \
+ (1 << 13) /* Receive UR Status in function 3; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3 \
+ (1 << 12) /* Completer Timeout Status in function 3; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3 \
+ (1 << 11) /* Flow Control Protocol Error Status in function 3; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3 \
+ (1 << 10) /* Poisoned Error Status in function 3; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2 \
+ (1 << 8) /* Unsupported Request Error Status in function 2; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2 \
+ (1 << 7) /* ECRC Error TLP Status in function 2; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2 \
+ (1 << 6) /* Malformed TLP Status in function 2; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2 \
+ (1 << 5) /* Receiver Overflow Status in function 2; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2 \
+ (1 << 4) /* Unexpected Completion Status in function 2; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2 \
+ (1 << 3) /* Receive UR Status in function 2; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2 \
+ (1 << 2) /* Completer Timeout Status in function 2; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2 \
+ (1 << 1) /* Flow Control Protocol Error Status in function 2; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2 \
+ (1 << 0) /* Poisoned Error Status in function 2; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+
+
+#define PXPCS_TL_FUNC678_STAT 0x85C
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 \
+ (1 << 28) /* Unsupported Request Error Status in function 7; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7 \
+ (1 << 27) /* ECRC Error TLP Status in function 7; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7 \
+ (1 << 26) /* Malformed TLP Status in function 7; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7 \
+ (1 << 25) /* Receiver Overflow Status in function 7; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7 \
+ (1 << 24) /* Unexpected Completion Status in function 7; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7 \
+ (1 << 23) /* Receive UR Status in function 7; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7 \
+ (1 << 22) /* Completer Timeout Status in function 7; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7 \
+ (1 << 21) /* Flow Control Protocol Error Status in function 7; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7 \
+ (1 << 20) /* Poisoned Error Status in function 7; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 \
+ (1 << 18) /* Unsupported Request Error Status in function 6; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6 \
+ (1 << 17) /* ECRC Error TLP Status in function 6; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6 \
+ (1 << 16) /* Malformed TLP Status in function 6; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6 \
+ (1 << 15) /* Receiver Overflow Status in function 6; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6 \
+ (1 << 14) /* Unexpected Completion Status in function 6; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6 \
+ (1 << 13) /* Receive UR Status in function 6; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6 \
+ (1 << 12) /* Completer Timeout Status in function 6; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6 \
+ (1 << 11) /* Flow Control Protocol Error Status in function 6; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6 \
+ (1 << 10) /* Poisoned Error Status in function 6; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5 \
+ (1 << 8) /* Unsupported Request Error Status in function 5; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5 \
+ (1 << 7) /* ECRC Error TLP Status in function 5; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5 \
+ (1 << 6) /* Malformed TLP Status in function 5; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5 \
+ (1 << 5) /* Receiver Overflow Status in function 5; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5 \
+ (1 << 4) /* Unexpected Completion Status in function 5; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5 \
+ (1 << 3) /* Receive UR Status in function 5; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5 \
+ (1 << 2) /* Completer Timeout Status in function 5; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5 \
+ (1 << 1) /* Flow Control Protocol Error Status in function 5; if set,
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5 \
+ (1 << 0) /* Poisoned Error Status in function 5; if set, generate
+ pcie_err_attn output when this error is seen. WC */
+
+
+#define BAR_USTRORM_INTMEM 0x400000
+#define BAR_CSTRORM_INTMEM 0x410000
+#define BAR_XSTRORM_INTMEM 0x420000
+#define BAR_TSTRORM_INTMEM 0x430000
+
+/* for accessing the IGU in case of status block ACK */
+#define BAR_IGU_INTMEM 0x440000
+
+#define BAR_DOORBELL_OFFSET 0x800000
+
+#define BAR_ME_REGISTER 0x450000
+#define ME_REG_PF_NUM_SHIFT 0
+#define ME_REG_PF_NUM \
+ (7L<<ME_REG_PF_NUM_SHIFT) /* Relative PF Num */
+#define ME_REG_VF_VALID (1<<8)
+#define ME_REG_VF_NUM_SHIFT 9
+#define ME_REG_VF_NUM_MASK (0x3f<<ME_REG_VF_NUM_SHIFT)
+#define VF_ID(x) ((x & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT)
+#define ME_REG_VF_ERR (0x1<<3)
+#define ME_REG_ABS_PF_NUM_SHIFT 16
+#define ME_REG_ABS_PF_NUM \
+ (7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
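+
+/*
+ * Usage sketch (editor's note; `me` below is an illustrative local
+ * holding the 32-bit value read at BAR_ME_REGISTER): a VF can recover
+ * its own number with the VF_ID() helper defined above:
+ *
+ *   if (me & ME_REG_VF_VALID)
+ *           vf_num = VF_ID(me);   // (me & ME_REG_VF_NUM_MASK) >> 9
+ */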
+
+
+#define PXP_VF_ADRR_NUM_QUEUES 136
+#define PXP_ADDR_QUEUE_SIZE 32
+#define PXP_ADDR_REG_SIZE 512
+
+
+#define PXP_VF_ADDR_IGU_START 0
+#define PXP_VF_ADDR_IGU_SIZE (0x3000)
+#define PXP_VF_ADDR_IGU_END \
+ ((PXP_VF_ADDR_IGU_START) + (PXP_VF_ADDR_IGU_SIZE) - 1)
+
+#define PXP_VF_ADDR_USDM_QUEUES_START 0x3000
+#define PXP_VF_ADDR_USDM_QUEUES_SIZE \
+ (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE)
+#define PXP_VF_ADDR_USDM_QUEUES_END \
+ ((PXP_VF_ADDR_USDM_QUEUES_START) + (PXP_VF_ADDR_USDM_QUEUES_SIZE) - 1)
+
+#define PXP_VF_ADDR_CSDM_QUEUES_START 0x4100
+#define PXP_VF_ADDR_CSDM_QUEUES_SIZE \
+ (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE)
+#define PXP_VF_ADDR_CSDM_QUEUES_END \
+ ((PXP_VF_ADDR_CSDM_QUEUES_START) + (PXP_VF_ADDR_CSDM_QUEUES_SIZE) - 1)
+
+#define PXP_VF_ADDR_XSDM_QUEUES_START 0x5200
+#define PXP_VF_ADDR_XSDM_QUEUES_SIZE \
+ (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE)
+#define PXP_VF_ADDR_XSDM_QUEUES_END \
+ ((PXP_VF_ADDR_XSDM_QUEUES_START) + (PXP_VF_ADDR_XSDM_QUEUES_SIZE) - 1)
+
+#define PXP_VF_ADDR_TSDM_QUEUES_START 0x6300
+#define PXP_VF_ADDR_TSDM_QUEUES_SIZE \
+ (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE)
+#define PXP_VF_ADDR_TSDM_QUEUES_END \
+ ((PXP_VF_ADDR_TSDM_QUEUES_START) + (PXP_VF_ADDR_TSDM_QUEUES_SIZE) - 1)
+
+#define PXP_VF_ADDR_USDM_GLOBAL_START 0x7400
+#define PXP_VF_ADDR_USDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE)
+#define PXP_VF_ADDR_USDM_GLOBAL_END \
+ ((PXP_VF_ADDR_USDM_GLOBAL_START) + (PXP_VF_ADDR_USDM_GLOBAL_SIZE) - 1)
+
+#define PXP_VF_ADDR_CSDM_GLOBAL_START 0x7600
+#define PXP_VF_ADDR_CSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE)
+#define PXP_VF_ADDR_CSDM_GLOBAL_END \
+ ((PXP_VF_ADDR_CSDM_GLOBAL_START) + (PXP_VF_ADDR_CSDM_GLOBAL_SIZE) - 1)
+
+#define PXP_VF_ADDR_XSDM_GLOBAL_START 0x7800
+#define PXP_VF_ADDR_XSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE)
+#define PXP_VF_ADDR_XSDM_GLOBAL_END \
+ ((PXP_VF_ADDR_XSDM_GLOBAL_START) + (PXP_VF_ADDR_XSDM_GLOBAL_SIZE) - 1)
+
+#define PXP_VF_ADDR_TSDM_GLOBAL_START 0x7a00
+#define PXP_VF_ADDR_TSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE)
+#define PXP_VF_ADDR_TSDM_GLOBAL_END \
+ ((PXP_VF_ADDR_TSDM_GLOBAL_START) + (PXP_VF_ADDR_TSDM_GLOBAL_SIZE) - 1)
+
+#define PXP_VF_ADDR_DB_START 0x7c00
+#define PXP_VF_ADDR_DB_SIZE (0x200)
+#define PXP_VF_ADDR_DB_END \
+ ((PXP_VF_ADDR_DB_START) + (PXP_VF_ADDR_DB_SIZE) - 1)
+
+#define PXP_VF_ADDR_GRC_START 0x7e00
+#define PXP_VF_ADDR_GRC_SIZE (0x200)
+#define PXP_VF_ADDR_GRC_END \
+ ((PXP_VF_ADDR_GRC_START) + (PXP_VF_ADDR_GRC_SIZE) - 1)
+
+#define PXP_VF_ADDR_DORQ_START (0x0)
+#define PXP_VF_ADDR_DORQ_SIZE (0xffffffff)
+#define PXP_VF_ADDR_DORQ_END (0xffffffff)
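+
+/*
+ * Range-check sketch (hypothetical helper, editor's note): each windowed
+ * region above follows END = START + SIZE - 1 (the DORQ entry is a
+ * catch-all spanning the whole 32-bit space), so membership tests are
+ * simple inclusive comparisons, e.g. for the doorbell window:
+ *
+ *   static inline int pxp_vf_addr_is_db(uint32_t addr)
+ *   {
+ *           return addr >= PXP_VF_ADDR_DB_START &&
+ *                  addr <= PXP_VF_ADDR_DB_END;
+ *   }
+ */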
+
+#define PXP_BAR_GRC 0
+#define PXP_BAR_TSDM 0
+#define PXP_BAR_USDM 0
+#define PXP_BAR_XSDM 0
+#define PXP_BAR_CSDM 0
+#define PXP_BAR_IGU 0
+#define PXP_BAR_DQ 1
+
+#define PXP_VF_BAR_IGU 0
+#define PXP_VF_BAR_USDM_QUEUES 0
+#define PXP_VF_BAR_TSDM_QUEUES 0
+#define PXP_VF_BAR_XSDM_QUEUES 0
+#define PXP_VF_BAR_CSDM_QUEUES 0
+#define PXP_VF_BAR_USDM_GLOBAL 0
+#define PXP_VF_BAR_TSDM_GLOBAL 0
+#define PXP_VF_BAR_XSDM_GLOBAL 0
+#define PXP_VF_BAR_CSDM_GLOBAL 0
+#define PXP_VF_BAR_DB 0
+#define PXP_VF_BAR_GRC 0
+#define PXP_VF_BAR_DORQ 1
+
+/* PCI CAPABILITIES*/
+
+#define PCI_CAP_PCIE 0x10 /*PCIe capability ID*/
+
+#define PCIE_DEV_CAPS 0x04
+
+#define PCIE_DEV_CTRL 0x08
+#define PCIE_DEV_CTRL_FLR 0x8000
+
+#define PCIE_DEV_STATUS 0x0A
+
+#define PCI_CAP_MSIX 0x11 /*MSI-X capability ID*/
+#define PCI_MSIX_CONTROL_SHIFT 16
+#define PCI_MSIX_TABLE_SIZE_MASK 0x07FF
+#define PCI_MSIX_TABLE_ENABLE_MASK 0x8000
+
+
+#define MDIO_REG_BANK_CL73_IEEEB0 0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
+
+#define MDIO_REG_BANK_CL73_IEEEB1 0x10
+#define MDIO_CL73_IEEEB1_AN_ADV1 0x00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400
+#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV2 0x04
+
+#define MDIO_REG_BANK_RX0 0x80b0
+#define MDIO_RX0_RX_STATUS 0x10
+#define MDIO_RX0_RX_STATUS_SIGDET 0x8000
+#define MDIO_RX0_RX_STATUS_RX_SEQ_DONE 0x1000
+#define MDIO_RX0_RX_EQ_BOOST 0x1c
+#define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX1 0x80c0
+#define MDIO_RX1_RX_EQ_BOOST 0x1c
+#define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX2 0x80d0
+#define MDIO_RX2_RX_EQ_BOOST 0x1c
+#define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX3 0x80e0
+#define MDIO_RX3_RX_EQ_BOOST 0x1c
+#define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX_ALL 0x80f0
+#define MDIO_RX_ALL_RX_EQ_BOOST 0x1c
+#define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_TX0 0x8060
+#define MDIO_TX0_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX1 0x8070
+#define MDIO_TX1_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX2 0x8080
+#define MDIO_TX2_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX3 0x8090
+#define MDIO_TX3_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_XGXS_BLOCK0 0x8000
+#define MDIO_BLOCK0_XGXS_CONTROL 0x10
+
+#define MDIO_REG_BANK_XGXS_BLOCK1 0x8010
+#define MDIO_BLOCK1_LANE_CTRL0 0x15
+#define MDIO_BLOCK1_LANE_CTRL1 0x16
+#define MDIO_BLOCK1_LANE_CTRL2 0x17
+#define MDIO_BLOCK1_LANE_PRBS 0x19
+
+#define MDIO_REG_BANK_XGXS_BLOCK2 0x8100
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010
+#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15
+
+#define MDIO_REG_BANK_GP_STATUS 0x8120
+#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 0x3900
+
+
+#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT (0xb71<<1)
+
+#define MDIO_REG_BANK_SERDES_DIGITAL 0x8300
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1 0x10
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF 0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN 0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT 0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET 0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE 0x0020
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2 0x11
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G 0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002
+#define MDIO_SERDES_DIGITAL_MISC1 0x18
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M 0x2000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M 0x4000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M 0x6000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M 0x8000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL 0x0010
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK 0x000f
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G 0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G 0x0001
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G 0x0002
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG 0x0003
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4 0x0004
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G 0x0005
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G 0x0006
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G 0x0007
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G 0x0008
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G 0x0009
+
+#define MDIO_REG_BANK_OVER_1G 0x8320
+#define MDIO_OVER_1G_DIGCTL_3_4 0x14
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK 0xffe0
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT 5
+#define MDIO_OVER_1G_UP1 0x19
+#define MDIO_OVER_1G_UP1_2_5G 0x0001
+#define MDIO_OVER_1G_UP1_5G 0x0002
+#define MDIO_OVER_1G_UP1_6G 0x0004
+#define MDIO_OVER_1G_UP1_10G 0x0010
+#define MDIO_OVER_1G_UP1_10GH 0x0008
+#define MDIO_OVER_1G_UP1_12G 0x0020
+#define MDIO_OVER_1G_UP1_12_5G 0x0040
+#define MDIO_OVER_1G_UP1_13G 0x0080
+#define MDIO_OVER_1G_UP1_15G 0x0100
+#define MDIO_OVER_1G_UP1_16G 0x0200
+#define MDIO_OVER_1G_UP2 0x1A
+#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK 0x0007
+#define MDIO_OVER_1G_UP2_IDRIVER_MASK 0x0038
+#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK 0x03C0
+#define MDIO_OVER_1G_UP3 0x1B
+#define MDIO_OVER_1G_UP3_HIGIG2 0x0001
+#define MDIO_OVER_1G_LP_UP1 0x1C
+#define MDIO_OVER_1G_LP_UP2 0x1D
+#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 0x03ff
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK 0x0780
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT 7
+#define MDIO_OVER_1G_LP_UP3 0x1E
+
+#define MDIO_REG_BANK_REMOTE_PHY 0x8330
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS 0x10
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG 0x0010
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG 0x0600
+
+#define MDIO_REG_BANK_BAM_NEXT_PAGE 0x8350
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL 0x10
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE 0x0001
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN 0x0002
+
+#define MDIO_REG_BANK_CL73_USERB0 0x8370
+#define MDIO_CL73_USERB0_CL73_UCTRL 0x10
+#define MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL 0x0002
+#define MDIO_CL73_USERB0_CL73_USTAT1 0x11
+#define MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK 0x0100
+#define MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37 0x0400
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 0x12
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN 0x8000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN 0x4000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN 0x2000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 0x14
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 0x0001
+
+#define MDIO_REG_BANK_AER_BLOCK 0xFFD0
+#define MDIO_AER_BLOCK_AER_REG 0x1E
+
+#define MDIO_REG_BANK_COMBO_IEEE0 0xFFE0
+#define MDIO_COMBO_IEEE0_MII_CONTROL 0x10
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK 0x2040
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10 0x0000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100 0x2000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000 0x0040
+#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 0x0100
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN 0x0200
+#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN 0x1000
+#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK 0x4000
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESET 0x8000
+#define MDIO_COMBO_IEEE0_MII_STATUS 0x11
+#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS 0x0004
+#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE 0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV 0x14
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX 0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX 0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE 0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC 0x0080
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC 0x0100
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 0x15
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE 0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK 0x4000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE 0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP 0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP 0x0020
+/* When the link partner is in SGMII mode (bit 0 = 1), then
+ * bit 15 = link, bit 12 = duplex, bits 11:10 = speed, bit 14 = acknowledge.
+ * The other bits are reserved and should be zero. */
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001
+
+
+#define MDIO_PMA_DEVAD 0x1
+/*ieee*/
+#define MDIO_PMA_REG_CTRL 0x0
+#define MDIO_PMA_REG_STATUS 0x1
+#define MDIO_PMA_REG_10G_CTRL2 0x7
+#define MDIO_PMA_REG_TX_DISABLE 0x0009
+#define MDIO_PMA_REG_RX_SD 0xa
+/*bnx2x*/
+#define MDIO_PMA_REG_BNX2X_CTRL 0x0096
+#define MDIO_PMA_REG_FEC_CTRL 0x00ab
+#define MDIO_PMA_LASI_RXCTRL 0x9000
+#define MDIO_PMA_LASI_TXCTRL 0x9001
+#define MDIO_PMA_LASI_CTRL 0x9002
+#define MDIO_PMA_LASI_RXSTAT 0x9003
+#define MDIO_PMA_LASI_TXSTAT 0x9004
+#define MDIO_PMA_LASI_STAT 0x9005
+#define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800
+#define MDIO_PMA_REG_DIGITAL_CTRL 0xc808
+#define MDIO_PMA_REG_DIGITAL_STATUS 0xc809
+#define MDIO_PMA_REG_TX_POWER_DOWN 0xca02
+#define MDIO_PMA_REG_CMU_PLL_BYPASS 0xca09
+#define MDIO_PMA_REG_MISC_CTRL 0xca0a
+#define MDIO_PMA_REG_GEN_CTRL 0xca10
+#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
+#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
+#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
+#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
+#define MDIO_PMA_REG_ROM_VER1 0xca19
+#define MDIO_PMA_REG_ROM_VER2 0xca1a
+#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
+#define MDIO_PMA_REG_PLL_BANDWIDTH 0xca1d
+#define MDIO_PMA_REG_PLL_CTRL 0xca1e
+#define MDIO_PMA_REG_MISC_CTRL0 0xca23
+#define MDIO_PMA_REG_LRM_MODE 0xca3f
+#define MDIO_PMA_REG_CDR_BANDWIDTH 0xca46
+#define MDIO_PMA_REG_MISC_CTRL1 0xca85
+
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL 0x8000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE 0x0000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE 0x0004
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS 0x0008
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT 0x8002
+#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR 0x8003
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF 0xc820
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8726_TX_CTRL1 0xca01
+#define MDIO_PMA_REG_8726_TX_CTRL2 0xca05
+
+#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8727_MISC_CTRL 0x8309
+#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
+#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
+#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
+#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
+#define MDIO_PMA_REG_8727_PCS_GP 0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
+
+#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
+#define MDIO_PMA_REG_8073_CHIP_REV 0xc801
+#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820
+#define MDIO_PMA_REG_8073_XAUI_WA 0xc841
+#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08
+
+#define MDIO_PMA_REG_7101_RESET 0xc000
+#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
+#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009
+#define MDIO_PMA_REG_7101_VER1 0xc026
+#define MDIO_PMA_REG_7101_VER2 0xc027
+
+#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
+#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
+#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
+#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
+#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
+#define MDIO_PMA_REG_8481_LED5_MASK 0xa838
+#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
+#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
+
+
+#define MDIO_WIS_DEVAD 0x2
+/*bnx2x*/
+#define MDIO_WIS_REG_LASI_CNTL 0x9002
+#define MDIO_WIS_REG_LASI_STATUS 0x9005
+
+#define MDIO_PCS_DEVAD 0x3
+#define MDIO_PCS_REG_STATUS 0x0020
+#define MDIO_PCS_REG_LASI_STATUS 0x9005
+#define MDIO_PCS_REG_7101_DSP_ACCESS 0xD000
+#define MDIO_PCS_REG_7101_SPI_MUX 0xD008
+#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A
+#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2)
+#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028
+
+
+#define MDIO_XS_DEVAD 0x4
+#define MDIO_XS_REG_STATUS 0x0001
+#define MDIO_XS_PLL_SEQUENCER 0x8000
+#define MDIO_XS_SFX7101_XGXS_TEST1 0xc00a
+
+#define MDIO_XS_8706_REG_BANK_RX0 0x80bc
+#define MDIO_XS_8706_REG_BANK_RX1 0x80cc
+#define MDIO_XS_8706_REG_BANK_RX2 0x80dc
+#define MDIO_XS_8706_REG_BANK_RX3 0x80ec
+#define MDIO_XS_8706_REG_BANK_RXA 0x80fc
+
+#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA
+
+#define MDIO_AN_DEVAD 0x7
+/*ieee*/
+#define MDIO_AN_REG_CTRL 0x0000
+#define MDIO_AN_REG_STATUS 0x0001
+#define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020
+#define MDIO_AN_REG_ADV_PAUSE 0x0010
+#define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400
+#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800
+#define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00
+#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00
+#define MDIO_AN_REG_ADV 0x0011
+#define MDIO_AN_REG_ADV2 0x0012
+#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
+#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014
+#define MDIO_AN_REG_MASTER_STATUS 0x0021
+#define MDIO_AN_REG_EEE_ADV 0x003c
+#define MDIO_AN_REG_LP_EEE_ADV 0x003d
+/*bnx2x*/
+#define MDIO_AN_REG_LINK_STATUS 0x8304
+#define MDIO_AN_REG_CL37_CL73 0x8370
+#define MDIO_AN_REG_CL37_AN 0xffe0
+#define MDIO_AN_REG_CL37_FC_LD 0xffe4
+#define MDIO_AN_REG_CL37_FC_LP 0xffe5
+#define MDIO_AN_REG_1000T_STATUS 0xffea
+
+#define MDIO_AN_REG_8073_2_5G 0x8329
+#define MDIO_AN_REG_8073_BAM 0x8350
+
+#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
+#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
+#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40
+#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
+#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
+#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
+#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
+#define MDIO_AN_REG_8481_1G_100T_EXT_CTRL 0xfff0
+#define MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF 0x0008
+#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
+#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
+#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
+#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc
+
+/* BNX2X84823 only */
+#define MDIO_CTL_DEVAD 0x1e
+#define MDIO_CTL_REG_84823_MEDIA 0x401a
+#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018
+ /* These pins configure the BNX2X84823 interface to MAC after reset. */
+#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008
+#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010
+ /* These pins configure the BNX2X84823 interface to Line after reset. */
+#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040
+ /* When this pin is active high during reset, the 10GBASE-T core is
+ * powered down; when it is active low, the 10GBASE-T core is powered
+ * up.
+ */
+#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
+#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH 0xa82b
+#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ 0x2f
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
+#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
+
+/* BNX2X84833 only */
+#define MDIO_84833_TOP_CFG_FW_REV 0x400f
+#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
+#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81
+#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
+#define MDIO_84833_SUPER_ISOLATE 0x8000
+/* These are the mailbox registers used by the 84833. */
+#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005
+#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006
+#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
+#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
+#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
+#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037
+#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038
+#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039
+#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a
+#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b
+#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c
+#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0
+#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26
+#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27
+#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28
+#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29
+#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30
+#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31
+
+/* Mailbox command set used by 84833. */
+#define PHY84833_CMD_SET_PAIR_SWAP 0x8001
+#define PHY84833_CMD_GET_EEE_MODE 0x8008
+#define PHY84833_CMD_SET_EEE_MODE 0x8009
+#define PHY84833_CMD_GET_CURRENT_TEMP 0x8031
+/* Mailbox status set used by 84833. */
+#define PHY84833_STATUS_CMD_RECEIVED 0x0001
+#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002
+#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004
+#define PHY84833_STATUS_CMD_COMPLETE_ERROR 0x0008
+#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS 0x0010
+#define PHY84833_STATUS_CMD_SYSTEM_BOOT 0x0020
+#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
+#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
+#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
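+
+/*
+ * Handshake sketch (editor's note; the Clause 45 read/write accessors are
+ * assumed and not named here): a firmware command is issued by loading
+ * MDIO_84833_CMD_HDLR_DATA1..DATA5, writing an opcode such as
+ * PHY84833_CMD_GET_EEE_MODE to MDIO_84833_CMD_HDLR_COMMAND, and then
+ * polling MDIO_84833_CMD_HDLR_STATUS until it reports
+ * PHY84833_STATUS_CMD_COMPLETE_PASS or PHY84833_STATUS_CMD_COMPLETE_ERROR.
+ */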
+
+
+/* Warpcore clause 45 addressing */
+#define MDIO_WC_DEVAD 0x3
+#define MDIO_WC_REG_IEEE0BLK_MIICNTL 0x0
+#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
+#define MDIO_WC_REG_PCS_STATUS2 0x0021
+#define MDIO_WC_REG_PMD_KR_CONTROL 0x0096
+#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
+#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
+#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL0 0x8015
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL1 0x8016
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL2 0x8017
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL3 0x8018
+#define MDIO_WC_REG_XGXSBLK1_LANETEST0 0x801a
+#define MDIO_WC_REG_TX0_ANA_CTRL0 0x8061
+#define MDIO_WC_REG_TX1_ANA_CTRL0 0x8071
+#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081
+#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091
+#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET 0x0c
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK 0x7000
+#define MDIO_WC_REG_TX1_TX_DRIVER 0x8077
+#define MDIO_WC_REG_TX2_TX_DRIVER 0x8087
+#define MDIO_WC_REG_TX3_TX_DRIVER 0x8097
+#define MDIO_WC_REG_RX0_ANARXCONTROL1G 0x80b9
+#define MDIO_WC_REG_RX2_ANARXCONTROL1G 0x80d9
+#define MDIO_WC_REG_RX0_PCI_CTRL 0x80ba
+#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
+#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
+#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
+#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
+#define MDIO_WC_REG_XGXS_STATUS3 0x8129
+#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
+#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131
+#define MDIO_WC_REG_XGXS_STATUS4 0x813c
+#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141
+#define MDIO_WC_REG_XGXS_X2_CONTROL3 0x8142
+#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B
+#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169
+#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0
+#define MDIO_WC_REG_GP2_STATUS_GP_2_1 0x81d1
+#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2
+#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL 0x1000
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL 0x0100
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP 0x0010
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP 0x1
+#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE
+#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc
+#define MDIO_WC_REG_UC_INFO_B1_CRC 0x81FE
+#define MDIO_WC_REG_DSC1B0_UC_CTRL 0x820e
+#define MDIO_WC_REG_DSC1B0_UC_CTRL_RDY4CMD (1<<7)
+#define MDIO_WC_REG_DSC_SMC 0x8213
+#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0 0x821e
+#define MDIO_WC_REG_TX_FIR_TAP 0x82e2
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00
+#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000
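+
+/*
+ * Packing sketch (editor's note; pre/main/post are illustrative tap
+ * values): the three FIR taps plus the enable bit share one register:
+ *
+ *   val = ((pre  << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) &
+ *          MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK) |
+ *         ((main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) &
+ *          MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK) |
+ *         ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) &
+ *          MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK) |
+ *         MDIO_WC_REG_TX_FIR_TAP_ENABLE;
+ */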
+#define MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP 0x82e2
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3
+#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6
+#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7
+#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL 0x82e8
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL 0x82ec
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1 0x8300
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2 0x8301
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3 0x8302
+#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1 0x8304
+#define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308
+#define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309
+#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
+#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c
+#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
+#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e
+#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
+#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
+#define MDIO_WC_REG_DIGITAL5_LINK_STATUS 0x834d
+#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
+#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
+#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
+#define MDIO_WC_REG_CL73_USERB0_CTRL 0x8370
+#define MDIO_WC_REG_CL73_USERB0_USTAT 0x8371
+#define MDIO_WC_REG_CL73_BAM_CTRL1 0x8372
+#define MDIO_WC_REG_CL73_BAM_CTRL2 0x8373
+#define MDIO_WC_REG_CL73_BAM_CTRL3 0x8374
+#define MDIO_WC_REG_CL73_BAM_CODE_FIELD 0x837b
+#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390
+#define MDIO_WC_REG_TX66_CONTROL 0x83b0
+#define MDIO_WC_REG_RX66_CONTROL 0x83c0
+#define MDIO_WC_REG_RX66_SCW0 0x83c2
+#define MDIO_WC_REG_RX66_SCW1 0x83c3
+#define MDIO_WC_REG_RX66_SCW2 0x83c4
+#define MDIO_WC_REG_RX66_SCW3 0x83c5
+#define MDIO_WC_REG_RX66_SCW0_MASK 0x83c6
+#define MDIO_WC_REG_RX66_SCW1_MASK 0x83c7
+#define MDIO_WC_REG_RX66_SCW2_MASK 0x83c8
+#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9
+#define MDIO_WC_REG_FX100_CTRL1 0x8400
+#define MDIO_WC_REG_FX100_CTRL3 0x8402
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL5 0x8436
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL6 0x8437
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL7 0x8438
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL9 0x8439
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL10 0x843a
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL11 0x843b
+#define MDIO_WC_REG_ETA_CL73_OUI1 0x8453
+#define MDIO_WC_REG_ETA_CL73_OUI2 0x8454
+#define MDIO_WC_REG_ETA_CL73_OUI3 0x8455
+#define MDIO_WC_REG_ETA_CL73_LD_BAM_CODE 0x8456
+#define MDIO_WC_REG_ETA_CL73_LD_UD_CODE 0x8457
+#define MDIO_WC_REG_MICROBLK_CMD 0xffc2
+#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5
+#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc
+
+#define MDIO_WC_REG_AERBLK_AER 0xffde
+#define MDIO_WC_REG_COMBO_IEEE0_MIICTRL 0xffe0
+#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT 0xffe1
+
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET 0x810A
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4
+
+#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2 0x8141
+
+#define DIGITAL5_ACTUAL_SPEED_TX_MASK 0x003f
+
+/* 54618se */
+#define MDIO_REG_GPHY_MII_STATUS 0x1
+#define MDIO_REG_GPHY_PHYID_LSB 0x3
+#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd
+#define MDIO_REG_GPHY_CL45_REG_WRITE 0x4000
+#define MDIO_REG_GPHY_CL45_REG_READ 0xc000
+#define MDIO_REG_GPHY_CL45_DATA_REG 0xe
+#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+#define MDIO_REG_GPHY_EXP_ACCESS_GATE 0x15
+#define MDIO_REG_GPHY_EXP_ACCESS 0x17
+#define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00
+#define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40
+#define MDIO_REG_GPHY_AUX_STATUS 0x19
+#define MDIO_REG_INTR_STATUS 0x1a
+#define MDIO_REG_INTR_MASK 0x1b
+#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
+#define MDIO_REG_GPHY_SHADOW 0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10)
+#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
+#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
+#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
+#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8)
+
+
+#define IGU_FUNC_BASE 0x0400
+
+#define IGU_ADDR_MSIX 0x0000
+#define IGU_ADDR_INT_ACK 0x0200
+#define IGU_ADDR_PROD_UPD 0x0201
+#define IGU_ADDR_ATTN_BITS_UPD 0x0202
+#define IGU_ADDR_ATTN_BITS_SET 0x0203
+#define IGU_ADDR_ATTN_BITS_CLR 0x0204
+#define IGU_ADDR_COALESCE_NOW 0x0205
+#define IGU_ADDR_SIMD_MASK 0x0206
+#define IGU_ADDR_SIMD_NOMASK 0x0207
+#define IGU_ADDR_MSI_CTL 0x0210
+#define IGU_ADDR_MSI_ADDR_LO 0x0211
+#define IGU_ADDR_MSI_ADDR_HI 0x0212
+#define IGU_ADDR_MSI_DATA 0x0213
+
+
+#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0
+#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1
+#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2
+#define IGU_USE_REGISTER_cstorm_type_1_sb_cleanup 3
+
+#define COMMAND_REG_INT_ACK 0x0
+#define COMMAND_REG_PROD_UPD 0x4
+#define COMMAND_REG_ATTN_BITS_UPD 0x8
+#define COMMAND_REG_ATTN_BITS_SET 0xc
+#define COMMAND_REG_ATTN_BITS_CLR 0x10
+#define COMMAND_REG_COALESCE_NOW 0x14
+#define COMMAND_REG_SIMD_MASK 0x18
+#define COMMAND_REG_SIMD_NOMASK 0x1c
+
+
+#define IGU_MEM_BASE 0x0000
+
+#define IGU_MEM_MSIX_BASE 0x0000
+#define IGU_MEM_MSIX_UPPER 0x007f
+#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE 0x0200
+#define IGU_MEM_PBA_MSIX_UPPER 0x0200
+
+#define IGU_CMD_BACKWARD_COMP_PROD_UPD 0x0201
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
+
+#define IGU_CMD_INT_ACK_BASE 0x0400
+#define IGU_CMD_INT_ACK_UPPER \
+ (IGU_CMD_INT_ACK_BASE + MAX_SB_PER_PATH - 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x04ff
+
+#define IGU_CMD_E2_PROD_UPD_BASE 0x0500
+#define IGU_CMD_E2_PROD_UPD_UPPER \
+ (IGU_CMD_E2_PROD_UPD_BASE + MAX_SB_PER_PATH - 1)
+#define IGU_CMD_E2_PROD_UPD_RESERVED_UPPER 0x059f
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05a0
+#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05a1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05a2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05a3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05a4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05a5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
+
+
+#define IGU_REG_RESERVED_UPPER 0x05ff
+
+#define IGU_SEG_IDX_ATTN 2
+#define IGU_SEG_IDX_DEFAULT 1
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1<<3) /* attention enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
+
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
+#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 2 /* Parent PF */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
+
+
+#define IGU_BC_DSB_NUM_SEGS 5
+#define IGU_BC_NDSB_NUM_SEGS 2
+#define IGU_NORM_DSB_NUM_SEGS 2
+#define IGU_NORM_NDSB_NUM_SEGS 1
+#define IGU_BC_BASE_DSB_PROD 128
+#define IGU_NORM_BASE_DSB_PROD 136
+
+/* FID (if VF - [6] = 0; [5:0] = VF number;
+ *      if PF - [6] = 1; [5:2] = 0; [1:0] = PF number)
+ */
+#define IGU_FID_ENCODE_IS_PF (0x1<<6)
+#define IGU_FID_ENCODE_IS_PF_SHIFT 6
+#define IGU_FID_VF_NUM_MASK (0x3f)
+#define IGU_FID_PF_NUM_MASK (0x7)
+
+#define IGU_REG_MAPPING_MEMORY_VALID (1<<0)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_MASK (0x3F<<1)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT 1
+#define IGU_REG_MAPPING_MEMORY_FID_MASK (0x7F<<7)
+#define IGU_REG_MAPPING_MEMORY_FID_SHIFT 7
+
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+
+/* String-to-compress [31:8] = CID (all 24 bits)
+ * String-to-compress [7:4] = Region
+ * String-to-compress [3:0] = Type
+ */
+#define CDU_VALID_DATA(_cid, _region, _type) \
+ (((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
+#define CDU_CRC8(_cid, _region, _type) \
+ (ecore_calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
+#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type) \
+ (0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
+#define CDU_RSRVD_VALUE_TYPE_B(_crc, _type) \
+	(0x80 | (((_type) & 0xf) << 3) | ((_crc) & 0x7))
+#define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
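+/* Worked example (illustrative values, not from the original source): for
+ * _cid = 0x12345, _region = CDU_REGION_NUMBER_XCM_AG (2) and _type = 1,
+ * CDU_VALID_DATA() yields (0x12345 << 8) | (2 << 4) | 1 = 0x1234521, and
+ * CDU_CRC8() compresses that string into the 8-bit value used by the
+ * CDU_RSRVD_VALUE_TYPE_A/_B reserved-byte encodings above.
+ */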
+
+#endif /* ECORE_REG_H */
diff --git a/src/dpdk22/drivers/net/bnx2x/ecore_sp.h b/src/dpdk22/drivers/net/bnx2x/ecore_sp.h
new file mode 100644
index 00000000..9c1f55df
--- /dev/null
+++ b/src/dpdk22/drivers/net/bnx2x/ecore_sp.h
@@ -0,0 +1,1768 @@
+/*-
+ * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ */
+
+#ifndef ECORE_SP_H
+#define ECORE_SP_H
+
+#include <rte_byteorder.h>
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN RTE_LITTLE_ENDIAN
+#endif
+#undef __BIG_ENDIAN
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN RTE_BIG_ENDIAN
+#endif
+#undef __LITTLE_ENDIAN
+#endif
+
+#include "ecore_mfw_req.h"
+#include "ecore_fw_defs.h"
+#include "ecore_hsi.h"
+#include "ecore_reg.h"
+
+struct bnx2x_softc;
+typedef phys_addr_t ecore_dma_addr_t; /* expected to be 64 bit wide */
+typedef volatile int ecore_atomic_t;
+
+
+#define ETH_ALEN ETHER_ADDR_LEN /* 6 */
+
+#define ECORE_SWCID_SHIFT 17
+#define ECORE_SWCID_MASK ((0x1 << ECORE_SWCID_SHIFT) - 1)
+
+#define ECORE_MC_HASH_SIZE 8
+#define ECORE_MC_HASH_OFFSET(sc, i) \
+ (BAR_TSTRORM_INTMEM + \
+ TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(FUNC_ID(sc)) + i*4)
+
+#define ECORE_MAX_MULTICAST 64
+#define ECORE_MAX_EMUL_MULTI 1
+
+#define IRO sc->iro_array
+
+typedef rte_spinlock_t ECORE_MUTEX;
+#define ECORE_MUTEX_INIT(_mutex) rte_spinlock_init(_mutex)
+#define ECORE_MUTEX_LOCK(_mutex) rte_spinlock_lock(_mutex)
+#define ECORE_MUTEX_UNLOCK(_mutex) rte_spinlock_unlock(_mutex)
+
+typedef rte_spinlock_t ECORE_MUTEX_SPIN;
+#define ECORE_SPIN_LOCK_INIT(_spin, _sc) rte_spinlock_init(_spin)
+#define ECORE_SPIN_LOCK_BH(_spin) rte_spinlock_lock(_spin) /* bh = bottom-half */
+#define ECORE_SPIN_UNLOCK_BH(_spin) rte_spinlock_unlock(_spin) /* bh = bottom-half */
+
+#define ECORE_SMP_MB_AFTER_CLEAR_BIT() mb()
+#define ECORE_SMP_MB_BEFORE_CLEAR_BIT() mb()
+#define ECORE_SMP_MB() mb()
+#define ECORE_SMP_RMB() rmb()
+#define ECORE_SMP_WMB() wmb()
+#define ECORE_MMIOWB() wmb()
+
+#define ECORE_SET_BIT_NA(bit, var) (*var |= (1 << bit))
+#define ECORE_CLEAR_BIT_NA(bit, var) (*var &= ~(1 << bit))
+
+#define ECORE_TEST_BIT(bit, var) bnx2x_test_bit(bit, var)
+#define ECORE_SET_BIT(bit, var) bnx2x_set_bit(bit, var)
+#define ECORE_CLEAR_BIT(bit, var) bnx2x_clear_bit(bit, var)
+#define ECORE_TEST_AND_CLEAR_BIT(bit, var) bnx2x_test_and_clear_bit(bit, var)
+
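+/* FreeBSD-style atomic_*() stand-ins, implemented here as plain, non-atomic
+ * operations (note: atomic_cmpset_*() below unconditionally stores the new
+ * value and only reports whether it differed from the expected one).
+ */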
+#define atomic_load_acq_int (int)*
+#define atomic_store_rel_int(a, v) (*a = v)
+#define atomic_cmpset_acq_int(a, o, n) ((*a = (o & (n)) | (n)) ^ o)
+
+#define atomic_load_acq_long (long)*
+#define atomic_store_rel_long(a, v) (*a = v)
+#define atomic_set_acq_long(a, v) (*a |= v)
+#define atomic_clear_acq_long(a, v) (*a &= ~v)
+#define atomic_cmpset_acq_long(a, o, n) ((*a = (o & (n)) | (n)) ^ o)
+#define atomic_subtract_acq_long(a, v) (*a -= v)
+#define atomic_add_acq_long(a, v) (*a += v)
+
+#define ECORE_ATOMIC_READ(a) atomic_load_acq_int((volatile int *)a)
+#define ECORE_ATOMIC_SET(a, v) atomic_store_rel_int((volatile int *)a, v)
+#define ECORE_ATOMIC_CMPXCHG(a, o, n) bnx2x_cmpxchg((volatile int *)a, o, n)
+
+#define ECORE_RET_PENDING(pending_bit, pending) \
+ (ECORE_TEST_BIT(pending_bit, pending) ? ECORE_PENDING : ECORE_SUCCESS)
+
+#define ECORE_SET_FLAG(value, mask, flag) \
+ do { \
+ (value) &= ~(mask); \
+ (value) |= ((flag) << (mask##_SHIFT)); \
+ } while (0)
+
+#define ECORE_GET_FLAG(value, mask) \
+	(((value) & (mask)) >> (mask##_SHIFT))
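+/* Usage sketch (illustrative; DEMO_FIELD and DEMO_FIELD_SHIFT are
+ * hypothetical definitions showing the mask##_SHIFT token pasting):
+ *
+ *	#define DEMO_FIELD	 (0x3 << 4)
+ *	#define DEMO_FIELD_SHIFT 4
+ *
+ *	uint32_t v = 0;
+ *	ECORE_SET_FLAG(v, DEMO_FIELD, 2);	=> v == 0x20
+ *	ECORE_GET_FLAG(v, DEMO_FIELD);		=> 2
+ */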
+
+#define ECORE_MIGHT_SLEEP()
+
+#define ECORE_FCOE_CID(sc) ((sc)->fp[FCOE_IDX(sc)].cl_id)
+
+#define ECORE_MEMCMP(_a, _b, _s) memcmp(_a, _b, _s)
+#define ECORE_MEMCPY(_a, _b, _s) (void)rte_memcpy(_a, _b, _s)
+#define ECORE_MEMSET(_a, _c, _s) memset(_a, _c, _s)
+
+#define ECORE_CPU_TO_LE16(x) htole16(x)
+#define ECORE_CPU_TO_LE32(x) htole32(x)
+
+#define ECORE_WAIT(_s, _t) DELAY(1000)
+#define ECORE_MSLEEP(_t) DELAY((_t) * 1000)
+
+#define ECORE_LIKELY(x) likely(x)
+#define ECORE_UNLIKELY(x) unlikely(x)
+
+#define ECORE_ZALLOC(_size, _flags, _sc) \
+ rte_zmalloc("", _size, RTE_CACHE_LINE_SIZE)
+
+#define ECORE_CALLOC(_len, _size, _flags, _sc) \
+ rte_calloc("", _len, _size, RTE_CACHE_LINE_SIZE)
+
+#define ECORE_FREE(_s, _buf, _size) \
+ rte_free(_buf)
+
+#define SC_ILT(sc) ((sc)->ilt)
+#define ILOG2(x) bnx2x_ilog2(x)
+
+#define ECORE_ILT_ZALLOC(x, y, size, str) \
+ do { \
+ x = rte_malloc("", sizeof(struct bnx2x_dma), RTE_CACHE_LINE_SIZE); \
+ if (x) { \
+ if (bnx2x_dma_alloc((struct bnx2x_softc *)sc, \
+ size, (struct bnx2x_dma *)x, \
+ str, RTE_CACHE_LINE_SIZE) != 0) { \
+ rte_free(x); \
+ x = NULL; \
+ *y = 0; \
+ } else { \
+ *y = ((struct bnx2x_dma *)x)->paddr; \
+ } \
+ } \
+ } while (0)
+
+#define ECORE_ILT_FREE(x, y, size) \
+ do { \
+ if (x) { \
+ rte_free(x); \
+ x = NULL; \
+ y = 0; \
+ } \
+ } while (0)
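+/* Usage sketch (illustrative; 'line' and its fields are hypothetical, and
+ * 'sc' must be in scope since ECORE_ILT_ZALLOC references it internally):
+ *
+ *	ECORE_ILT_ZALLOC(line->page, &line->page_mapping, page_sz, "ilt");
+ *	if (!line->page)
+ *		return ECORE_NOMEM;
+ *	...
+ *	ECORE_ILT_FREE(line->page, line->page_mapping, page_sz);
+ */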
+
+#define ECORE_IS_VALID_ETHER_ADDR(_mac) TRUE
+
+#define ECORE_IS_MF_SD_MODE IS_MF_SD_MODE
+#define ECORE_IS_MF_SI_MODE IS_MF_SI_MODE
+#define ECORE_IS_MF_AFEX_MODE IS_MF_AFEX_MODE
+
+#define ECORE_SET_CTX_VALIDATION bnx2x_set_ctx_validation
+
+#define ECORE_UPDATE_COALESCE_SB_INDEX bnx2x_update_coalesce_sb_index
+
+#define ECORE_ALIGN(x, a) ((((x) + (a) - 1) / (a)) * (a))
+
+#define ECORE_REG_WR_DMAE_LEN REG_WR_DMAE_LEN
+
+#define ECORE_PATH_ID SC_PATH
+#define ECORE_PORT_ID SC_PORT
+#define ECORE_FUNC_ID SC_FUNC
+#define ECORE_ABS_FUNC_ID SC_ABS_FUNC
+
+#define CRCPOLY_LE 0xedb88320
+uint32_t ecore_calc_crc32(uint32_t crc, uint8_t const *p,
+ uint32_t len, uint32_t magic);
+
+uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc);
+
+
+static inline uint32_t
+ECORE_CRC32_LE(uint32_t seed, uint8_t *mac, uint32_t len)
+{
+ return ecore_calc_crc32(seed, mac, len, CRCPOLY_LE);
+}
+
+#define ecore_sp_post(_sc, _a, _b, _c, _d) \
+ bnx2x_sp_post(_sc, _a, _b, U64_HI(_c), U64_LO(_c), _d)
+
+#define ECORE_DBG_BREAK_IF(exp) \
+ do { \
+ if (unlikely(exp)) { \
+ rte_panic("ECORE"); \
+ } \
+ } while (0)
+
+#define ECORE_BUG() \
+	do { \
+		rte_panic("BUG (%s:%d)", __FILE__, __LINE__); \
+	} while (0)
+
+#define ECORE_BUG_ON(exp) \
+ do { \
+		if (unlikely(exp)) { \
+ rte_panic("BUG_ON (%s:%d)", __FILE__, __LINE__); \
+ } \
+ } while (0)
+
+
+#define ECORE_MSG(m, ...) \
+ PMD_DRV_LOG(DEBUG, m, ##__VA_ARGS__)
+
+typedef struct _ecore_list_entry_t
+{
+ struct _ecore_list_entry_t *next, *prev;
+} ecore_list_entry_t;
+
+typedef struct ecore_list_t
+{
+ ecore_list_entry_t *head, *tail;
+ unsigned long cnt;
+} ecore_list_t;
+
+/* initialize the list */
+#define ECORE_LIST_INIT(_list) \
+ do { \
+ (_list)->head = NULL; \
+ (_list)->tail = NULL; \
+ (_list)->cnt = 0; \
+ } while (0)
+
+/* return TRUE if the element is the last on the list */
+#define ECORE_LIST_IS_LAST(_elem, _list) \
+ (_elem == (_list)->tail)
+
+/* return TRUE if the list is empty */
+#define ECORE_LIST_IS_EMPTY(_list) \
+ ((_list)->cnt == 0)
+
+/* return the first element */
+#define ECORE_LIST_FIRST_ENTRY(_list, cast, _link) \
+ (cast *)((_list)->head)
+
+/* return the next element */
+#define ECORE_LIST_NEXT(_elem, _link, cast) \
+ (cast *)((&((_elem)->_link))->next)
+
+/* push an element on the head of the list */
+#define ECORE_LIST_PUSH_HEAD(_elem, _list) \
+ do { \
+ (_elem)->prev = (ecore_list_entry_t *)0; \
+ (_elem)->next = (_list)->head; \
+ if ((_list)->tail == (ecore_list_entry_t *)0) { \
+ (_list)->tail = (_elem); \
+ } else { \
+ (_list)->head->prev = (_elem); \
+ } \
+ (_list)->head = (_elem); \
+ (_list)->cnt++; \
+ } while (0)
+
+/* push an element on the tail of the list */
+#define ECORE_LIST_PUSH_TAIL(_elem, _list) \
+ do { \
+ (_elem)->next = (ecore_list_entry_t *)0; \
+ (_elem)->prev = (_list)->tail; \
+ if ((_list)->tail) { \
+ (_list)->tail->next = (_elem); \
+ } else { \
+ (_list)->head = (_elem); \
+ } \
+ (_list)->tail = (_elem); \
+ (_list)->cnt++; \
+ } while (0)
+
+/* push list1 on the head of list2 and return with list1 as empty */
+#define ECORE_LIST_SPLICE_INIT(_list1, _list2) \
+ do { \
+ (_list1)->tail->next = (_list2)->head; \
+ if ((_list2)->head) { \
+ (_list2)->head->prev = (_list1)->tail; \
+ } else { \
+ (_list2)->tail = (_list1)->tail; \
+ } \
+ (_list2)->head = (_list1)->head; \
+ (_list2)->cnt += (_list1)->cnt; \
+ (_list1)->head = NULL; \
+ (_list1)->tail = NULL; \
+ (_list1)->cnt = 0; \
+ } while (0)
+
+/* remove an element from the list */
+#define ECORE_LIST_REMOVE_ENTRY(_elem, _list) \
+ do { \
+ if ((_list)->head == (_elem)) { \
+ if ((_list)->head) { \
+ (_list)->head = (_list)->head->next; \
+ if ((_list)->head) { \
+ (_list)->head->prev = (ecore_list_entry_t *)0; \
+ } else { \
+ (_list)->tail = (ecore_list_entry_t *)0; \
+ } \
+ (_list)->cnt--; \
+ } \
+ } else if ((_list)->tail == (_elem)) { \
+ if ((_list)->tail) { \
+ (_list)->tail = (_list)->tail->prev; \
+ if ((_list)->tail) { \
+ (_list)->tail->next = (ecore_list_entry_t *)0; \
+ } else { \
+ (_list)->head = (ecore_list_entry_t *)0; \
+ } \
+ (_list)->cnt--; \
+ } \
+ } else { \
+ (_elem)->prev->next = (_elem)->next; \
+ (_elem)->next->prev = (_elem)->prev; \
+ (_list)->cnt--; \
+ } \
+ } while (0)
+
+/* walk the list */
+#define ECORE_LIST_FOR_EACH_ENTRY(pos, _list, _link, cast) \
+ for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _link); \
+ pos; \
+ pos = ECORE_LIST_NEXT(pos, _link, cast))
+
+/* walk the list (safely) */
+#define ECORE_LIST_FOR_EACH_ENTRY_SAFE(pos, n, _list, _link, cast) \
+	for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _link), \
+ n = (pos) ? ECORE_LIST_NEXT(pos, _link, cast) : NULL; \
+ pos != NULL; \
+ pos = (cast *)n, \
+ n = (pos) ? ECORE_LIST_NEXT(pos, _link, cast) : NULL)
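+/* Usage sketch (illustrative): draining a list of ecore_exeq_elem entries.
+ * The (cast *) conversions in FIRST_ENTRY/NEXT assume the link member is
+ * the first field of the containing structure, as in ecore_exeq_elem below.
+ *
+ *	struct ecore_exeq_elem *pos, *n;
+ *
+ *	ECORE_LIST_FOR_EACH_ENTRY_SAFE(pos, n, &o->exe_queue, link,
+ *				       struct ecore_exeq_elem)
+ *		ECORE_LIST_REMOVE_ENTRY(&pos->link, &o->exe_queue);
+ */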
+
+
+/* Manipulate a bit vector defined as an array of uint64_t */
+
+/* Number of bits in one sge_mask array element */
+#define BIT_VEC64_ELEM_SZ 64
+#define BIT_VEC64_ELEM_SHIFT 6
+#define BIT_VEC64_ELEM_MASK ((uint64_t)BIT_VEC64_ELEM_SZ - 1)
+
+#define __BIT_VEC64_SET_BIT(el, bit) \
+ do { \
+ el = ((el) | ((uint64_t)0x1 << (bit))); \
+ } while (0)
+
+#define __BIT_VEC64_CLEAR_BIT(el, bit) \
+ do { \
+ el = ((el) & (~((uint64_t)0x1 << (bit)))); \
+ } while (0)
+
+#define BIT_VEC64_SET_BIT(vec64, idx) \
+ __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+ (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
+ __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+ (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_TEST_BIT(vec64, idx) \
+ (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
+ ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
+
+/*
+ * Creates a bitmask of all ones in less significant bits.
+ * idx - index of the most significant bit in the created mask
+ */
+#define BIT_VEC64_ONES_MASK(idx) \
+ (((uint64_t)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
+#define BIT_VEC64_ELEM_ONE_MASK ((uint64_t)(~0))
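+/* Usage sketch (illustrative): a 128-bit vector spans two uint64_t elements;
+ * index 70 selects element 1 (70 >> 6), bit 6 (70 & 63).
+ *
+ *	uint64_t vec[2] = { 0, 0 };
+ *
+ *	BIT_VEC64_SET_BIT(vec, 70);
+ *	if (BIT_VEC64_TEST_BIT(vec, 70))
+ *		BIT_VEC64_CLEAR_BIT(vec, 70);
+ */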
+
+/* fill in a MAC address the way the FW likes it */
+static inline void
+ecore_set_fw_mac_addr(uint16_t *fw_hi,
+ uint16_t *fw_mid,
+ uint16_t *fw_lo,
+ uint8_t *mac)
+{
+ ((uint8_t *)fw_hi)[0] = mac[1];
+ ((uint8_t *)fw_hi)[1] = mac[0];
+ ((uint8_t *)fw_mid)[0] = mac[3];
+ ((uint8_t *)fw_mid)[1] = mac[2];
+ ((uint8_t *)fw_lo)[0] = mac[5];
+ ((uint8_t *)fw_lo)[1] = mac[4];
+}
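+/* Example (illustrative): for mac = 00:11:22:33:44:55 the words are filled
+ * byte-wise as {0x11, 0x00}, {0x33, 0x22}, {0x55, 0x44}, so on a
+ * little-endian host *fw_hi == 0x0011, *fw_mid == 0x2233, *fw_lo == 0x4455.
+ */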
+
+
+enum ecore_status_t {
+ ECORE_EXISTS = -6,
+ ECORE_IO = -5,
+ ECORE_TIMEOUT = -4,
+ ECORE_INVAL = -3,
+ ECORE_BUSY = -2,
+ ECORE_NOMEM = -1,
+ ECORE_SUCCESS = 0,
+ /* PENDING is not an error and should be positive */
+ ECORE_PENDING = 1,
+};
+
+enum {
+ SWITCH_UPDATE,
+ AFEX_UPDATE,
+};
+
+
+
+
+struct bnx2x_softc;
+struct eth_context;
+
+/* Bits representing general command's configuration */
+enum {
+ RAMROD_TX,
+ RAMROD_RX,
+ /* Wait until all pending commands complete */
+ RAMROD_COMP_WAIT,
+ /* Don't send a ramrod, only update a registry */
+ RAMROD_DRV_CLR_ONLY,
+ /* Configure HW according to the current object state */
+ RAMROD_RESTORE,
+ /* Execute the next command now */
+ RAMROD_EXEC,
+	/* Don't add a new command; continue execution of postponed
+	 * commands. If not set, a new command will be added to the
+	 * pending commands list.
+	 */
+ RAMROD_CONT,
+ /* If there is another pending ramrod, wait until it finishes and
+ * re-try to submit this one. This flag can be set only in sleepable
+ * context, and should not be set from the context that completes the
+ * ramrods as deadlock will occur.
+ */
+ RAMROD_RETRY,
+};
+
+typedef enum {
+ ECORE_OBJ_TYPE_RX,
+ ECORE_OBJ_TYPE_TX,
+ ECORE_OBJ_TYPE_RX_TX,
+} ecore_obj_type;
+
+/* Public slow path states */
+enum {
+ ECORE_FILTER_MAC_PENDING,
+ ECORE_FILTER_VLAN_PENDING,
+ ECORE_FILTER_VLAN_MAC_PENDING,
+ ECORE_FILTER_RX_MODE_PENDING,
+ ECORE_FILTER_RX_MODE_SCHED,
+ ECORE_FILTER_ISCSI_ETH_START_SCHED,
+ ECORE_FILTER_ISCSI_ETH_STOP_SCHED,
+ ECORE_FILTER_FCOE_ETH_START_SCHED,
+ ECORE_FILTER_FCOE_ETH_STOP_SCHED,
+ ECORE_FILTER_MCAST_PENDING,
+ ECORE_FILTER_MCAST_SCHED,
+ ECORE_FILTER_RSS_CONF_PENDING,
+ ECORE_AFEX_FCOE_Q_UPDATE_PENDING,
+ ECORE_AFEX_PENDING_VIFSET_MCP_ACK
+};
+
+struct ecore_raw_obj {
+ uint8_t func_id;
+
+ /* Queue params */
+ uint8_t cl_id;
+ uint32_t cid;
+
+ /* Ramrod data buffer params */
+ void *rdata;
+ ecore_dma_addr_t rdata_mapping;
+
+ /* Ramrod state params */
+ int state; /* "ramrod is pending" state bit */
+ unsigned long *pstate; /* pointer to state buffer */
+
+ ecore_obj_type obj_type;
+
+ int (*wait_comp)(struct bnx2x_softc *sc,
+ struct ecore_raw_obj *o);
+
+ int (*check_pending)(struct ecore_raw_obj *o);
+ void (*clear_pending)(struct ecore_raw_obj *o);
+ void (*set_pending)(struct ecore_raw_obj *o);
+};
+
+/************************* VLAN-MAC commands related parameters ***************/
+struct ecore_mac_ramrod_data {
+ uint8_t mac[ETH_ALEN];
+ uint8_t is_inner_mac;
+};
+
+struct ecore_vlan_ramrod_data {
+ uint16_t vlan;
+};
+
+struct ecore_vlan_mac_ramrod_data {
+ uint8_t mac[ETH_ALEN];
+ uint8_t is_inner_mac;
+ uint16_t vlan;
+};
+
+union ecore_classification_ramrod_data {
+ struct ecore_mac_ramrod_data mac;
+ struct ecore_vlan_ramrod_data vlan;
+ struct ecore_vlan_mac_ramrod_data vlan_mac;
+};
+
+/* VLAN_MAC commands */
+enum ecore_vlan_mac_cmd {
+ ECORE_VLAN_MAC_ADD,
+ ECORE_VLAN_MAC_DEL,
+ ECORE_VLAN_MAC_MOVE,
+};
+
+struct ecore_vlan_mac_data {
+ /* Requested command: ECORE_VLAN_MAC_XX */
+ enum ecore_vlan_mac_cmd cmd;
+	/* Holds the data-related vlan_mac_flags bits taken from the
+	 * ramrod parameters.
+	 */
+ unsigned long vlan_mac_flags;
+
+ /* Needed for MOVE command */
+ struct ecore_vlan_mac_obj *target_obj;
+
+ union ecore_classification_ramrod_data u;
+};
+
+/*************************** Exe Queue obj ************************************/
+union ecore_exe_queue_cmd_data {
+ struct ecore_vlan_mac_data vlan_mac;
+
+ struct {
+ } mcast;
+};
+
+struct ecore_exeq_elem {
+ ecore_list_entry_t link;
+
+ /* Length of this element in the exe_chunk. */
+ int cmd_len;
+
+ union ecore_exe_queue_cmd_data cmd_data;
+};
+
+union ecore_qable_obj;
+
+union ecore_exeq_comp_elem {
+ union event_ring_elem *elem;
+};
+
+struct ecore_exe_queue_obj;
+
+typedef int (*exe_q_validate)(struct bnx2x_softc *sc,
+ union ecore_qable_obj *o,
+ struct ecore_exeq_elem *elem);
+
+typedef int (*exe_q_remove)(struct bnx2x_softc *sc,
+ union ecore_qable_obj *o,
+ struct ecore_exeq_elem *elem);
+
+/* Return positive if entry was optimized, 0 - if not, negative
+ * in case of an error.
+ */
+typedef int (*exe_q_optimize)(struct bnx2x_softc *sc,
+ union ecore_qable_obj *o,
+ struct ecore_exeq_elem *elem);
+typedef int (*exe_q_execute)(struct bnx2x_softc *sc,
+ union ecore_qable_obj *o,
+ ecore_list_t *exe_chunk,
+ unsigned long *ramrod_flags);
+typedef struct ecore_exeq_elem *
+ (*exe_q_get)(struct ecore_exe_queue_obj *o,
+ struct ecore_exeq_elem *elem);
+
+struct ecore_exe_queue_obj {
+ /* Commands pending for an execution. */
+ ecore_list_t exe_queue;
+
+	/* Commands pending for a completion. */
+ ecore_list_t pending_comp;
+
+ ECORE_MUTEX_SPIN lock;
+
+ /* Maximum length of commands' list for one execution */
+ int exe_chunk_len;
+
+ union ecore_qable_obj *owner;
+
+ /****** Virtual functions ******/
+ /**
+ * Called before commands execution for commands that are really
+ * going to be executed (after 'optimize').
+ *
+ * Must run under exe_queue->lock
+ */
+ exe_q_validate validate;
+
+ /**
+ * Called before removing pending commands, cleaning allocated
+ * resources (e.g., credits from validate)
+ */
+ exe_q_remove remove;
+
+ /**
+ * This will try to cancel the current pending commands list
+ * considering the new command.
+ *
+ * Returns the number of optimized commands or a negative error code
+ *
+ * Must run under exe_queue->lock
+ */
+ exe_q_optimize optimize;
+
+ /**
+ * Run the next commands chunk (owner specific).
+ */
+ exe_q_execute execute;
+
+ /**
+ * Return the exe_queue element containing the specific command
+ * if any. Otherwise return NULL.
+ */
+ exe_q_get get;
+};
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/*
+ * Element in the VLAN_MAC registry list having all current configured
+ * rules.
+ */
+struct ecore_vlan_mac_registry_elem {
+ ecore_list_entry_t link;
+
+ /* Used to store the cam offset used for the mac/vlan/vlan-mac.
+ * Relevant for 57711 only. VLANs and MACs share the
+ * same CAM for these chips.
+ */
+ int cam_offset;
+
+ /* Needed for DEL and RESTORE flows */
+ unsigned long vlan_mac_flags;
+
+ union ecore_classification_ramrod_data u;
+};
+
+/* Bits representing VLAN_MAC commands specific flags */
+enum {
+ ECORE_UC_LIST_MAC,
+ ECORE_ETH_MAC,
+ ECORE_ISCSI_ETH_MAC,
+ ECORE_NETQ_ETH_MAC,
+ ECORE_DONT_CONSUME_CAM_CREDIT,
+ ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
+};
+
+struct ecore_vlan_mac_ramrod_params {
+ /* Object to run the command from */
+ struct ecore_vlan_mac_obj *vlan_mac_obj;
+
+ /* General command flags: COMP_WAIT, etc. */
+ unsigned long ramrod_flags;
+
+ /* Command specific configuration request */
+ struct ecore_vlan_mac_data user_req;
+};
+
+struct ecore_vlan_mac_obj {
+ struct ecore_raw_obj raw;
+
+ /* Bookkeeping list: will prevent the addition of already existing
+ * entries.
+ */
+ ecore_list_t head;
+ /* Implement a simple reader/writer lock on the head list.
+	 * All these fields should only be accessed under the exe_queue lock.
+ */
+ uint8_t head_reader; /* Num. of readers accessing head list */
+ int head_exe_request; /* Pending execution request. */
+ unsigned long saved_ramrod_flags; /* Ramrods of pending execution */
+
+ /* Execution queue interface instance */
+ struct ecore_exe_queue_obj exe_queue;
+
+ /* MACs credit pool */
+ struct ecore_credit_pool_obj *macs_pool;
+
+ /* VLANs credit pool */
+ struct ecore_credit_pool_obj *vlans_pool;
+
+ /* RAMROD command to be used */
+ int ramrod_cmd;
+
+ /* copy first n elements onto preallocated buffer
+ *
+ * @param n number of elements to get
+	 * @param base buffer preallocated by the caller into which elements
+	 * will be copied. Note elements are 4-byte aligned
+	 * so the buffer size must be able to accommodate the
+ * aligned elements.
+ *
+ * @return number of copied bytes
+ */
+
+ int (*get_n_elements)(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o, int n, uint8_t *base,
+ uint8_t stride, uint8_t size);
+
+ /**
+ * Checks if ADD-ramrod with the given params may be performed.
+ *
+ * @return zero if the element may be added
+ */
+
+ int (*check_add)(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o,
+ union ecore_classification_ramrod_data *data);
+
+ /**
+ * Checks if DEL-ramrod with the given params may be performed.
+ *
+ * @return TRUE if the element may be deleted
+ */
+ struct ecore_vlan_mac_registry_elem *
+ (*check_del)(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o,
+ union ecore_classification_ramrod_data *data);
+
+ /**
+	 * Checks if a MOVE-ramrod with the given params may be performed.
+	 *
+	 * @return TRUE if the element may be moved
+ */
+ int (*check_move)(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *src_o,
+ struct ecore_vlan_mac_obj *dst_o,
+ union ecore_classification_ramrod_data *data);
+
+ /**
+ * Update the relevant credit object(s) (consume/return
+ * correspondingly).
+ */
+ int (*get_credit)(struct ecore_vlan_mac_obj *o);
+ int (*put_credit)(struct ecore_vlan_mac_obj *o);
+ int (*get_cam_offset)(struct ecore_vlan_mac_obj *o, int *offset);
+ int (*put_cam_offset)(struct ecore_vlan_mac_obj *o, int offset);
+
+ /**
+ * Configures one rule in the ramrod data buffer.
+ */
+ void (*set_one_rule)(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o,
+ struct ecore_exeq_elem *elem, int rule_idx,
+ int cam_offset);
+
+ /**
+ * Delete all configured elements having the given
+	 * vlan_mac_flags specification. Assumes there are no commands
+	 * pending for execution. Will schedule all currently
+ * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
+ * specification for deletion and will use the given
+ * ramrod_flags for the last DEL operation.
+ *
+ * @param sc
+ * @param o
+ * @param ramrod_flags RAMROD_XX flags
+ *
+ * @return 0 if the last operation has completed successfully
+ * and there are no more elements left, positive value
+	 * if there are commands pending for completion,
+ * negative value in case of failure.
+ */
+ int (*delete_all)(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o,
+ unsigned long *vlan_mac_flags,
+ unsigned long *ramrod_flags);
+
+ /**
+ * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
+ * configured elements list.
+ *
+ * @param sc
+ * @param p Command parameters (RAMROD_COMP_WAIT bit in
+ * ramrod_flags is only taken into an account)
+ * @param ppos a pointer to the cookie that should be given back in the
+ * next call to make function handle the next element. If
+ * *ppos is set to NULL it will restart the iterator.
+ * If returned *ppos == NULL this means that the last
+ * element has been handled.
+ *
+ * @return int
+ */
+ int (*restore)(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_ramrod_params *p,
+ struct ecore_vlan_mac_registry_elem **ppos);
+
+ /**
+ * Should be called on a completion arrival.
+ *
+ * @param sc
+ * @param o
+ * @param cqe Completion element we are handling
+ * @param ramrod_flags if RAMROD_CONT is set the next bulk of
+ * pending commands will be executed.
+ * RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
+ * may also be set if needed.
+ *
+	 * @return 0 if there are no commands pending or awaiting
+	 * completion. Positive value if there are commands
+	 * pending for execution or for completion.
+ * Negative value in case of an error (including an
+ * error in the cqe).
+ */
+ int (*complete)(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *o,
+ union event_ring_elem *cqe,
+ unsigned long *ramrod_flags);
+
+ /**
+ * Wait for completion of all commands. Don't schedule new ones,
+ * just wait. It assumes that the completion code will schedule
+ * for new commands.
+ */
+ int (*wait)(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *o);
+};
+
+enum {
+ ECORE_LLH_CAM_ISCSI_ETH_LINE = 0,
+ ECORE_LLH_CAM_ETH_LINE,
+ ECORE_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
+};
+
+/** RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+
+/* RX_MODE ramrod special flags: set in rx_mode_flags field in
+ * a ecore_rx_mode_ramrod_params.
+ */
+enum {
+ ECORE_RX_MODE_FCOE_ETH,
+ ECORE_RX_MODE_ISCSI_ETH,
+};
+
+enum {
+ ECORE_ACCEPT_UNICAST,
+ ECORE_ACCEPT_MULTICAST,
+ ECORE_ACCEPT_ALL_UNICAST,
+ ECORE_ACCEPT_ALL_MULTICAST,
+ ECORE_ACCEPT_BROADCAST,
+ ECORE_ACCEPT_UNMATCHED,
+ ECORE_ACCEPT_ANY_VLAN
+};
+
+struct ecore_rx_mode_ramrod_params {
+ struct ecore_rx_mode_obj *rx_mode_obj;
+ unsigned long *pstate;
+ int state;
+ uint8_t cl_id;
+ uint32_t cid;
+ uint8_t func_id;
+ unsigned long ramrod_flags;
+ unsigned long rx_mode_flags;
+
+ /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
+ * a tstorm_eth_mac_filter_config (e1x).
+ */
+ void *rdata;
+ ecore_dma_addr_t rdata_mapping;
+
+ /* Rx mode settings */
+ unsigned long rx_accept_flags;
+
+ /* internal switching settings */
+ unsigned long tx_accept_flags;
+};
+
+struct ecore_rx_mode_obj {
+ int (*config_rx_mode)(struct bnx2x_softc *sc,
+ struct ecore_rx_mode_ramrod_params *p);
+
+ int (*wait_comp)(struct bnx2x_softc *sc,
+ struct ecore_rx_mode_ramrod_params *p);
+};
+
+/********************** Set multicast group ***********************************/
+
+struct ecore_mcast_list_elem {
+ ecore_list_entry_t link;
+ uint8_t *mac;
+};
+
+union ecore_mcast_config_data {
+ uint8_t *mac;
+ uint8_t bin; /* used in a RESTORE flow */
+};
+
+struct ecore_mcast_ramrod_params {
+ struct ecore_mcast_obj *mcast_obj;
+
+ /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
+ unsigned long ramrod_flags;
+
+ ecore_list_t mcast_list; /* list of struct ecore_mcast_list_elem */
+ int mcast_list_len;
+};
+
+enum ecore_mcast_cmd {
+ ECORE_MCAST_CMD_ADD,
+ ECORE_MCAST_CMD_CONT,
+ ECORE_MCAST_CMD_DEL,
+ ECORE_MCAST_CMD_RESTORE,
+};
+
+struct ecore_mcast_obj {
+ struct ecore_raw_obj raw;
+
+ union {
+ struct {
+ #define ECORE_MCAST_BINS_NUM 256
+ #define ECORE_MCAST_VEC_SZ (ECORE_MCAST_BINS_NUM / 64)
+ uint64_t vec[ECORE_MCAST_VEC_SZ];
+
+ /** Number of BINs to clear. Should be updated
+ * immediately when a command arrives in order to
+ * properly create DEL commands.
+ */
+ int num_bins_set;
+ } aprox_match;
+
+ struct {
+ ecore_list_t macs;
+ int num_macs_set;
+ } exact_match;
+ } registry;
+
+ /* Pending commands */
+ ecore_list_t pending_cmds_head;
+
+ /* A state that is set in raw.pstate, when there are pending commands */
+ int sched_state;
+
+ /* Maximal number of mcast MACs configured in one command */
+ int max_cmd_len;
+
+ /* Total number of currently pending MACs to configure: both
+ * in the pending commands list and in the current command.
+ */
+ int total_pending_num;
+
+ uint8_t engine_id;
+
+ /**
+ * @param cmd command to execute (ECORE_MCAST_CMD_X, see above)
+ */
+ int (*config_mcast)(struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd);
+
+ /**
+ * Fills the ramrod data during the RESTORE flow.
+ *
+ * @param sc
+ * @param o
+	 * @param start_bin Registry bin index to start from
+ * @param rdata_idx Index in the ramrod data to start from
+ *
+ * @return -1 if we handled the whole registry or index of the last
+ * handled registry element.
+ */
+ int (*hdl_restore)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o,
+ int start_bin, int *rdata_idx);
+
+ int (*enqueue_cmd)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o,
+ struct ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd);
+
+ void (*set_one_rule)(struct bnx2x_softc *sc,
+ struct ecore_mcast_obj *o, int idx,
+ union ecore_mcast_config_data *cfg_data,
+ enum ecore_mcast_cmd cmd);
+
+ /** Checks if there are more mcast MACs to be set or a previous
+ * command is still pending.
+ */
+ int (*check_pending)(struct ecore_mcast_obj *o);
+
+ /**
+ * Set/Clear/Check SCHEDULED state of the object
+ */
+ void (*set_sched)(struct ecore_mcast_obj *o);
+ void (*clear_sched)(struct ecore_mcast_obj *o);
+ int (*check_sched)(struct ecore_mcast_obj *o);
+
+ /* Wait until all pending commands complete */
+ int (*wait_comp)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o);
+
+ /**
+ * Handle the internal object counters needed for proper
+ * commands handling. Checks that the provided parameters are
+ * feasible.
+ */
+ int (*validate)(struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd);
+
+ /**
+ * Restore the values of internal counters in case of a failure.
+ */
+ void (*revert)(struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ int old_num_bins);
+
+ int (*get_registry_size)(struct ecore_mcast_obj *o);
+ void (*set_registry_size)(struct ecore_mcast_obj *o, int n);
+};
+
+/*************************** Credit handling **********************************/
+struct ecore_credit_pool_obj {
+
+ /* Current amount of credit in the pool */
+ ecore_atomic_t credit;
+
+ /* Maximum allowed credit. put() will check against it. */
+ int pool_sz;
+
+ /* Allocate a pool table statically.
+ *
+ * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
+ *
+	 * A set bit in the table means that the entry is available.
+ */
+#define ECORE_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64)
+ uint64_t pool_mirror[ECORE_POOL_VEC_SIZE];
+
+	/* Base pool offset (initialized differently) */
+ int base_pool_offset;
+
+ /**
+ * Get the next free pool entry.
+ *
+ * @return TRUE if there was a free entry in the pool
+ */
+ int (*get_entry)(struct ecore_credit_pool_obj *o, int *entry);
+
+ /**
+ * Return the entry back to the pool.
+ *
+ * @return TRUE if entry is legal and has been successfully
+ * returned to the pool.
+ */
+ int (*put_entry)(struct ecore_credit_pool_obj *o, int entry);
+
+ /**
+ * Get the requested amount of credit from the pool.
+ *
+ * @param cnt Amount of requested credit
+ * @return TRUE if the operation is successful
+ */
+ int (*get)(struct ecore_credit_pool_obj *o, int cnt);
+
+ /**
+ * Returns the credit to the pool.
+ *
+ * @param cnt Amount of credit to return
+ * @return TRUE if the operation is successful
+ */
+ int (*put)(struct ecore_credit_pool_obj *o, int cnt);
+
+ /**
+ * Reads the current amount of credit.
+ */
+ int (*check)(struct ecore_credit_pool_obj *o);
+};
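+/* Usage sketch (illustrative): consume one credit before issuing an ADD
+ * command and return it if the command fails; 'pool' is an initialized
+ * ecore_credit_pool_obj and issue_add_cmd() is a hypothetical helper.
+ *
+ *	if (!pool->get(pool, 1))
+ *		return ECORE_NOMEM;
+ *	rc = issue_add_cmd(sc);
+ *	if (rc != ECORE_SUCCESS)
+ *		pool->put(pool, 1);
+ */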
+
+/*************************** RSS configuration ********************************/
+enum {
+ /* RSS_MODE bits are mutually exclusive */
+ ECORE_RSS_MODE_DISABLED,
+ ECORE_RSS_MODE_REGULAR,
+
+ ECORE_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
+
+ ECORE_RSS_IPV4,
+ ECORE_RSS_IPV4_TCP,
+ ECORE_RSS_IPV4_UDP,
+ ECORE_RSS_IPV6,
+ ECORE_RSS_IPV6_TCP,
+ ECORE_RSS_IPV6_UDP,
+
+ ECORE_RSS_TUNNELING,
+};
+
+struct ecore_config_rss_params {
+ struct ecore_rss_config_obj *rss_obj;
+
+ /* may have RAMROD_COMP_WAIT set only */
+ unsigned long ramrod_flags;
+
+ /* ECORE_RSS_X bits */
+ unsigned long rss_flags;
+
+ /* Number hash bits to take into an account */
+ uint8_t rss_result_mask;
+
+ /* Indirection table */
+ uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+ /* RSS hash values */
+ uint32_t rss_key[10];
+
+	/* valid iff ECORE_RSS_UPDATE_TOE is set */
+ uint16_t toe_rss_bitmap;
+
+ /* valid iff ECORE_RSS_TUNNELING is set */
+ uint16_t tunnel_value;
+ uint16_t tunnel_mask;
+};
+
+struct ecore_rss_config_obj {
+ struct ecore_raw_obj raw;
+
+ /* RSS engine to use */
+ uint8_t engine_id;
+
+ /* Last configured indirection table */
+ uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+	/* flags for enabling 4-tuple hash on UDP */
+ uint8_t udp_rss_v4;
+ uint8_t udp_rss_v6;
+
+ int (*config_rss)(struct bnx2x_softc *sc,
+ struct ecore_config_rss_params *p);
+};
+
+/*********************** Queue state update ***********************************/
+
+/* UPDATE command options */
+enum {
+ ECORE_Q_UPDATE_IN_VLAN_REM,
+ ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
+ ECORE_Q_UPDATE_OUT_VLAN_REM,
+ ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
+ ECORE_Q_UPDATE_ANTI_SPOOF,
+ ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
+ ECORE_Q_UPDATE_ACTIVATE,
+ ECORE_Q_UPDATE_ACTIVATE_CHNG,
+ ECORE_Q_UPDATE_DEF_VLAN_EN,
+ ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ ECORE_Q_UPDATE_SILENT_VLAN_REM,
+ ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
+ ECORE_Q_UPDATE_TX_SWITCHING,
+};
+
+/* Allowed Queue states */
+enum ecore_q_state {
+ ECORE_Q_STATE_RESET,
+ ECORE_Q_STATE_INITIALIZED,
+ ECORE_Q_STATE_ACTIVE,
+ ECORE_Q_STATE_MULTI_COS,
+ ECORE_Q_STATE_MCOS_TERMINATED,
+ ECORE_Q_STATE_INACTIVE,
+ ECORE_Q_STATE_STOPPED,
+ ECORE_Q_STATE_TERMINATED,
+ ECORE_Q_STATE_FLRED,
+ ECORE_Q_STATE_MAX,
+};
+
+/* Allowed Queue logical states */
+enum ecore_q_logical_state {
+ ECORE_Q_LOGICAL_STATE_ACTIVE,
+ ECORE_Q_LOGICAL_STATE_STOPPED,
+};
+
+/* Allowed commands */
+enum ecore_queue_cmd {
+ ECORE_Q_CMD_INIT,
+ ECORE_Q_CMD_SETUP,
+ ECORE_Q_CMD_SETUP_TX_ONLY,
+ ECORE_Q_CMD_DEACTIVATE,
+ ECORE_Q_CMD_ACTIVATE,
+ ECORE_Q_CMD_UPDATE,
+ ECORE_Q_CMD_UPDATE_TPA,
+ ECORE_Q_CMD_HALT,
+ ECORE_Q_CMD_CFC_DEL,
+ ECORE_Q_CMD_TERMINATE,
+ ECORE_Q_CMD_EMPTY,
+ ECORE_Q_CMD_MAX,
+};
+
+/* queue SETUP + INIT flags */
+enum {
+ ECORE_Q_FLG_TPA,
+ ECORE_Q_FLG_TPA_IPV6,
+ ECORE_Q_FLG_TPA_GRO,
+ ECORE_Q_FLG_STATS,
+ ECORE_Q_FLG_ZERO_STATS,
+ ECORE_Q_FLG_ACTIVE,
+ ECORE_Q_FLG_OV,
+ ECORE_Q_FLG_VLAN,
+ ECORE_Q_FLG_COS,
+ ECORE_Q_FLG_HC,
+ ECORE_Q_FLG_HC_EN,
+ ECORE_Q_FLG_DHC,
+ ECORE_Q_FLG_OOO,
+ ECORE_Q_FLG_FCOE,
+ ECORE_Q_FLG_LEADING_RSS,
+ ECORE_Q_FLG_MCAST,
+ ECORE_Q_FLG_DEF_VLAN,
+ ECORE_Q_FLG_TX_SWITCH,
+ ECORE_Q_FLG_TX_SEC,
+ ECORE_Q_FLG_ANTI_SPOOF,
+ ECORE_Q_FLG_SILENT_VLAN_REM,
+ ECORE_Q_FLG_FORCE_DEFAULT_PRI,
+ ECORE_Q_FLG_REFUSE_OUTBAND_VLAN,
+ ECORE_Q_FLG_PCSUM_ON_PKT,
+ ECORE_Q_FLG_TUN_INC_INNER_IP_ID
+};
+
+/* Queue type options: queue type may be a combination of below. */
+enum ecore_q_type {
+ ECORE_Q_TYPE_FWD,
+ ECORE_Q_TYPE_HAS_RX,
+ ECORE_Q_TYPE_HAS_TX,
+};
+
+#define ECORE_PRIMARY_CID_INDEX 0
+#define ECORE_MULTI_TX_COS_E1X 3 /* QM only */
+#define ECORE_MULTI_TX_COS_E2_E3A0 2
+#define ECORE_MULTI_TX_COS_E3B0 3
+#define ECORE_MULTI_TX_COS 3 /* Maximum possible */
+#define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(uint32_t)) - ETH_ALEN)
+
+struct ecore_queue_init_params {
+ struct {
+ unsigned long flags;
+ uint16_t hc_rate;
+ uint8_t fw_sb_id;
+ uint8_t sb_cq_index;
+ } tx;
+
+ struct {
+ unsigned long flags;
+ uint16_t hc_rate;
+ uint8_t fw_sb_id;
+ uint8_t sb_cq_index;
+ } rx;
+
+ /* CID context in the host memory */
+ struct eth_context *cxts[ECORE_MULTI_TX_COS];
+
+ /* maximum number of cos supported by hardware */
+ uint8_t max_cos;
+};
+
+struct ecore_queue_terminate_params {
+ /* index within the tx_only cids of this queue object */
+ uint8_t cid_index;
+};
+
+struct ecore_queue_cfc_del_params {
+ /* index within the tx_only cids of this queue object */
+ uint8_t cid_index;
+};
+
+struct ecore_queue_update_params {
+ unsigned long update_flags; /* ECORE_Q_UPDATE_XX bits */
+ uint16_t def_vlan;
+ uint16_t silent_removal_value;
+ uint16_t silent_removal_mask;
+/* index within the tx_only cids of this queue object */
+ uint8_t cid_index;
+};
+
+struct rxq_pause_params {
+ uint16_t bd_th_lo;
+ uint16_t bd_th_hi;
+ uint16_t rcq_th_lo;
+ uint16_t rcq_th_hi;
+ uint16_t sge_th_lo; /* valid iff ECORE_Q_FLG_TPA */
+ uint16_t sge_th_hi; /* valid iff ECORE_Q_FLG_TPA */
+ uint16_t pri_map;
+};
+
+/* general */
+struct ecore_general_setup_params {
+ /* valid iff ECORE_Q_FLG_STATS */
+ uint8_t stat_id;
+
+ uint8_t spcl_id;
+ uint16_t mtu;
+ uint8_t cos;
+};
+
+struct ecore_rxq_setup_params {
+ /* dma */
+ ecore_dma_addr_t dscr_map;
+ ecore_dma_addr_t rcq_map;
+ ecore_dma_addr_t rcq_np_map;
+
+ uint16_t drop_flags;
+ uint16_t buf_sz;
+ uint8_t fw_sb_id;
+ uint8_t cl_qzone_id;
+
+ /* valid iff ECORE_Q_FLG_TPA */
+ uint16_t tpa_agg_sz;
+ uint8_t max_tpa_queues;
+ uint8_t rss_engine_id;
+
+ /* valid iff ECORE_Q_FLG_MCAST */
+ uint8_t mcast_engine_id;
+
+ uint8_t cache_line_log;
+
+ uint8_t sb_cq_index;
+
+	/* valid iff ECORE_Q_FLG_SILENT_VLAN_REM */
+ uint16_t silent_removal_value;
+ uint16_t silent_removal_mask;
+};
+
+struct ecore_txq_setup_params {
+ /* dma */
+ ecore_dma_addr_t dscr_map;
+
+ uint8_t fw_sb_id;
+ uint8_t sb_cq_index;
+ uint8_t cos; /* valid iff ECORE_Q_FLG_COS */
+ uint16_t traffic_type;
+ /* equals to the leading rss client id, used for TX classification*/
+ uint8_t tss_leading_cl_id;
+
+ /* valid iff ECORE_Q_FLG_DEF_VLAN */
+ uint16_t default_vlan;
+};
+
+struct ecore_queue_setup_params {
+ struct ecore_general_setup_params gen_params;
+ struct ecore_txq_setup_params txq_params;
+ struct ecore_rxq_setup_params rxq_params;
+ struct rxq_pause_params pause_params;
+ unsigned long flags;
+};
+
+struct ecore_queue_setup_tx_only_params {
+ struct ecore_general_setup_params gen_params;
+ struct ecore_txq_setup_params txq_params;
+ unsigned long flags;
+ /* index within the tx_only cids of this queue object */
+ uint8_t cid_index;
+};
+
+struct ecore_queue_state_params {
+ struct ecore_queue_sp_obj *q_obj;
+
+ /* Current command */
+ enum ecore_queue_cmd cmd;
+
+ /* may have RAMROD_COMP_WAIT set only */
+ unsigned long ramrod_flags;
+
+ /* Params according to the current command */
+ union {
+ struct ecore_queue_update_params update;
+ struct ecore_queue_setup_params setup;
+ struct ecore_queue_init_params init;
+ struct ecore_queue_setup_tx_only_params tx_only;
+ struct ecore_queue_terminate_params terminate;
+ struct ecore_queue_cfc_del_params cfc_del;
+ } params;
+};
+
+struct ecore_viflist_params {
+ uint8_t echo_res;
+ uint8_t func_bit_map_res;
+};
+
+struct ecore_queue_sp_obj {
+ uint32_t cids[ECORE_MULTI_TX_COS];
+ uint8_t cl_id;
+ uint8_t func_id;
+
+	/* number of traffic classes supported by the queue.
+ * The primary connection of the queue supports the first traffic
+ * class. Any further traffic class is supported by a tx-only
+ * connection.
+ *
+ * Therefore max_cos is also a number of valid entries in the cids
+ * array.
+ */
+ uint8_t max_cos;
+ uint8_t num_tx_only, next_tx_only;
+
+ enum ecore_q_state state, next_state;
+
+ /* bits from enum ecore_q_type */
+ unsigned long type;
+
+ /* ECORE_Q_CMD_XX bits. This object implements "one
+ * pending" paradigm but for debug and tracing purposes it's
+ * more convenient to have different bits for different
+ * commands.
+ */
+ unsigned long pending;
+
+ /* Buffer to use as a ramrod data and its mapping */
+ void *rdata;
+ ecore_dma_addr_t rdata_mapping;
+
+ /**
+ * Performs one state change according to the given parameters.
+ *
+ * @return 0 in case of success and negative value otherwise.
+ */
+ int (*send_cmd)(struct bnx2x_softc *sc,
+ struct ecore_queue_state_params *params);
+
+ /**
+ * Sets the pending bit according to the requested transition.
+ */
+ int (*set_pending)(struct ecore_queue_sp_obj *o,
+ struct ecore_queue_state_params *params);
+
+ /**
+ * Checks that the requested state transition is legal.
+ */
+ int (*check_transition)(struct bnx2x_softc *sc,
+ struct ecore_queue_sp_obj *o,
+ struct ecore_queue_state_params *params);
+
+ /**
+ * Completes the pending command.
+ */
+ int (*complete_cmd)(struct bnx2x_softc *sc,
+ struct ecore_queue_sp_obj *o,
+			    enum ecore_queue_cmd cmd);
+
+ int (*wait_comp)(struct bnx2x_softc *sc,
+ struct ecore_queue_sp_obj *o,
+ enum ecore_queue_cmd cmd);
+};
+
+/********************** Function state update *********************************/
+/* Allowed Function states */
+enum ecore_func_state {
+ ECORE_F_STATE_RESET,
+ ECORE_F_STATE_INITIALIZED,
+ ECORE_F_STATE_STARTED,
+ ECORE_F_STATE_TX_STOPPED,
+ ECORE_F_STATE_MAX,
+};
+
+/* Allowed Function commands */
+enum ecore_func_cmd {
+ ECORE_F_CMD_HW_INIT,
+ ECORE_F_CMD_START,
+ ECORE_F_CMD_STOP,
+ ECORE_F_CMD_HW_RESET,
+ ECORE_F_CMD_AFEX_UPDATE,
+ ECORE_F_CMD_AFEX_VIFLISTS,
+ ECORE_F_CMD_TX_STOP,
+ ECORE_F_CMD_TX_START,
+ ECORE_F_CMD_SWITCH_UPDATE,
+ ECORE_F_CMD_MAX,
+};
+
+struct ecore_func_hw_init_params {
+ /* A load phase returned by MCP.
+ *
+ * May be:
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+ * FW_MSG_CODE_DRV_LOAD_COMMON
+ * FW_MSG_CODE_DRV_LOAD_PORT
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION
+ */
+ uint32_t load_phase;
+};
+
+struct ecore_func_hw_reset_params {
+ /* A load phase returned by MCP.
+ *
+ * May be:
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+ * FW_MSG_CODE_DRV_LOAD_COMMON
+ * FW_MSG_CODE_DRV_LOAD_PORT
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION
+ */
+ uint32_t reset_phase;
+};
+
+struct ecore_func_start_params {
+ /* Multi Function mode:
+ * - Single Function
+ * - Switch Dependent
+ * - Switch Independent
+ */
+ uint16_t mf_mode;
+
+ /* Switch Dependent mode outer VLAN tag */
+ uint16_t sd_vlan_tag;
+
+ /* Function cos mode */
+ uint8_t network_cos_mode;
+
+ /* NVGRE classification enablement */
+ uint8_t nvgre_clss_en;
+
+ /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
+ uint8_t gre_tunnel_mode;
+
+ /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
+ uint8_t gre_tunnel_rss;
+
+};
+
+struct ecore_func_switch_update_params {
+ uint8_t suspend;
+};
+
+struct ecore_func_afex_update_params {
+ uint16_t vif_id;
+ uint16_t afex_default_vlan;
+ uint8_t allowed_priorities;
+};
+
+struct ecore_func_afex_viflists_params {
+ uint16_t vif_list_index;
+ uint8_t func_bit_map;
+ uint8_t afex_vif_list_command;
+ uint8_t func_to_clear;
+};
+struct ecore_func_tx_start_params {
+ struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
+ uint8_t dcb_enabled;
+ uint8_t dcb_version;
+ uint8_t dont_add_pri_0;
+};
+
+struct ecore_func_state_params {
+ struct ecore_func_sp_obj *f_obj;
+
+ /* Current command */
+ enum ecore_func_cmd cmd;
+
+ /* may have RAMROD_COMP_WAIT set only */
+ unsigned long ramrod_flags;
+
+ /* Params according to the current command */
+ union {
+ struct ecore_func_hw_init_params hw_init;
+ struct ecore_func_hw_reset_params hw_reset;
+ struct ecore_func_start_params start;
+ struct ecore_func_switch_update_params switch_update;
+ struct ecore_func_afex_update_params afex_update;
+ struct ecore_func_afex_viflists_params afex_viflists;
+ struct ecore_func_tx_start_params tx_start;
+ } params;
+};
+
+struct ecore_func_sp_drv_ops {
+ /* Init tool + runtime initialization:
+ * - Common Chip
+ * - Common (per Path)
+ * - Port
+ * - Function phases
+ */
+ int (*init_hw_cmn_chip)(struct bnx2x_softc *sc);
+ int (*init_hw_cmn)(struct bnx2x_softc *sc);
+ int (*init_hw_port)(struct bnx2x_softc *sc);
+ int (*init_hw_func)(struct bnx2x_softc *sc);
+
+ /* Reset Function HW: Common, Port, Function phases. */
+ void (*reset_hw_cmn)(struct bnx2x_softc *sc);
+ void (*reset_hw_port)(struct bnx2x_softc *sc);
+ void (*reset_hw_func)(struct bnx2x_softc *sc);
+
+ /* Prepare/Release FW resources */
+ int (*init_fw)(struct bnx2x_softc *sc);
+ void (*release_fw)(struct bnx2x_softc *sc);
+};
+
+struct ecore_func_sp_obj {
+ enum ecore_func_state state, next_state;
+
+ /* ECORE_FUNC_CMD_XX bits. This object implements "one
+ * pending" paradigm but for debug and tracing purposes it's
+ * more convenient to have different bits for different
+ * commands.
+ */
+ unsigned long pending;
+
+ /* Buffer to use as a ramrod data and its mapping */
+ void *rdata;
+ ecore_dma_addr_t rdata_mapping;
+
+	/* Buffer to use as AFEX ramrod data and its mapping.
+	 * This can't be the same rdata as above because AFEX ramrod requests
+ * can arrive to the object in parallel to other ramrod requests.
+ */
+ void *afex_rdata;
+ ecore_dma_addr_t afex_rdata_mapping;
+
+	/* This mutex ensures that when the pending flag is taken, the next
+	 * ramrod to be sent will be the one that set the pending bit.
+	 */
+ ECORE_MUTEX one_pending_mutex;
+
+ /* Driver interface */
+ struct ecore_func_sp_drv_ops *drv;
+
+ /**
+ * Performs one state change according to the given parameters.
+ *
+ * @return 0 in case of success and negative value otherwise.
+ */
+ int (*send_cmd)(struct bnx2x_softc *sc,
+ struct ecore_func_state_params *params);
+
+ /**
+ * Checks that the requested state transition is legal.
+ */
+ int (*check_transition)(struct bnx2x_softc *sc,
+ struct ecore_func_sp_obj *o,
+ struct ecore_func_state_params *params);
+
+ /**
+ * Completes the pending command.
+ */
+ int (*complete_cmd)(struct bnx2x_softc *sc,
+ struct ecore_func_sp_obj *o,
+ enum ecore_func_cmd cmd);
+
+ int (*wait_comp)(struct bnx2x_softc *sc, struct ecore_func_sp_obj *o,
+ enum ecore_func_cmd cmd);
+};
+
+/********************** Interfaces ********************************************/
+/* Queueable objects set */
+union ecore_qable_obj {
+ struct ecore_vlan_mac_obj vlan_mac;
+};
+/************** Function state update *********/
+void ecore_init_func_obj(struct bnx2x_softc *sc,
+ struct ecore_func_sp_obj *obj,
+ void *rdata, ecore_dma_addr_t rdata_mapping,
+ void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
+ struct ecore_func_sp_drv_ops *drv_iface);
+
+int ecore_func_state_change(struct bnx2x_softc *sc,
+ struct ecore_func_state_params *params);
+
+enum ecore_func_state ecore_func_get_state(struct bnx2x_softc *sc,
+ struct ecore_func_sp_obj *o);
+/******************* Queue State **************/
+void ecore_init_queue_obj(struct bnx2x_softc *sc,
+ struct ecore_queue_sp_obj *obj, uint8_t cl_id, uint32_t *cids,
+ uint8_t cid_cnt, uint8_t func_id, void *rdata,
+ ecore_dma_addr_t rdata_mapping, unsigned long type);
+
+int ecore_queue_state_change(struct bnx2x_softc *sc,
+ struct ecore_queue_state_params *params);
+
+/********************* VLAN-MAC ****************/
+void ecore_init_mac_obj(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *mac_obj,
+ uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
+ ecore_dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, ecore_obj_type type,
+ struct ecore_credit_pool_obj *macs_pool);
+
+void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o);
+int ecore_vlan_mac_h_write_lock(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o);
+void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_obj *o);
+int ecore_config_vlan_mac(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_ramrod_params *p);
+
+int ecore_vlan_mac_move(struct bnx2x_softc *sc,
+ struct ecore_vlan_mac_ramrod_params *p,
+ struct ecore_vlan_mac_obj *dest_o);
+
+/********************* RX MODE ****************/
+
+void ecore_init_rx_mode_obj(struct bnx2x_softc *sc,
+ struct ecore_rx_mode_obj *o);
+
+/**
+ * ecore_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
+ *
+ * @p: Command parameters
+ *
+ * Return: 0 - if operation was successful and there is no pending completions,
+ * positive number - if there are pending completions,
+ * negative - if there were errors
+ */
+int ecore_config_rx_mode(struct bnx2x_softc *sc,
+ struct ecore_rx_mode_ramrod_params *p);
+
+/****************** MULTICASTS ****************/
+
+void ecore_init_mcast_obj(struct bnx2x_softc *sc,
+ struct ecore_mcast_obj *mcast_obj,
+ uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id,
+ uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate,
+ ecore_obj_type type);
+
+/**
+ * ecore_config_mcast - Configure multicast MACs list.
+ *
+ * @cmd: command to execute: ECORE_MCAST_CMD_X
+ *
+ * May configure a new list
+ * provided in p->mcast_list (ECORE_MCAST_CMD_ADD), clean up
+ * (ECORE_MCAST_CMD_DEL) or restore (ECORE_MCAST_CMD_RESTORE) a current
+ * configuration, continue to execute the pending commands
+ * (ECORE_MCAST_CMD_CONT).
+ *
+ * If previous command is still pending or if number of MACs to
+ * configure is more than the maximum number of MACs in one command,
+ * the current command will be enqueued to the tail of the
+ * pending commands list.
+ *
+ * Return: 0 if the operation was successful and there are no pending completions,
+ * negative if there were errors, positive if there are pending
+ * completions.
+ */
+int ecore_config_mcast(struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd);
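+/* Usage sketch (illustrative; assumes the driver keeps its multicast object
+ * in sc->mcast_obj and that 'mac' points to a 6-byte address):
+ *
+ *	struct ecore_mcast_ramrod_params rparam = { 0 };
+ *	struct ecore_mcast_list_elem elem;
+ *	int rc;
+ *
+ *	rparam.mcast_obj = &sc->mcast_obj;
+ *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &rparam.ramrod_flags);
+ *	ECORE_LIST_INIT(&rparam.mcast_list);
+ *	elem.mac = mac;
+ *	ECORE_LIST_PUSH_TAIL(&elem.link, &rparam.mcast_list);
+ *	rparam.mcast_list_len = 1;
+ *	rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
+ */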
+
+/****************** CREDIT POOL ****************/
+void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
+ struct ecore_credit_pool_obj *p, uint8_t func_id,
+ uint8_t func_num);
+void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
+ struct ecore_credit_pool_obj *p, uint8_t func_id,
+ uint8_t func_num);
+
+/****************** RSS CONFIGURATION ****************/
+void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj,
+ uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id,
+ void *rdata, ecore_dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate,
+ ecore_obj_type type);
+
+/**
+ * ecore_config_rss - Updates RSS configuration according to provided parameters
+ *
+ * Return: 0 in case of success
+ */
+int ecore_config_rss(struct bnx2x_softc *sc,
+ struct ecore_config_rss_params *p);
+
+
+#endif /* ECORE_SP_H */
diff --git a/src/dpdk22/drivers/net/bnx2x/elink.h b/src/dpdk22/drivers/net/bnx2x/elink.h
new file mode 100644
index 00000000..c4f886a7
--- /dev/null
+++ b/src/dpdk22/drivers/net/bnx2x/elink.h
@@ -0,0 +1,588 @@
+/*
+ * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ *
+ * Eric Davis <edavis@broadcom.com>
+ * David Christensen <davidch@broadcom.com>
+ * Gary Zambrano <zambrano@broadcom.com>
+ *
+ * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
+ * Copyright (c) 2015 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ */
+
+#ifndef ELINK_H
+#define ELINK_H
+
+#define ELINK_DEBUG
+
+/***********************************************************/
+/* CLC Call backs functions */
+/***********************************************************/
+/* CLC device structure */
+struct bnx2x_softc;
+
+extern uint32_t elink_cb_reg_read(struct bnx2x_softc *sc, uint32_t reg_addr);
+extern void elink_cb_reg_write(struct bnx2x_softc *sc, uint32_t reg_addr, uint32_t val);
+
+/* mode - 0( LOW ) /1(HIGH)*/
+extern uint8_t elink_cb_gpio_write(struct bnx2x_softc *sc,
+ uint16_t gpio_num,
+ uint8_t mode, uint8_t port);
+extern uint8_t elink_cb_gpio_mult_write(struct bnx2x_softc *sc,
+ uint8_t pins,
+ uint8_t mode);
+
+extern uint32_t elink_cb_gpio_read(struct bnx2x_softc *sc, uint16_t gpio_num, uint8_t port);
+extern uint8_t elink_cb_gpio_int_write(struct bnx2x_softc *sc,
+ uint16_t gpio_num,
+ uint8_t mode, uint8_t port);
+
+extern uint32_t elink_cb_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param);
+
+/* This function is called after every 1024 bytes of PHY firmware are
+ * downloaded. The driver can use it to display download progress. */
+extern void elink_cb_download_progress(struct bnx2x_softc *sc, uint32_t cur, uint32_t total);
+
+/* Each log type has its own parameters */
+typedef enum elink_log_id {
+ ELINK_LOG_ID_UNQUAL_IO_MODULE = 0, /* uint8_t port, const char* vendor_name, const char* vendor_pn */
+ ELINK_LOG_ID_OVER_CURRENT = 1, /* uint8_t port */
+ ELINK_LOG_ID_PHY_UNINITIALIZED = 2, /* uint8_t port */
+	ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT = 3,	/* No params */
+ ELINK_LOG_ID_NON_10G_MODULE = 4, /* uint8_t port */
+} elink_log_id_t;
+
+typedef enum elink_status {
+ ELINK_STATUS_OK = 0,
+ ELINK_STATUS_ERROR,
+ ELINK_STATUS_TIMEOUT,
+ ELINK_STATUS_NO_LINK,
+ ELINK_STATUS_INVALID_IMAGE,
+ ELINK_OP_NOT_SUPPORTED = 122
+} elink_status_t;
+extern void elink_cb_event_log(struct bnx2x_softc *sc, const elink_log_id_t log_id, ...);
+extern void elink_cb_load_warpcore_microcode(void);
+
+extern void elink_cb_notify_link_changed(struct bnx2x_softc *sc);
+
+#define ELINK_EVENT_LOG_LEVEL_ERROR 1
+#define ELINK_EVENT_LOG_LEVEL_WARNING 2
+#define ELINK_EVENT_ID_SFP_UNQUALIFIED_MODULE 1
+#define ELINK_EVENT_ID_SFP_POWER_FAULT 2
+
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
+/* Debug prints */
+
+/***********************************************************/
+/* Defines */
+/***********************************************************/
+#define ELINK_DEFAULT_PHY_DEV_ADDR 3
+#define ELINK_E2_DEFAULT_PHY_DEV_ADDR 5
+
+
+#define DUPLEX_FULL 1
+#define DUPLEX_HALF 2
+
+#define ELINK_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO
+#define ELINK_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX
+#define ELINK_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX
+#define ELINK_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
+#define ELINK_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
+
+#define ELINK_NET_SERDES_IF_XFI 1
+#define ELINK_NET_SERDES_IF_SFI 2
+#define ELINK_NET_SERDES_IF_KR 3
+#define ELINK_NET_SERDES_IF_DXGXS 4
+
+#define ELINK_SPEED_AUTO_NEG 0
+#define ELINK_SPEED_10 10
+#define ELINK_SPEED_100 100
+#define ELINK_SPEED_1000 1000
+#define ELINK_SPEED_2500 2500
+#define ELINK_SPEED_10000 10000
+#define ELINK_SPEED_20000 20000
+
+#define ELINK_I2C_DEV_ADDR_A0 0xa0
+#define ELINK_I2C_DEV_ADDR_A2 0xa2
+
+#define ELINK_SFP_EEPROM_PAGE_SIZE 16
+#define ELINK_SFP_EEPROM_VENDOR_NAME_ADDR 0x14
+#define ELINK_SFP_EEPROM_VENDOR_NAME_SIZE 16
+#define ELINK_SFP_EEPROM_VENDOR_OUI_ADDR 0x25
+#define ELINK_SFP_EEPROM_VENDOR_OUI_SIZE 3
+#define ELINK_SFP_EEPROM_PART_NO_ADDR 0x28
+#define ELINK_SFP_EEPROM_PART_NO_SIZE 16
+#define ELINK_SFP_EEPROM_REVISION_ADDR 0x38
+#define ELINK_SFP_EEPROM_REVISION_SIZE 4
+#define ELINK_SFP_EEPROM_SERIAL_ADDR 0x44
+#define ELINK_SFP_EEPROM_SERIAL_SIZE 16
+#define ELINK_SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */
+#define ELINK_SFP_EEPROM_DATE_SIZE 6
+#define ELINK_SFP_EEPROM_DIAG_TYPE_ADDR 0x5c
+#define ELINK_SFP_EEPROM_DIAG_TYPE_SIZE 1
+#define ELINK_SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2)
+#define ELINK_SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
+#define ELINK_SFP_EEPROM_SFF_8472_COMP_SIZE 1
+
+#define ELINK_SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e
+#define ELINK_SFP_EEPROM_A2_CC_DMI_ADDR 0x5f
+
+#define ELINK_PWR_FLT_ERR_MSG_LEN 250
+
+#define ELINK_XGXS_EXT_PHY_TYPE(ext_phy_config) \
+ ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
+#define ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config) \
+ (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
+ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
+#define ELINK_SERDES_EXT_PHY_TYPE(ext_phy_config) \
+ ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
+
+/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */
+#define ELINK_SINGLE_MEDIA_DIRECT(params) (params->num_phys == 1)
+/* Single Media board contains single external phy */
+#define ELINK_SINGLE_MEDIA(params) (params->num_phys == 2)
+/* Dual Media board contains two external phy with different media */
+#define ELINK_DUAL_MEDIA(params) (params->num_phys == 3)
+
+#define ELINK_FW_PARAM_PHY_ADDR_MASK 0x000000FF
+#define ELINK_FW_PARAM_PHY_TYPE_MASK 0x0000FF00
+#define ELINK_FW_PARAM_MDIO_CTRL_MASK 0xFFFF0000
+#define ELINK_FW_PARAM_MDIO_CTRL_OFFSET 16
+#define ELINK_FW_PARAM_PHY_ADDR(fw_param) (fw_param & \
+ ELINK_FW_PARAM_PHY_ADDR_MASK)
+#define ELINK_FW_PARAM_PHY_TYPE(fw_param) (fw_param & \
+ ELINK_FW_PARAM_PHY_TYPE_MASK)
+#define ELINK_FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \
+ ELINK_FW_PARAM_MDIO_CTRL_MASK) >> \
+ ELINK_FW_PARAM_MDIO_CTRL_OFFSET)
+#define ELINK_FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
+ (phy_addr | phy_type | mdio_access << ELINK_FW_PARAM_MDIO_CTRL_OFFSET)
+
+
+#define ELINK_PFC_BRB_FULL_LB_XOFF_THRESHOLD 170
+#define ELINK_PFC_BRB_FULL_LB_XON_THRESHOLD 250
+
+#define ELINK_MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
+
+#define ELINK_BMAC_CONTROL_RX_ENABLE 2
+/***********************************************************/
+/* Structs */
+/***********************************************************/
+#define ELINK_INT_PHY 0
+#define ELINK_EXT_PHY1 1
+#define ELINK_EXT_PHY2 2
+#define ELINK_MAX_PHYS 3
+
+/* Same configuration is shared between the XGXS and the first external phy */
+#define ELINK_LINK_CONFIG_SIZE (ELINK_MAX_PHYS - 1)
+#define ELINK_LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == ELINK_INT_PHY) ? \
+ 0 : (_phy_idx - 1))
+/***********************************************************/
+/* elink_phy struct */
+/* Defines the required arguments and function per phy */
+/***********************************************************/
+struct elink_vars;
+struct elink_params;
+struct elink_phy;
+
+typedef uint8_t (*config_init_t)(struct elink_phy *phy, struct elink_params *params,
+ struct elink_vars *vars);
+typedef uint8_t (*read_status_t)(struct elink_phy *phy, struct elink_params *params,
+ struct elink_vars *vars);
+typedef void (*link_reset_t)(struct elink_phy *phy,
+ struct elink_params *params);
+typedef void (*config_loopback_t)(struct elink_phy *phy,
+ struct elink_params *params);
+typedef uint8_t (*format_fw_ver_t)(uint32_t raw, uint8_t *str, uint16_t *len);
+typedef void (*hw_reset_t)(struct elink_phy *phy, struct elink_params *params);
+typedef void (*set_link_led_t)(struct elink_phy *phy,
+ struct elink_params *params, uint8_t mode);
+typedef void (*phy_specific_func_t)(struct elink_phy *phy,
+ struct elink_params *params, uint32_t action);
+struct elink_reg_set {
+ uint8_t devad;
+ uint16_t reg;
+ uint16_t val;
+};
+
+struct elink_phy {
+ uint32_t type;
+
+ /* Loaded during init */
+ uint8_t addr;
+ uint8_t def_md_devad;
+ uint16_t flags;
+ /* No Over-Current detection */
+#define ELINK_FLAGS_NOC (1<<1)
+ /* Fan failure detection required */
+#define ELINK_FLAGS_FAN_FAILURE_DET_REQ (1<<2)
+ /* Initialize first the XGXS and only then the phy itself */
+#define ELINK_FLAGS_INIT_XGXS_FIRST (1<<3)
+#define ELINK_FLAGS_WC_DUAL_MODE (1<<4)
+#define ELINK_FLAGS_4_PORT_MODE (1<<5)
+#define ELINK_FLAGS_REARM_LATCH_SIGNAL (1<<6)
+#define ELINK_FLAGS_SFP_NOT_APPROVED (1<<7)
+#define ELINK_FLAGS_MDC_MDIO_WA (1<<8)
+#define ELINK_FLAGS_DUMMY_READ (1<<9)
+#define ELINK_FLAGS_MDC_MDIO_WA_B0 (1<<10)
+#define ELINK_FLAGS_SFP_MODULE_PLUGGED_IN_WC (1<<11)
+#define ELINK_FLAGS_TX_ERROR_CHECK (1<<12)
+#define ELINK_FLAGS_EEE (1<<13)
+#define ELINK_FLAGS_TEMPERATURE (1<<14)
+#define ELINK_FLAGS_MDC_MDIO_WA_G (1<<15)
+
+ /* preemphasis values for the rx side */
+ uint16_t rx_preemphasis[4];
+
+ /* preemphasis values for the tx side */
+ uint16_t tx_preemphasis[4];
+
+ /* EMAC address for MDIO access */
+ uint32_t mdio_ctrl;
+
+ uint32_t supported;
+#define ELINK_SUPPORTED_10baseT_Half (1<<0)
+#define ELINK_SUPPORTED_10baseT_Full (1<<1)
+#define ELINK_SUPPORTED_100baseT_Half (1<<2)
+#define ELINK_SUPPORTED_100baseT_Full (1<<3)
+#define ELINK_SUPPORTED_1000baseT_Full (1<<4)
+#define ELINK_SUPPORTED_2500baseX_Full (1<<5)
+#define ELINK_SUPPORTED_10000baseT_Full (1<<6)
+#define ELINK_SUPPORTED_TP (1<<7)
+#define ELINK_SUPPORTED_FIBRE (1<<8)
+#define ELINK_SUPPORTED_Autoneg (1<<9)
+#define ELINK_SUPPORTED_Pause (1<<10)
+#define ELINK_SUPPORTED_Asym_Pause (1<<11)
+#define ELINK_SUPPORTED_20000baseMLD2_Full (1<<21)
+#define ELINK_SUPPORTED_20000baseKR2_Full (1<<22)
+
+ uint32_t media_type;
+#define ELINK_ETH_PHY_UNSPECIFIED 0x0
+#define ELINK_ETH_PHY_SFPP_10G_FIBER 0x1
+#define ELINK_ETH_PHY_XFP_FIBER 0x2
+#define ELINK_ETH_PHY_DA_TWINAX 0x3
+#define ELINK_ETH_PHY_BASE_T 0x4
+#define ELINK_ETH_PHY_SFP_1G_FIBER 0x5
+#define ELINK_ETH_PHY_KR 0xf0
+#define ELINK_ETH_PHY_CX4 0xf1
+#define ELINK_ETH_PHY_NOT_PRESENT 0xff
+
+ /* The address at which the version is located */
+ uint32_t ver_addr;
+
+ uint16_t req_flow_ctrl;
+
+ uint16_t req_line_speed;
+
+ uint32_t speed_cap_mask;
+
+ uint16_t req_duplex;
+ uint16_t rsrv;
+ /* Called per phy/port init, and it configures LASI, speed, autoneg,
+ duplex, flow control negotiation, etc. */
+ config_init_t config_init;
+
+ /* Called due to interrupt. It determines the link, speed */
+ read_status_t read_status;
+
+ /* Called when driver is unloading. Should reset the phy */
+ link_reset_t link_reset;
+
+ /* Set the loopback configuration for the phy */
+ config_loopback_t config_loopback;
+
+ /* Format the given raw number into str up to len */
+ format_fw_ver_t format_fw_ver;
+
+ /* Reset the phy (both ports) */
+ hw_reset_t hw_reset;
+
+ /* Set link led mode (on/off/oper)*/
+ set_link_led_t set_link_led;
+
+ /* PHY Specific tasks */
+ phy_specific_func_t phy_specific_func;
+#define ELINK_DISABLE_TX 1
+#define ELINK_ENABLE_TX 2
+#define ELINK_PHY_INIT 3
+};
+
+/* Inputs parameters to the CLC */
+struct elink_params {
+
+ uint8_t port;
+
+ /* Default / User Configuration */
+ uint8_t loopback_mode;
+#define ELINK_LOOPBACK_NONE 0
+#define ELINK_LOOPBACK_EMAC 1
+#define ELINK_LOOPBACK_BMAC 2
+#define ELINK_LOOPBACK_XGXS 3
+#define ELINK_LOOPBACK_EXT_PHY 4
+#define ELINK_LOOPBACK_EXT 5
+#define ELINK_LOOPBACK_UMAC 6
+#define ELINK_LOOPBACK_XMAC 7
+
+ /* Device parameters */
+ uint8_t mac_addr[6];
+
+ uint16_t req_duplex[ELINK_LINK_CONFIG_SIZE];
+ uint16_t req_flow_ctrl[ELINK_LINK_CONFIG_SIZE];
+
+ uint16_t req_line_speed[ELINK_LINK_CONFIG_SIZE]; /* Also determines AutoNeg */
+
+ /* shmem parameters */
+ uint32_t shmem_base;
+ uint32_t shmem2_base;
+ uint32_t speed_cap_mask[ELINK_LINK_CONFIG_SIZE];
+ uint32_t switch_cfg;
+#define ELINK_SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH
+#define ELINK_SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH
+#define ELINK_SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT
+
+ uint32_t lane_config;
+
+ /* Phy register parameter */
+ uint32_t chip_id;
+
+ /* features */
+ uint32_t feature_config_flags;
+#define ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
+#define ELINK_FEATURE_CONFIG_PFC_ENABLED (1<<1)
+#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
+#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
+#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC (1<<4)
+#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC (1<<5)
+#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC (1<<6)
+#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC (1<<7)
+#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX (1<<8)
+#define ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9)
+#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10)
+#define ELINK_FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11)
+#define ELINK_FEATURE_CONFIG_IEEE_PHY_TEST (1<<12)
+#define ELINK_FEATURE_CONFIG_MT_SUPPORT (1<<13)
+#define ELINK_FEATURE_CONFIG_BOOT_FROM_SAN (1<<14)
+
+ /* Will be populated during common init */
+ struct elink_phy phy[ELINK_MAX_PHYS];
+
+ /* Will be populated during common init */
+ uint8_t num_phys;
+
+ uint8_t rsrv;
+
+ /* Used to configure the EEE Tx LPI timer, has several modes of
+ * operation, according to bits 29:28 -
+ * 2'b00: Timer will be configured by nvram, output will be the value
+ * from nvram.
+ * 2'b01: Timer will be configured by nvram, output will be in
+ * microseconds.
+ * 2'b10: bits 1:0 contain an nvram value which will be used instead
+ * of the one located in the nvram. Output will be that value.
+ * 2'b11: bits 19:0 contain the idle timer in microseconds; output
+ * will be in microseconds.
+ * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
+ */
+ uint32_t eee_mode;
+#define ELINK_EEE_MODE_NVRAM_BALANCED_TIME (0xa00)
+#define ELINK_EEE_MODE_NVRAM_AGGRESSIVE_TIME (0x100)
+#define ELINK_EEE_MODE_NVRAM_LATENCY_TIME (0x6000)
+#define ELINK_EEE_MODE_NVRAM_MASK (0x3)
+#define ELINK_EEE_MODE_TIMER_MASK (0xfffff)
+#define ELINK_EEE_MODE_OUTPUT_TIME (1<<28)
+#define ELINK_EEE_MODE_OVERRIDE_NVRAM (1<<29)
+#define ELINK_EEE_MODE_ENABLE_LPI (1<<30)
+#define ELINK_EEE_MODE_ADV_LPI (1<<31)
+
+ uint16_t hw_led_mode; /* part of the hw_config read from the shmem */
+ uint32_t multi_phy_config;
+
+ /* Device pointer passed to all callback functions */
+ struct bnx2x_softc *sc;
+ uint16_t req_fc_auto_adv; /* Should be set to TX / BOTH when
+ req_flow_ctrl is set to AUTO */
+ uint16_t link_flags;
+#define ELINK_LINK_FLAGS_INT_DISABLED (1<<0)
+#define ELINK_PHY_INITIALIZED (1<<1)
+ uint32_t lfa_base;
+};
+
+/* Output parameters */
+struct elink_vars {
+ uint8_t phy_flags;
+#define PHY_XGXS_FLAG (1<<0)
+#define PHY_SGMII_FLAG (1<<1)
+#define PHY_PHYSICAL_LINK_FLAG (1<<2)
+#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
+#define PHY_OVER_CURRENT_FLAG (1<<4)
+#define PHY_SFP_TX_FAULT_FLAG (1<<5)
+
+ uint8_t mac_type;
+#define ELINK_MAC_TYPE_NONE 0
+#define ELINK_MAC_TYPE_EMAC 1
+#define ELINK_MAC_TYPE_BMAC 2
+#define ELINK_MAC_TYPE_UMAC 3
+#define ELINK_MAC_TYPE_XMAC 4
+
+ uint8_t phy_link_up; /* internal phy link indication */
+ uint8_t link_up;
+
+ uint16_t line_speed;
+ uint16_t duplex;
+
+ uint16_t flow_ctrl;
+ uint16_t ieee_fc;
+
+ /* The same definitions as the shmem parameter */
+ uint32_t link_status;
+ uint32_t eee_status;
+ uint8_t fault_detected;
+ uint8_t check_kr2_recovery_cnt;
+#define ELINK_CHECK_KR2_RECOVERY_CNT 5
+ uint16_t periodic_flags;
+#define ELINK_PERIODIC_FLAGS_LINK_EVENT 0x0001
+
+ uint32_t aeu_int_mask;
+ uint8_t rx_tx_asic_rst;
+ uint8_t turn_to_run_wc_rt;
+ uint16_t rsrv2;
+ /* The same definitions as the shmem2 parameter */
+ uint32_t link_attr_sync;
+};
+
+/***********************************************************/
+/* Functions */
+/***********************************************************/
+elink_status_t elink_phy_init(struct elink_params *params, struct elink_vars *vars);
+
+/* Reset the link. Should be called when the driver or interface goes down.
+ Before calling a phy firmware upgrade, reset_ext_phy should be set
+ to 0 */
+elink_status_t elink_lfa_reset(struct elink_params *params, struct elink_vars *vars);
+/* elink_link_update should be called upon link interrupt */
+elink_status_t elink_link_update(struct elink_params *params, struct elink_vars *vars);
+
+/* Reads the link_status from the shmem
+ and updates the link vars accordingly */
+void elink_link_status_update(struct elink_params *input,
+ struct elink_vars *output);
+
+/* Set/Unset the led.
+ Basically, the CLC takes care of the led for the link, but in case one needs
+ to set/unset the led manually, set "mode" to ELINK_LED_MODE_OPER to
+ blink the led, and to ELINK_LED_MODE_OFF to turn the led off. */
+elink_status_t elink_set_led(struct elink_params *params,
+ struct elink_vars *vars, uint8_t mode, uint32_t speed);
+#define ELINK_LED_MODE_OFF 0
+#define ELINK_LED_MODE_ON 1
+#define ELINK_LED_MODE_OPER 2
+#define ELINK_LED_MODE_FRONT_PANEL_OFF 3
+
+/* elink_handle_module_detect_int should be called upon module detection
+ interrupt */
+void elink_handle_module_detect_int(struct elink_params *params);
+
+/* One-time initialization for external phy after power up */
+elink_status_t elink_common_init_phy(struct bnx2x_softc *sc, uint32_t shmem_base_path[],
+ uint32_t shmem2_base_path[], uint32_t chip_id, uint8_t one_port_enabled);
+
+void elink_hw_reset_phy(struct elink_params *params);
+
+/* Check swap bit and adjust PHY order */
+uint32_t elink_phy_selection(struct elink_params *params);
+
+/* Probe the phys on board, and populate them in "params" */
+elink_status_t elink_phy_probe(struct elink_params *params);
+
+/* Checks if fan failure detection is required on one of the phys on board */
+uint8_t elink_fan_failure_det_req(struct bnx2x_softc *sc, uint32_t shmem_base,
+ uint32_t shmem2_base, uint8_t port);
+
+/* Open / close the gate between the NIG and the BRB */
+void elink_set_rx_filter(struct elink_params *params, uint8_t en);
+
+/* DCBX structs */
+
+/* Number of maximum COS per chip */
+#define ELINK_DCBX_E2E3_MAX_NUM_COS (2)
+#define ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0 (6)
+#define ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 (3)
+#define ELINK_DCBX_E3B0_MAX_NUM_COS ( \
+ ELINK_MAXVAL(ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0, \
+ ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1))
+
+#define ELINK_DCBX_MAX_NUM_COS ( \
+ ELINK_MAXVAL(ELINK_DCBX_E3B0_MAX_NUM_COS, \
+ ELINK_DCBX_E2E3_MAX_NUM_COS))
+
+/* PFC port configuration params */
+struct elink_nig_brb_pfc_port_params {
+ /* NIG */
+ uint32_t pause_enable;
+ uint32_t llfc_out_en;
+ uint32_t llfc_enable;
+ uint32_t pkt_priority_to_cos;
+ uint8_t num_of_rx_cos_priority_mask;
+ uint32_t rx_cos_priority_mask[ELINK_DCBX_MAX_NUM_COS];
+ uint32_t llfc_high_priority_classes;
+ uint32_t llfc_low_priority_classes;
+};
+
+
+/* ETS port configuration params */
+struct elink_ets_bw_params {
+ uint8_t bw;
+};
+
+struct elink_ets_sp_params {
+ /**
+ * valid values are 0 - 5. 0 is highest strict priority.
+ * There can't be two COS's with the same pri.
+ */
+ uint8_t pri;
+};
+
+enum elink_cos_state {
+ elink_cos_state_strict = 0,
+ elink_cos_state_bw = 1,
+};
+
+struct elink_ets_cos_params {
+ enum elink_cos_state state;
+ union {
+ struct elink_ets_bw_params bw_params;
+ struct elink_ets_sp_params sp_params;
+ } params;
+};
+
+struct elink_ets_params {
+ uint8_t num_of_cos; /* Number of valid COS entries*/
+ struct elink_ets_cos_params cos[ELINK_DCBX_MAX_NUM_COS];
+};
+
+/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
+ * when link is already up
+ */
+elink_status_t elink_update_pfc(struct elink_params *params,
+ struct elink_vars *vars,
+ struct elink_nig_brb_pfc_port_params *pfc_params);
+
+void elink_init_mod_abs_int(struct bnx2x_softc *sc, struct elink_vars *vars,
+ uint32_t chip_id, uint32_t shmem_base, uint32_t shmem2_base,
+ uint8_t port);
+
+void elink_period_func(struct elink_params *params, struct elink_vars *vars);
+
+void elink_enable_pmd_tx(struct elink_params *params);
+
+
+
+#endif /* ELINK_H */
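
The ELINK_FW_PARAM_* macros above pack a phy address (bits 7:0), a pre-shifted phy type (bits 15:8) and an MDIO control word (bits 31:16) into one 32-bit firmware parameter. A round-trip sketch with made-up values:

uint32_t fw_param = ELINK_FW_PARAM_SET(0x01,    /* phy addr, bits 7:0 */
                                       0x0100,  /* phy type, already in bits 15:8 */
                                       0xabcd); /* mdio ctrl, bits 31:16 */

uint32_t addr = ELINK_FW_PARAM_PHY_ADDR(fw_param);  /* 0x01 */
uint32_t type = ELINK_FW_PARAM_PHY_TYPE(fw_param);  /* 0x0100: masked, not shifted back */
uint32_t mdio = ELINK_FW_PARAM_MDIO_CTRL(fw_param); /* 0xabcd */

Note that ELINK_FW_PARAM_SET does not shift phy_type, so callers pass it already positioned in bits 15:8, and ELINK_FW_PARAM_PHY_TYPE likewise returns it in place.
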
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond.h b/src/dpdk22/drivers/net/bonding/rte_eth_bond.h
index 71779831..8efbf071 100755..100644
--- a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond.h
+++ b/src/dpdk22/drivers/net/bonding/rte_eth_bond.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -71,12 +71,10 @@ extern "C" {
* slaves using one of three available transmit policies - l2, l2+3 or l3+4.
* See BALANCE_XMIT_POLICY macros definitions for further details on transmit
* policies. */
-#ifdef RTE_MBUF_REFCNT
#define BONDING_MODE_BROADCAST (3)
/**< Broadcast (Mode 3).
* In this mode all transmitted packets will be transmitted on all available
* active slaves of the bonded. */
-#endif
#define BONDING_MODE_8023AD (4)
/**< 802.3AD (Mode 4).
*
@@ -96,11 +94,20 @@ extern "C" {
* to rx_burst should be at least 2 times the slave count size.
*
*/
-#define BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING (5)
+#define BONDING_MODE_TLB (5)
/**< Adaptive TLB (Mode 5)
* This mode provides an adaptive transmit load balancing. It dynamically
* changes the transmitting slave, according to the computed load. Statistics
* are collected in 100ms intervals and scheduled every 10ms */
+#define BONDING_MODE_ALB (6)
+/**< Adaptive Load Balancing (Mode 6)
+ * This mode includes adaptive TLB and receive load balancing (RLB). In RLB the
+ * bonding driver intercepts ARP replies sent by the local system and overwrites
+ * their source MAC address, so that different peers send data to the server on
+ * different slave interfaces. When the local system sends an ARP request, it
+ * saves the IP information from it. When the ARP reply from that peer is
+ * received, its MAC is stored, one of the slave MACs is assigned, and the ARP
+ * reply is sent to that peer.
+ */
/* Balance Mode Transmit Policies */
#define BALANCE_XMIT_POLICY_LAYER2 (0)
@@ -124,6 +131,17 @@ int
rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id);
/**
+ * Free a bonded rte_eth_dev device
+ *
+ * @param name Name of the link bonding device.
+ *
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_free(const char *name);
+
+/**
* Add a rte_eth_dev device as a slave to the bonded device
*
* @param bonded_port_id Port ID of bonded device.
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad.h b/src/dpdk22/drivers/net/bonding/rte_eth_bond_8023ad.h
index 9adc6aa1..ebd0e934 100755..100644
--- a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad.h
+++ b/src/dpdk22/drivers/net/bonding/rte_eth_bond_8023ad.h
@@ -36,6 +36,10 @@
#include <rte_ether.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/**
* Actor/partner states
*/
@@ -211,4 +215,8 @@ int
rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
struct rte_eth_bond_8023ad_slave_info *conf);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* RTE_ETH_BOND_8023AD_H_ */
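
With the new extern "C" guards, the 802.3ad helpers become usable from C++ translation units as well. A minimal query sketch (bond_port and slave_port are placeholder port ids):

struct rte_eth_bond_8023ad_slave_info info;

if (rte_eth_bond_8023ad_slave_info(bond_port, slave_port, &info) != 0) {
        /* slave_port is not a valid 802.3ad slave of bond_port */
}
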
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad_private.h b/src/dpdk22/drivers/net/bonding/rte_eth_bond_8023ad_private.h
index 8adee70b..8adee70b 100755..100644
--- a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad_private.h
+++ b/src/dpdk22/drivers/net/bonding/rte_eth_bond_8023ad_private.h
diff --git a/src/dpdk22/drivers/net/bonding/rte_eth_bond_alb.h b/src/dpdk22/drivers/net/bonding/rte_eth_bond_alb.h
new file mode 100644
index 00000000..fd7c3aeb
--- /dev/null
+++ b/src/dpdk22/drivers/net/bonding/rte_eth_bond_alb.h
@@ -0,0 +1,142 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_ETH_BOND_ALB_H_
+#define RTE_ETH_BOND_ALB_H_
+
+#include <rte_ether.h>
+#include <rte_arp.h>
+
+#define ALB_HASH_TABLE_SIZE 256
+#define ALB_NULL_INDEX 0xFFFFFFFF
+
+struct client_data {
+ /** ARP data of single client */
+ struct ether_addr app_mac;
+ /**< MAC address of application running DPDK */
+ uint32_t app_ip;
+ /**< IP address of application running DPDK */
+ struct ether_addr cli_mac;
+ /**< Client MAC address */
+ uint32_t cli_ip;
+ /**< Client IP address */
+
+ uint8_t slave_idx;
+ /**< Index of slave on which we connect with that client */
+ uint8_t in_use;
+ /**< Flag indicating if entry in client table is currently used */
+ uint8_t ntt;
+ /**< Flag indicating if we need to send update to this client on next tx */
+
+ struct vlan_hdr vlan[2];
+ /**< Content of vlan headers */
+ uint8_t vlan_count;
+ /**< Number of nested vlan headers */
+};
+
+struct mode_alb_private {
+ struct client_data client_table[ALB_HASH_TABLE_SIZE];
+ /**< Hash table storing ARP data of every client connected */
+ struct rte_mempool *mempool;
+ /**< Mempool for creating ARP update packets */
+ uint8_t ntt;
+ /**< Flag indicating if we need to send update to any client on next tx */
+ uint32_t last_slave;
+ /**< Index of last used slave in client table */
+ rte_spinlock_t lock;
+};
+
+/**
+ * ALB mode initialization.
+ *
+ * @param bond_dev Pointer to bonding device.
+ *
+ * @return
+ * Error code - 0 on success.
+ */
+int
+bond_mode_alb_enable(struct rte_eth_dev *bond_dev);
+
+/**
+ * Function handles ARP packet reception. If an ARP request is received, it is
+ * forwarded to the application without changes. If it is an ARP reply, the
+ * client table is updated.
+ *
+ * @param eth_h ETH header of received packet.
+ * @param offset Vlan header offset.
+ * @param internals Bonding data.
+ */
+void
+bond_mode_alb_arp_recv(struct ether_hdr *eth_h, uint16_t offset,
+ struct bond_dev_private *internals);
+
+/**
+ * Function handles ARP packet transmission. It also decides on which slave to
+ * send that packet. If the packet is an ARP Request, it is sent on the primary
+ * slave. If it is an ARP Reply, it is sent on the slave stored in the client
+ * table for that connection. On Reply, the function also updates data in the
+ * client table.
+ *
+ * @param eth_h ETH header of transmitted packet.
+ * @param offset Vlan header offset.
+ * @param internals Bonding data.
+ *
+ * @return
+ * Index of slave on which packet should be sent.
+ */
+uint8_t
+bond_mode_alb_arp_xmit(struct ether_hdr *eth_h, uint16_t offset,
+ struct bond_dev_private *internals);
+
+/**
+ * Function fills packet with ARP data from client_info.
+ *
+ * @param client_info Data of client to which packet is sent.
+ * @param pkt Pointer to packet which is sent.
+ * @param internals Bonding data.
+ *
+ * @return
+ * Index of the slave on which the packet should be sent.
+ */
+uint8_t
+bond_mode_alb_arp_upd(struct client_data *client_info,
+ struct rte_mbuf *pkt, struct bond_dev_private *internals);
+
+/**
+ * Function updates slave indexes of active connections.
+ *
+ * @param bond_dev Pointer to bonded device struct.
+ */
+void
+bond_mode_alb_client_list_upd(struct rte_eth_dev *bond_dev);
+
+#endif /* RTE_ETH_BOND_ALB_H_ */
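
A minimal lifecycle sketch for the new mode, assuming the usual bonding API flow; the device name and socket id are placeholders:

#include <rte_eth_bond.h>

static int setup_alb_bond(void)
{
        int port_id = rte_eth_bond_create("bond_alb0", BONDING_MODE_ALB, 0);

        if (port_id < 0)
                return port_id;

        /* ... rte_eth_bond_slave_add(), rte_eth_dev_configure(),
         * rte_eth_dev_start() ... */

        return rte_eth_bond_free("bond_alb0");
}
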
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_private.h b/src/dpdk22/drivers/net/bonding/rte_eth_bond_private.h
index f913c5b9..6c47a294 100755..100644
--- a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_private.h
+++ b/src/dpdk22/drivers/net/bonding/rte_eth_bond_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,15 +34,12 @@
#ifndef _RTE_ETH_BOND_PRIVATE_H_
#define _RTE_ETH_BOND_PRIVATE_H_
-#ifdef __cplusplus
-extern "C" {
-#endif
-
#include <rte_ethdev.h>
#include <rte_spinlock.h>
#include "rte_eth_bond.h"
#include "rte_eth_bond_8023ad_private.h"
+#include "rte_eth_bond_alb.h"
#define PMD_BOND_SLAVE_PORT_KVARG ("slave")
#define PMD_BOND_PRIMARY_SLAVE_KVARG ("primary")
@@ -65,11 +62,11 @@ extern "C" {
extern const char *pmd_bond_init_valid_arguments[];
-extern const char *driver_name;
+extern const char pmd_bond_driver_name[];
/** Port Queue Mapping Structure */
struct bond_rx_queue {
- int queue_id;
+ uint16_t queue_id;
/**< Queue Id */
struct bond_dev_private *dev_private;
/**< Reference to eth_dev private structure */
@@ -82,7 +79,7 @@ struct bond_rx_queue {
};
struct bond_tx_queue {
- int queue_id;
+ uint16_t queue_id;
/**< Queue Id */
struct bond_dev_private *dev_private;
/**< Reference to dev private structure */
@@ -106,8 +103,13 @@ struct bond_slave_details {
uint8_t last_link_status;
/**< Port Id of slave eth_dev */
struct ether_addr persisted_mac_addr;
+
+ uint16_t reta_size;
};
+
+typedef uint16_t (*xmit_hash_t)(const struct rte_mbuf *buf, uint8_t slave_count);
+
/** Link Bonding PMD device private configuration Structure */
struct bond_dev_private {
uint8_t port_id; /**< Port Id of Bonded Port */
@@ -122,6 +124,9 @@ struct bond_dev_private {
uint8_t balance_xmit_policy;
/**< Transmit policy - l2 / l23 / l34 for operation in balance mode */
+ xmit_hash_t xmit_hash;
+ /**< Transmit policy hash function */
+
uint8_t user_defined_mac;
/**< Flag for whether MAC address is user defined or not */
uint8_t promiscuous_en;
@@ -146,10 +151,22 @@ struct bond_dev_private {
 /**< Array of bonded slave details */
struct mode8023ad_private mode4;
+ uint8_t tlb_slaves_order[RTE_MAX_ETHPORTS]; /* TLB active slaves send order */
+ struct mode_alb_private mode6;
uint32_t rx_offload_capa; /** Rx offload capability */
uint32_t tx_offload_capa; /** Tx offload capability */
+ /** Bit mask of RSS offloads, the bit offset also means flow type */
+ uint64_t flow_type_rss_offloads;
+
+ uint16_t reta_size;
+ struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
+ RTE_RETA_GROUP_SIZE];
+
+ uint8_t rss_key[52]; /**< 52-byte hash key buffer. */
+ uint8_t rss_key_len; /**< hash key length in bytes. */
+
struct rte_kvargs *kvlist;
uint8_t slave_update_idx;
};
@@ -157,7 +174,7 @@ struct bond_dev_private {
extern struct eth_dev_ops default_dev_ops;
int
-valid_bonded_ethdev(struct rte_eth_dev *eth_dev);
+check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev);
 /* Search given slave array to find position of given id.
* Return slave pos or slaves_count if not found. */
@@ -225,6 +242,15 @@ void
slave_add(struct bond_dev_private *internals,
struct rte_eth_dev *slave_eth_dev);
+uint16_t
+xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count);
+
+uint16_t
+xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count);
+
+uint16_t
+xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count);
+
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
uint8_t slave_port_id);
@@ -261,8 +287,19 @@ int
bond_ethdev_parse_time_ms_kvarg(const char *key __rte_unused,
const char *value, void *extra_args);
-#ifdef __cplusplus
-}
-#endif
+void
+bond_tlb_disable(struct bond_dev_private *internals);
+
+void
+bond_tlb_enable(struct bond_dev_private *internals);
+
+void
+bond_tlb_activate_slave(struct bond_dev_private *internals);
+
+void
+bond_ethdev_stop(struct rte_eth_dev *eth_dev);
+
+void
+bond_ethdev_close(struct rte_eth_dev *dev);
#endif
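
A sketch of how the new xmit_hash member can be wired up by the balance-mode code. BALANCE_XMIT_POLICY_LAYER23 and BALANCE_XMIT_POLICY_LAYER34 are the l2+3 / l3+4 policy macros from rte_eth_bond.h (not shown in this hunk); the real dispatch lives in the bonding PMD source, so this is illustrative only:

static void bond_set_xmit_hash(struct bond_dev_private *internals,
                               uint8_t policy)
{
        switch (policy) {
        case BALANCE_XMIT_POLICY_LAYER2:
                internals->xmit_hash = xmit_l2_hash;
                break;
        case BALANCE_XMIT_POLICY_LAYER23:
                internals->xmit_hash = xmit_l23_hash;
                break;
        case BALANCE_XMIT_POLICY_LAYER34:
                internals->xmit_hash = xmit_l34_hash;
                break;
        }
        internals->balance_xmit_policy = policy;
}

/* on the tx path, each mbuf then maps to an active slave index in one call */
static uint16_t bond_pick_slave_idx(struct bond_dev_private *internals,
                                    const struct rte_mbuf *m,
                                    uint8_t active_slave_count)
{
        return internals->xmit_hash(m, active_slave_count);
}
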
diff --git a/src/dpdk22/drivers/net/cxgbe/base/adapter.h b/src/dpdk22/drivers/net/cxgbe/base/adapter.h
new file mode 100644
index 00000000..a5225c0e
--- /dev/null
+++ b/src/dpdk22/drivers/net/cxgbe/base/adapter.h
@@ -0,0 +1,576 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* This file should not be included directly. Include common.h instead. */
+
+#ifndef __T4_ADAPTER_H__
+#define __T4_ADAPTER_H__
+
+#include <rte_mbuf.h>
+
+#include "cxgbe_compat.h"
+#include "t4_regs_values.h"
+
+enum {
+ MAX_ETH_QSETS = 64, /* # of Ethernet Tx/Rx queue sets */
+};
+
+struct adapter;
+struct sge_rspq;
+
+enum {
+ PORT_RSS_DONE = (1 << 0),
+};
+
+struct port_info {
+ struct adapter *adapter; /* adapter that this port belongs to */
+ struct rte_eth_dev *eth_dev; /* associated rte eth device */
+ struct port_stats stats_base; /* port statistics base */
+ struct link_config link_cfg; /* link configuration info */
+
+ unsigned long flags; /* port related flags */
+ short int xact_addr_filt; /* index of exact MAC address filter */
+
+ u16 viid; /* associated virtual interface id */
+ s8 mdio_addr; /* address of the PHY */
+ u8 port_type; /* firmware port type */
+ u8 mod_type; /* firmware module type */
+ u8 port_id; /* physical port ID */
+ u8 tx_chan; /* associated channel */
+
+ u8 n_rx_qsets; /* # of rx qsets */
+ u8 n_tx_qsets; /* # of tx qsets */
+ u8 first_qset; /* index of first qset */
+
+ u16 *rss; /* rss table */
+ u8 rss_mode; /* rss mode */
+ u16 rss_size; /* size of VI's RSS table slice */
+};
+
+/* Enable or disable autonegotiation. If this is set to enable,
+ * the forced link modes above are completely ignored.
+ */
+#define AUTONEG_DISABLE 0x00
+#define AUTONEG_ENABLE 0x01
+
+enum { /* adapter flags */
+ FULL_INIT_DONE = (1 << 0),
+ USING_MSI = (1 << 1),
+ USING_MSIX = (1 << 2),
+ FW_QUEUE_BOUND = (1 << 3),
+ FW_OK = (1 << 4),
+ CFG_QUEUES = (1 << 5),
+ MASTER_PF = (1 << 6),
+};
+
+struct rx_sw_desc { /* SW state per Rx descriptor */
+ void *buf; /* struct page or mbuf */
+ dma_addr_t dma_addr;
+};
+
+struct sge_fl { /* SGE free-buffer queue state */
+ /* RO fields */
+ struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
+
+ dma_addr_t addr; /* bus address of HW ring start */
+ __be64 *desc; /* address of HW Rx descriptor ring */
+
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
+
+ unsigned int cntxt_id; /* SGE relative QID for the free list */
+ unsigned int size; /* capacity of free list */
+
+ unsigned int avail; /* # of available Rx buffers */
+ unsigned int pend_cred; /* new buffers since last FL DB ring */
+ unsigned int cidx; /* consumer index */
+ unsigned int pidx; /* producer index */
+
+ unsigned long alloc_failed; /* # of times buffer allocation failed */
+ unsigned long low; /* # of times momentarily starving */
+};
+
+#define MAX_MBUF_FRAGS (16384 / 512 + 2)
+
+/* A packet gather list */
+struct pkt_gl {
+ union {
+ struct rte_mbuf *mbufs[MAX_MBUF_FRAGS];
+ } /* UNNAMED */;
+ void *va; /* virtual address of first byte */
+ unsigned int nfrags; /* # of fragments */
+ unsigned int tot_len; /* total length of fragments */
+ bool usembufs; /* use mbufs for fragments */
+};
+
+typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl);
+
+struct sge_rspq { /* state for an SGE response queue */
+ struct adapter *adapter; /* adapter that this queue belongs to */
+ struct rte_eth_dev *eth_dev; /* associated rte eth device */
+ struct rte_mempool *mb_pool; /* associated mempool */
+
+ dma_addr_t phys_addr; /* physical address of the ring */
+ __be64 *desc; /* address of HW response ring */
+ const __be64 *cur_desc; /* current descriptor in queue */
+
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
+
+ unsigned int cidx; /* consumer index */
+ unsigned int gts_idx; /* last gts write sent */
+ unsigned int iqe_len; /* entry size */
+ unsigned int size; /* capacity of response queue */
+ int offset; /* offset into current Rx buffer */
+
+ u8 gen; /* current generation bit */
+ u8 intr_params; /* interrupt holdoff parameters */
+ u8 next_intr_params; /* holdoff params for next interrupt */
+ u8 pktcnt_idx; /* interrupt packet threshold */
+ u8 port_id; /* associated port-id */
+ u8 idx; /* queue index within its group */
+ u16 cntxt_id; /* SGE relative QID for the response Q */
+ u16 abs_id; /* absolute SGE id for the response q */
+
+ rspq_handler_t handler; /* associated handler for this response q */
+};
+
+struct sge_eth_rx_stats { /* Ethernet rx queue statistics */
+ u64 pkts; /* # of ethernet packets */
+ u64 rx_bytes; /* # of ethernet bytes */
+ u64 rx_cso; /* # of Rx checksum offloads */
+ u64 vlan_ex; /* # of Rx VLAN extractions */
+ u64 rx_drops; /* # of packets dropped due to no mem */
+};
+
+struct sge_eth_rxq { /* a SW Ethernet Rx queue */
+ struct sge_rspq rspq;
+ struct sge_fl fl;
+ struct sge_eth_rx_stats stats;
+ bool usembufs; /* one ingress packet per mbuf FL buffer */
+} __rte_cache_aligned;
+
+/*
+ * Currently there are two types of coalesce WR. Type 0 needs 48 bytes per
+ * packet (if one sgl is present) and type 1 needs 32 bytes. This means
+ * that type 0 can fit a maximum of 10 packets per WR and type 1 can fit
+ * 15 packets. We need to keep track of the mbuf pointers in a coalesce WR
+ * to be able to free those mbufs when we get completions back from the FW.
+ * Allocating the maximum number of pointers in every tx desc is a waste
+ * of memory resources, so we only store 2 pointers per tx desc, which should
+ * be enough since a tx desc can only fit 2 packets in the best-case
+ * scenario where a packet needs 32 bytes.
+ */
+#define ETH_COALESCE_PKT_NUM 15
+#define ETH_COALESCE_PKT_PER_DESC 2
+
+struct tx_eth_coal_desc {
+ struct rte_mbuf *mbuf[ETH_COALESCE_PKT_PER_DESC];
+ struct ulptx_sgl *sgl[ETH_COALESCE_PKT_PER_DESC];
+ int idx;
+};
+
+struct tx_desc {
+ __be64 flit[8];
+};
+
+struct tx_sw_desc { /* SW state per Tx descriptor */
+ struct rte_mbuf *mbuf;
+ struct ulptx_sgl *sgl;
+ struct tx_eth_coal_desc coalesce;
+};
+
+enum {
+ EQ_STOPPED = (1 << 0),
+};
+
+struct eth_coalesce {
+ unsigned char *ptr;
+ unsigned char type;
+ unsigned int idx;
+ unsigned int len;
+ unsigned int flits;
+ unsigned int max;
+};
+
+struct sge_txq {
+ struct tx_desc *desc; /* address of HW Tx descriptor ring */
+ struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
+ struct sge_qstat *stat; /* queue status entry */
+ struct eth_coalesce coalesce; /* coalesce info */
+
+ uint64_t phys_addr; /* physical address of the ring */
+
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
+
+ unsigned int cntxt_id; /* SGE relative QID for the Tx Q */
+ unsigned int in_use; /* # of in-use Tx descriptors */
+ unsigned int size; /* # of descriptors */
+ unsigned int cidx; /* SW consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned int dbidx; /* last idx when db ring was done */
+ unsigned int equeidx; /* last sent credit request */
+ unsigned int last_pidx; /* last pidx recorded by tx monitor */
+ unsigned int last_coal_idx;/* last coal-idx recorded by tx monitor */
+
+ int db_disabled; /* doorbell state */
+ unsigned short db_pidx; /* doorbell producer index */
+ unsigned short db_pidx_inc; /* doorbell producer increment */
+};
+
+struct sge_eth_tx_stats { /* Ethernet tx queue statistics */
+ u64 pkts; /* # of ethernet packets */
+ u64 tx_bytes; /* # of ethernet bytes */
+ u64 tso; /* # of TSO requests */
+ u64 tx_cso; /* # of Tx checksum offloads */
+ u64 vlan_ins; /* # of Tx VLAN insertions */
+ u64 mapping_err; /* # of I/O MMU packet mapping errors */
+ u64 coal_wr; /* # of coalesced wr */
+ u64 coal_pkts; /* # of coalesced packets */
+};
+
+struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
+ struct sge_txq q;
+ struct rte_eth_dev *eth_dev; /* port that this queue belongs to */
+ struct sge_eth_tx_stats stats; /* queue statistics */
+ rte_spinlock_t txq_lock;
+
+ unsigned int flags; /* flags for state of the queue */
+} __rte_cache_aligned;
+
+struct sge {
+ struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
+ struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
+ struct sge_rspq fw_evtq __rte_cache_aligned;
+
+ u16 max_ethqsets; /* # of available Ethernet queue sets */
+ u32 stat_len; /* length of status page at ring end */
+ u32 pktshift; /* padding between CPL & packet data */
+
+ /* response queue interrupt parameters */
+ u16 timer_val[SGE_NTIMERS];
+ u8 counter_val[SGE_NCOUNTERS];
+
+ u32 fl_align; /* response queue message alignment */
+ u32 fl_pg_order; /* large page allocation size */
+ u32 fl_starve_thres; /* Free List starvation threshold */
+};
+
+#define T4_OS_NEEDS_MBOX_LOCKING 1
+
+/*
+ * OS Lock/List primitives for those interfaces in the Common Code which
+ * need this.
+ */
+
+struct mbox_entry {
+ TAILQ_ENTRY(mbox_entry) next;
+};
+
+TAILQ_HEAD(mbox_list, mbox_entry);
+
+struct adapter {
+ struct rte_pci_device *pdev; /* associated rte pci device */
+ struct rte_eth_dev *eth_dev; /* first port's rte eth device */
+ struct adapter_params params; /* adapter parameters */
+ struct port_info port[MAX_NPORTS]; /* ports belonging to this adapter */
+ struct sge sge; /* associated SGE */
+
+ /* support for single-threading access to adapter mailbox registers */
+ struct mbox_list mbox_list;
+ rte_spinlock_t mbox_lock;
+
+ u8 *regs; /* pointer to registers region */
+ u8 *bar2; /* pointer to bar2 region */
+ unsigned long flags; /* adapter flags */
+ unsigned int mbox; /* associated mailbox */
+ unsigned int pf; /* associated physical function id */
+
+ int use_unpacked_mode; /* unpacked rx mode state */
+};
+
+#define CXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+
+static inline uint64_t cxgbe_read_addr64(volatile void *addr)
+{
+ uint64_t val = CXGBE_PCI_REG(addr);
+ uint64_t val2 = CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4));
+
+ val2 = (uint64_t)(val2 << 32);
+ val += val2;
+ return val;
+}
+
+static inline uint32_t cxgbe_read_addr(volatile void *addr)
+{
+ return CXGBE_PCI_REG(addr);
+}
+
+#define CXGBE_PCI_REG_ADDR(adap, reg) \
+ ((volatile uint32_t *)((char *)(adap)->regs + (reg)))
+
+#define CXGBE_READ_REG(adap, reg) \
+ cxgbe_read_addr(CXGBE_PCI_REG_ADDR((adap), (reg)))
+
+#define CXGBE_READ_REG64(adap, reg) \
+ cxgbe_read_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)))
+
+#define CXGBE_PCI_REG_WRITE(reg, value) ({ \
+ CXGBE_PCI_REG((reg)) = (value); })
+
+#define CXGBE_WRITE_REG(adap, reg, value) \
+ CXGBE_PCI_REG_WRITE(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
+
+static inline uint64_t cxgbe_write_addr64(volatile void *addr, uint64_t val)
+{
+ CXGBE_PCI_REG(addr) = val;
+ CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4)) = (val >> 32);
+ return val;
+}
+
+#define CXGBE_WRITE_REG64(adap, reg, value) \
+ cxgbe_write_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
+
+/**
+ * t4_read_reg - read a HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 32-bit value of the given HW register.
+ */
+static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
+{
+ u32 val = CXGBE_READ_REG(adapter, reg_addr);
+
+ CXGBE_DEBUG_REG(adapter, "read register 0x%x value 0x%x\n", reg_addr,
+ val);
+ return val;
+}
+
+/**
+ * t4_write_reg - write a HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given HW register.
+ */
+static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
+{
+ CXGBE_DEBUG_REG(adapter, "setting register 0x%x to 0x%x\n", reg_addr,
+ val);
+ CXGBE_WRITE_REG(adapter, reg_addr, val);
+}
+
+/**
+ * t4_read_reg64 - read a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 64-bit value of the given HW register.
+ */
+static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
+{
+ u64 val = CXGBE_READ_REG64(adapter, reg_addr);
+
+ CXGBE_DEBUG_REG(adapter, "64-bit read register %#x value %#llx\n",
+ reg_addr, (unsigned long long)val);
+ return val;
+}
+
+/**
+ * t4_write_reg64 - write a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 64-bit value into the given HW register.
+ */
+static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
+ u64 val)
+{
+ CXGBE_DEBUG_REG(adapter, "setting register %#x to %#llx\n", reg_addr,
+ (unsigned long long)val);
+
+ CXGBE_WRITE_REG64(adapter, reg_addr, val);
+}
+
+/**
+ * t4_os_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @port_idx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW. Called by the
+ * common code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx,
+ u8 hw_addr[])
+{
+ struct port_info *pi = &adapter->port[port_idx];
+
+ ether_addr_copy((struct ether_addr *)hw_addr,
+ &pi->eth_dev->data->mac_addrs[0]);
+}
+
+/**
+ * t4_os_lock_init - initialize spinlock
+ * @lock: the spinlock
+ */
+static inline void t4_os_lock_init(rte_spinlock_t *lock)
+{
+ rte_spinlock_init(lock);
+}
+
+/**
+ * t4_os_lock - spin until lock is acquired
+ * @lock: the spinlock
+ */
+static inline void t4_os_lock(rte_spinlock_t *lock)
+{
+ rte_spinlock_lock(lock);
+}
+
+/**
+ * t4_os_unlock - unlock a spinlock
+ * @lock: the spinlock
+ */
+static inline void t4_os_unlock(rte_spinlock_t *lock)
+{
+ rte_spinlock_unlock(lock);
+}
+
+/**
+ * t4_os_trylock - try to get a lock
+ * @lock: the spinlock
+ */
+static inline int t4_os_trylock(rte_spinlock_t *lock)
+{
+ return rte_spinlock_trylock(lock);
+}
+
+/**
+ * t4_os_init_list_head - initialize a list head
+ * @head: head of list to initialize [to empty]
+ */
+static inline void t4_os_init_list_head(struct mbox_list *head)
+{
+ TAILQ_INIT(head);
+}
+
+static inline struct mbox_entry *t4_os_list_first_entry(struct mbox_list *head)
+{
+ return TAILQ_FIRST(head);
+}
+
+/**
+ * t4_os_atomic_add_tail - Enqueue list element atomically onto list
+ * @entry: the entry to be added to the queue
+ * @head: current head of the linked list
+ * @lock: lock to use to guarantee atomicity
+ */
+static inline void t4_os_atomic_add_tail(struct mbox_entry *entry,
+ struct mbox_list *head,
+ rte_spinlock_t *lock)
+{
+ t4_os_lock(lock);
+ TAILQ_INSERT_TAIL(head, entry, next);
+ t4_os_unlock(lock);
+}
+
+/**
+ * t4_os_atomic_list_del - Dequeue list element atomically from list
+ * @entry: the entry to be removed/dequeued from the list.
+ * @head: current head of the linked list
+ * @lock: the spinlock
+ */
+static inline void t4_os_atomic_list_del(struct mbox_entry *entry,
+ struct mbox_list *head,
+ rte_spinlock_t *lock)
+{
+ t4_os_lock(lock);
+ TAILQ_REMOVE(head, entry, next);
+ t4_os_unlock(lock);
+}
+
+/**
+ * adap2pinfo - return the port_info of a port
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Return the port_info structure for the port of the given index.
+ */
+static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
+{
+ return &adap->port[idx];
+}
+
+void *t4_alloc_mem(size_t size);
+void t4_free_mem(void *addr);
+#define t4_os_alloc(_size) t4_alloc_mem((_size))
+#define t4_os_free(_ptr) t4_free_mem((_ptr))
+
+void t4_os_portmod_changed(const struct adapter *adap, int port_id);
+void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
+
+void reclaim_completed_tx(struct sge_txq *q);
+void t4_free_sge_resources(struct adapter *adap);
+void t4_sge_tx_monitor_start(struct adapter *adap);
+void t4_sge_tx_monitor_stop(struct adapter *adap);
+int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf);
+int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl);
+int t4_sge_init(struct adapter *adap);
+int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id);
+int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq,
+ struct rte_eth_dev *eth_dev, int intr_idx,
+ struct sge_fl *fl, rspq_handler_t handler,
+ int cong, struct rte_mempool *mp, int queue_id,
+ int socket_id);
+int t4_sge_eth_txq_start(struct sge_eth_txq *txq);
+int t4_sge_eth_txq_stop(struct sge_eth_txq *txq);
+void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq);
+int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq);
+int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq);
+void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq);
+void t4_sge_eth_clear_queues(struct port_info *pi);
+int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt);
+int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
+ unsigned int budget, unsigned int *work_done);
+int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
+
+#endif /* __T4_ADAPTER_H__ */
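
A sketch of the single-threaded mailbox access pattern that mbox_list/mbox_lock (see T4_OS_NEEDS_MBOX_LOCKING above) are meant to support: each caller enqueues itself, busy-waits until it reaches the head, performs the mailbox operation, then dequeues. The real logic sits in the t4_hw.c mailbox code; this is illustrative only.

static void example_mbox_transaction(struct adapter *adap)
{
        struct mbox_entry entry;

        t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);

        /* wait for our turn at the mailbox */
        while (t4_os_list_first_entry(&adap->mbox_list) != &entry)
                ;

        /* ... issue the firmware mailbox command here ... */

        t4_os_atomic_list_del(&entry, &adap->mbox_list, &adap->mbox_lock);
}
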
diff --git a/src/dpdk22/drivers/net/cxgbe/base/common.h b/src/dpdk22/drivers/net/cxgbe/base/common.h
new file mode 100644
index 00000000..cf2e82dd
--- /dev/null
+++ b/src/dpdk22/drivers/net/cxgbe/base/common.h
@@ -0,0 +1,401 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CHELSIO_COMMON_H
+#define __CHELSIO_COMMON_H
+
+#include "cxgbe_compat.h"
+#include "t4_hw.h"
+#include "t4_chip_type.h"
+#include "t4fw_interface.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CXGBE_PAGE_SIZE RTE_PGSIZE_4K
+
+enum {
+ MAX_NPORTS = 4, /* max # of ports */
+};
+
+enum {
+ MEMWIN0_APERTURE = 2048,
+ MEMWIN0_BASE = 0x1b800,
+};
+
+enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST };
+
+enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR };
+
+enum {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+struct port_stats {
+ u64 tx_octets; /* total # of octets in good frames */
+ u64 tx_frames; /* all good frames */
+ u64 tx_bcast_frames; /* all broadcast frames */
+ u64 tx_mcast_frames; /* all multicast frames */
+ u64 tx_ucast_frames; /* all unicast frames */
+ u64 tx_error_frames; /* all error frames */
+
+ u64 tx_frames_64; /* # of Tx frames in a particular range */
+ u64 tx_frames_65_127;
+ u64 tx_frames_128_255;
+ u64 tx_frames_256_511;
+ u64 tx_frames_512_1023;
+ u64 tx_frames_1024_1518;
+ u64 tx_frames_1519_max;
+
+ u64 tx_drop; /* # of dropped Tx frames */
+ u64 tx_pause; /* # of transmitted pause frames */
+ u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */
+ u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */
+ u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */
+ u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */
+ u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */
+ u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */
+ u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */
+ u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */
+
+ u64 rx_octets; /* total # of octets in good frames */
+ u64 rx_frames; /* all good frames */
+ u64 rx_bcast_frames; /* all broadcast frames */
+ u64 rx_mcast_frames; /* all multicast frames */
+ u64 rx_ucast_frames; /* all unicast frames */
+ u64 rx_too_long; /* # of frames exceeding MTU */
+ u64 rx_jabber; /* # of jabber frames */
+ u64 rx_fcs_err; /* # of received frames with bad FCS */
+ u64 rx_len_err; /* # of received frames with length error */
+ u64 rx_symbol_err; /* symbol errors */
+ u64 rx_runt; /* # of short frames */
+
+ u64 rx_frames_64; /* # of Rx frames in a particular range */
+ u64 rx_frames_65_127;
+ u64 rx_frames_128_255;
+ u64 rx_frames_256_511;
+ u64 rx_frames_512_1023;
+ u64 rx_frames_1024_1518;
+ u64 rx_frames_1519_max;
+
+ u64 rx_pause; /* # of received pause frames */
+ u64 rx_ppp0; /* # of received PPP prio 0 frames */
+ u64 rx_ppp1; /* # of received PPP prio 1 frames */
+ u64 rx_ppp2; /* # of received PPP prio 2 frames */
+ u64 rx_ppp3; /* # of received PPP prio 3 frames */
+ u64 rx_ppp4; /* # of received PPP prio 4 frames */
+ u64 rx_ppp5; /* # of received PPP prio 5 frames */
+ u64 rx_ppp6; /* # of received PPP prio 6 frames */
+ u64 rx_ppp7; /* # of received PPP prio 7 frames */
+
+ u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */
+ u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */
+ u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */
+ u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */
+ u64 rx_trunc0; /* buffer-group 0 truncated packets */
+ u64 rx_trunc1; /* buffer-group 1 truncated packets */
+ u64 rx_trunc2; /* buffer-group 2 truncated packets */
+ u64 rx_trunc3; /* buffer-group 3 truncated packets */
+};
+
+struct sge_params {
+ u32 hps; /* host page size for our PF/VF */
+ u32 eq_qpp; /* egress queues/page for our PF/VF */
+ u32 iq_qpp; /* ingress queues/page for our PF/VF */
+};
+
+struct tp_params {
+ unsigned int ntxchan; /* # of Tx channels */
+ unsigned int tre; /* log2 of core clocks per TP tick */
+ unsigned int dack_re; /* DACK timer resolution */
+ unsigned int la_mask; /* what events are recorded by TP LA */
+ unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
+
+ u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
+ u32 ingress_config; /* cached TP_INGRESS_CONFIG */
+
+ /*
+ * TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
+ * subset of the set of fields which may be present in the Compressed
+ * Filter Tuple portion of filters and TCP TCB connections. The
+ * fields which are present are controlled by the TP_VLAN_PRI_MAP.
+ * Since a variable number of fields may or may not be present, their
+ * shifted field positions within the Compressed Filter Tuple may
+ * vary, or not even be present if the field isn't selected in
+ * TP_VLAN_PRI_MAP. Since some of these fields are needed in various
+ * places we store their offsets here, or a -1 if the field isn't
+ * present.
+ */
+ int vlan_shift;
+ int vnic_shift;
+ int port_shift;
+ int protocol_shift;
+};
+
+struct vpd_params {
+ unsigned int cclk;
+};
+
+struct pci_params {
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint32_t vpd_cap_addr;
+ uint16_t speed;
+ uint8_t width;
+};
+
+/*
+ * Firmware device log.
+ */
+struct devlog_params {
+ u32 memtype; /* which memory (EDC0, EDC1, MC) */
+ u32 start; /* start of log in firmware memory */
+ u32 size; /* size of log */
+};
+
+struct arch_specific_params {
+ u8 nchan;
+ u16 mps_rplc_size;
+ u16 vfcount;
+ u32 sge_fl_db;
+ u16 mps_tcam_size;
+};
+
+struct adapter_params {
+ struct sge_params sge;
+ struct tp_params tp;
+ struct vpd_params vpd;
+ struct pci_params pci;
+ struct devlog_params devlog;
+ enum pcie_memwin drv_memwin;
+
+ unsigned int sf_size; /* serial flash size in bytes */
+ unsigned int sf_nsec; /* # of flash sectors */
+
+ unsigned int fw_vers;
+ unsigned int tp_vers;
+
+ unsigned short mtus[NMTUS];
+ unsigned short a_wnd[NCCTRL_WIN];
+ unsigned short b_wnd[NCCTRL_WIN];
+
+ unsigned int mc_size; /* MC memory size */
+ unsigned int cim_la_size;
+
+ unsigned char nports; /* # of ethernet ports */
+ unsigned char portvec;
+
+ enum chip_type chip; /* chip code */
+ struct arch_specific_params arch; /* chip specific params */
+
+ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
+};
+
+struct link_config {
+ unsigned short supported; /* link capabilities */
+ unsigned short advertising; /* advertised capabilities */
+ unsigned short requested_speed; /* speed user has requested */
+ unsigned short speed; /* actual link speed */
+ unsigned char requested_fc; /* flow control user has requested */
+ unsigned char fc; /* actual link flow control */
+ unsigned char autoneg; /* autonegotiating? */
+ unsigned char link_ok; /* link up? */
+};
+
+#include "adapter.h"
+
+void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
+ u32 val);
+int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity,
+ int attempts, int delay, u32 *valp);
+
+static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay)
+{
+ return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+ delay, NULL);
+}
+
+#define for_each_port(adapter, iter) \
+ for (iter = 0; iter < (adapter)->params.nports; ++iter)
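+
+/*
+ * Typical (illustrative) use of for_each_port(); init_one_port() is a
+ * hypothetical helper, not part of this API:
+ *
+ *	int i;
+ *
+ *	for_each_port(adap, i)
+ *		init_one_port(adap, i);
+ */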
+
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+ unsigned int mask, unsigned int val);
+void t4_intr_enable(struct adapter *adapter);
+void t4_intr_disable(struct adapter *adapter);
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
+ struct link_config *lc);
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+ const unsigned short *alpha, const unsigned short *beta);
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+ enum dev_master master, enum dev_state *state);
+int t4_fw_bye(struct adapter *adap, unsigned int mbox);
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size,
+ unsigned int cache_line_size,
+ enum chip_type chip_compat);
+int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+ unsigned int cache_line_size);
+int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val);
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int nparams, const u32 *params,
+ const u32 *val, int timeout);
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ const u32 *val);
+int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
+ unsigned int port, unsigned int pf, unsigned int vf,
+ unsigned int nmac, u8 *mac, unsigned int *rss_size,
+ unsigned int portfunc, unsigned int idstype);
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+ unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+ unsigned int *rss_size);
+int t4_free_vi(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int viid);
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok);
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int idx, const u8 *addr, bool persist, bool add_smt);
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+ unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool rx_en, bool tx_en);
+int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
+ unsigned int pf, unsigned int vf, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id);
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id);
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
+
+static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
+{
+ return adap->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int us_to_core_ticks(const struct adapter *adap,
+ unsigned int us)
+{
+ return (us * adap->params.vpd.cclk) / 1000;
+}
+
+static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
+ unsigned int ticks)
+{
+ /* add Core Clock / 2 to round ticks to nearest us */
+ return ((ticks * 1000 + adapter->params.vpd.cclk / 2) /
+ adapter->params.vpd.cclk);
+}
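+
+/*
+ * Worked example (assuming params.vpd.cclk holds the core clock in kHz, as
+ * the divisions above imply): a 500 MHz core clock gives cclk == 500000, so
+ * core_ticks_per_usec() == 500, us_to_core_ticks(adap, 10) == 5000 and
+ * core_ticks_to_us(adap, 1250) == (1250000 + 250000) / 500000 == 3,
+ * i.e. 2.5us rounded to the nearest microsecond.
+ */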
+
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl, bool sleep_ok, int timeout);
+int t4_wr_mbox_meat(struct adapter *adap, int mbox,
+ const void __attribute__((__may_alias__)) *cmd, int size,
+ void *rpl, bool sleep_ok);
+
+static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
+ const void *cmd, int size, void *rpl,
+ int timeout)
+{
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
+ timeout);
+}
+
+int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p);
+
+static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl)
+{
+ return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
+}
+
+static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl)
+{
+ return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
+}
+
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx);
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, const u32 *vals,
+ unsigned int nregs, unsigned int start_idx);
+
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented);
+int t4_flash_cfg_addr(struct adapter *adapter);
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx);
+const char *t4_get_port_type_description(enum fw_port_type port_type);
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+ struct port_stats *stats,
+ struct port_stats *offset);
+void t4_clr_port_stats(struct adapter *adap, int idx);
+void t4_reset_link_config(struct adapter *adap, int idx);
+int t4_get_fw_version(struct adapter *adapter, u32 *vers);
+int t4_get_tp_version(struct adapter *adapter, u32 *vers);
+int t4_get_flash_params(struct adapter *adapter);
+int t4_prep_adapter(struct adapter *adapter);
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
+int t4_init_rss_mode(struct adapter *adap, int mbox);
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+ int start, int n, const u16 *rspq, unsigned int nrspq);
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ unsigned int flags, unsigned int defq);
+
+enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
+int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
+ unsigned int qtype, u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid);
+
+int t4_init_sge_params(struct adapter *adapter);
+int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel);
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
+#endif /* __CHELSIO_COMMON_H */
diff --git a/src/dpdk_lib18/librte_sched/rte_approx.h b/src/dpdk22/drivers/net/cxgbe/base/t4_chip_type.h
index 09f30a87..1ca68039 100755..100644
--- a/src/dpdk_lib18/librte_sched/rte_approx.h
+++ b/src/dpdk22/drivers/net/cxgbe/base/t4_chip_type.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014-2015 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -14,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Intel Corporation nor the names of its
+ * * Neither the name of Chelsio Communications nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -31,45 +31,49 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef __INCLUDE_RTE_APPROX_H__
-#define __INCLUDE_RTE_APPROX_H__
+#ifndef __T4_CHIP_TYPE_H__
+#define __T4_CHIP_TYPE_H__
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * @file
- * RTE Rational Approximation
+/*
+ * All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where:
*
- * Given a rational number alpha with 0 < alpha < 1 and a precision d, the goal
- * is to find positive integers p, q such that alpha - d < p/q < alpha + d, and
- * q is minimal.
+ * V = "4" for T4; "5" for T5, etc.
+ * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
+ * PP = adapter product designation
*
- ***/
+ * We use the "version" (V) of the adapter to code the Chip Version above.
+ */
+#define CHELSIO_PCI_ID_VER(devid) ((devid) >> 12)
+#define CHELSIO_PCI_ID_FUNC(devid) (((devid) >> 8) & 0xf)
+#define CHELSIO_PCI_ID_PROD(devid) ((devid) & 0xff)
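+
+/*
+ * Worked example (illustrative device ID): for 0x5401 the macros above give
+ * CHELSIO_PCI_ID_VER == 0x5 (a T5 part), CHELSIO_PCI_ID_FUNC == 0x4 (PF4)
+ * and CHELSIO_PCI_ID_PROD == 0x01.
+ */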
-#include <stdint.h>
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
-/**
- * Find best rational approximation
- *
- * @param alpha
- * Rational number to approximate
- * @param d
- * Precision for the rational approximation
- * @param p
- * Pointer to pre-allocated space where the numerator of the rational
- * approximation will be stored when operation is successful
- * @param q
- * Pointer to pre-allocated space where the denominator of the rational
- * approximation will be stored when operation is successful
- * @return
- * 0 upon success, error code otherwise
- */
-int rte_approx(double alpha, double d, uint32_t *p, uint32_t *q);
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+enum chip_type {
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_FIRST_REV = T4_A1,
+ T4_LAST_REV = T4_A2,
+
+ T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
+ T5_FIRST_REV = T5_A0,
+ T5_LAST_REV = T5_A1,
+};
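+
+/*
+ * For reference, CHELSIO_CHIP_CODE(CHELSIO_T5, 1) packs to 0x51, so
+ * CHELSIO_CHIP_VERSION(T5_A1) == 0x5 and CHELSIO_CHIP_RELEASE(T5_A1) == 1.
+ */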
+
+static inline int is_t4(enum chip_type chip)
+{
+ return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4);
+}
-#ifdef __cplusplus
+static inline int is_t5(enum chip_type chip)
+{
+ return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5);
}
-#endif
-#endif /* __INCLUDE_RTE_APPROX_H__ */
+#endif /* __T4_CHIP_TYPE_H__ */
diff --git a/src/dpdk22/drivers/net/cxgbe/base/t4_hw.c b/src/dpdk22/drivers/net/cxgbe/base/t4_hw.c
new file mode 100644
index 00000000..884d2cf0
--- /dev/null
+++ b/src/dpdk22/drivers/net/cxgbe/base/t4_hw.c
@@ -0,0 +1,2686 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <netinet/in.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+#include <rte_byteorder.h>
+
+#include "common.h"
+#include "t4_regs.h"
+#include "t4_regs_values.h"
+#include "t4fw_interface.h"
+
+static void init_link_config(struct link_config *lc, unsigned int caps);
+
+/**
+ * t4_read_mtu_tbl - returns the values in the HW path MTU table
+ * @adap: the adapter
+ * @mtus: where to store the MTU values
+ * @mtu_log: where to store the MTU base-2 log (may be %NULL)
+ *
+ * Reads the HW path MTU table.
+ */
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
+{
+ u32 v;
+ int i;
+
+ for (i = 0; i < NMTUS; ++i) {
+ t4_write_reg(adap, A_TP_MTU_TABLE,
+ V_MTUINDEX(0xff) | V_MTUVALUE(i));
+ v = t4_read_reg(adap, A_TP_MTU_TABLE);
+ mtus[i] = G_MTUVALUE(v);
+ if (mtu_log)
+ mtu_log[i] = G_MTUWIDTH(v);
+ }
+}
+
+/**
+ * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
+ * @adap: the adapter
+ * @addr: the indirect TP register address
+ * @mask: specifies the field within the register to modify
+ * @val: new value for the field
+ *
+ * Sets a field of an indirect TP register to the given value.
+ */
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+ unsigned int mask, unsigned int val)
+{
+ t4_write_reg(adap, A_TP_PIO_ADDR, addr);
+ val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
+ t4_write_reg(adap, A_TP_PIO_DATA, val);
+}
+
+/* The minimum additive increment value for the congestion control table */
+#define CC_MIN_INCR 2U
+
+/**
+ * t4_load_mtus - write the MTU and congestion control HW tables
+ * @adap: the adapter
+ * @mtus: the values for the MTU table
+ * @alpha: the values for the congestion control alpha parameter
+ * @beta: the values for the congestion control beta parameter
+ *
+ * Write the HW MTU table with the supplied MTUs and the high-speed
+ * congestion control table with the supplied alpha, beta, and MTUs.
+ * We write the two tables together because the additive increments
+ * depend on the MTUs.
+ */
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+ const unsigned short *alpha, const unsigned short *beta)
+{
+ static const unsigned int avg_pkts[NCCTRL_WIN] = {
+ 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
+ 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
+ 28672, 40960, 57344, 81920, 114688, 163840, 229376
+ };
+
+ unsigned int i, w;
+
+ for (i = 0; i < NMTUS; ++i) {
+ unsigned int mtu = mtus[i];
+ unsigned int log2 = cxgbe_fls(mtu);
+
+ if (!(mtu & ((1 << log2) >> 2))) /* round */
+ log2--;
+ t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
+ V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
+
+ for (w = 0; w < NCCTRL_WIN; ++w) {
+ unsigned int inc;
+
+ inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
+ CC_MIN_INCR);
+
+ t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
+ (w << 16) | (beta[w] << 13) | inc);
+ }
+ }
+}
+
+/**
+ * t4_wait_op_done_val - wait until an operation is completed
+ * @adapter: the adapter performing the operation
+ * @reg: the register to check for completion
+ * @mask: a single-bit field within @reg that indicates completion
+ * @polarity: the value of the field when the operation is completed
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the register at completion time
+ *
+ * Wait until an operation is completed by checking a bit in a register
+ * up to @attempts times. If @valp is not NULL the value of the register
+ * at the time it indicated completion is stored there. Returns 0 if the
+ * operation completes and -EAGAIN otherwise.
+ */
+int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay, u32 *valp)
+{
+ while (1) {
+ u32 val = t4_read_reg(adapter, reg);
+
+ if (!!(val & mask) == polarity) {
+ if (valp)
+ *valp = val;
+ return 0;
+ }
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ udelay(delay);
+ }
+}
+
+/**
+ * t4_set_reg_field - set a register field to a value
+ * @adapter: the adapter to program
+ * @addr: the register address
+ * @mask: specifies the portion of the register to modify
+ * @val: the new value for the register field
+ *
+ * Sets a register field specified by the supplied mask to the
+ * given value.
+ */
+void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
+ u32 val)
+{
+ u32 v = t4_read_reg(adapter, addr) & ~mask;
+
+ t4_write_reg(adapter, addr, v | val);
+ (void)t4_read_reg(adapter, addr); /* flush */
+}
+
+/**
+ * t4_read_indirect - read indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect address
+ * @data_reg: register holding the value of the indirect register
+ * @vals: where the read register values are stored
+ * @nregs: how many indirect registers to read
+ * @start_idx: index of first indirect register to read
+ *
+ * Reads registers that are accessed indirectly through an address/data
+ * register pair.
+ */
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx)
+{
+ while (nregs--) {
+ t4_write_reg(adap, addr_reg, start_idx);
+ *vals++ = t4_read_reg(adap, data_reg);
+ start_idx++;
+ }
+}
+
+/**
+ * t4_write_indirect - write indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect addresses
+ * @data_reg: register holding the value for the indirect registers
+ * @vals: values to write
+ * @nregs: how many indirect registers to write
+ * @start_idx: address of first indirect register to write
+ *
+ * Writes a sequential block of registers that are accessed indirectly
+ * through an address/data register pair.
+ */
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, const u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
+{
+ while (nregs--) {
+ t4_write_reg(adap, addr_reg, start_idx++);
+ t4_write_reg(adap, data_reg, *vals++);
+ }
+}
+
+/**
+ * t4_report_fw_error - report firmware error
+ * @adap: the adapter
+ *
+ * The adapter firmware can indicate error conditions to the host.
+ * If the firmware has indicated an error, print out the reason for
+ * the firmware error.
+ */
+static void t4_report_fw_error(struct adapter *adap)
+{
+ static const char * const reason[] = {
+ "Crash", /* PCIE_FW_EVAL_CRASH */
+ "During Device Preparation", /* PCIE_FW_EVAL_PREP */
+ "During Device Configuration", /* PCIE_FW_EVAL_CONF */
+ "During Device Initialization", /* PCIE_FW_EVAL_INIT */
+ "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
+ "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
+ "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
+ "Reserved", /* reserved */
+ };
+ u32 pcie_fw;
+
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ if (pcie_fw & F_PCIE_FW_ERR)
+ pr_err("%s: Firmware reports adapter error: %s\n",
+ __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]);
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+ u32 mbox_addr)
+{
+ for ( ; nflit; nflit--, mbox_addr += 8)
+ *rpl++ = htobe64(t4_read_reg64(adap, mbox_addr));
+}
+
+/*
+ * Handle a FW assertion reported in a mailbox.
+ */
+static void fw_asrt(struct adapter *adap, u32 mbox_addr)
+{
+ struct fw_debug_cmd asrt;
+
+ get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
+ pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
+ asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
+ be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
+}
+
+#define X_CIM_PF_NOACCESS 0xeeeeeeee
+
+/*
+ * If the Host OS Driver needs locking around accesses to the mailbox, this
+ * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
+ */
+/* makes single-statement usage a bit cleaner ... */
+#ifdef T4_OS_NEEDS_MBOX_LOCKING
+#define T4_OS_MBOX_LOCKING(x) x
+#else
+#define T4_OS_MBOX_LOCKING(x) do {} while (0)
+#endif
+
+/**
+ * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
+ * @adap: the adapter
+ * @mbox: index of the mailbox to use
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ * @timeout: time to wait for command to finish before timing out
+ * (negative implies @sleep_ok=false)
+ *
+ * Sends the given command to FW through the selected mailbox and waits
+ * for the FW to execute the command. If @rpl is not %NULL it is used to
+ * store the FW's reply to the command. The command and its optional
+ * reply are of the same length. Some FW commands like RESET and
+ * INITIALIZE can take a considerable amount of time to execute.
+ * @sleep_ok determines whether we may sleep while awaiting the response.
+ * If sleeping is allowed we use progressive backoff otherwise we spin.
+ * Note that passing in a negative @timeout is an alternate mechanism
+ * for specifying @sleep_ok=false. This is useful when a higher level
+ * interface allows for specification of @timeout but not @sleep_ok ...
+ *
+ * Returns 0 on success or a negative errno on failure. A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error. In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
+ const void __attribute__((__may_alias__)) *cmd,
+ int size, void *rpl, bool sleep_ok, int timeout)
+{
+ /*
+ * We delay in small increments at first in an effort to maintain
+ * responsiveness for simple, fast executing commands but then back
+ * off to larger delays, up to a maximum retry delay.
+ */
+ static const int delay[] = {
+ 1, 1, 3, 5, 10, 10, 20, 50, 100
+ };
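+
+ /*
+ * With this table a sleeping caller waits 1+1+3+5+10+10+20+50+100 =
+ * 200ms over its first nine polls and 100ms per poll thereafter,
+ * since the last element repeats.
+ */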
+
+ u32 v;
+ u64 res;
+ int i, ms;
+ unsigned int delay_idx;
+ __be64 *temp = (__be64 *)malloc(size * sizeof(char));
+ __be64 *p = temp;
+ u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
+ u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
+ u32 ctl;
+ struct mbox_entry entry;
+ u32 pcie_fw = 0;
+
+ if (!temp)
+ return -ENOMEM;
+
+ if ((size & 15) || size > MBOX_LEN) {
+ free(temp);
+ return -EINVAL;
+ }
+
+ bzero(p, size);
+ memcpy(p, (const __be64 *)cmd, size);
+
+ /*
+ * If we have a negative timeout, that implies that we can't sleep.
+ */
+ if (timeout < 0) {
+ sleep_ok = false;
+ timeout = -timeout;
+ }
+
+#ifdef T4_OS_NEEDS_MBOX_LOCKING
+ /*
+ * Queue ourselves onto the mailbox access list. When our entry is at
+ * the front of the list, we have rights to access the mailbox. So we
+ * wait [for a while] till we're at the front [or bail out with an
+ * EBUSY] ...
+ */
+ t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; ; i += ms) {
+ /*
+ * If we've waited too long, return a busy indication. This
+ * really ought to be based on our initial position in the
+ * mailbox access list but this is a start. We very rarely
+ * contend on access to the mailbox ... Also check for a
+ * firmware error which we'll report as a device error.
+ */
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) {
+ t4_os_atomic_list_del(&entry, &adap->mbox_list,
+ &adap->mbox_lock);
+ t4_report_fw_error(adap);
+ free(temp);
+ return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
+ }
+
+ /*
+ * If we're at the head, break out and start the mailbox
+ * protocol.
+ */
+ if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
+ break;
+
+ /*
+ * Delay for a bit before checking again ...
+ */
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ rte_delay_ms(ms);
+ }
+ }
+#endif /* T4_OS_NEEDS_MBOX_LOCKING */
+
+ /*
+ * Attempt to gain access to the mailbox.
+ */
+ for (i = 0; i < 4; i++) {
+ ctl = t4_read_reg(adap, ctl_reg);
+ v = G_MBOWNER(ctl);
+ if (v != X_MBOWNER_NONE)
+ break;
+ }
+
+ /*
+ * If we were unable to gain access, dequeue ourselves from the
+ * mailbox atomic access list and report the error to our caller.
+ */
+ if (v != X_MBOWNER_PL) {
+ T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
+ &adap->mbox_list,
+ &adap->mbox_lock));
+ t4_report_fw_error(adap);
+ free(temp);
+ return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);
+ }
+
+ /*
+ * If we gain ownership of the mailbox and there's a "valid" message
+ * in it, this is likely an asynchronous error message from the
+ * firmware. So we'll report that and then proceed on with attempting
+ * to issue our own command ... which may well fail if the error
+ * presaged the firmware crashing ...
+ */
+ if (ctl & F_MBMSGVALID) {
+ dev_err(adap, "found VALID command in mbox %u: "
+ "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
+ (unsigned long long)t4_read_reg64(adap, data_reg),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+ }
+
+ /*
+ * Copy in the new mailbox command and send it on its way ...
+ */
+ for (i = 0; i < size; i += 8, p++)
+ t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
+
+ CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx "
+ "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
+ (unsigned long long)t4_read_reg64(adap, data_reg),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+
+ t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
+ t4_read_reg(adap, ctl_reg); /* flush write */
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ /*
+ * Loop waiting for the reply; bail out if we time out or the firmware
+ * reports an error.
+ */
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) {
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ msleep(ms);
+ }
+
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ v = t4_read_reg(adap, ctl_reg);
+ if (v == X_CIM_PF_NOACCESS)
+ continue;
+ if (G_MBOWNER(v) == X_MBOWNER_PL) {
+ if (!(v & F_MBMSGVALID)) {
+ t4_write_reg(adap, ctl_reg,
+ V_MBOWNER(X_MBOWNER_NONE));
+ continue;
+ }
+
+ CXGBE_DEBUG_MBOX(adap,
+ "%s: mbox %u: %016llx %016llx %016llx %016llx "
+ "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
+ (unsigned long long)t4_read_reg64(adap, data_reg),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+
+ CXGBE_DEBUG_MBOX(adap,
+ "command %#x completed in %d ms (%ssleeping)\n",
+ *(const u8 *)cmd,
+ i + ms, sleep_ok ? "" : "non-");
+
+ res = t4_read_reg64(adap, data_reg);
+ if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
+ fw_asrt(adap, data_reg);
+ res = V_FW_CMD_RETVAL(EIO);
+ } else if (rpl) {
+ get_mbox_rpl(adap, rpl, size / 8, data_reg);
+ }
+ t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
+ T4_OS_MBOX_LOCKING(
+ t4_os_atomic_list_del(&entry, &adap->mbox_list,
+ &adap->mbox_lock));
+ free(temp);
+ return -G_FW_CMD_RETVAL((int)res);
+ }
+ }
+
+ /*
+ * We timed out waiting for a reply to our mailbox command. Report
+ * the error and also check to see if the firmware reported any
+ * errors ...
+ */
+ dev_err(adap, "command %#x in mailbox %d timed out\n",
+ *(const u8 *)cmd, mbox);
+ T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
+ &adap->mbox_list,
+ &adap->mbox_lock));
+ t4_report_fw_error(adap);
+ free(temp);
+ return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
+}
+
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+ void *rpl, bool sleep_ok)
+{
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
+ FW_CMD_MAX_TIMEOUT);
+}
+
+/**
+ * t4_config_rss_range - configure a portion of the RSS mapping table
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: virtual interface whose RSS subtable is to be written
+ * @start: start entry in the table to write
+ * @n: how many table entries to write
+ * @rspq: values for the "response queue" (Ingress Queue) lookup table
+ * @nrspq: number of values in @rspq
+ *
+ * Programs the selected part of the VI's RSS mapping table with the
+ * provided values. If @nrspq < @n the supplied values are used repeatedly
+ * until the full table range is populated.
+ *
+ * The caller must ensure the values in @rspq are in the range allowed for
+ * @viid.
+ */
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+ int start, int n, const u16 *rspq, unsigned int nrspq)
+{
+ int ret;
+ const u16 *rsp = rspq;
+ const u16 *rsp_end = rspq + nrspq;
+ struct fw_rss_ind_tbl_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_RSS_IND_TBL_CMD_VIID(viid));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+ /*
+ * Each firmware RSS command can accommodate up to 32 RSS Ingress
+ * Queue Identifiers. These Ingress Queue IDs are packed three to
+ * a 32-bit word as 10-bit values with the upper remaining 2 bits
+ * reserved.
+ */
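+ /*
+ * Illustrative layout of one packed word, assuming the IQ0/IQ1/IQ2
+ * field shifts from t4fw_interface.h (20, 10 and 0 respectively):
+ *
+ *	bits 31..30  reserved
+ *	bits 29..20  IQ0
+ *	bits 19..10  IQ1
+ *	bits  9..0   IQ2
+ */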
+ while (n > 0) {
+ int nq = min(n, 32);
+ int nq_packed = 0;
+ __be32 *qp = &cmd.iq0_to_iq2;
+
+ /*
+ * Set up the firmware RSS command header to send the next
+ * "nq" Ingress Queue IDs to the firmware.
+ */
+ cmd.niqid = cpu_to_be16(nq);
+ cmd.startidx = cpu_to_be16(start);
+
+ /*
+ * Advance past the "nq" Ingress Queue IDs being sent in this
+ * command so the next loop pass starts at the right offset.
+ */
+ start += nq;
+ n -= nq;
+
+ /*
+ * While there are still Ingress Queue IDs to stuff into the
+ * current firmware RSS command, retrieve them from the
+ * Ingress Queue ID array and insert them into the command.
+ */
+ while (nq > 0) {
+ /*
+ * Grab up to the next 3 Ingress Queue IDs (wrapping
+ * around the Ingress Queue ID array if necessary) and
+ * insert them into the firmware RSS command at the
+ * current 3-tuple position within the command.
+ */
+ u16 qbuf[3];
+ u16 *qbp = qbuf;
+ int nqbuf = min(3, nq);
+
+ nq -= nqbuf;
+ qbuf[0] = 0;
+ qbuf[1] = 0;
+ qbuf[2] = 0;
+ while (nqbuf && nq_packed < 32) {
+ nqbuf--;
+ nq_packed++;
+ *qbp++ = *rsp++;
+ if (rsp >= rsp_end)
+ rsp = rspq;
+ }
+ *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
+ V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
+ V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
+ }
+
+ /*
+ * Send this portion of the RSS table update to the firmware;
+ * bail out on any errors.
+ */
+ ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * t4_config_vi_rss - configure per VI RSS settings
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: the VI id
+ * @flags: RSS flags
+ * @defq: id of the default RSS queue for the VI.
+ *
+ * Configures VI-specific RSS properties.
+ */
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ unsigned int flags, unsigned int defq)
+{
+ struct fw_rss_vi_config_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
+ V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
+ return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * init_cong_ctrl - initialize congestion control parameters
+ * @a: the alpha values for congestion control
+ * @b: the beta values for congestion control
+ *
+ * Initialize the congestion control parameters.
+ */
+static void init_cong_ctrl(unsigned short *a, unsigned short *b)
+{
+ int i;
+
+ for (i = 0; i < 9; i++) {
+ a[i] = 1;
+ b[i] = 0;
+ }
+
+ a[9] = 2;
+ a[10] = 3;
+ a[11] = 4;
+ a[12] = 5;
+ a[13] = 6;
+ a[14] = 7;
+ a[15] = 8;
+ a[16] = 9;
+ a[17] = 10;
+ a[18] = 14;
+ a[19] = 17;
+ a[20] = 21;
+ a[21] = 25;
+ a[22] = 30;
+ a[23] = 35;
+ a[24] = 45;
+ a[25] = 60;
+ a[26] = 80;
+ a[27] = 100;
+ a[28] = 200;
+ a[29] = 300;
+ a[30] = 400;
+ a[31] = 500;
+
+ b[9] = 1;
+ b[10] = 1;
+ b[11] = 2;
+ b[12] = 2;
+ b[13] = 3;
+ b[14] = 3;
+ b[15] = 3;
+ b[16] = 3;
+ b[17] = 4;
+ b[18] = 4;
+ b[19] = 4;
+ b[20] = 4;
+ b[21] = 4;
+ b[22] = 5;
+ b[23] = 5;
+ b[24] = 5;
+ b[25] = 5;
+ b[26] = 5;
+ b[27] = 5;
+ b[28] = 6;
+ b[29] = 6;
+ b[30] = 7;
+ b[31] = 7;
+}
+
+#define INIT_CMD(var, cmd, rd_wr) do { \
+ (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
+ F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
+ (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
+} while (0)
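+
+/*
+ * For example, INIT_CMD(c, HELLO, WRITE) fills c.op_to_write with
+ * V_FW_CMD_OP(FW_HELLO_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE and sets
+ * c.retval_len16 to the command's length in 16-byte units.
+ */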
+
+int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
+{
+ u32 cclk_param, cclk_val;
+ int ret;
+
+ /*
+ * Ask firmware for the Core Clock since it knows how to translate the
+ * Reference Clock ('V2') VPD field into a Core Clock value ...
+ */
+ cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
+ 1, &cclk_param, &cclk_val);
+ if (ret) {
+ dev_err(adapter, "%s: error fetching Core Clock from firmware: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ p->cclk = cclk_val;
+ dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk);
+ return 0;
+}
+
+/* serial flash and firmware constants and flash config file constants */
+enum {
+ SF_ATTEMPTS = 10, /* max retries for SF operations */
+
+ /* flash command opcodes */
+ SF_PROG_PAGE = 2, /* program page */
+ SF_WR_DISABLE = 4, /* disable writes */
+ SF_RD_STATUS = 5, /* read status register */
+ SF_WR_ENABLE = 6, /* enable writes */
+ SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_RD_ID = 0x9f, /* read ID */
+ SF_ERASE_SECTOR = 0xd8, /* erase sector */
+};
+
+/**
+ * sf1_read - read data from the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to read
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @valp: where to store the read data
+ *
+ * Reads up to 4 bytes of data from the serial flash. The location of
+ * the read needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ int lock, u32 *valp)
+{
+ int ret;
+
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
+ return -EBUSY;
+ t4_write_reg(adapter, A_SF_OP,
+ V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
+ ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
+ if (!ret)
+ *valp = t4_read_reg(adapter, A_SF_DATA);
+ return ret;
+}
+
+/**
+ * sf1_write - write data to the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to write
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @val: value to write
+ *
+ * Writes up to 4 bytes of data to the serial flash. The location of
+ * the write needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ int lock, u32 val)
+{
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
+ return -EBUSY;
+ t4_write_reg(adapter, A_SF_DATA, val);
+ t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
+ V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
+ return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
+}
+
+/**
+ * t4_read_flash - read words from serial flash
+ * @adapter: the adapter
+ * @addr: the start address for the read
+ * @nwords: how many 32-bit words to read
+ * @data: where to store the read data
+ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Read the specified number of 32-bit words from the serial flash.
+ * If @byte_oriented is set the read data is stored as a byte array
+ * (i.e., big-endian), otherwise as 32-bit words in the platform's
+ * natural endianness.
+ */
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented)
+{
+ int ret;
+
+ if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) ||
+ (addr & 3))
+ return -EINVAL;
+
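+ /*
+ * Build the 4-byte FAST_READ command: the byte swap moves the 24-bit
+ * flash address into the upper three bytes and leaves the low byte
+ * for the SF_RD_DATA_FAST opcode, giving the opcode-then-address
+ * order a FAST_READ expects on the wire.
+ */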
+ addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST;
+
+ ret = sf1_write(adapter, 4, 1, 0, addr);
+ if (ret != 0)
+ return ret;
+
+ ret = sf1_read(adapter, 1, 1, 0, data);
+ if (ret != 0)
+ return ret;
+
+ for ( ; nwords; nwords--, data++) {
+ ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
+ if (nwords == 1)
+ t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
+ if (ret)
+ return ret;
+ if (byte_oriented)
+ *data = cpu_to_be32(*data);
+ }
+ return 0;
+}
+
+/**
+ * t4_get_fw_version - read the firmware version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW version from flash.
+ */
+int t4_get_fw_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FLASH_FW_START +
+ offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
+}
+
+/**
+ * t4_get_tp_version - read the TP microcode version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the TP microcode version from flash.
+ */
+int t4_get_tp_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FLASH_FW_START +
+ offsetof(struct fw_hdr, tp_microcode_ver),
+ 1, vers, 0);
+}
+
+#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+
+/**
+ * t4_link_l1cfg - apply link configuration to MAC/PHY
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: the port to configure
+ * @lc: the requested link configuration
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+ * enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate just reset it.
+ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
+ struct link_config *lc)
+{
+ struct fw_port_cmd c;
+ unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
+
+ lc->link_ok = 0;
+ if (lc->requested_fc & PAUSE_RX)
+ fc |= FW_PORT_CAP_FC_RX;
+ if (lc->requested_fc & PAUSE_TX)
+ fc |= FW_PORT_CAP_FC_TX;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_PORT_CMD_PORTID(port));
+ c.action_to_len16 =
+ cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+ FW_LEN16(c));
+
+ if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+ c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
+ fc);
+ lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ } else if (lc->autoneg == AUTONEG_DISABLE) {
+ c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
+ lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ } else {
+ c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
+ }
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_flash_cfg_addr - return the address of the flash configuration file
+ * @adapter: the adapter
+ *
+ * Return the address within the flash where the Firmware Configuration
+ * File is stored, or an error if the device FLASH is too small to contain
+ * a Firmware Configuration File.
+ */
+int t4_flash_cfg_addr(struct adapter *adapter)
+{
+ /*
+ * If the device FLASH isn't large enough to hold a Firmware
+ * Configuration File, return an error.
+ */
+ if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
+ return -ENOSPC;
+
+ return FLASH_CFG_START;
+}
+
+#define PF_INTR_MASK (F_PFSW | F_PFCIM)
+
+/**
+ * t4_intr_enable - enable interrupts
+ * @adapter: the adapter whose interrupts should be enabled
+ *
+ * Enable PF-specific interrupts for the calling function and the top-level
+ * interrupt concentrator for global interrupts. Interrupts are already
+ * enabled at each module, here we just enable the roots of the interrupt
+ * hierarchies.
+ *
+ * Note: this function should be called only when the driver manages
+ * non PF-specific interrupts from the various HW modules. Only one PCI
+ * function at a time should be doing this.
+ */
+void t4_intr_enable(struct adapter *adapter)
+{
+ u32 val = 0;
+ u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
+ t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
+ F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
+ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
+ F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
+ F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
+ F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
+ F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
+ t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
+ t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
+}
+
+/**
+ * t4_intr_disable - disable interrupts
+ * @adapter: the adapter whose interrupts should be disabled
+ *
+ * Disable interrupts. We only disable the top-level interrupt
+ * concentrators. The caller must be a PCI function managing global
+ * interrupts.
+ */
+void t4_intr_disable(struct adapter *adapter)
+{
+ u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
+
+ t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
+ t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
+}
+
+/**
+ * t4_get_port_type_description - return Port Type string description
+ * @port_type: firmware Port Type enumeration
+ */
+const char *t4_get_port_type_description(enum fw_port_type port_type)
+{
+ static const char * const port_type_description[] = {
+ "Fiber_XFI",
+ "Fiber_XAUI",
+ "BT_SGMII",
+ "BT_XFI",
+ "BT_XAUI",
+ "KX4",
+ "CX4",
+ "KX",
+ "KR",
+ "SFP",
+ "BP_AP",
+ "BP4_AP",
+ "QSFP_10G",
+ "QSA",
+ "QSFP",
+ "BP40_BA",
+ };
+
+ if (port_type < ARRAY_SIZE(port_type_description))
+ return port_type_description[port_type];
+ return "UNKNOWN";
+}
+
+/**
+ * t4_get_mps_bg_map - return the buffer groups associated with a port
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Returns a bitmap indicating which MPS buffer groups are associated
+ * with the given port. Bit i is set if buffer group i is used by the
+ * port.
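+ *
+ * For example, when the MPS_CMN_CTL NUMPORTS field reads 1 (a two-port
+ * configuration, as this code interprets it), port 0 maps to buffer
+ * groups 0-1 (0x3) and port 1 to groups 2-3 (0xc).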
+ */
+unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
+{
+ u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
+
+ if (n == 0)
+ return idx == 0 ? 0xf : 0;
+ if (n == 1)
+ return idx < 2 ? (3 << (2 * idx)) : 0;
+ return 1 << idx;
+}
+
+/**
+ * t4_get_port_stats - collect port statistics
+ * @adap: the adapter
+ * @idx: the port index
+ * @p: the stats structure to fill
+ *
+ * Collect statistics related to the given port from HW.
+ */
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
+{
+ u32 bgmap = t4_get_mps_bg_map(adap, idx);
+
+#define GET_STAT(name) \
+ t4_read_reg64(adap, \
+ (is_t4(adap->params.chip) ? \
+ PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) :\
+ T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
+#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
+
+ p->tx_octets = GET_STAT(TX_PORT_BYTES);
+ p->tx_frames = GET_STAT(TX_PORT_FRAMES);
+ p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
+ p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
+ p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
+ p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
+ p->tx_frames_64 = GET_STAT(TX_PORT_64B);
+ p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
+ p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
+ p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
+ p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
+ p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
+ p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
+ p->tx_drop = GET_STAT(TX_PORT_DROP);
+ p->tx_pause = GET_STAT(TX_PORT_PAUSE);
+ p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
+ p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
+ p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
+ p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
+ p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
+ p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
+ p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
+ p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
+
+ p->rx_octets = GET_STAT(RX_PORT_BYTES);
+ p->rx_frames = GET_STAT(RX_PORT_FRAMES);
+ p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
+ p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
+ p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
+ p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
+ p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
+ p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
+ p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
+ p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
+ p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
+ p->rx_frames_64 = GET_STAT(RX_PORT_64B);
+ p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
+ p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
+ p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
+ p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
+ p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
+ p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
+ p->rx_pause = GET_STAT(RX_PORT_PAUSE);
+ p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
+ p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
+ p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
+ p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
+ p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
+ p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
+ p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
+ p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
+ p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
+ p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
+
+#undef GET_STAT
+#undef GET_STAT_COM
+}
+
+/**
+ * t4_get_port_stats_offset - collect port stats relative to a previous snapshot
+ * @adap: The adapter
+ * @idx: The port
+ * @stats: Current stats to fill
+ * @offset: Previous stats snapshot
+ */
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+ struct port_stats *stats,
+ struct port_stats *offset)
+{
+ u64 *s, *o;
+ unsigned int i;
+
+ t4_get_port_stats(adap, idx, stats);
+ for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
+ i < (sizeof(struct port_stats) / sizeof(u64));
+ i++, s++, o++)
+ *s -= *o;
+}
+
+/**
+ * t4_clr_port_stats - clear port statistics
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Clear HW statistics for the given port.
+ */
+void t4_clr_port_stats(struct adapter *adap, int idx)
+{
+ unsigned int i;
+ u32 bgmap = t4_get_mps_bg_map(adap, idx);
+ u32 port_base_addr;
+
+ if (is_t4(adap->params.chip))
+ port_base_addr = PORT_BASE(idx);
+ else
+ port_base_addr = T5_PORT_BASE(idx);
+
+ for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
+ for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
+ for (i = 0; i < 4; i++)
+ if (bgmap & (1 << i)) {
+ t4_write_reg(adap,
+ A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
+ i * 8, 0);
+ t4_write_reg(adap,
+ A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
+ i * 8, 0);
+ }
+}
+
+/**
+ * t4_fw_hello - establish communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @evt_mbox: mailbox to receive async FW events
+ * @master: specifies the caller's willingness to be the device master
+ * @state: returns the current device state (if non-NULL)
+ *
+ * Issues a command to establish communication with FW. Returns either
+ * an error (negative integer) or the mailbox of the Master PF.
+ */
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+ enum dev_master master, enum dev_state *state)
+{
+ int ret;
+ struct fw_hello_cmd c;
+ u32 v;
+ unsigned int master_mbox;
+ int retries = FW_CMD_HELLO_RETRIES;
+
+retry:
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, HELLO, WRITE);
+ c.err_to_clearinit = cpu_to_be32(
+ V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
+ V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
+ V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
+ M_FW_HELLO_CMD_MBMASTER) |
+ V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
+ V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
+ F_FW_HELLO_CMD_CLEARINIT);
+
+ /*
+ * Issue the HELLO command to the firmware. If it's not successful
+ * but indicates that we got a "busy" or "timeout" condition, retry
+ * the HELLO until we exhaust our retry limit. If we do exceed our
+ * retry limit, check to see if the firmware left us any error
+ * information and report that if so ...
+ */
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret != FW_SUCCESS) {
+ if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
+ goto retry;
+ if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
+ t4_report_fw_error(adap);
+ return ret;
+ }
+
+ v = be32_to_cpu(c.err_to_clearinit);
+ master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
+ if (state) {
+ if (v & F_FW_HELLO_CMD_ERR)
+ *state = DEV_STATE_ERR;
+ else if (v & F_FW_HELLO_CMD_INIT)
+ *state = DEV_STATE_INIT;
+ else
+ *state = DEV_STATE_UNINIT;
+ }
+
+ /*
+ * If we're not the Master PF then we need to wait around for the
+ * Master PF Driver to finish setting up the adapter.
+ *
+ * Note that we also do this wait if we're a non-Master-capable PF and
+ * there is no current Master PF; a Master PF may show up momentarily
+ * and we wouldn't want to fail pointlessly. (This can happen when an
+ * OS loads lots of different drivers rapidly at the same time). In
+ * this case, the Master PF returned by the firmware will be
+ * M_PCIE_FW_MASTER so the test below will work ...
+ */
+ if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 &&
+ master_mbox != mbox) {
+ int waiting = FW_CMD_HELLO_TIMEOUT;
+
+ /*
+ * Wait for the firmware to either indicate an error or
+ * initialized state. If we see either of these we bail out
+ * and report the issue to the caller. If we exhaust the
+ * "hello timeout" and we haven't exhausted our retries, try
+ * again. Otherwise bail with a timeout error.
+ */
+ for (;;) {
+ u32 pcie_fw;
+
+ msleep(50);
+ waiting -= 50;
+
+ /*
+ * If neither Error nor Initialized are indicated
+ * by the firmware, keep waiting till we exhaust our
+ * timeout ... and then retry if we haven't exhausted
+ * our retries ...
+ */
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) {
+ if (waiting <= 0) {
+ if (retries-- > 0)
+ goto retry;
+
+ return -ETIMEDOUT;
+ }
+ continue;
+ }
+
+ /*
+ * We have either an Error or an Initialized condition;
+ * report errors preferentially.
+ */
+ if (state) {
+ if (pcie_fw & F_PCIE_FW_ERR)
+ *state = DEV_STATE_ERR;
+ else if (pcie_fw & F_PCIE_FW_INIT)
+ *state = DEV_STATE_INIT;
+ }
+
+ /*
+ * If we arrived before a Master PF was selected and
+ * one has since become valid, grab its identity for
+ * our caller.
+ */
+ if (master_mbox == M_PCIE_FW_MASTER &&
+ (pcie_fw & F_PCIE_FW_MASTER_VLD))
+ master_mbox = G_PCIE_FW_MASTER(pcie_fw);
+ break;
+ }
+ }
+
+ return master_mbox;
+}
+
+/**
+ * t4_fw_bye - end communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to terminate communication with FW.
+ */
+int t4_fw_bye(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_bye_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, BYE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_reset - issue a reset to FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @reset: specifies the type of reset to perform
+ *
+ * Issues a reset command of the specified type to FW.
+ */
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
+{
+ struct fw_reset_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, RESET, WRITE);
+ c.val = cpu_to_be32(reset);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @force: force uP into RESET even if FW RESET command fails
+ *
+ * Issues a RESET command to firmware (if desired) with a HALT indication
+ * and then puts the microprocessor into RESET state. The RESET command
+ * will only be issued if a legitimate mailbox is provided (mbox <=
+ * M_PCIE_FW_MASTER).
+ *
+ * This is generally used in order for the host to safely manipulate the
+ * adapter without fear of conflicting with whatever the firmware might
+ * be doing. The only way out of this state is to RESTART the firmware
+ * ...
+ */
+int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
+{
+ int ret = 0;
+
+ /*
+ * If a legitimate mailbox is provided, issue a RESET command
+ * with a HALT indication.
+ */
+ if (mbox <= M_PCIE_FW_MASTER) {
+ struct fw_reset_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, RESET, WRITE);
+ c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
+ c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ }
+
+ /*
+ * Normally we won't complete the operation if the firmware RESET
+ * command fails but if our caller insists we'll go ahead and put the
+ * uP into RESET. This can be useful if the firmware is hung or even
+ * missing ... We'll have to take the risk of putting the uP into
+ * RESET without the cooperation of firmware in that case.
+ *
+ * We also force the firmware's HALT flag to be on in case we bypassed
+ * the firmware RESET command above or we're dealing with old firmware
+ * which doesn't have the HALT capability. This will serve as a flag
+ * for the incoming firmware to know that it's coming out of a HALT
+ * rather than a RESET ... if it's new enough to understand that ...
+ */
+ if (ret == 0 || force) {
+ t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
+ t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
+ F_PCIE_FW_HALT);
+ }
+
+ /*
+ * And we always return the result of the firmware RESET command
+ * even when we force the uP into RESET ...
+ */
+ return ret;
+}
+
+/**
+ * t4_fw_restart - restart the firmware by taking the uP out of RESET
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @reset: if we want to do a RESET to restart things
+ *
+ * Restart firmware previously halted by t4_fw_halt(). On successful
+ * return the previous PF Master remains as the new PF Master and there
+ * is no need to issue a new HELLO command, etc.
+ *
+ * We do this in two ways:
+ *
+ * 1. If we're dealing with newer firmware we'll simply want to take
+ * the chip's microprocessor out of RESET. This will cause the
+ * firmware to start up from its start vector. And then we'll loop
+ * until the firmware indicates it's started again (PCIE_FW.HALT
+ * reset to 0) or we timeout.
+ *
+ * 2. If we're dealing with older firmware then we'll need to RESET
+ * the chip since older firmware won't recognize the PCIE_FW.HALT
+ * flag and automatically RESET itself on startup.
+ */
+int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
+{
+ if (reset) {
+ /*
+ * Since we're directing the RESET instead of the firmware
+ * doing it automatically, we need to clear the PCIE_FW.HALT
+ * bit.
+ */
+ t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
+
+ /*
+ * If we've been given a valid mailbox, first try to get the
+ * firmware to do the RESET. If that works, great and we can
+ * return success. Otherwise, if we haven't been given a
+ * valid mailbox or the RESET command failed, fall back to
+ * hitting the chip with a hammer.
+ */
+ if (mbox <= M_PCIE_FW_MASTER) {
+ t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
+ msleep(100);
+ if (t4_fw_reset(adap, mbox,
+ F_PIORST | F_PIORSTMODE) == 0)
+ return 0;
+ }
+
+ t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
+ msleep(2000);
+ } else {
+ int ms;
+
+ t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
+ for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
+ if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
+ return FW_SUCCESS;
+ msleep(100);
+ ms += 100;
+ }
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
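+/*
+ * Illustrative sketch, not part of the original driver: the intended
+ * pairing of t4_fw_halt() and t4_fw_restart() for a host-driven
+ * maintenance window. my_mbox and do_host_side_maintenance() are
+ * hypothetical stand-ins.
+ */
+static void do_host_side_maintenance(struct adapter *adap);	/* hypothetical */
+
+static int fw_maintenance_window(struct adapter *adap, unsigned int my_mbox)
+{
+	int ret = t4_fw_halt(adap, my_mbox, 0);	/* don't force the uP */
+
+	if (ret < 0)
+		return ret;	/* firmware refused the RESET */
+
+	do_host_side_maintenance(adap);
+
+	/* reset=1 also copes with old firmware lacking HALT support */
+	return t4_fw_restart(adap, my_mbox, 1);
+}
+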
+/**
+ * t4_fixup_host_params_compat - fix up host-dependent parameters
+ * @adap: the adapter
+ * @page_size: the host's Base Page Size
+ * @cache_line_size: the host's Cache Line Size
+ * @chip_compat: maintain compatibility with designated chip
+ *
+ * Various registers in the chip contain values which are dependent on the
+ * host's Base Page and Cache Line Sizes. This function will fix all of
+ * those registers with the appropriate values as passed in ...
+ *
+ * @chip_compat is used to limit the set of changes that are made
+ * to be compatible with the indicated chip release. This is used by
+ * drivers to maintain compatibility with chip register settings when
+ * the drivers haven't [yet] been updated with new chip support.
+ */
+int t4_fixup_host_params_compat(struct adapter *adap,
+ unsigned int page_size,
+ unsigned int cache_line_size,
+ enum chip_type chip_compat)
+{
+ unsigned int page_shift = cxgbe_fls(page_size) - 1;
+ unsigned int sge_hps = page_shift - 10;
+ unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
+ unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
+ unsigned int fl_align_log = cxgbe_fls(fl_align) - 1;
+
+ t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
+ V_HOSTPAGESIZEPF0(sge_hps) |
+ V_HOSTPAGESIZEPF1(sge_hps) |
+ V_HOSTPAGESIZEPF2(sge_hps) |
+ V_HOSTPAGESIZEPF3(sge_hps) |
+ V_HOSTPAGESIZEPF4(sge_hps) |
+ V_HOSTPAGESIZEPF5(sge_hps) |
+ V_HOSTPAGESIZEPF6(sge_hps) |
+ V_HOSTPAGESIZEPF7(sge_hps));
+
+ if (is_t4(adap->params.chip) || is_t4(chip_compat))
+ t4_set_reg_field(adap, A_SGE_CONTROL,
+ V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
+ F_EGRSTATUSPAGESIZE,
+ V_INGPADBOUNDARY(fl_align_log -
+ X_INGPADBOUNDARY_SHIFT) |
+ V_EGRSTATUSPAGESIZE(stat_len != 64));
+ else {
+ /*
+ * T5 introduced the separation of the Free List Padding and
+ * Packing Boundaries. Thus, we can select a smaller Padding
+ * Boundary to avoid uselessly chewing up PCIe Link and Memory
+ * Bandwidth, and use a Packing Boundary which is large enough
+ * to avoid false sharing between CPUs, etc.
+ *
+ * For the PCI Link, the smaller the Padding Boundary the
+ * better. For the Memory Controller, a smaller Padding
+ * Boundary is better until we cross under the Memory Line
+ * Size (the minimum unit of transfer to/from Memory). If we
+ * have a Padding Boundary which is smaller than the Memory
+ * Line Size, that'll involve a Read-Modify-Write cycle on the
+ * Memory Controller which is never good. For T5 the smallest
+ * Padding Boundary which we can select is 32 bytes which is
+ * larger than any known Memory Controller Line Size so we'll
+ * use that.
+ */
+
+ /*
+ * N.B. T5 has a different interpretation of the "0" value for
+ * the Packing Boundary. This corresponds to 16 bytes instead
+ * of the expected 32 bytes. We never have a Packing Boundary
+ * less than 32 bytes so we can't use that special value but
+ * on the other hand, if we wanted 32 bytes, the best we can
+ * really do is 64 bytes ...
+ */
+ if (fl_align <= 32) {
+ fl_align = 64;
+ fl_align_log = 6;
+ }
+ t4_set_reg_field(adap, A_SGE_CONTROL,
+ V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
+ F_EGRSTATUSPAGESIZE,
+ V_INGPADBOUNDARY(X_INGPCIEBOUNDARY_32B) |
+ V_EGRSTATUSPAGESIZE(stat_len != 64));
+ t4_set_reg_field(adap, A_SGE_CONTROL2,
+ V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
+ V_INGPACKBOUNDARY(fl_align_log -
+ X_INGPACKBOUNDARY_SHIFT));
+ }
+
+ /*
+ * Adjust various SGE Free List Host Buffer Sizes.
+ *
+ * The first four entries are:
+ *
+ * 0: Host Page Size
+ * 1: 64KB
+ * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
+ * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
+ *
+ * For the single-MTU buffers in unpacked mode we need to include
+ * space for the SGE Control Packet Shift, 14 byte Ethernet header,
+ * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
+ * Padding boundary. All of these are accommodated in the Factory
+ * Default Firmware Configuration File but we need to adjust it for
+ * this host's cache line size.
+ */
+ t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
+ t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
+ (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1)
+ & ~(fl_align - 1));
+ t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
+ (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1)
+ & ~(fl_align - 1));
+
+ t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
+
+ return 0;
+}
+
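+/*
+ * Worked example (illustration only): for a typical host with 4KB pages
+ * and 64-byte cache lines, cxgbe_fls(4096) = 13 gives page_shift = 12,
+ * so sge_hps = 12 - 10 = 2; stat_len stays 64 (the cache line is not
+ * bigger than 64); and fl_align = 64 with fl_align_log = 6, which on T5
+ * selects a 64-byte Ingress Packing Boundary.
+ */
+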
+/**
+ * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
+ * @adap: the adapter
+ * @page_size: the host's Base Page Size
+ * @cache_line_size: the host's Cache Line Size
+ *
+ * Various registers in T4 contain values which are dependent on the
+ * host's Base Page and Cache Line Sizes. This function will fix all of
+ * those registers with the appropriate values as passed in ...
+ *
+ * This routine makes changes which are compatible with T4 chips.
+ */
+int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+ unsigned int cache_line_size)
+{
+ return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
+ T4_LAST_REV);
+}
+
+/**
+ * t4_fw_initialize - ask FW to initialize the device
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to FW to partially initialize the device. This
+ * performs initialization that generally doesn't depend on user input.
+ */
+int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_initialize_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, INITIALIZE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_query_params_rw - query FW or device parameters
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF
+ * @vf: the VF
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @val: the parameter values
+ * @rw: if set, also write the @val entries into the command before the read
+ *
+ * Reads the value of FW or device parameters. Up to 7 parameters can be
+ * queried at once.
+ */
+static int t4_query_params_rw(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int nparams, const u32 *params,
+ u32 *val, int rw)
+{
+ unsigned int i;
+ int ret;
+ struct fw_params_cmd c;
+ __be32 *p = &c.param[0].mnem;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_PARAMS_CMD_PFN(pf) |
+ V_FW_PARAMS_CMD_VFN(vf));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+ for (i = 0; i < nparams; i++) {
+ *p++ = cpu_to_be32(*params++);
+ if (rw)
+ *p = cpu_to_be32(*(val + i));
+ p++;
+ }
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0)
+ for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
+ *val++ = be32_to_cpu(*p);
+ return ret;
+}
+
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val)
+{
+ return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
+}
+
+/**
+ * t4_set_params_timeout - sets FW or device parameters
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF
+ * @vf: the VF
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @val: the parameter values
+ * @timeout: the timeout time
+ *
+ * Sets the value of FW or device parameters. Up to 7 parameters can be
+ * specified at once.
+ */
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int nparams, const u32 *params,
+ const u32 *val, int timeout)
+{
+ struct fw_params_cmd c;
+ __be32 *p = &c.param[0].mnem;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_PARAMS_CMD_PFN(pf) |
+ V_FW_PARAMS_CMD_VFN(vf));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+ while (nparams--) {
+ *p++ = cpu_to_be32(*params++);
+ *p++ = cpu_to_be32(*val++);
+ }
+
+ return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
+}
+
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ const u32 *val)
+{
+ return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
+ FW_CMD_MAX_TIMEOUT);
+}
+
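+/*
+ * Sketch of a typical t4_query_params() call, assuming the FW_PARAMS_*
+ * mnemonics from t4fw_interface.h; the PF/VF of 0/0 and the adap->mbox
+ * field are illustrative.
+ */
+static int read_portvec(struct adapter *adap, u32 *portvec)
+{
+	u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
+
+	return t4_query_params(adap, adap->mbox, 0, 0, 1, &param, portvec);
+}
+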
+/**
+ * t4_alloc_vi_func - allocate a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @nmac: number of MAC addresses needed (1 to 5)
+ * @mac: the MAC addresses of the VI
+ * @rss_size: size of RSS table slice associated with this VI
+ * @portfunc: which Port Application Function MAC Address is desired
+ * @idstype: Intrusion Detection Type
+ *
+ * Allocates a virtual interface for the given physical port. If @mac is
+ * not %NULL it contains the MAC addresses of the VI as assigned by FW.
+ * @mac should be large enough to hold @nmac Ethernet addresses; they are
+ * stored consecutively, so the space needed is @nmac * 6 bytes.
+ * Returns a negative error number or the non-negative VI id.
+ */
+int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
+ unsigned int port, unsigned int pf, unsigned int vf,
+ unsigned int nmac, u8 *mac, unsigned int *rss_size,
+ unsigned int portfunc, unsigned int idstype)
+{
+ int ret;
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+ V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
+ c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
+ V_FW_VI_CMD_FUNC(portfunc));
+ c.portid_pkd = V_FW_VI_CMD_PORTID(port);
+ c.nmac = nmac - 1;
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ if (mac) {
+ memcpy(mac, c.mac, sizeof(c.mac));
+ switch (nmac) {
+ case 5:
+ memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
+ /* FALLTHROUGH */
+ case 4:
+ memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
+ /* FALLTHROUGH */
+ case 3:
+ memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
+ /* FALLTHROUGH */
+ case 2:
+ memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
+ /* FALLTHROUGH */
+ }
+ }
+ if (rss_size)
+ *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
+ return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
+}
+
+/**
+ * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @nmac: number of MAC addresses needed (1 to 5)
+ * @mac: the MAC addresses of the VI
+ * @rss_size: size of RSS table slice associated with this VI
+ *
+ * Backwards-compatible convenience routine to allocate a Virtual
+ * Interface with an Ethernet Port Application Function and Intrusion
+ * Detection System disabled.
+ */
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+ unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+ unsigned int *rss_size)
+{
+ return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
+ FW_VI_FUNC_ETH, 0);
+}
+
+/**
+ * t4_free_vi - free a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @viid: virtual interface identifier
+ *
+ * Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int viid)
+{
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC | V_FW_VI_CMD_PFN(pf) |
+ V_FW_VI_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
+ c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+}
+
+/**
+ * t4_set_rxmode - set Rx properties of a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @mtu: the new MTU or -1
+ * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
+ * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
+ * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
+ * -1 no change
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sets Rx properties of a virtual interface.
+ */
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok)
+{
+ struct fw_vi_rxmode_cmd c;
+
+ /* convert to FW values */
+ if (mtu < 0)
+ mtu = M_FW_VI_RXMODE_CMD_MTU;
+ if (promisc < 0)
+ promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
+ if (all_multi < 0)
+ all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
+ if (bcast < 0)
+ bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
+ if (vlanex < 0)
+ vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_VI_RXMODE_CMD_VIID(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
+ V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
+ V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
+ V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
+ V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
+ return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+}
+
+/**
+ * t4_change_mac - modifies the exact-match filter for a MAC address
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @idx: index of existing filter for old value of MAC address, or -1
+ * @addr: the new MAC address value
+ * @persist: whether a new MAC allocation should be persistent
+ * @add_smt: if true also add the address to the HW SMT
+ *
+ * Modifies an exact-match filter and sets it to the new MAC address if
+ * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
+ * latter case the address is added persistently if @persist is %true.
+ *
+ * Note that in general it is not possible to modify the value of a given
+ * filter so the generic way to modify an address filter is to free the one
+ * being used by the old address value and allocate a new filter for the
+ * new address value.
+ *
+ * Returns a negative error number or the index of the filter with the new
+ * MAC value. Note that this index may differ from @idx.
+ */
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int idx, const u8 *addr, bool persist, bool add_smt)
+{
+ int ret, mode;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_exact *p = c.u.exact;
+ int max_mac_addr = adap->params.arch.mps_tcam_size;
+
+ if (idx < 0) /* new allocation */
+ idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
+ mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_VI_MAC_CMD_VIID(viid));
+ c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
+ p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
+ V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
+ V_FW_VI_MAC_CMD_IDX(idx));
+ memcpy(p->macaddr, addr, sizeof(p->macaddr));
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0) {
+ ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
+ if (ret >= max_mac_addr)
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
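+/*
+ * Sketch of typical t4_change_mac() usage, illustration only: install a
+ * VI's primary unicast address. idx = -1 with persist = true asks FW to
+ * pick a persistent filter slot; the hypothetical pi->xact_addr_filt
+ * field remembers the returned index so the address can be replaced
+ * later instead of allocating a second filter.
+ */
+static int set_primary_mac(struct adapter *adap, struct port_info *pi,
+			   const u8 *addr)
+{
+	int ret = t4_change_mac(adap, adap->mbox, pi->viid, -1, addr,
+				true, true);
+
+	if (ret < 0)
+		return ret;
+	pi->xact_addr_filt = ret;	/* hypothetical bookkeeping */
+	return 0;
+}
+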
+/**
+ * t4_enable_vi_params - enable/disable a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @rx_en: 1=enable Rx, 0=disable Rx
+ * @tx_en: 1=enable Tx, 0=disable Tx
+ * @dcb_en: 1=enable delivery of Data Center Bridging messages.
+ *
+ * Enables/disables a virtual interface. Note that setting DCB Enable
+ * only makes sense when enabling a Virtual Interface ...
+ */
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+ unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
+{
+ struct fw_vi_enable_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_VI_ENABLE_CMD_VIID(viid));
+ c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
+ V_FW_VI_ENABLE_CMD_EEN(tx_en) |
+ V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
+ FW_LEN16(c));
+ return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_enable_vi - enable/disable a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @rx_en: 1=enable Rx, 0=disable Rx
+ * @tx_en: 1=enable Tx, 0=disable Tx
+ *
+ * Enables/disables a virtual interface. Use t4_enable_vi_params() if
+ * delivery of Data Center Bridging messages is also required.
+ */
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool rx_en, bool tx_en)
+{
+ return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
+}
+
+/**
+ * t4_iq_start_stop - enable/disable an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @start: %true to enable the queues, %false to disable them
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Starts or stops an ingress queue and its associated FLs, if any.
+ */
+int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
+ unsigned int pf, unsigned int vf, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
+ V_FW_IQ_CMD_IQSTOP(!start) |
+ FW_LEN16(c));
+ c.iqid = cpu_to_be16(iqid);
+ c.fl0id = cpu_to_be16(fl0id);
+ c.fl1id = cpu_to_be16(fl1id);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_iq_free - free an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Frees an ingress queue and its associated FLs, if any.
+ */
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
+ c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
+ c.iqid = cpu_to_be16(iqid);
+ c.fl0id = cpu_to_be16(fl0id);
+ c.fl1id = cpu_to_be16(fl1id);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_eth_eq_free - free an Ethernet egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an Ethernet egress queue.
+ */
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_eth_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_EQ_ETH_CMD_PFN(pf) |
+ V_FW_EQ_ETH_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
+ c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_handle_fw_rpl - process a FW reply message
+ * @adap: the adapter
+ * @rpl: start of the FW message
+ *
+ * Processes a FW message, such as link state change messages.
+ */
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
+{
+ u8 opcode = *(const u8 *)rpl;
+
+ /*
+ * This might be a port command ... this simplifies the following
+ * conditionals ... We can get away with pre-dereferencing
+ * action_to_len16 because it's in the first 16 bytes and all messages
+ * will be at least that long.
+ */
+ const struct fw_port_cmd *p = (const void *)rpl;
+ unsigned int action =
+ G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
+
+ if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
+ /* link/module state change message */
+ int speed = 0, fc = 0, i;
+ int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
+ struct port_info *pi = NULL;
+ struct link_config *lc;
+ u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
+ int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
+ u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
+
+ if (stat & F_FW_PORT_CMD_RXPAUSE)
+ fc |= PAUSE_RX;
+ if (stat & F_FW_PORT_CMD_TXPAUSE)
+ fc |= PAUSE_TX;
+ if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ speed = ETH_LINK_SPEED_100;
+ else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ speed = ETH_LINK_SPEED_1000;
+ else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ speed = ETH_LINK_SPEED_10000;
+ else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ speed = ETH_LINK_SPEED_40G;
+
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ if (pi->tx_chan == chan)
+ break;
+ }
+ lc = &pi->link_cfg;
+
+ if (mod != pi->mod_type) {
+ pi->mod_type = mod;
+ t4_os_portmod_changed(adap, i);
+ }
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc) { /* something changed */
+ if (!link_ok && lc->link_ok) {
+ static const char * const reason[] = {
+ "Link Down",
+ "Remote Fault",
+ "Auto-negotiation Failure",
+ "Reserved",
+ "Insufficient Airflow",
+ "Unable To Determine Reason",
+ "No RX Signal Detected",
+ "Reserved",
+ };
+ unsigned int rc = G_FW_PORT_CMD_LINKDNRC(stat);
+
+ dev_warn(adap, "Port %d link down, reason: %s\n",
+ chan, reason[rc]);
+ }
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ lc->supported = be16_to_cpu(p->u.info.pcap);
+ }
+ } else {
+ dev_warn(adap, "Unknown firmware reply %d\n", opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void t4_reset_link_config(struct adapter *adap, int idx)
+{
+ struct port_info *pi = adap2pinfo(adap, idx);
+ struct link_config *lc = &pi->link_cfg;
+
+ lc->link_ok = 0;
+ lc->requested_speed = 0;
+ lc->requested_fc = 0;
+ lc->speed = 0;
+ lc->fc = 0;
+}
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ * @caps: link capabilities
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/flow-control/autonegotiation settings.
+ */
+static void init_link_config(struct link_config *lc,
+ unsigned int caps)
+{
+ lc->supported = caps;
+ lc->requested_speed = 0;
+ lc->speed = 0;
+ lc->requested_fc = 0;
+ lc->fc = 0;
+ if (lc->supported & FW_PORT_CAP_ANEG) {
+ lc->advertising = lc->supported & ADVERT_MASK;
+ lc->autoneg = AUTONEG_ENABLE;
+ } else {
+ lc->advertising = 0;
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+}
+
+/**
+ * t4_wait_dev_ready - wait until reads of registers work
+ * @adapter: the adapter
+ *
+ * Right after the device is RESET it can take a small amount of time
+ * for it to respond to register reads. Until then, all reads will
+ * return either 0xff...ff or 0xee...ee. Return an error if reads
+ * don't work within a reasonable time frame.
+ */
+static int t4_wait_dev_ready(struct adapter *adapter)
+{
+ u32 whoami;
+
+ whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+
+ if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
+ return 0;
+
+ msleep(500);
+ whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
+ ? 0 : -EIO);
+}
+
+struct flash_desc {
+ u32 vendor_and_model_id;
+ u32 size_mb;
+};
+
+int t4_get_flash_params(struct adapter *adapter)
+{
+ /*
+ * Table of supported non-Numonix flash parts. Numonix parts are left
+ * to the preexisting well-tested code. All flash parts have 64KB
+ * sectors.
+ */
+ static struct flash_desc supported_flash[] = {
+ { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
+ };
+
+ int ret;
+ unsigned int i;
+ u32 info = 0;
+
+ ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
+ if (!ret)
+ ret = sf1_read(adapter, 3, 0, 1, &info);
+ t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(supported_flash); ++i)
+ if (supported_flash[i].vendor_and_model_id == info) {
+ adapter->params.sf_size = supported_flash[i].size_mb;
+ adapter->params.sf_nsec =
+ adapter->params.sf_size / SF_SEC_SIZE;
+ return 0;
+ }
+
+ if ((info & 0xff) != 0x20) /* not a Numonix flash */
+ return -EINVAL;
+ info >>= 16; /* log2 of size */
+ if (info >= 0x14 && info < 0x18)
+ adapter->params.sf_nsec = 1 << (info - 16);
+ else if (info == 0x18)
+ adapter->params.sf_nsec = 64;
+ else
+ return -EINVAL;
+ adapter->params.sf_size = 1 << info;
+
+ /*
+ * Ideally we would reject adapters with FLASHes which are too small;
+ * for now, just emit a warning.
+ */
+ if (adapter->params.sf_size < FLASH_MIN_SIZE) {
+ dev_warn(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
+ adapter->params.sf_size, FLASH_MIN_SIZE);
+ }
+
+ return 0;
+}
+
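+/*
+ * Worked example of the Numonix branch above: a 4MB part reports
+ * log2(size) = 0x16 in bits 31:16 of the ID word, so sf_size becomes
+ * 1 << 0x16 = 4MB and sf_nsec = 1 << (0x16 - 16) = 64, i.e. 64 sectors
+ * of SF_SEC_SIZE = 64KB, which multiplies back to exactly 4MB.
+ */
+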
+/**
+ * t4_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ *
+ * Initialize adapter SW state for the various HW modules, set initial
+ * values for some adapter tunables, take PHYs out of reset, and
+ * initialize the MDIO interface.
+ */
+int t4_prep_adapter(struct adapter *adapter)
+{
+ int ret, ver;
+ u32 pl_rev;
+
+ ret = t4_wait_dev_ready(adapter);
+ if (ret < 0)
+ return ret;
+
+ pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV));
+ adapter->params.pci.device_id = adapter->pdev->id.device_id;
+ adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
+
+ /*
+ * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
+ * ADAPTER (VERSION << 4 | REVISION)
+ */
+ ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
+ adapter->params.chip = 0;
+ switch (ver) {
+ case CHELSIO_T5:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+ adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 128;
+ adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.vfcount = 128;
+ break;
+ default:
+ dev_err(adapter, "%s: Device %d is not supported\n",
+ __func__, adapter->params.pci.device_id);
+ return -EINVAL;
+ }
+
+ ret = t4_get_flash_params(adapter);
+ if (ret < 0)
+ return ret;
+
+ adapter->params.cim_la_size = CIMLA_SIZE;
+
+ init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+
+ /*
+ * Default port and clock for debugging in case we can't reach FW.
+ */
+ adapter->params.nports = 1;
+ adapter->params.portvec = 1;
+ adapter->params.vpd.cclk = 50000;
+
+ return 0;
+}
+
+/**
+ * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * @adapter: the adapter
+ * @qid: the Queue ID
+ * @qtype: the Ingress or Egress type for @qid
+ * @pbar2_qoffset: BAR2 Queue Offset
+ * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ * Returns the BAR2 SGE Queue Registers information associated with the
+ * indicated Absolute Queue ID. These are passed back in return value
+ * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
+ * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
+ *
+ * This may return an error which indicates that BAR2 SGE Queue
+ * registers aren't available. If an error is not returned, then the
+ * following values are returned:
+ *
+ * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
+ * *@pbar2_qid: the BAR2 SGE Queue ID of @qid, or 0 if it is inferred
+ *
+ * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
+ * require the "Inferred Queue ID" ability may be used. E.g. the
+ * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
+ * then these "Inferred Queue ID" registers may not be used.
+ */
+int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
+ enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid)
+{
+ unsigned int page_shift, page_size, qpp_shift, qpp_mask;
+ u64 bar2_page_offset, bar2_qoffset;
+ unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
+
+ /*
+ * T4 doesn't support BAR2 SGE Queue registers.
+ */
+ if (is_t4(adapter->params.chip))
+ return -EINVAL;
+
+ /*
+ * Get our SGE Page Size parameters.
+ */
+ page_shift = adapter->params.sge.hps + 10;
+ page_size = 1 << page_shift;
+
+ /*
+ * Get the right Queues per Page parameters for our Queue.
+ */
+ qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
+ adapter->params.sge.eq_qpp :
+ adapter->params.sge.iq_qpp);
+ qpp_mask = (1 << qpp_shift) - 1;
+
+ /*
+ * Calculate the basics of the BAR2 SGE Queue register area:
+ * o The BAR2 page the Queue registers will be in.
+ * o The BAR2 Queue ID.
+ * o The BAR2 Queue ID Offset into the BAR2 page.
+ */
+ bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+ bar2_qid = qid & qpp_mask;
+ bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
+
+ /*
+ * If the BAR2 Queue ID Offset is less than the Page Size, then the
+ * hardware will infer the Absolute Queue ID simply from the writes to
+ * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
+ * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
+ * write to the first BAR2 SGE Queue Area within the BAR2 Page with
+ * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
+ * from the BAR2 Page and BAR2 Queue ID.
+ *
+ * One important consequence of this is that some BAR2 SGE registers
+ * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
+ * there. But other registers synthesize the SGE Queue ID purely
+ * from the writes to the registers -- the Write Combined Doorbell
+ * Buffer is a good example. These BAR2 SGE Registers are only
+ * available for those BAR2 SGE Register areas where the SGE Absolute
+ * Queue ID can be inferred from simple writes.
+ */
+ bar2_qoffset = bar2_page_offset;
+ bar2_qinferred = (bar2_qid_offset < page_size);
+ if (bar2_qinferred) {
+ bar2_qoffset += bar2_qid_offset;
+ bar2_qid = 0;
+ }
+
+ *pbar2_qoffset = bar2_qoffset;
+ *pbar2_qid = bar2_qid;
+ return 0;
+}
+
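+/*
+ * Sketch of typical t4_bar2_sge_qregs() usage, illustration only:
+ * derive the BAR2 doorbell address of an egress queue. The bar2
+ * pointer stands in for the driver's BAR2 mapping.
+ */
+static volatile void *eq_db_addr(struct adapter *adap, u8 *bar2,
+				 unsigned int eq_id, unsigned int *db_qid)
+{
+	u64 qoffset;
+
+	if (t4_bar2_sge_qregs(adap, eq_id, T4_BAR2_QTYPE_EGRESS,
+			      &qoffset, db_qid) < 0)
+		return NULL;	/* e.g. T4: no BAR2 SGE registers */
+
+	/* *db_qid == 0 means the "Inferred Queue ID" registers may be used */
+	return bar2 + qoffset;
+}
+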
+/**
+ * t4_init_sge_params - initialize adap->params.sge
+ * @adapter: the adapter
+ *
+ * Initialize various fields of the adapter's SGE Parameters structure.
+ */
+int t4_init_sge_params(struct adapter *adapter)
+{
+ struct sge_params *sge_params = &adapter->params.sge;
+ u32 hps, qpp;
+ unsigned int s_hps, s_qpp;
+
+ /*
+ * Extract the SGE Page Size for our PF.
+ */
+ hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
+ s_hps = (S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) *
+ adapter->pf);
+ sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
+
+ /*
+ * Extract the SGE Egress and Ingress Queues Per Page for our PF.
+ */
+ s_qpp = (S_QUEUESPERPAGEPF0 +
+ (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
+ qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
+ sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
+ qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
+ sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
+
+ return 0;
+}
+
+/**
+ * t4_init_tp_params - initialize adap->params.tp
+ * @adap: the adapter
+ *
+ * Initialize various fields of the adapter's TP Parameters structure.
+ */
+int t4_init_tp_params(struct adapter *adap)
+{
+ int chan;
+ u32 v;
+
+ v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
+ adap->params.tp.tre = G_TIMERRESOLUTION(v);
+ adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
+
+ /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+ for (chan = 0; chan < NCHAN; chan++)
+ adap->params.tp.tx_modq[chan] = chan;
+
+ /*
+ * Cache the adapter's Compressed Filter Mode and global Ingress
+ * Configuration.
+ */
+ t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ &adap->params.tp.vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
+ t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ &adap->params.tp.ingress_config, 1,
+ A_TP_INGRESS_CONFIG);
+
+ /*
+ * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+ * shift positions of several elements of the Compressed Filter Tuple
+ * for this adapter which we need frequently ...
+ */
+ adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+ adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+ adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+ adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
+ F_PROTOCOL);
+
+ /*
+ * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+ * represents the presence of an Outer VLAN instead of a VNIC ID.
+ */
+ if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+ adap->params.tp.vnic_shift = -1;
+
+ return 0;
+}
+
+/**
+ * t4_filter_field_shift - calculate filter field shift
+ * @adap: the adapter
+ * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+ *
+ * Return the shift position of a filter field within the Compressed
+ * Filter Tuple. The filter field is specified via its selection bit
+ * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
+ */
+int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel)
+{
+ unsigned int filter_mode = adap->params.tp.vlan_pri_map;
+ unsigned int sel;
+ int field_shift;
+
+ if ((filter_mode & filter_sel) == 0)
+ return -1;
+
+ for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+ switch (filter_mode & sel) {
+ case F_FCOE:
+ field_shift += W_FT_FCOE;
+ break;
+ case F_PORT:
+ field_shift += W_FT_PORT;
+ break;
+ case F_VNIC_ID:
+ field_shift += W_FT_VNIC_ID;
+ break;
+ case F_VLAN:
+ field_shift += W_FT_VLAN;
+ break;
+ case F_TOS:
+ field_shift += W_FT_TOS;
+ break;
+ case F_PROTOCOL:
+ field_shift += W_FT_PROTOCOL;
+ break;
+ case F_ETHERTYPE:
+ field_shift += W_FT_ETHERTYPE;
+ break;
+ case F_MACMATCH:
+ field_shift += W_FT_MACMATCH;
+ break;
+ case F_MPSHITTYPE:
+ field_shift += W_FT_MPSHITTYPE;
+ break;
+ case F_FRAGMENTATION:
+ field_shift += W_FT_FRAGMENTATION;
+ break;
+ }
+ }
+ return field_shift;
+}
+
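+/*
+ * Worked example: with the bit ordering above, if the filter mode
+ * enables only PORT, VLAN and PROTOCOL, t4_filter_field_shift(adap,
+ * F_PROTOCOL) sums the two enabled lower fields and returns
+ * W_FT_PORT + W_FT_VLAN, while t4_filter_field_shift(adap, F_VNIC_ID)
+ * returns -1 because VNIC_ID is not part of the mode.
+ */
+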
+int t4_init_rss_mode(struct adapter *adap, int mbox)
+{
+ int i, ret;
+ struct fw_rss_vi_config_cmd rvc;
+
+ memset(&rvc, 0, sizeof(rvc));
+
+ for_each_port(adap, i) {
+ struct port_info *p = adap2pinfo(adap, i);
+
+ rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
+ rvc.retval_len16 = htonl(FW_LEN16(rvc));
+ ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
+ if (ret)
+ return ret;
+ p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
+ }
+ return 0;
+}
+
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+{
+ u8 addr[6];
+ int ret, i, j = 0;
+ struct fw_port_cmd c;
+
+ memset(&c, 0, sizeof(c));
+
+ for_each_port(adap, i) {
+ unsigned int rss_size = 0;
+ struct port_info *p = adap2pinfo(adap, i);
+
+ while ((adap->params.portvec & (1 << j)) == 0)
+ j++;
+
+ c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_PORT_CMD_PORTID(j));
+ c.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(
+ FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_LEN16(c));
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
+ if (ret < 0)
+ return ret;
+
+ p->viid = ret;
+ p->tx_chan = j;
+ p->rss_size = rss_size;
+ t4_os_set_hw_addr(adap, i, addr);
+
+ ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
+ p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
+ G_FW_PORT_CMD_MDIOADDR(ret) : -1;
+ p->port_type = G_FW_PORT_CMD_PTYPE(ret);
+ p->mod_type = FW_PORT_MOD_TYPE_NA;
+
+ init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
+ j++;
+ }
+ return 0;
+}
diff --git a/src/dpdk22/drivers/net/cxgbe/base/t4_hw.h b/src/dpdk22/drivers/net/cxgbe/base/t4_hw.h
new file mode 100644
index 00000000..bf623cf4
--- /dev/null
+++ b/src/dpdk22/drivers/net/cxgbe/base/t4_hw.h
@@ -0,0 +1,149 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __T4_HW_H
+#define __T4_HW_H
+
+enum {
+ NCHAN = 4, /* # of HW channels */
+ NMTUS = 16, /* size of MTU table */
+ NCCTRL_WIN = 32, /* # of congestion control windows */
+ MBOX_LEN = 64, /* mailbox size in bytes */
+ UDBS_SEG_SIZE = 128, /* segment size for BAR2 user doorbells */
+};
+
+enum {
+ CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */
+};
+
+enum {
+ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
+};
+
+enum {
+ SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
+ SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
+};
+
+/* PCI-e memory window access */
+enum pcie_memwin {
+ MEMWIN_NIC = 0,
+};
+
+enum {
+ SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
+ SGE_EQ_IDXSIZE = 64, /* egress queue pidx/cidx unit size */
+ /* max no. of desc allowed in WR */
+ SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / SGE_EQ_IDXSIZE,
+};
+
+struct sge_qstat { /* data written to SGE queue status entries */
+ __be32 qid;
+ __be16 cidx;
+ __be16 pidx;
+};
+
+/*
+ * Structure for last 128 bits of response descriptors
+ */
+struct rsp_ctrl {
+ __be32 hdrbuflen_pidx;
+ __be32 pldbuflen_qid;
+ union {
+ u8 type_gen;
+ __be64 last_flit;
+ } u;
+};
+
+#define S_RSPD_NEWBUF 31
+#define V_RSPD_NEWBUF(x) ((x) << S_RSPD_NEWBUF)
+#define F_RSPD_NEWBUF V_RSPD_NEWBUF(1U)
+
+#define S_RSPD_LEN 0
+#define M_RSPD_LEN 0x7fffffff
+#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
+#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
+
+#define S_RSPD_GEN 7
+#define V_RSPD_GEN(x) ((x) << S_RSPD_GEN)
+#define F_RSPD_GEN V_RSPD_GEN(1U)
+
+#define S_RSPD_TYPE 4
+#define M_RSPD_TYPE 0x3
+#define V_RSPD_TYPE(x) ((x) << S_RSPD_TYPE)
+#define G_RSPD_TYPE(x) (((x) >> S_RSPD_TYPE) & M_RSPD_TYPE)
+
+/* Rx queue interrupt deferral field: timer index */
+#define S_QINTR_CNT_EN 0
+#define V_QINTR_CNT_EN(x) ((x) << S_QINTR_CNT_EN)
+#define F_QINTR_CNT_EN V_QINTR_CNT_EN(1U)
+
+#define S_QINTR_TIMER_IDX 1
+#define M_QINTR_TIMER_IDX 0x7
+#define V_QINTR_TIMER_IDX(x) ((x) << S_QINTR_TIMER_IDX)
+#define G_QINTR_TIMER_IDX(x) (((x) >> S_QINTR_TIMER_IDX) & M_QINTR_TIMER_IDX)
+
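+/*
+ * Note on the S_/M_/V_/G_ convention used throughout these headers:
+ * S_ is the field's shift, M_ its unshifted mask, V_ places a value
+ * into the field and G_ extracts it; single-bit fields also get an
+ * F_ flag form. Illustration (the value 2 is arbitrary):
+ *
+ *	u32 w = V_RSPD_TYPE(2) | F_RSPD_GEN;
+ *
+ * yields G_RSPD_TYPE(w) == 2 and (w & F_RSPD_GEN) != 0.
+ */
+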
+/*
+ * Flash layout.
+ */
+#define FLASH_START(start) ((start) * SF_SEC_SIZE)
+#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
+
+enum {
+ /*
+ * Location of firmware image in FLASH.
+ */
+ FLASH_FW_START_SEC = 8,
+ FLASH_FW_NSECS = 16,
+ FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
+ FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+
+ /*
+ * Location of Firmware Configuration File in FLASH.
+ */
+ FLASH_CFG_START_SEC = 31,
+ FLASH_CFG_NSECS = 1,
+ FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
+ FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
+
+ /*
+ * We don't support FLASH devices which can't hold the full standard
+ * set of sections which we need for normal operation.
+ */
+ FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE,
+};
+
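+/*
+ * Plugging in SF_SEC_SIZE = 64KB: the firmware image occupies sectors
+ * 8..23 (offsets 512KB..1.5MB) and the Firmware Configuration File
+ * sector 31, so FLASH_MIN_SIZE works out to (31 + 1) * 64KB = 2MB,
+ * the minimum checked by t4_get_flash_params().
+ */
+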
+#undef FLASH_START
+#undef FLASH_MAX_SIZE
+
+#endif /* __T4_HW_H */
diff --git a/src/dpdk22/drivers/net/cxgbe/base/t4_msg.h b/src/dpdk22/drivers/net/cxgbe/base/t4_msg.h
new file mode 100644
index 00000000..4b04cd0d
--- /dev/null
+++ b/src/dpdk22/drivers/net/cxgbe/base/t4_msg.h
@@ -0,0 +1,345 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef T4_MSG_H
+#define T4_MSG_H
+
+enum {
+ CPL_SGE_EGR_UPDATE = 0xA5,
+ CPL_FW4_MSG = 0xC0,
+ CPL_FW6_MSG = 0xE0,
+ CPL_TX_PKT_LSO = 0xED,
+ CPL_TX_PKT_XT = 0xEE,
+};
+
+enum { /* TX_PKT_XT checksum types */
+ TX_CSUM_TCPIP = 8,
+ TX_CSUM_UDPIP = 9,
+ TX_CSUM_TCPIP6 = 10,
+};
+
+union opcode_tid {
+ __be32 opcode_tid;
+ __u8 opcode;
+};
+
+struct rss_header {
+ __u8 opcode;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ __u8 channel:2;
+ __u8 filter_hit:1;
+ __u8 filter_tid:1;
+ __u8 hash_type:2;
+ __u8 ipv6:1;
+ __u8 send2fw:1;
+#else
+ __u8 send2fw:1;
+ __u8 ipv6:1;
+ __u8 hash_type:2;
+ __u8 filter_tid:1;
+ __u8 filter_hit:1;
+ __u8 channel:2;
+#endif
+ __be16 qid;
+ __be32 hash_val;
+};
+
+#if defined(RSS_HDR_VLD) || defined(CHELSIO_FW)
+#define RSS_HDR struct rss_header rss_hdr
+#else
+#define RSS_HDR
+#endif
+
+#ifndef CHELSIO_FW
+struct work_request_hdr {
+ __be32 wr_hi;
+ __be32 wr_mid;
+ __be64 wr_lo;
+};
+
+#define WR_HDR struct work_request_hdr wr
+#define WR_HDR_SIZE sizeof(struct work_request_hdr)
+#else
+#define WR_HDR
+#define WR_HDR_SIZE 0
+#endif
+
+struct cpl_tx_data {
+ union opcode_tid ot;
+ __be32 len;
+ __be32 rsvd;
+ __be32 flags;
+};
+
+struct cpl_tx_pkt_core {
+ __be32 ctrl0;
+ __be16 pack;
+ __be16 len;
+ __be64 ctrl1;
+};
+
+struct cpl_tx_pkt {
+ WR_HDR;
+ struct cpl_tx_pkt_core c;
+};
+
+/* cpl_tx_pkt_core.ctrl0 fields */
+#define S_TXPKT_PF 8
+#define M_TXPKT_PF 0x7
+#define V_TXPKT_PF(x) ((x) << S_TXPKT_PF)
+#define G_TXPKT_PF(x) (((x) >> S_TXPKT_PF) & M_TXPKT_PF)
+
+#define S_TXPKT_INTF 16
+#define M_TXPKT_INTF 0xF
+#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
+#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
+
+#define S_TXPKT_OPCODE 24
+#define M_TXPKT_OPCODE 0xFF
+#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
+#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
+
+/* cpl_tx_pkt_core.ctrl1 fields */
+#define S_TXPKT_IPHDR_LEN 20
+#define M_TXPKT_IPHDR_LEN 0x3FFF
+#define V_TXPKT_IPHDR_LEN(x) ((__u64)(x) << S_TXPKT_IPHDR_LEN)
+#define G_TXPKT_IPHDR_LEN(x) (((x) >> S_TXPKT_IPHDR_LEN) & M_TXPKT_IPHDR_LEN)
+
+#define S_TXPKT_ETHHDR_LEN 34
+#define M_TXPKT_ETHHDR_LEN 0x3F
+#define V_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_TXPKT_ETHHDR_LEN)
+#define G_TXPKT_ETHHDR_LEN(x) (((x) >> S_TXPKT_ETHHDR_LEN) & M_TXPKT_ETHHDR_LEN)
+
+#define S_T6_TXPKT_ETHHDR_LEN 32
+#define M_T6_TXPKT_ETHHDR_LEN 0xFF
+#define V_T6_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_T6_TXPKT_ETHHDR_LEN)
+#define G_T6_TXPKT_ETHHDR_LEN(x) \
+ (((x) >> S_T6_TXPKT_ETHHDR_LEN) & M_T6_TXPKT_ETHHDR_LEN)
+
+#define S_TXPKT_CSUM_TYPE 40
+#define M_TXPKT_CSUM_TYPE 0xF
+#define V_TXPKT_CSUM_TYPE(x) ((__u64)(x) << S_TXPKT_CSUM_TYPE)
+#define G_TXPKT_CSUM_TYPE(x) (((x) >> S_TXPKT_CSUM_TYPE) & M_TXPKT_CSUM_TYPE)
+
+#define S_TXPKT_VLAN 44
+#define M_TXPKT_VLAN 0xFFFF
+#define V_TXPKT_VLAN(x) ((__u64)(x) << S_TXPKT_VLAN)
+#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
+
+#define S_TXPKT_VLAN_VLD 60
+#define V_TXPKT_VLAN_VLD(x) ((__u64)(x) << S_TXPKT_VLAN_VLD)
+#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1ULL)
+
+#define S_TXPKT_IPCSUM_DIS 62
+#define V_TXPKT_IPCSUM_DIS(x) ((__u64)(x) << S_TXPKT_IPCSUM_DIS)
+#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1ULL)
+
+#define S_TXPKT_L4CSUM_DIS 63
+#define V_TXPKT_L4CSUM_DIS(x) ((__u64)(x) << S_TXPKT_L4CSUM_DIS)
+#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1ULL)
+
+struct cpl_tx_pkt_lso_core {
+ __be32 lso_ctrl;
+ __be16 ipid_ofst;
+ __be16 mss;
+ __be32 seqno_offset;
+ __be32 len;
+ /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
+struct cpl_tx_pkt_lso {
+ WR_HDR;
+ struct cpl_tx_pkt_lso_core c;
+ /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
+/* cpl_tx_pkt_lso_core.lso_ctrl fields */
+#define S_LSO_TCPHDR_LEN 0
+#define M_LSO_TCPHDR_LEN 0xF
+#define V_LSO_TCPHDR_LEN(x) ((x) << S_LSO_TCPHDR_LEN)
+#define G_LSO_TCPHDR_LEN(x) (((x) >> S_LSO_TCPHDR_LEN) & M_LSO_TCPHDR_LEN)
+
+#define S_LSO_IPHDR_LEN 4
+#define M_LSO_IPHDR_LEN 0xFFF
+#define V_LSO_IPHDR_LEN(x) ((x) << S_LSO_IPHDR_LEN)
+#define G_LSO_IPHDR_LEN(x) (((x) >> S_LSO_IPHDR_LEN) & M_LSO_IPHDR_LEN)
+
+#define S_LSO_ETHHDR_LEN 16
+#define M_LSO_ETHHDR_LEN 0xF
+#define V_LSO_ETHHDR_LEN(x) ((x) << S_LSO_ETHHDR_LEN)
+#define G_LSO_ETHHDR_LEN(x) (((x) >> S_LSO_ETHHDR_LEN) & M_LSO_ETHHDR_LEN)
+
+#define S_LSO_IPV6 20
+#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
+#define F_LSO_IPV6 V_LSO_IPV6(1U)
+
+#define S_LSO_LAST_SLICE 22
+#define V_LSO_LAST_SLICE(x) ((x) << S_LSO_LAST_SLICE)
+#define F_LSO_LAST_SLICE V_LSO_LAST_SLICE(1U)
+
+#define S_LSO_FIRST_SLICE 23
+#define V_LSO_FIRST_SLICE(x) ((x) << S_LSO_FIRST_SLICE)
+#define F_LSO_FIRST_SLICE V_LSO_FIRST_SLICE(1U)
+
+#define S_LSO_OPCODE 24
+#define M_LSO_OPCODE 0xFF
+#define V_LSO_OPCODE(x) ((x) << S_LSO_OPCODE)
+#define G_LSO_OPCODE(x) (((x) >> S_LSO_OPCODE) & M_LSO_OPCODE)
+
+#define S_LSO_T5_XFER_SIZE 0
+#define M_LSO_T5_XFER_SIZE 0xFFFFFFF
+#define V_LSO_T5_XFER_SIZE(x) ((x) << S_LSO_T5_XFER_SIZE)
+#define G_LSO_T5_XFER_SIZE(x) (((x) >> S_LSO_T5_XFER_SIZE) & M_LSO_T5_XFER_SIZE)
+
+struct cpl_rx_pkt {
+ RSS_HDR;
+ __u8 opcode;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ __u8 iff:4;
+ __u8 csum_calc:1;
+ __u8 ipmi_pkt:1;
+ __u8 vlan_ex:1;
+ __u8 ip_frag:1;
+#else
+ __u8 ip_frag:1;
+ __u8 vlan_ex:1;
+ __u8 ipmi_pkt:1;
+ __u8 csum_calc:1;
+ __u8 iff:4;
+#endif
+ __be16 csum;
+ __be16 vlan;
+ __be16 len;
+ __be32 l2info;
+ __be16 hdr_len;
+ __be16 err_vec;
+};
+
+/* rx_pkt.l2info fields */
+#define S_RXF_UDP 22
+#define V_RXF_UDP(x) ((x) << S_RXF_UDP)
+#define F_RXF_UDP V_RXF_UDP(1U)
+
+#define S_RXF_TCP 23
+#define V_RXF_TCP(x) ((x) << S_RXF_TCP)
+#define F_RXF_TCP V_RXF_TCP(1U)
+
+#define S_RXF_IP 24
+#define V_RXF_IP(x) ((x) << S_RXF_IP)
+#define F_RXF_IP V_RXF_IP(1U)
+
+#define S_RXF_IP6 25
+#define V_RXF_IP6(x) ((x) << S_RXF_IP6)
+#define F_RXF_IP6 V_RXF_IP6(1U)
+
+/* cpl_fw*.type values */
+enum {
+ FW_TYPE_RSSCPL = 4,
+};
+
+struct cpl_fw4_msg {
+ RSS_HDR;
+ u8 opcode;
+ u8 type;
+ __be16 rsvd0;
+ __be32 rsvd1;
+ __be64 data[2];
+};
+
+struct cpl_fw6_msg {
+ RSS_HDR;
+ u8 opcode;
+ u8 type;
+ __be16 rsvd0;
+ __be32 rsvd1;
+ __be64 data[4];
+};
+
+enum {
+ ULP_TX_SC_IMM = 0x81,
+ ULP_TX_SC_DSGL = 0x82,
+ ULP_TX_SC_ISGL = 0x83
+};
+
+#define S_ULPTX_CMD 24
+#define M_ULPTX_CMD 0xFF
+#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD)
+
+#define S_ULP_TX_SC_MORE 23
+#define V_ULP_TX_SC_MORE(x) ((x) << S_ULP_TX_SC_MORE)
+#define F_ULP_TX_SC_MORE V_ULP_TX_SC_MORE(1U)
+
+struct ulptx_sge_pair {
+ __be32 len[2];
+ __be64 addr[2];
+};
+
+struct ulptx_sgl {
+ __be32 cmd_nsge;
+ __be32 len0;
+ __be64 addr0;
+
+#if !(defined C99_NOT_SUPPORTED)
+ struct ulptx_sge_pair sge[0];
+#endif
+
+};
+
+struct ulptx_idata {
+ __be32 cmd_more;
+ __be32 len;
+};
+
+#define S_ULPTX_NSGE 0
+#define M_ULPTX_NSGE 0xFFFF
+#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
+
+struct ulp_txpkt {
+ __be32 cmd_dest;
+ __be32 len;
+};
+
+/* ulp_txpkt.cmd_dest fields */
+#define S_ULP_TXPKT_DEST 16
+#define M_ULP_TXPKT_DEST 0x3
+#define V_ULP_TXPKT_DEST(x) ((x) << S_ULP_TXPKT_DEST)
+
+#define S_ULP_TXPKT_FID 4
+#define M_ULP_TXPKT_FID 0x7ff
+#define V_ULP_TXPKT_FID(x) ((x) << S_ULP_TXPKT_FID)
+
+#define S_ULP_TXPKT_RO 3
+#define V_ULP_TXPKT_RO(x) ((x) << S_ULP_TXPKT_RO)
+#define F_ULP_TXPKT_RO V_ULP_TXPKT_RO(1U)
+
+#endif /* T4_MSG_H */
diff --git a/src/dpdk22/drivers/net/cxgbe/base/t4_pci_id_tbl.h b/src/dpdk22/drivers/net/cxgbe/base/t4_pci_id_tbl.h
new file mode 100644
index 00000000..110fadb0
--- /dev/null
+++ b/src/dpdk22/drivers/net/cxgbe/base/t4_pci_id_tbl.h
@@ -0,0 +1,151 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __T4_PCI_ID_TBL_H__
+#define __T4_PCI_ID_TBL_H__
+
+/*
+ * The OS-dependent code can define cpp macros for creating a PCI Device ID
+ * Table. This is useful because it allows the PCI ID Table to be maintained
+ * in a single place and all supporting OSes to get new PCI Device IDs
+ * automatically.
+ *
+ * The macros are:
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ * -- Used to start the definition of the PCI ID Table.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION
+ * -- The PCI Function Number to use in the PCI Device ID Table. "0"
+ * -- for drivers attaching to PF0-3, "4" for drivers attaching to PF4,
+ * -- "8" for drivers attaching to SR-IOV Virtual Functions, etc.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION2 [optional]
+ * -- If defined, create a PCI Device ID Table with both
+ * -- CH_PCI_DEVICE_ID_FUNCTION and CH_PCI_DEVICE_ID_FUNCTION2 populated.
+ *
+ * CH_PCI_ID_TABLE_ENTRY(DeviceID)
+ * -- Used for the individual PCI Device ID entries. Note that we will
+ * -- be adding a trailing comma (",") after all of the entries (and
+ * -- between the pairs of entries if CH_PCI_DEVICE_ID_FUNCTION2 is defined).
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+ * -- Used to finish the definition of the PCI ID Table. Note that we
+ * -- will be adding a trailing semi-colon (";") here.
+ *
+ * CH_PCI_DEVICE_ID_BYPASS_SUPPORTED [optional]
+ * -- If defined, indicates that the OS Driver has support for Bypass
+ * -- Adapters.
+ */
+#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+
+/*
+ * Some sanity checks ...
+ */
+#ifndef CH_PCI_DEVICE_ID_FUNCTION
+#error CH_PCI_DEVICE_ID_FUNCTION not defined!
+#endif
+#ifndef CH_PCI_ID_TABLE_ENTRY
+#error CH_PCI_ID_TABLE_ENTRY not defined!
+#endif
+#ifndef CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+#error CH_PCI_DEVICE_ID_TABLE_DEFINE_END not defined!
+#endif
+
+/*
+ * T4 and later ASICs use a PCI Device ID scheme of 0xVFPP where:
+ *
+ * V = "4" for T4; "5" for T5, etc.
+ * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
+ * PP = adapter product designation
+ *
+ * We use this consistency in order to create the proper PCI Device IDs
+ * for the specified CH_PCI_DEVICE_ID_FUNCTION.
+ */
+#ifndef CH_PCI_DEVICE_ID_FUNCTION2
+#define CH_PCI_ID_TABLE_FENTRY(devid) \
+ CH_PCI_ID_TABLE_ENTRY((devid) | \
+ ((CH_PCI_DEVICE_ID_FUNCTION) << 8))
+#else
+#define CH_PCI_ID_TABLE_FENTRY(devid) \
+ CH_PCI_ID_TABLE_ENTRY((devid) | \
+ ((CH_PCI_DEVICE_ID_FUNCTION) << 8)), \
+ CH_PCI_ID_TABLE_ENTRY((devid) | \
+ ((CH_PCI_DEVICE_ID_FUNCTION2) << 8))
+#endif
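To make the 0xVFPP encoding concrete, here is how one entry expands in the single-function case (function number 4 is an illustrative assumption):

    /* With CH_PCI_DEVICE_ID_FUNCTION defined as 4: */
    CH_PCI_ID_TABLE_FENTRY(0x5001)
    /* => CH_PCI_ID_TABLE_ENTRY((0x5001) | (4 << 8)), i.e. device ID 0x5401 --
     * the T520-cr adapter as seen by a PF4 driver.
     */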
+
+CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ /*
+ * T5 adapters:
+ */
+ CH_PCI_ID_TABLE_FENTRY(0x5000), /* T580-dbg */
+ CH_PCI_ID_TABLE_FENTRY(0x5001), /* T520-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5002), /* T522-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5003), /* T540-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5004), /* T520-bch */
+ CH_PCI_ID_TABLE_FENTRY(0x5005), /* T540-bch */
+ CH_PCI_ID_TABLE_FENTRY(0x5006), /* T540-ch */
+ CH_PCI_ID_TABLE_FENTRY(0x5007), /* T520-so */
+ CH_PCI_ID_TABLE_FENTRY(0x5008), /* T520-cx */
+ CH_PCI_ID_TABLE_FENTRY(0x5009), /* T520-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x500a), /* T504-bt */
+#ifdef CH_PCI_DEVICE_ID_BYPASS_SUPPORTED
+ CH_PCI_ID_TABLE_FENTRY(0x500b), /* B520-sr */
+ CH_PCI_ID_TABLE_FENTRY(0x500c), /* B504-bt */
+#endif
+ CH_PCI_ID_TABLE_FENTRY(0x500d), /* T580-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x500e), /* T540-LP-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5010), /* T580-LP-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5011), /* T520-LL-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5012), /* T560-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */
+ CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */
+ CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5083), /* Custom T540-LP-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5084), /* Custom T580-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5085), /* Custom 3x T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
+CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
+
+#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
+
+#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/src/dpdk22/drivers/net/cxgbe/base/t4_regs.h b/src/dpdk22/drivers/net/cxgbe/base/t4_regs.h
new file mode 100644
index 00000000..9057e409
--- /dev/null
+++ b/src/dpdk22/drivers/net/cxgbe/base/t4_regs.h
@@ -0,0 +1,795 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define MYPF_BASE 0x1b000
+#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
+
+#define PF0_BASE 0x1e000
+#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr))
+
+#define PF_STRIDE 0x400
+#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
+#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
+
+#define MYPORT_BASE 0x1c000
+#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
+
+#define PORT0_BASE 0x20000
+#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr))
+
+#define PORT_STRIDE 0x2000
+#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE)
+#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg))
+
+#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_PCIE_MEM_ACCESS_INSTANCES 8
+
+#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_PCIE_FW_INSTANCES 8
+
+#define T5_MYPORT_BASE 0x2c000
+#define T5_MYPORT_REG(reg_addr) (T5_MYPORT_BASE + (reg_addr))
+
+#define T5_PORT0_BASE 0x30000
+#define T5_PORT0_REG(reg_addr) (T5_PORT0_BASE + (reg_addr))
+
+#define T5_PORT_STRIDE 0x4000
+#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
+#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg))
+
+#define MPS_T5_CLS_SRAM_L(idx) (A_MPS_T5_CLS_SRAM_L + (idx) * 8)
+#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
+
+#define MPS_T5_CLS_SRAM_H(idx) (A_MPS_T5_CLS_SRAM_H + (idx) * 8)
+#define NUM_MPS_T5_CLS_SRAM_H_INSTANCES 512
+
+/* registers for module SGE */
+#define SGE_BASE_ADDR 0x1000
+
+#define A_SGE_PF_KDOORBELL 0x0
+
+#define S_QID 15
+#define M_QID 0x1ffffU
+#define V_QID(x) ((x) << S_QID)
+#define G_QID(x) (((x) >> S_QID) & M_QID)
+
+#define S_DBPRIO 14
+#define V_DBPRIO(x) ((x) << S_DBPRIO)
+#define F_DBPRIO V_DBPRIO(1U)
+
+#define S_PIDX 0
+#define M_PIDX 0x3fffU
+#define V_PIDX(x) ((x) << S_PIDX)
+#define G_PIDX(x) (((x) >> S_PIDX) & M_PIDX)
+
+#define S_DBTYPE 13
+#define V_DBTYPE(x) ((x) << S_DBTYPE)
+#define F_DBTYPE V_DBTYPE(1U)
+
+#define S_PIDX_T5 0
+#define M_PIDX_T5 0x1fffU
+#define V_PIDX_T5(x) ((x) << S_PIDX_T5)
+#define G_PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5)
+
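Each register field in this file follows the same S_/M_/V_/G_ quartet: shift, mask, setter and getter. A minimal sketch of composing and decomposing a doorbell write with the fields just defined (qid and n are placeholder values):

    /* Ring queue `qid` with `n` newly posted descriptors. */
    unsigned int val = V_QID(qid) | V_PIDX(n);

    /* The G_ getters invert the V_ setters:
     * G_QID(val) == qid, G_PIDX(val) == n
     */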
+#define A_SGE_PF_GTS 0x4
+
+#define S_INGRESSQID 16
+#define M_INGRESSQID 0xffffU
+#define V_INGRESSQID(x) ((x) << S_INGRESSQID)
+#define G_INGRESSQID(x) (((x) >> S_INGRESSQID) & M_INGRESSQID)
+
+#define S_SEINTARM 12
+#define V_SEINTARM(x) ((x) << S_SEINTARM)
+#define F_SEINTARM V_SEINTARM(1U)
+
+#define S_CIDXINC 0
+#define M_CIDXINC 0xfffU
+#define V_CIDXINC(x) ((x) << S_CIDXINC)
+#define G_CIDXINC(x) (((x) >> S_CIDXINC) & M_CIDXINC)
+
+#define A_SGE_CONTROL 0x1008
+
+#define S_RXPKTCPLMODE 18
+#define V_RXPKTCPLMODE(x) ((x) << S_RXPKTCPLMODE)
+#define F_RXPKTCPLMODE V_RXPKTCPLMODE(1U)
+
+#define S_EGRSTATUSPAGESIZE 17
+#define V_EGRSTATUSPAGESIZE(x) ((x) << S_EGRSTATUSPAGESIZE)
+#define F_EGRSTATUSPAGESIZE V_EGRSTATUSPAGESIZE(1U)
+
+#define S_PKTSHIFT 10
+#define M_PKTSHIFT 0x7U
+#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
+#define G_PKTSHIFT(x) (((x) >> S_PKTSHIFT) & M_PKTSHIFT)
+
+#define S_INGPADBOUNDARY 4
+#define M_INGPADBOUNDARY 0x7U
+#define V_INGPADBOUNDARY(x) ((x) << S_INGPADBOUNDARY)
+#define G_INGPADBOUNDARY(x) (((x) >> S_INGPADBOUNDARY) & M_INGPADBOUNDARY)
+
+#define A_SGE_HOST_PAGE_SIZE 0x100c
+
+#define S_HOSTPAGESIZEPF7 28
+#define M_HOSTPAGESIZEPF7 0xfU
+#define V_HOSTPAGESIZEPF7(x) ((x) << S_HOSTPAGESIZEPF7)
+#define G_HOSTPAGESIZEPF7(x) (((x) >> S_HOSTPAGESIZEPF7) & M_HOSTPAGESIZEPF7)
+
+#define S_HOSTPAGESIZEPF6 24
+#define M_HOSTPAGESIZEPF6 0xfU
+#define V_HOSTPAGESIZEPF6(x) ((x) << S_HOSTPAGESIZEPF6)
+#define G_HOSTPAGESIZEPF6(x) (((x) >> S_HOSTPAGESIZEPF6) & M_HOSTPAGESIZEPF6)
+
+#define S_HOSTPAGESIZEPF5 20
+#define M_HOSTPAGESIZEPF5 0xfU
+#define V_HOSTPAGESIZEPF5(x) ((x) << S_HOSTPAGESIZEPF5)
+#define G_HOSTPAGESIZEPF5(x) (((x) >> S_HOSTPAGESIZEPF5) & M_HOSTPAGESIZEPF5)
+
+#define S_HOSTPAGESIZEPF4 16
+#define M_HOSTPAGESIZEPF4 0xfU
+#define V_HOSTPAGESIZEPF4(x) ((x) << S_HOSTPAGESIZEPF4)
+#define G_HOSTPAGESIZEPF4(x) (((x) >> S_HOSTPAGESIZEPF4) & M_HOSTPAGESIZEPF4)
+
+#define S_HOSTPAGESIZEPF3 12
+#define M_HOSTPAGESIZEPF3 0xfU
+#define V_HOSTPAGESIZEPF3(x) ((x) << S_HOSTPAGESIZEPF3)
+#define G_HOSTPAGESIZEPF3(x) (((x) >> S_HOSTPAGESIZEPF3) & M_HOSTPAGESIZEPF3)
+
+#define S_HOSTPAGESIZEPF2 8
+#define M_HOSTPAGESIZEPF2 0xfU
+#define V_HOSTPAGESIZEPF2(x) ((x) << S_HOSTPAGESIZEPF2)
+#define G_HOSTPAGESIZEPF2(x) (((x) >> S_HOSTPAGESIZEPF2) & M_HOSTPAGESIZEPF2)
+
+#define S_HOSTPAGESIZEPF1 4
+#define M_HOSTPAGESIZEPF1 0xfU
+#define V_HOSTPAGESIZEPF1(x) ((x) << S_HOSTPAGESIZEPF1)
+#define G_HOSTPAGESIZEPF1(x) (((x) >> S_HOSTPAGESIZEPF1) & M_HOSTPAGESIZEPF1)
+
+#define S_HOSTPAGESIZEPF0 0
+#define M_HOSTPAGESIZEPF0 0xfU
+#define V_HOSTPAGESIZEPF0(x) ((x) << S_HOSTPAGESIZEPF0)
+#define G_HOSTPAGESIZEPF0(x) (((x) >> S_HOSTPAGESIZEPF0) & M_HOSTPAGESIZEPF0)
+
+#define A_SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
+
+#define S_QUEUESPERPAGEPF1 4
+#define M_QUEUESPERPAGEPF1 0xfU
+#define V_QUEUESPERPAGEPF1(x) ((x) << S_QUEUESPERPAGEPF1)
+#define G_QUEUESPERPAGEPF1(x) (((x) >> S_QUEUESPERPAGEPF1) & M_QUEUESPERPAGEPF1)
+
+#define S_QUEUESPERPAGEPF0 0
+#define M_QUEUESPERPAGEPF0 0xfU
+#define V_QUEUESPERPAGEPF0(x) ((x) << S_QUEUESPERPAGEPF0)
+#define G_QUEUESPERPAGEPF0(x) (((x) >> S_QUEUESPERPAGEPF0) & M_QUEUESPERPAGEPF0)
+
+#define S_ERR_CPL_EXCEED_IQE_SIZE 22
+#define V_ERR_CPL_EXCEED_IQE_SIZE(x) ((x) << S_ERR_CPL_EXCEED_IQE_SIZE)
+#define F_ERR_CPL_EXCEED_IQE_SIZE V_ERR_CPL_EXCEED_IQE_SIZE(1U)
+
+#define S_ERR_INVALID_CIDX_INC 21
+#define V_ERR_INVALID_CIDX_INC(x) ((x) << S_ERR_INVALID_CIDX_INC)
+#define F_ERR_INVALID_CIDX_INC V_ERR_INVALID_CIDX_INC(1U)
+
+#define S_ERR_CPL_OPCODE_0 19
+#define V_ERR_CPL_OPCODE_0(x) ((x) << S_ERR_CPL_OPCODE_0)
+#define F_ERR_CPL_OPCODE_0 V_ERR_CPL_OPCODE_0(1U)
+
+#define S_ERR_DROPPED_DB 18
+#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB)
+#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U)
+
+#define S_ERR_DATA_CPL_ON_HIGH_QID1 17
+#define V_ERR_DATA_CPL_ON_HIGH_QID1(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID1)
+#define F_ERR_DATA_CPL_ON_HIGH_QID1 V_ERR_DATA_CPL_ON_HIGH_QID1(1U)
+
+#define S_ERR_DATA_CPL_ON_HIGH_QID0 16
+#define V_ERR_DATA_CPL_ON_HIGH_QID0(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID0)
+#define F_ERR_DATA_CPL_ON_HIGH_QID0 V_ERR_DATA_CPL_ON_HIGH_QID0(1U)
+
+#define S_ERR_BAD_DB_PIDX3 15
+#define V_ERR_BAD_DB_PIDX3(x) ((x) << S_ERR_BAD_DB_PIDX3)
+#define F_ERR_BAD_DB_PIDX3 V_ERR_BAD_DB_PIDX3(1U)
+
+#define S_ERR_BAD_DB_PIDX2 14
+#define V_ERR_BAD_DB_PIDX2(x) ((x) << S_ERR_BAD_DB_PIDX2)
+#define F_ERR_BAD_DB_PIDX2 V_ERR_BAD_DB_PIDX2(1U)
+
+#define S_ERR_BAD_DB_PIDX1 13
+#define V_ERR_BAD_DB_PIDX1(x) ((x) << S_ERR_BAD_DB_PIDX1)
+#define F_ERR_BAD_DB_PIDX1 V_ERR_BAD_DB_PIDX1(1U)
+
+#define S_ERR_BAD_DB_PIDX0 12
+#define V_ERR_BAD_DB_PIDX0(x) ((x) << S_ERR_BAD_DB_PIDX0)
+#define F_ERR_BAD_DB_PIDX0 V_ERR_BAD_DB_PIDX0(1U)
+
+#define S_ERR_ING_PCIE_CHAN 11
+#define V_ERR_ING_PCIE_CHAN(x) ((x) << S_ERR_ING_PCIE_CHAN)
+#define F_ERR_ING_PCIE_CHAN V_ERR_ING_PCIE_CHAN(1U)
+
+#define S_ERR_ING_CTXT_PRIO 10
+#define V_ERR_ING_CTXT_PRIO(x) ((x) << S_ERR_ING_CTXT_PRIO)
+#define F_ERR_ING_CTXT_PRIO V_ERR_ING_CTXT_PRIO(1U)
+
+#define S_ERR_EGR_CTXT_PRIO 9
+#define V_ERR_EGR_CTXT_PRIO(x) ((x) << S_ERR_EGR_CTXT_PRIO)
+#define F_ERR_EGR_CTXT_PRIO V_ERR_EGR_CTXT_PRIO(1U)
+
+#define S_DBFIFO_HP_INT 8
+#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT)
+#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U)
+
+#define S_DBFIFO_LP_INT 7
+#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT)
+#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U)
+
+#define S_INGRESS_SIZE_ERR 5
+#define V_INGRESS_SIZE_ERR(x) ((x) << S_INGRESS_SIZE_ERR)
+#define F_INGRESS_SIZE_ERR V_INGRESS_SIZE_ERR(1U)
+
+#define S_EGRESS_SIZE_ERR 4
+#define V_EGRESS_SIZE_ERR(x) ((x) << S_EGRESS_SIZE_ERR)
+#define F_EGRESS_SIZE_ERR V_EGRESS_SIZE_ERR(1U)
+
+#define A_SGE_INT_ENABLE3 0x1040
+
+#define A_SGE_FL_BUFFER_SIZE0 0x1044
+#define A_SGE_FL_BUFFER_SIZE1 0x1048
+#define A_SGE_FL_BUFFER_SIZE2 0x104c
+#define A_SGE_FL_BUFFER_SIZE3 0x1050
+
+#define A_SGE_FLM_CFG 0x1090
+
+#define S_CREDITCNT 4
+#define M_CREDITCNT 0x3U
+#define V_CREDITCNT(x) ((x) << S_CREDITCNT)
+#define G_CREDITCNT(x) (((x) >> S_CREDITCNT) & M_CREDITCNT)
+
+#define S_CREDITCNTPACKING 2
+#define M_CREDITCNTPACKING 0x3U
+#define V_CREDITCNTPACKING(x) ((x) << S_CREDITCNTPACKING)
+#define G_CREDITCNTPACKING(x) (((x) >> S_CREDITCNTPACKING) & M_CREDITCNTPACKING)
+
+#define A_SGE_CONM_CTRL 0x1094
+
+#define S_EGRTHRESHOLD 8
+#define M_EGRTHRESHOLD 0x3fU
+#define V_EGRTHRESHOLD(x) ((x) << S_EGRTHRESHOLD)
+#define G_EGRTHRESHOLD(x) (((x) >> S_EGRTHRESHOLD) & M_EGRTHRESHOLD)
+
+#define S_EGRTHRESHOLDPACKING 14
+#define M_EGRTHRESHOLDPACKING 0x3fU
+#define V_EGRTHRESHOLDPACKING(x) ((x) << S_EGRTHRESHOLDPACKING)
+#define G_EGRTHRESHOLDPACKING(x) (((x) >> S_EGRTHRESHOLDPACKING) & \
+ M_EGRTHRESHOLDPACKING)
+
+#define S_INGTHRESHOLD 2
+#define M_INGTHRESHOLD 0x3fU
+#define V_INGTHRESHOLD(x) ((x) << S_INGTHRESHOLD)
+#define G_INGTHRESHOLD(x) (((x) >> S_INGTHRESHOLD) & M_INGTHRESHOLD)
+
+#define A_SGE_INGRESS_RX_THRESHOLD 0x10a0
+
+#define S_THRESHOLD_0 24
+#define M_THRESHOLD_0 0x3fU
+#define V_THRESHOLD_0(x) ((x) << S_THRESHOLD_0)
+#define G_THRESHOLD_0(x) (((x) >> S_THRESHOLD_0) & M_THRESHOLD_0)
+
+#define S_THRESHOLD_1 16
+#define M_THRESHOLD_1 0x3fU
+#define V_THRESHOLD_1(x) ((x) << S_THRESHOLD_1)
+#define G_THRESHOLD_1(x) (((x) >> S_THRESHOLD_1) & M_THRESHOLD_1)
+
+#define S_THRESHOLD_2 8
+#define M_THRESHOLD_2 0x3fU
+#define V_THRESHOLD_2(x) ((x) << S_THRESHOLD_2)
+#define G_THRESHOLD_2(x) (((x) >> S_THRESHOLD_2) & M_THRESHOLD_2)
+
+#define S_THRESHOLD_3 0
+#define M_THRESHOLD_3 0x3fU
+#define V_THRESHOLD_3(x) ((x) << S_THRESHOLD_3)
+#define G_THRESHOLD_3(x) (((x) >> S_THRESHOLD_3) & M_THRESHOLD_3)
+
+#define A_SGE_TIMER_VALUE_0_AND_1 0x10b8
+
+#define S_TIMERVALUE0 16
+#define M_TIMERVALUE0 0xffffU
+#define V_TIMERVALUE0(x) ((x) << S_TIMERVALUE0)
+#define G_TIMERVALUE0(x) (((x) >> S_TIMERVALUE0) & M_TIMERVALUE0)
+
+#define S_TIMERVALUE1 0
+#define M_TIMERVALUE1 0xffffU
+#define V_TIMERVALUE1(x) ((x) << S_TIMERVALUE1)
+#define G_TIMERVALUE1(x) (((x) >> S_TIMERVALUE1) & M_TIMERVALUE1)
+
+#define A_SGE_TIMER_VALUE_2_AND_3 0x10bc
+
+#define S_TIMERVALUE2 16
+#define M_TIMERVALUE2 0xffffU
+#define V_TIMERVALUE2(x) ((x) << S_TIMERVALUE2)
+#define G_TIMERVALUE2(x) (((x) >> S_TIMERVALUE2) & M_TIMERVALUE2)
+
+#define S_TIMERVALUE3 0
+#define M_TIMERVALUE3 0xffffU
+#define V_TIMERVALUE3(x) ((x) << S_TIMERVALUE3)
+#define G_TIMERVALUE3(x) (((x) >> S_TIMERVALUE3) & M_TIMERVALUE3)
+
+#define A_SGE_TIMER_VALUE_4_AND_5 0x10c0
+
+#define S_TIMERVALUE4 16
+#define M_TIMERVALUE4 0xffffU
+#define V_TIMERVALUE4(x) ((x) << S_TIMERVALUE4)
+#define G_TIMERVALUE4(x) (((x) >> S_TIMERVALUE4) & M_TIMERVALUE4)
+
+#define S_TIMERVALUE5 0
+#define M_TIMERVALUE5 0xffffU
+#define V_TIMERVALUE5(x) ((x) << S_TIMERVALUE5)
+#define G_TIMERVALUE5(x) (((x) >> S_TIMERVALUE5) & M_TIMERVALUE5)
+
+#define A_SGE_DEBUG_INDEX 0x10cc
+#define A_SGE_DEBUG_DATA_HIGH 0x10d0
+#define A_SGE_DEBUG_DATA_LOW 0x10d4
+#define A_SGE_STAT_CFG 0x10ec
+
+#define S_STATMODE 2
+#define M_STATMODE 0x3U
+#define V_STATMODE(x) ((x) << S_STATMODE)
+#define G_STATMODE(x) (((x) >> S_STATMODE) & M_STATMODE)
+
+#define S_STATSOURCE_T5 9
+#define M_STATSOURCE_T5 0xfU
+#define V_STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5)
+#define G_STATSOURCE_T5(x) (((x) >> S_STATSOURCE_T5) & M_STATSOURCE_T5)
+
+#define A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
+
+#define A_SGE_CONTROL2 0x1124
+
+#define S_IDMAARBROUNDROBIN 19
+#define V_IDMAARBROUNDROBIN(x) ((x) << S_IDMAARBROUNDROBIN)
+#define F_IDMAARBROUNDROBIN V_IDMAARBROUNDROBIN(1U)
+
+#define S_INGPACKBOUNDARY 16
+#define M_INGPACKBOUNDARY 0x7U
+#define V_INGPACKBOUNDARY(x) ((x) << S_INGPACKBOUNDARY)
+#define G_INGPACKBOUNDARY(x) (((x) >> S_INGPACKBOUNDARY) & M_INGPACKBOUNDARY)
+
+#define S_BUSY 31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY V_BUSY(1U)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8
+#define A_SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8
+#define A_SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc
+
+/* registers for module PCIE */
+#define PCIE_BASE_ADDR 0x3000
+
+#define A_PCIE_MEM_ACCESS_BASE_WIN 0x3068
+
+#define S_PCIEOFST 10
+#define M_PCIEOFST 0x3fffffU
+#define V_PCIEOFST(x) ((x) << S_PCIEOFST)
+#define G_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST)
+
+#define S_BIR 8
+#define M_BIR 0x3U
+#define V_BIR(x) ((x) << S_BIR)
+#define G_BIR(x) (((x) >> S_BIR) & M_BIR)
+
+#define S_WINDOW 0
+#define M_WINDOW 0xffU
+#define V_WINDOW(x) ((x) << S_WINDOW)
+#define G_WINDOW(x) (((x) >> S_WINDOW) & M_WINDOW)
+
+#define A_PCIE_MEM_ACCESS_OFFSET 0x306c
+
+#define S_PFNUM 0
+#define M_PFNUM 0x7U
+#define V_PFNUM(x) ((x) << S_PFNUM)
+#define G_PFNUM(x) (((x) >> S_PFNUM) & M_PFNUM)
+
+#define A_PCIE_FW 0x30b8
+#define A_PCIE_FW_PF 0x30bc
+
+/* registers for module CIM */
+#define CIM_BASE_ADDR 0x7b00
+
+#define A_CIM_PF_MAILBOX_DATA 0x240
+#define A_CIM_PF_MAILBOX_CTRL 0x280
+
+#define S_MBMSGVALID 3
+#define V_MBMSGVALID(x) ((x) << S_MBMSGVALID)
+#define F_MBMSGVALID V_MBMSGVALID(1U)
+
+#define S_MBOWNER 0
+#define M_MBOWNER 0x3U
+#define V_MBOWNER(x) ((x) << S_MBOWNER)
+#define G_MBOWNER(x) (((x) >> S_MBOWNER) & M_MBOWNER)
+
+#define A_CIM_PF_MAILBOX_CTRL_SHADOW_COPY 0x290
+#define A_CIM_BOOT_CFG 0x7b00
+
+#define S_UPCRST 0
+#define V_UPCRST(x) ((x) << S_UPCRST)
+#define F_UPCRST V_UPCRST(1U)
+
+/* registers for module TP */
+#define TP_BASE_ADDR 0x7d00
+
+#define A_TP_TIMER_RESOLUTION 0x7d90
+
+#define S_TIMERRESOLUTION 16
+#define M_TIMERRESOLUTION 0xffU
+#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
+#define G_TIMERRESOLUTION(x) (((x) >> S_TIMERRESOLUTION) & M_TIMERRESOLUTION)
+
+#define S_DELAYEDACKRESOLUTION 0
+#define M_DELAYEDACKRESOLUTION 0xffU
+#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
+#define G_DELAYEDACKRESOLUTION(x) (((x) >> S_DELAYEDACKRESOLUTION) & \
+ M_DELAYEDACKRESOLUTION)
+
+#define A_TP_CCTRL_TABLE 0x7ddc
+
+#define A_TP_MTU_TABLE 0x7de4
+
+#define S_MTUINDEX 24
+#define M_MTUINDEX 0xffU
+#define V_MTUINDEX(x) ((x) << S_MTUINDEX)
+#define G_MTUINDEX(x) (((x) >> S_MTUINDEX) & M_MTUINDEX)
+
+#define S_MTUWIDTH 16
+#define M_MTUWIDTH 0xfU
+#define V_MTUWIDTH(x) ((x) << S_MTUWIDTH)
+#define G_MTUWIDTH(x) (((x) >> S_MTUWIDTH) & M_MTUWIDTH)
+
+#define S_MTUVALUE 0
+#define M_MTUVALUE 0x3fffU
+#define V_MTUVALUE(x) ((x) << S_MTUVALUE)
+#define G_MTUVALUE(x) (((x) >> S_MTUVALUE) & M_MTUVALUE)
+
+#define A_TP_PIO_ADDR 0x7e40
+#define A_TP_PIO_DATA 0x7e44
+
+#define A_TP_VLAN_PRI_MAP 0x140
+
+#define S_FRAGMENTATION 9
+#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
+#define F_FRAGMENTATION V_FRAGMENTATION(1U)
+
+#define S_MPSHITTYPE 8
+#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
+#define F_MPSHITTYPE V_MPSHITTYPE(1U)
+
+#define S_MACMATCH 7
+#define V_MACMATCH(x) ((x) << S_MACMATCH)
+#define F_MACMATCH V_MACMATCH(1U)
+
+#define S_ETHERTYPE 6
+#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
+#define F_ETHERTYPE V_ETHERTYPE(1U)
+
+#define S_PROTOCOL 5
+#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
+#define F_PROTOCOL V_PROTOCOL(1U)
+
+#define S_TOS 4
+#define V_TOS(x) ((x) << S_TOS)
+#define F_TOS V_TOS(1U)
+
+#define S_VLAN 3
+#define V_VLAN(x) ((x) << S_VLAN)
+#define F_VLAN V_VLAN(1U)
+
+#define S_VNIC_ID 2
+#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
+#define F_VNIC_ID V_VNIC_ID(1U)
+
+#define S_PORT 1
+#define V_PORT(x) ((x) << S_PORT)
+#define F_PORT V_PORT(1U)
+
+#define S_FCOE 0
+#define V_FCOE(x) ((x) << S_FCOE)
+#define F_FCOE V_FCOE(1U)
+
+#define A_TP_INGRESS_CONFIG 0x141
+
+#define S_VNIC 11
+#define V_VNIC(x) ((x) << S_VNIC)
+#define F_VNIC V_VNIC(1U)
+
+#define S_CSUM_HAS_PSEUDO_HDR 10
+#define V_CSUM_HAS_PSEUDO_HDR(x) ((x) << S_CSUM_HAS_PSEUDO_HDR)
+#define F_CSUM_HAS_PSEUDO_HDR V_CSUM_HAS_PSEUDO_HDR(1U)
+
+/* registers for module MPS */
+#define MPS_BASE_ADDR 0x9000
+
+#define S_REPLICATE 11
+#define V_REPLICATE(x) ((x) << S_REPLICATE)
+#define F_REPLICATE V_REPLICATE(1U)
+
+#define S_PF 8
+#define M_PF 0x7U
+#define V_PF(x) ((x) << S_PF)
+#define G_PF(x) (((x) >> S_PF) & M_PF)
+
+#define S_VF_VALID 7
+#define V_VF_VALID(x) ((x) << S_VF_VALID)
+#define F_VF_VALID V_VF_VALID(1U)
+
+#define S_VF 0
+#define M_VF 0x7fU
+#define V_VF(x) ((x) << S_VF)
+#define G_VF(x) (((x) >> S_VF) & M_VF)
+
+#define A_MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
+#define A_MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
+#define A_MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
+#define A_MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c
+#define A_MPS_PORT_STAT_TX_PORT_BCAST_L 0x410
+#define A_MPS_PORT_STAT_TX_PORT_BCAST_H 0x414
+#define A_MPS_PORT_STAT_TX_PORT_MCAST_L 0x418
+#define A_MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c
+#define A_MPS_PORT_STAT_TX_PORT_UCAST_L 0x420
+#define A_MPS_PORT_STAT_TX_PORT_UCAST_H 0x424
+#define A_MPS_PORT_STAT_TX_PORT_ERROR_L 0x428
+#define A_MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c
+#define A_MPS_PORT_STAT_TX_PORT_64B_L 0x430
+#define A_MPS_PORT_STAT_TX_PORT_64B_H 0x434
+#define A_MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438
+#define A_MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c
+#define A_MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440
+#define A_MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444
+#define A_MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448
+#define A_MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c
+#define A_MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450
+#define A_MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454
+#define A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458
+#define A_MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c
+#define A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460
+#define A_MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464
+#define A_MPS_PORT_STAT_TX_PORT_DROP_L 0x468
+#define A_MPS_PORT_STAT_TX_PORT_DROP_H 0x46c
+#define A_MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470
+#define A_MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474
+#define A_MPS_PORT_STAT_TX_PORT_PPP0_L 0x478
+#define A_MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c
+#define A_MPS_PORT_STAT_TX_PORT_PPP1_L 0x480
+#define A_MPS_PORT_STAT_TX_PORT_PPP1_H 0x484
+#define A_MPS_PORT_STAT_TX_PORT_PPP2_L 0x488
+#define A_MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c
+#define A_MPS_PORT_STAT_TX_PORT_PPP3_L 0x490
+#define A_MPS_PORT_STAT_TX_PORT_PPP3_H 0x494
+#define A_MPS_PORT_STAT_TX_PORT_PPP4_L 0x498
+#define A_MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c
+#define A_MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0
+#define A_MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4
+#define A_MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8
+#define A_MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac
+#define A_MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0
+#define A_MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4
+#define A_MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0
+#define A_MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4
+#define A_MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8
+#define A_MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc
+#define A_MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0
+#define A_MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4
+#define A_MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8
+#define A_MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc
+#define A_MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0
+#define A_MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4
+#define A_MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8
+#define A_MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec
+#define A_MPS_PORT_STAT_LB_PORT_64B_L 0x4f0
+#define A_MPS_PORT_STAT_LB_PORT_64B_H 0x4f4
+#define A_MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8
+#define A_MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc
+#define A_MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500
+#define A_MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504
+#define A_MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508
+#define A_MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c
+#define A_MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510
+#define A_MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514
+#define A_MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518
+#define A_MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c
+#define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
+#define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
+#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
+#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528
+#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES_H 0x52c
+#define A_MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
+#define A_MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
+#define A_MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
+#define A_MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c
+#define A_MPS_PORT_STAT_RX_PORT_BCAST_L 0x550
+#define A_MPS_PORT_STAT_RX_PORT_BCAST_H 0x554
+#define A_MPS_PORT_STAT_RX_PORT_MCAST_L 0x558
+#define A_MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c
+#define A_MPS_PORT_STAT_RX_PORT_UCAST_L 0x560
+#define A_MPS_PORT_STAT_RX_PORT_UCAST_H 0x564
+#define A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568
+#define A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c
+#define A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570
+#define A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574
+#define A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578
+#define A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c
+#define A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580
+#define A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584
+#define A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588
+#define A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c
+#define A_MPS_PORT_STAT_RX_PORT_64B_L 0x590
+#define A_MPS_PORT_STAT_RX_PORT_64B_H 0x594
+#define A_MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598
+#define A_MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c
+#define A_MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0
+#define A_MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4
+#define A_MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8
+#define A_MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac
+#define A_MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0
+#define A_MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4
+#define A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8
+#define A_MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc
+#define A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0
+#define A_MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4
+#define A_MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8
+#define A_MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc
+#define A_MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0
+#define A_MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4
+#define A_MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8
+#define A_MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc
+#define A_MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0
+#define A_MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4
+#define A_MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8
+#define A_MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec
+#define A_MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0
+#define A_MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4
+#define A_MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8
+#define A_MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc
+#define A_MPS_PORT_STAT_RX_PORT_PPP6_L 0x600
+#define A_MPS_PORT_STAT_RX_PORT_PPP6_H 0x604
+#define A_MPS_PORT_STAT_RX_PORT_PPP7_L 0x608
+#define A_MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
+#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
+#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
+#define A_MPS_CMN_CTL 0x9000
+
+#define S_NUMPORTS 0
+#define M_NUMPORTS 0x3U
+#define V_NUMPORTS(x) ((x) << S_NUMPORTS)
+#define G_NUMPORTS(x) (((x) >> S_NUMPORTS) & M_NUMPORTS)
+
+#define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
+#define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
+#define A_MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648
+#define A_MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c
+#define A_MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650
+#define A_MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654
+#define A_MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658
+#define A_MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c
+#define A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660
+#define A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664
+#define A_MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668
+#define A_MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c
+#define A_MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670
+#define A_MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674
+#define A_MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678
+#define A_MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c
+#define A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680
+#define A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684
+#define A_MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688
+#define A_MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c
+#define A_MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690
+#define A_MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694
+#define A_MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698
+#define A_MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c
+#define A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0
+#define A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4
+#define A_MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8
+#define A_MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac
+#define A_MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0
+#define A_MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
+#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
+#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
+
+/* registers for module ULP_RX */
+#define ULP_RX_BASE_ADDR 0x19150
+
+#define S_HPZ0 0
+#define M_HPZ0 0xfU
+#define V_HPZ0(x) ((x) << S_HPZ0)
+#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
+
+#define A_ULP_RX_TDDP_PSZ 0x19178
+
+/* registers for module SF */
+#define SF_BASE_ADDR 0x193f8
+
+#define A_SF_DATA 0x193f8
+#define A_SF_OP 0x193fc
+
+#define S_SF_LOCK 4
+#define V_SF_LOCK(x) ((x) << S_SF_LOCK)
+#define F_SF_LOCK V_SF_LOCK(1U)
+
+#define S_CONT 3
+#define V_CONT(x) ((x) << S_CONT)
+#define F_CONT V_CONT(1U)
+
+#define S_BYTECNT 1
+#define M_BYTECNT 0x3U
+#define V_BYTECNT(x) ((x) << S_BYTECNT)
+#define G_BYTECNT(x) (((x) >> S_BYTECNT) & M_BYTECNT)
+
+#define S_OP 0
+#define V_OP(x) ((x) << S_OP)
+#define F_OP V_OP(1U)
+
+/* registers for module PL */
+#define PL_BASE_ADDR 0x19400
+
+#define S_SOURCEPF 8
+#define M_SOURCEPF 0x7U
+#define V_SOURCEPF(x) ((x) << S_SOURCEPF)
+#define G_SOURCEPF(x) (((x) >> S_SOURCEPF) & M_SOURCEPF)
+
+#define A_PL_PF_INT_ENABLE 0x3c4
+
+#define S_PFSW 3
+#define V_PFSW(x) ((x) << S_PFSW)
+#define F_PFSW V_PFSW(1U)
+
+#define S_PFCIM 1
+#define V_PFCIM(x) ((x) << S_PFCIM)
+#define F_PFCIM V_PFCIM(1U)
+
+#define A_PL_WHOAMI 0x19400
+
+#define A_PL_RST 0x19428
+
+#define A_PL_INT_MAP0 0x19414
+
+#define S_PIORST 1
+#define V_PIORST(x) ((x) << S_PIORST)
+#define F_PIORST V_PIORST(1U)
+
+#define S_PIORSTMODE 0
+#define V_PIORSTMODE(x) ((x) << S_PIORSTMODE)
+#define F_PIORSTMODE V_PIORSTMODE(1U)
+
+#define A_PL_REV 0x1943c
+
+#define S_REV 0
+#define M_REV 0xfU
+#define V_REV(x) ((x) << S_REV)
+#define G_REV(x) (((x) >> S_REV) & M_REV)
diff --git a/src/dpdk22/drivers/net/cxgbe/base/t4_regs_values.h b/src/dpdk22/drivers/net/cxgbe/base/t4_regs_values.h
new file mode 100644
index 00000000..d7d3144c
--- /dev/null
+++ b/src/dpdk22/drivers/net/cxgbe/base/t4_regs_values.h
@@ -0,0 +1,169 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __T4_REGS_VALUES_H__
+#define __T4_REGS_VALUES_H__
+
+/*
+ * This file contains definitions for various T4 register value hardware
+ * constants. The types of values encoded here are predominantly those for
+ * register fields which control "modal" behavior. For the most part, we do
+ * not include definitions for register fields which are simple numeric
+ * metrics, etc.
+ */
+
+/*
+ * SGE definitions.
+ * ================
+ */
+
+/*
+ * SGE register field values.
+ */
+
+/* CONTROL register */
+#define X_RXPKTCPLMODE_SPLIT 1
+#define X_INGPCIEBOUNDARY_32B 0
+#define X_INGPADBOUNDARY_SHIFT 5
+
+/* CONTROL2 register */
+#define X_INGPACKBOUNDARY_SHIFT 5
+#define X_INGPACKBOUNDARY_16B 0
+
+/* GTS register */
+#define X_TIMERREG_RESTART_COUNTER 6
+#define X_TIMERREG_UPDATE_CIDX 7
+
+/*
+ * Egress Context field values
+ */
+#define X_FETCHBURSTMIN_64B 2
+#define X_FETCHBURSTMIN_128B 3
+#define X_FETCHBURSTMAX_256B 2
+#define X_FETCHBURSTMAX_512B 3
+
+#define X_HOSTFCMODE_NONE 0
+
+/*
+ * Ingress Context field values
+ */
+#define X_UPDATEDELIVERY_INTERRUPT 1
+
+#define X_RSPD_TYPE_FLBUF 0
+#define X_RSPD_TYPE_CPL 1
+
+/*
+ * Context field definitions. This is by no means a complete list of SGE
+ * Context fields. In the vast majority of cases the firmware initializes
+ * things the way they need to be set up. But in a few small cases, we need
+ * to compute new values and ship them off to the firmware to be applied to
+ * the SGE Contexts ...
+ */
+
+/*
+ * Congestion Manager Definitions.
+ */
+#define S_CONMCTXT_CNGTPMODE 19
+#define M_CONMCTXT_CNGTPMODE 0x3
+#define V_CONMCTXT_CNGTPMODE(x) ((x) << S_CONMCTXT_CNGTPMODE)
+#define G_CONMCTXT_CNGTPMODE(x) \
+ (((x) >> S_CONMCTXT_CNGTPMODE) & M_CONMCTXT_CNGTPMODE)
+#define S_CONMCTXT_CNGCHMAP 0
+#define M_CONMCTXT_CNGCHMAP 0xffff
+#define V_CONMCTXT_CNGCHMAP(x) ((x) << S_CONMCTXT_CNGCHMAP)
+#define G_CONMCTXT_CNGCHMAP(x) \
+ (((x) >> S_CONMCTXT_CNGCHMAP) & M_CONMCTXT_CNGCHMAP)
+
+#define X_CONMCTXT_CNGTPMODE_QUEUE 1
+#define X_CONMCTXT_CNGTPMODE_CHANNEL 2
+
+/*
+ * T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
+ * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
+ * offsets 8x and a single 64-byte Egress Queue Unit (X_IDXSIZE_UNIT) Write
+ * Combining Gather Buffer interface at offset 64. For Ingress Queues,
+ * we have a Going To Sleep register at offsets 8x+4.
+ *
+ * As noted above, we have many instances of the Simple Doorbell and Going To
+ * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a
+ * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
+ * avoid buffering of the writes to the Simple Doorbell and we want to use a
+ * non-contiguous offset for the Going To Sleep writes in order to avoid
+ * possible combining between them.
+ */
+#define SGE_UDB_SIZE 128
+#define SGE_UDB_KDOORBELL 8
+#define SGE_UDB_GTS 20
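A sketch of the address arithmetic these constants imply (bar2 is an assumed mapping of the BAR2 region and i the queue's UDB index; neither is defined here):

    volatile char *udb = bar2 + i * SGE_UDB_SIZE;
    /* Simple Doorbell at byte 8 of the 128-byte region ... */
    volatile unsigned int *kdb =
            (volatile unsigned int *)(udb + SGE_UDB_KDOORBELL);
    /* ... and the Going To Sleep register at byte 20. */
    volatile unsigned int *gts =
            (volatile unsigned int *)(udb + SGE_UDB_GTS);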
+
+/*
+ * CIM definitions.
+ * ================
+ */
+
+/*
+ * CIM register field values.
+ */
+#define X_MBOWNER_NONE 0
+#define X_MBOWNER_FW 1
+#define X_MBOWNER_PL 2
+
+/*
+ * PCI-E definitions.
+ * ==================
+ */
+#define X_WINDOW_SHIFT 10
+#define X_PCIEOFST_SHIFT 10
+
+/*
+ * TP definitions.
+ * ===============
+ */
+
+/*
+ * TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present. These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define W_FT_FCOE 1
+#define W_FT_PORT 3
+#define W_FT_VNIC_ID 17
+#define W_FT_VLAN 17
+#define W_FT_TOS 8
+#define W_FT_PROTOCOL 8
+#define W_FT_ETHERTYPE 16
+#define W_FT_MACMATCH 9
+#define W_FT_MPSHITTYPE 3
+#define W_FT_FRAGMENTATION 1
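Since each set bit in TP_VLAN_PRI_MAP contributes its field's width, the total Compressed Filter Tuple width can be computed with a sketch like this (the array order is assumed to mirror the S_FCOE..S_FRAGMENTATION bit positions from t4_regs.h):

    static const unsigned int w_ft[] = {
            W_FT_FCOE, W_FT_PORT, W_FT_VNIC_ID, W_FT_VLAN, W_FT_TOS,
            W_FT_PROTOCOL, W_FT_ETHERTYPE, W_FT_MACMATCH,
            W_FT_MPSHITTYPE, W_FT_FRAGMENTATION,
    };

    static unsigned int filter_tuple_width(unsigned int vlan_pri_map)
    {
            unsigned int i, width = 0;

            for (i = 0; i < 10; i++)
                    if (vlan_pri_map & (1U << i))
                            width += w_ft[i];
            return width;
    }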
+
+#endif /* __T4_REGS_VALUES_H__ */
diff --git a/src/dpdk22/drivers/net/cxgbe/base/t4fw_interface.h b/src/dpdk22/drivers/net/cxgbe/base/t4fw_interface.h
new file mode 100644
index 00000000..74f19fe7
--- /dev/null
+++ b/src/dpdk22/drivers/net/cxgbe/base/t4fw_interface.h
@@ -0,0 +1,1730 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _T4FW_INTERFACE_H_
+#define _T4FW_INTERFACE_H_
+
+/******************************************************************************
+ * R E T U R N V A L U E S
+ ********************************/
+
+enum fw_retval {
+ FW_SUCCESS = 0, /* completed successfully */
+ FW_EPERM = 1, /* operation not permitted */
+ FW_ENOENT = 2, /* no such file or directory */
+ FW_EIO = 5, /* input/output error; hw bad */
+ FW_ENOEXEC = 8, /* exec format error; inv microcode */
+ FW_EAGAIN = 11, /* try again */
+ FW_ENOMEM = 12, /* out of memory */
+ FW_EFAULT = 14, /* bad address; fw bad */
+ FW_EBUSY = 16, /* resource busy */
+ FW_EEXIST = 17, /* file exists */
+ FW_ENODEV = 19, /* no such device */
+ FW_EINVAL = 22, /* invalid argument */
+ FW_ENOSPC = 28, /* no space left on device */
+ FW_ENOSYS = 38, /* functionality not implemented */
+ FW_ENODATA = 61, /* no data available */
+ FW_EPROTO = 71, /* protocol error */
+ FW_EADDRINUSE = 98, /* address already in use */
+	FW_EADDRNOTAVAIL = 99,	/* cannot assign requested address */
+ FW_ENETDOWN = 100, /* network is down */
+ FW_ENETUNREACH = 101, /* network is unreachable */
+ FW_ENOBUFS = 105, /* no buffer space available */
+ FW_ETIMEDOUT = 110, /* timeout */
+ FW_EINPROGRESS = 115, /* fw internal */
+};
+
+/******************************************************************************
+ * M E M O R Y T Y P E s
+ ******************************/
+
+enum fw_memtype {
+ FW_MEMTYPE_EDC0 = 0x0,
+ FW_MEMTYPE_EDC1 = 0x1,
+ FW_MEMTYPE_EXTMEM = 0x2,
+ FW_MEMTYPE_FLASH = 0x4,
+ FW_MEMTYPE_INTERNAL = 0x5,
+ FW_MEMTYPE_EXTMEM1 = 0x6,
+};
+
+/******************************************************************************
+ * W O R K R E Q U E S T s
+ ********************************/
+
+enum fw_wr_opcodes {
+ FW_ETH_TX_PKT_WR = 0x08,
+ FW_ETH_TX_PKTS_WR = 0x09,
+};
+
+/*
+ * Generic work request header flit0
+ */
+struct fw_wr_hdr {
+ __be32 hi;
+ __be32 lo;
+};
+
+/* work request opcode (hi)
+ */
+#define S_FW_WR_OP 24
+#define M_FW_WR_OP 0xff
+#define V_FW_WR_OP(x) ((x) << S_FW_WR_OP)
+#define G_FW_WR_OP(x) (((x) >> S_FW_WR_OP) & M_FW_WR_OP)
+
+/* work request immediate data length (hi)
+ */
+#define S_FW_WR_IMMDLEN 0
+#define M_FW_WR_IMMDLEN 0xff
+#define V_FW_WR_IMMDLEN(x) ((x) << S_FW_WR_IMMDLEN)
+#define G_FW_WR_IMMDLEN(x) \
+ (((x) >> S_FW_WR_IMMDLEN) & M_FW_WR_IMMDLEN)
+
+/* egress queue status update to egress queue status entry (lo)
+ */
+#define S_FW_WR_EQUEQ 30
+#define M_FW_WR_EQUEQ 0x1
+#define V_FW_WR_EQUEQ(x) ((x) << S_FW_WR_EQUEQ)
+#define G_FW_WR_EQUEQ(x) (((x) >> S_FW_WR_EQUEQ) & M_FW_WR_EQUEQ)
+#define F_FW_WR_EQUEQ V_FW_WR_EQUEQ(1U)
+
+/* length in units of 16-bytes (lo)
+ */
+#define S_FW_WR_LEN16 0
+#define M_FW_WR_LEN16 0xff
+#define V_FW_WR_LEN16(x) ((x) << S_FW_WR_LEN16)
+#define G_FW_WR_LEN16(x) (((x) >> S_FW_WR_LEN16) & M_FW_WR_LEN16)
+
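For illustration, the header of an immediate-data Ethernet TX work request could be composed from these fields roughly as follows (cpu_to_be32 and the length variables are assumptions standing in for the host's helpers):

    wr->hi = cpu_to_be32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
                         V_FW_WR_IMMDLEN(immd_len));
    wr->lo = cpu_to_be32(F_FW_WR_EQUEQ |
                         V_FW_WR_LEN16((wr_len + 15) / 16));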
+struct fw_eth_tx_pkt_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be64 r3;
+};
+
+#define S_FW_ETH_TX_PKT_WR_IMMDLEN 0
+#define M_FW_ETH_TX_PKT_WR_IMMDLEN 0x1ff
+#define V_FW_ETH_TX_PKT_WR_IMMDLEN(x) ((x) << S_FW_ETH_TX_PKT_WR_IMMDLEN)
+#define G_FW_ETH_TX_PKT_WR_IMMDLEN(x) \
+ (((x) >> S_FW_ETH_TX_PKT_WR_IMMDLEN) & M_FW_ETH_TX_PKT_WR_IMMDLEN)
+
+struct fw_eth_tx_pkts_wr {
+ __be32 op_pkd;
+ __be32 equiq_to_len16;
+ __be32 r3;
+ __be16 plen;
+ __u8 npkt;
+ __u8 type;
+};
+
+/******************************************************************************
+ * C O M M A N D s
+ *********************/
+
+/*
+ * The maximum length of time, in milliseconds, that we expect any firmware
+ * command to take to execute and return a reply to the host. The RESET
+ * and INITIALIZE commands can take a fair amount of time to execute but
+ * most execute in far less time than this maximum. This constant is used
+ * by host software to determine how long to wait for a firmware command
+ * reply before declaring the firmware as dead/unreachable ...
+ */
+#define FW_CMD_MAX_TIMEOUT 10000
+
+/*
+ * If a host driver does a HELLO and discovers that there's already a MASTER
+ * selected, we may have to wait for that MASTER to finish issuing RESET,
+ * configuration and INITIALIZE commands. Also, there's a possibility that
+ * our own HELLO may get lost if it happens right as the MASTER is issuing a
+ * RESET command, so we need to be willing to make a few retries of our HELLO.
+ */
+#define FW_CMD_HELLO_TIMEOUT (3 * FW_CMD_MAX_TIMEOUT)
+#define FW_CMD_HELLO_RETRIES 3
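A sketch of the retry policy just described (send_hello_and_wait and adap are assumed helpers, not part of this interface):

    int retries, ret = -FW_ETIMEDOUT;

    for (retries = 0; retries < FW_CMD_HELLO_RETRIES; retries++) {
            ret = send_hello_and_wait(adap, FW_CMD_HELLO_TIMEOUT);
            if (ret != -FW_ETIMEDOUT)      /* only retry lost HELLOs */
                    break;
    }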
+
+enum fw_cmd_opcodes {
+ FW_RESET_CMD = 0x03,
+ FW_HELLO_CMD = 0x04,
+ FW_BYE_CMD = 0x05,
+ FW_INITIALIZE_CMD = 0x06,
+ FW_CAPS_CONFIG_CMD = 0x07,
+ FW_PARAMS_CMD = 0x08,
+ FW_IQ_CMD = 0x10,
+ FW_EQ_ETH_CMD = 0x12,
+ FW_VI_CMD = 0x14,
+ FW_VI_MAC_CMD = 0x15,
+ FW_VI_RXMODE_CMD = 0x16,
+ FW_VI_ENABLE_CMD = 0x17,
+ FW_PORT_CMD = 0x1b,
+ FW_RSS_IND_TBL_CMD = 0x20,
+ FW_RSS_VI_CONFIG_CMD = 0x23,
+ FW_DEBUG_CMD = 0x81,
+};
+
+/*
+ * Generic command header flit0
+ */
+struct fw_cmd_hdr {
+ __be32 hi;
+ __be32 lo;
+};
+
+#define S_FW_CMD_OP 24
+#define M_FW_CMD_OP 0xff
+#define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP)
+#define G_FW_CMD_OP(x) (((x) >> S_FW_CMD_OP) & M_FW_CMD_OP)
+
+#define S_FW_CMD_REQUEST 23
+#define M_FW_CMD_REQUEST 0x1
+#define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST)
+#define G_FW_CMD_REQUEST(x) (((x) >> S_FW_CMD_REQUEST) & M_FW_CMD_REQUEST)
+#define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U)
+
+#define S_FW_CMD_READ 22
+#define M_FW_CMD_READ 0x1
+#define V_FW_CMD_READ(x) ((x) << S_FW_CMD_READ)
+#define G_FW_CMD_READ(x) (((x) >> S_FW_CMD_READ) & M_FW_CMD_READ)
+#define F_FW_CMD_READ V_FW_CMD_READ(1U)
+
+#define S_FW_CMD_WRITE 21
+#define M_FW_CMD_WRITE 0x1
+#define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE)
+#define G_FW_CMD_WRITE(x) (((x) >> S_FW_CMD_WRITE) & M_FW_CMD_WRITE)
+#define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U)
+
+#define S_FW_CMD_EXEC 20
+#define M_FW_CMD_EXEC 0x1
+#define V_FW_CMD_EXEC(x) ((x) << S_FW_CMD_EXEC)
+#define G_FW_CMD_EXEC(x) (((x) >> S_FW_CMD_EXEC) & M_FW_CMD_EXEC)
+#define F_FW_CMD_EXEC V_FW_CMD_EXEC(1U)
+
+#define S_FW_CMD_RETVAL 8
+#define M_FW_CMD_RETVAL 0xff
+#define V_FW_CMD_RETVAL(x) ((x) << S_FW_CMD_RETVAL)
+#define G_FW_CMD_RETVAL(x) (((x) >> S_FW_CMD_RETVAL) & M_FW_CMD_RETVAL)
+
+#define S_FW_CMD_LEN16 0
+#define M_FW_CMD_LEN16 0xff
+#define V_FW_CMD_LEN16(x) ((x) << S_FW_CMD_LEN16)
+#define G_FW_CMD_LEN16(x) (((x) >> S_FW_CMD_LEN16) & M_FW_CMD_LEN16)
+
+#define FW_LEN16(fw_struct) V_FW_CMD_LEN16(sizeof(fw_struct) / 16)
+
+struct fw_reset_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be32 val;
+ __be32 halt_pkd;
+};
+
+#define S_FW_RESET_CMD_HALT 31
+#define M_FW_RESET_CMD_HALT 0x1
+#define V_FW_RESET_CMD_HALT(x) ((x) << S_FW_RESET_CMD_HALT)
+#define G_FW_RESET_CMD_HALT(x) \
+ (((x) >> S_FW_RESET_CMD_HALT) & M_FW_RESET_CMD_HALT)
+#define F_FW_RESET_CMD_HALT V_FW_RESET_CMD_HALT(1U)
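Putting the generic header fields and FW_LEN16() together, a RESET command might be filled in like this (memset and cpu_to_be32 are assumed host-side helpers):

    struct fw_reset_cmd c;

    memset(&c, 0, sizeof(c));
    c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) |
                                F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
    c.retval_len16 = cpu_to_be32(FW_LEN16(c));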
+
+enum {
+ FW_HELLO_CMD_STAGE_OS = 0,
+};
+
+struct fw_hello_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be32 err_to_clearinit;
+ __be32 fwrev;
+};
+
+#define S_FW_HELLO_CMD_ERR 31
+#define M_FW_HELLO_CMD_ERR 0x1
+#define V_FW_HELLO_CMD_ERR(x) ((x) << S_FW_HELLO_CMD_ERR)
+#define G_FW_HELLO_CMD_ERR(x) \
+ (((x) >> S_FW_HELLO_CMD_ERR) & M_FW_HELLO_CMD_ERR)
+#define F_FW_HELLO_CMD_ERR V_FW_HELLO_CMD_ERR(1U)
+
+#define S_FW_HELLO_CMD_INIT 30
+#define M_FW_HELLO_CMD_INIT 0x1
+#define V_FW_HELLO_CMD_INIT(x) ((x) << S_FW_HELLO_CMD_INIT)
+#define G_FW_HELLO_CMD_INIT(x) \
+ (((x) >> S_FW_HELLO_CMD_INIT) & M_FW_HELLO_CMD_INIT)
+#define F_FW_HELLO_CMD_INIT V_FW_HELLO_CMD_INIT(1U)
+
+#define S_FW_HELLO_CMD_MASTERDIS 29
+#define M_FW_HELLO_CMD_MASTERDIS 0x1
+#define V_FW_HELLO_CMD_MASTERDIS(x) ((x) << S_FW_HELLO_CMD_MASTERDIS)
+#define G_FW_HELLO_CMD_MASTERDIS(x) \
+ (((x) >> S_FW_HELLO_CMD_MASTERDIS) & M_FW_HELLO_CMD_MASTERDIS)
+#define F_FW_HELLO_CMD_MASTERDIS V_FW_HELLO_CMD_MASTERDIS(1U)
+
+#define S_FW_HELLO_CMD_MASTERFORCE 28
+#define M_FW_HELLO_CMD_MASTERFORCE 0x1
+#define V_FW_HELLO_CMD_MASTERFORCE(x) ((x) << S_FW_HELLO_CMD_MASTERFORCE)
+#define G_FW_HELLO_CMD_MASTERFORCE(x) \
+ (((x) >> S_FW_HELLO_CMD_MASTERFORCE) & M_FW_HELLO_CMD_MASTERFORCE)
+#define F_FW_HELLO_CMD_MASTERFORCE V_FW_HELLO_CMD_MASTERFORCE(1U)
+
+#define S_FW_HELLO_CMD_MBMASTER 24
+#define M_FW_HELLO_CMD_MBMASTER 0xf
+#define V_FW_HELLO_CMD_MBMASTER(x) ((x) << S_FW_HELLO_CMD_MBMASTER)
+#define G_FW_HELLO_CMD_MBMASTER(x) \
+ (((x) >> S_FW_HELLO_CMD_MBMASTER) & M_FW_HELLO_CMD_MBMASTER)
+
+#define S_FW_HELLO_CMD_MBASYNCNOT 20
+#define M_FW_HELLO_CMD_MBASYNCNOT 0x7
+#define V_FW_HELLO_CMD_MBASYNCNOT(x) ((x) << S_FW_HELLO_CMD_MBASYNCNOT)
+#define G_FW_HELLO_CMD_MBASYNCNOT(x) \
+ (((x) >> S_FW_HELLO_CMD_MBASYNCNOT) & M_FW_HELLO_CMD_MBASYNCNOT)
+
+#define S_FW_HELLO_CMD_STAGE 17
+#define M_FW_HELLO_CMD_STAGE 0x7
+#define V_FW_HELLO_CMD_STAGE(x) ((x) << S_FW_HELLO_CMD_STAGE)
+#define G_FW_HELLO_CMD_STAGE(x) \
+ (((x) >> S_FW_HELLO_CMD_STAGE) & M_FW_HELLO_CMD_STAGE)
+
+#define S_FW_HELLO_CMD_CLEARINIT 16
+#define M_FW_HELLO_CMD_CLEARINIT 0x1
+#define V_FW_HELLO_CMD_CLEARINIT(x) ((x) << S_FW_HELLO_CMD_CLEARINIT)
+#define G_FW_HELLO_CMD_CLEARINIT(x) \
+ (((x) >> S_FW_HELLO_CMD_CLEARINIT) & M_FW_HELLO_CMD_CLEARINIT)
+#define F_FW_HELLO_CMD_CLEARINIT V_FW_HELLO_CMD_CLEARINIT(1U)
+
+struct fw_bye_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be64 r3;
+};
+
+struct fw_initialize_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be64 r3;
+};
+
+enum fw_caps_config_nic {
+ FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020,
+ FW_CAPS_CONFIG_NIC_ETHOFLD = 0x00000040,
+};
+
+enum fw_memtype_cf {
+ FW_MEMTYPE_CF_FLASH = FW_MEMTYPE_FLASH,
+};
+
+struct fw_caps_config_cmd {
+ __be32 op_to_write;
+ __be32 cfvalid_to_len16;
+ __be32 r2;
+ __be32 hwmbitmap;
+ __be16 nbmcaps;
+ __be16 linkcaps;
+ __be16 switchcaps;
+ __be16 r3;
+ __be16 niccaps;
+ __be16 toecaps;
+ __be16 rdmacaps;
+ __be16 r4;
+ __be16 iscsicaps;
+ __be16 fcoecaps;
+ __be32 cfcsum;
+ __be32 finiver;
+ __be32 finicsum;
+};
+
+#define S_FW_CAPS_CONFIG_CMD_CFVALID 27
+#define M_FW_CAPS_CONFIG_CMD_CFVALID 0x1
+#define V_FW_CAPS_CONFIG_CMD_CFVALID(x) ((x) << S_FW_CAPS_CONFIG_CMD_CFVALID)
+#define G_FW_CAPS_CONFIG_CMD_CFVALID(x) \
+ (((x) >> S_FW_CAPS_CONFIG_CMD_CFVALID) & M_FW_CAPS_CONFIG_CMD_CFVALID)
+#define F_FW_CAPS_CONFIG_CMD_CFVALID V_FW_CAPS_CONFIG_CMD_CFVALID(1U)
+
+#define S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 24
+#define M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 0x7
+#define V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \
+ ((x) << S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF)
+#define G_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \
+ (((x) >> S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) & \
+ M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF)
+
+#define S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 16
+#define M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 0xff
+#define V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \
+ ((x) << S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF)
+#define G_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \
+ (((x) >> S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) & \
+ M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF)
+
+/*
+ * params command mnemonics
+ */
+enum fw_params_mnem {
+ FW_PARAMS_MNEM_DEV = 1, /* device params */
+ FW_PARAMS_MNEM_PFVF = 2, /* function params */
+ FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
+};
+
+/*
+ * device parameters
+ */
+enum fw_params_param_dev {
+ FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */
+ FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */
+ FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
+};
+
+/*
+ * physical and virtual function parameters
+ */
+enum fw_params_param_pfvf {
+ FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31
+};
+
+/*
+ * dma queue parameters
+ */
+enum fw_params_param_dmaq {
+ FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01,
+ FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20,
+};
+
+#define S_FW_PARAMS_MNEM 24
+#define M_FW_PARAMS_MNEM 0xff
+#define V_FW_PARAMS_MNEM(x) ((x) << S_FW_PARAMS_MNEM)
+#define G_FW_PARAMS_MNEM(x) \
+ (((x) >> S_FW_PARAMS_MNEM) & M_FW_PARAMS_MNEM)
+
+#define S_FW_PARAMS_PARAM_X 16
+#define M_FW_PARAMS_PARAM_X 0xff
+#define V_FW_PARAMS_PARAM_X(x) ((x) << S_FW_PARAMS_PARAM_X)
+#define G_FW_PARAMS_PARAM_X(x) \
+ (((x) >> S_FW_PARAMS_PARAM_X) & M_FW_PARAMS_PARAM_X)
+
+#define S_FW_PARAMS_PARAM_Y 8
+#define M_FW_PARAMS_PARAM_Y 0xff
+#define V_FW_PARAMS_PARAM_Y(x) ((x) << S_FW_PARAMS_PARAM_Y)
+#define G_FW_PARAMS_PARAM_Y(x) \
+ (((x) >> S_FW_PARAMS_PARAM_Y) & M_FW_PARAMS_PARAM_Y)
+
+#define S_FW_PARAMS_PARAM_Z 0
+#define M_FW_PARAMS_PARAM_Z 0xff
+#define V_FW_PARAMS_PARAM_Z(x) ((x) << S_FW_PARAMS_PARAM_Z)
+#define G_FW_PARAMS_PARAM_Z(x) \
+ (((x) >> S_FW_PARAMS_PARAM_Z) & M_FW_PARAMS_PARAM_Z)
+
+#define S_FW_PARAMS_PARAM_YZ 0
+#define M_FW_PARAMS_PARAM_YZ 0xffff
+#define V_FW_PARAMS_PARAM_YZ(x) ((x) << S_FW_PARAMS_PARAM_YZ)
+#define G_FW_PARAMS_PARAM_YZ(x) \
+ (((x) >> S_FW_PARAMS_PARAM_YZ) & M_FW_PARAMS_PARAM_YZ)
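As a small example of the mnemonic/X/Y/Z encoding, querying the device "port vector" parameter would pack the fields like this (a sketch, using only the definitions above):

    unsigned int param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
                         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);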
+
+struct fw_params_cmd {
+ __be32 op_to_vfn;
+ __be32 retval_len16;
+ struct fw_params_param {
+ __be32 mnem;
+ __be32 val;
+ } param[7];
+};
+
+#define S_FW_PARAMS_CMD_PFN 8
+#define M_FW_PARAMS_CMD_PFN 0x7
+#define V_FW_PARAMS_CMD_PFN(x) ((x) << S_FW_PARAMS_CMD_PFN)
+#define G_FW_PARAMS_CMD_PFN(x) \
+ (((x) >> S_FW_PARAMS_CMD_PFN) & M_FW_PARAMS_CMD_PFN)
+
+#define S_FW_PARAMS_CMD_VFN 0
+#define M_FW_PARAMS_CMD_VFN 0xff
+#define V_FW_PARAMS_CMD_VFN(x) ((x) << S_FW_PARAMS_CMD_VFN)
+#define G_FW_PARAMS_CMD_VFN(x) \
+ (((x) >> S_FW_PARAMS_CMD_VFN) & M_FW_PARAMS_CMD_VFN)
+
+/*
+ * ingress queue type; the first 1K ingress queues can have 0, 1 or 2
+ * associated free lists and an interrupt; all other ingress queues lack
+ * these capabilities
+ */
+enum fw_iq_type {
+ FW_IQ_TYPE_FL_INT_CAP,
+};
+
+struct fw_iq_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be16 physiqid;
+ __be16 iqid;
+ __be16 fl0id;
+ __be16 fl1id;
+ __be32 type_to_iqandstindex;
+ __be16 iqdroprss_to_iqesize;
+ __be16 iqsize;
+ __be64 iqaddr;
+ __be32 iqns_to_fl0congen;
+ __be16 fl0dcaen_to_fl0cidxfthresh;
+ __be16 fl0size;
+ __be64 fl0addr;
+ __be32 fl1cngchmap_to_fl1congen;
+ __be16 fl1dcaen_to_fl1cidxfthresh;
+ __be16 fl1size;
+ __be64 fl1addr;
+};
+
+#define S_FW_IQ_CMD_PFN 8
+#define M_FW_IQ_CMD_PFN 0x7
+#define V_FW_IQ_CMD_PFN(x) ((x) << S_FW_IQ_CMD_PFN)
+#define G_FW_IQ_CMD_PFN(x) (((x) >> S_FW_IQ_CMD_PFN) & M_FW_IQ_CMD_PFN)
+
+#define S_FW_IQ_CMD_VFN 0
+#define M_FW_IQ_CMD_VFN 0xff
+#define V_FW_IQ_CMD_VFN(x) ((x) << S_FW_IQ_CMD_VFN)
+#define G_FW_IQ_CMD_VFN(x) (((x) >> S_FW_IQ_CMD_VFN) & M_FW_IQ_CMD_VFN)
+
+#define S_FW_IQ_CMD_ALLOC 31
+#define M_FW_IQ_CMD_ALLOC 0x1
+#define V_FW_IQ_CMD_ALLOC(x) ((x) << S_FW_IQ_CMD_ALLOC)
+#define G_FW_IQ_CMD_ALLOC(x) \
+ (((x) >> S_FW_IQ_CMD_ALLOC) & M_FW_IQ_CMD_ALLOC)
+#define F_FW_IQ_CMD_ALLOC V_FW_IQ_CMD_ALLOC(1U)
+
+#define S_FW_IQ_CMD_FREE 30
+#define M_FW_IQ_CMD_FREE 0x1
+#define V_FW_IQ_CMD_FREE(x) ((x) << S_FW_IQ_CMD_FREE)
+#define G_FW_IQ_CMD_FREE(x) (((x) >> S_FW_IQ_CMD_FREE) & M_FW_IQ_CMD_FREE)
+#define F_FW_IQ_CMD_FREE V_FW_IQ_CMD_FREE(1U)
+
+#define S_FW_IQ_CMD_IQSTART 28
+#define M_FW_IQ_CMD_IQSTART 0x1
+#define V_FW_IQ_CMD_IQSTART(x) ((x) << S_FW_IQ_CMD_IQSTART)
+#define G_FW_IQ_CMD_IQSTART(x) \
+ (((x) >> S_FW_IQ_CMD_IQSTART) & M_FW_IQ_CMD_IQSTART)
+#define F_FW_IQ_CMD_IQSTART V_FW_IQ_CMD_IQSTART(1U)
+
+#define S_FW_IQ_CMD_IQSTOP 27
+#define M_FW_IQ_CMD_IQSTOP 0x1
+#define V_FW_IQ_CMD_IQSTOP(x) ((x) << S_FW_IQ_CMD_IQSTOP)
+#define G_FW_IQ_CMD_IQSTOP(x) \
+ (((x) >> S_FW_IQ_CMD_IQSTOP) & M_FW_IQ_CMD_IQSTOP)
+#define F_FW_IQ_CMD_IQSTOP V_FW_IQ_CMD_IQSTOP(1U)
+
+#define S_FW_IQ_CMD_TYPE 29
+#define M_FW_IQ_CMD_TYPE 0x7
+#define V_FW_IQ_CMD_TYPE(x) ((x) << S_FW_IQ_CMD_TYPE)
+#define G_FW_IQ_CMD_TYPE(x) (((x) >> S_FW_IQ_CMD_TYPE) & M_FW_IQ_CMD_TYPE)
+
+#define S_FW_IQ_CMD_IQASYNCH 28
+#define M_FW_IQ_CMD_IQASYNCH 0x1
+#define V_FW_IQ_CMD_IQASYNCH(x) ((x) << S_FW_IQ_CMD_IQASYNCH)
+#define G_FW_IQ_CMD_IQASYNCH(x) \
+ (((x) >> S_FW_IQ_CMD_IQASYNCH) & M_FW_IQ_CMD_IQASYNCH)
+#define F_FW_IQ_CMD_IQASYNCH V_FW_IQ_CMD_IQASYNCH(1U)
+
+#define S_FW_IQ_CMD_VIID 16
+#define M_FW_IQ_CMD_VIID 0xfff
+#define V_FW_IQ_CMD_VIID(x) ((x) << S_FW_IQ_CMD_VIID)
+#define G_FW_IQ_CMD_VIID(x) (((x) >> S_FW_IQ_CMD_VIID) & M_FW_IQ_CMD_VIID)
+
+#define S_FW_IQ_CMD_IQANDST 15
+#define M_FW_IQ_CMD_IQANDST 0x1
+#define V_FW_IQ_CMD_IQANDST(x) ((x) << S_FW_IQ_CMD_IQANDST)
+#define G_FW_IQ_CMD_IQANDST(x) \
+ (((x) >> S_FW_IQ_CMD_IQANDST) & M_FW_IQ_CMD_IQANDST)
+#define F_FW_IQ_CMD_IQANDST V_FW_IQ_CMD_IQANDST(1U)
+
+#define S_FW_IQ_CMD_IQANUD 12
+#define M_FW_IQ_CMD_IQANUD 0x3
+#define V_FW_IQ_CMD_IQANUD(x) ((x) << S_FW_IQ_CMD_IQANUD)
+#define G_FW_IQ_CMD_IQANUD(x) \
+ (((x) >> S_FW_IQ_CMD_IQANUD) & M_FW_IQ_CMD_IQANUD)
+
+#define S_FW_IQ_CMD_IQANDSTINDEX 0
+#define M_FW_IQ_CMD_IQANDSTINDEX 0xfff
+#define V_FW_IQ_CMD_IQANDSTINDEX(x) ((x) << S_FW_IQ_CMD_IQANDSTINDEX)
+#define G_FW_IQ_CMD_IQANDSTINDEX(x) \
+ (((x) >> S_FW_IQ_CMD_IQANDSTINDEX) & M_FW_IQ_CMD_IQANDSTINDEX)
+
+#define S_FW_IQ_CMD_IQGTSMODE 14
+#define M_FW_IQ_CMD_IQGTSMODE 0x1
+#define V_FW_IQ_CMD_IQGTSMODE(x) ((x) << S_FW_IQ_CMD_IQGTSMODE)
+#define G_FW_IQ_CMD_IQGTSMODE(x) \
+ (((x) >> S_FW_IQ_CMD_IQGTSMODE) & M_FW_IQ_CMD_IQGTSMODE)
+#define F_FW_IQ_CMD_IQGTSMODE V_FW_IQ_CMD_IQGTSMODE(1U)
+
+#define S_FW_IQ_CMD_IQPCIECH 12
+#define M_FW_IQ_CMD_IQPCIECH 0x3
+#define V_FW_IQ_CMD_IQPCIECH(x) ((x) << S_FW_IQ_CMD_IQPCIECH)
+#define G_FW_IQ_CMD_IQPCIECH(x) \
+ (((x) >> S_FW_IQ_CMD_IQPCIECH) & M_FW_IQ_CMD_IQPCIECH)
+
+#define S_FW_IQ_CMD_IQINTCNTTHRESH 4
+#define M_FW_IQ_CMD_IQINTCNTTHRESH 0x3
+#define V_FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << S_FW_IQ_CMD_IQINTCNTTHRESH)
+#define G_FW_IQ_CMD_IQINTCNTTHRESH(x) \
+ (((x) >> S_FW_IQ_CMD_IQINTCNTTHRESH) & M_FW_IQ_CMD_IQINTCNTTHRESH)
+
+#define S_FW_IQ_CMD_IQESIZE 0
+#define M_FW_IQ_CMD_IQESIZE 0x3
+#define V_FW_IQ_CMD_IQESIZE(x) ((x) << S_FW_IQ_CMD_IQESIZE)
+#define G_FW_IQ_CMD_IQESIZE(x) \
+ (((x) >> S_FW_IQ_CMD_IQESIZE) & M_FW_IQ_CMD_IQESIZE)
+
+#define S_FW_IQ_CMD_IQFLINTCONGEN 27
+#define M_FW_IQ_CMD_IQFLINTCONGEN 0x1
+#define V_FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << S_FW_IQ_CMD_IQFLINTCONGEN)
+#define G_FW_IQ_CMD_IQFLINTCONGEN(x) \
+ (((x) >> S_FW_IQ_CMD_IQFLINTCONGEN) & M_FW_IQ_CMD_IQFLINTCONGEN)
+#define F_FW_IQ_CMD_IQFLINTCONGEN V_FW_IQ_CMD_IQFLINTCONGEN(1U)
+
+#define S_FW_IQ_CMD_FL0CNGCHMAP 20
+#define M_FW_IQ_CMD_FL0CNGCHMAP 0xf
+#define V_FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL0CNGCHMAP)
+#define G_FW_IQ_CMD_FL0CNGCHMAP(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CNGCHMAP) & M_FW_IQ_CMD_FL0CNGCHMAP)
+
+#define S_FW_IQ_CMD_FL0DATARO 12
+#define M_FW_IQ_CMD_FL0DATARO 0x1
+#define V_FW_IQ_CMD_FL0DATARO(x) ((x) << S_FW_IQ_CMD_FL0DATARO)
+#define G_FW_IQ_CMD_FL0DATARO(x) \
+ (((x) >> S_FW_IQ_CMD_FL0DATARO) & M_FW_IQ_CMD_FL0DATARO)
+#define F_FW_IQ_CMD_FL0DATARO V_FW_IQ_CMD_FL0DATARO(1U)
+
+#define S_FW_IQ_CMD_FL0CONGCIF 11
+#define M_FW_IQ_CMD_FL0CONGCIF 0x1
+#define V_FW_IQ_CMD_FL0CONGCIF(x) ((x) << S_FW_IQ_CMD_FL0CONGCIF)
+#define G_FW_IQ_CMD_FL0CONGCIF(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CONGCIF) & M_FW_IQ_CMD_FL0CONGCIF)
+#define F_FW_IQ_CMD_FL0CONGCIF V_FW_IQ_CMD_FL0CONGCIF(1U)
+
+#define S_FW_IQ_CMD_FL0FETCHRO 6
+#define M_FW_IQ_CMD_FL0FETCHRO 0x1
+#define V_FW_IQ_CMD_FL0FETCHRO(x) ((x) << S_FW_IQ_CMD_FL0FETCHRO)
+#define G_FW_IQ_CMD_FL0FETCHRO(x) \
+ (((x) >> S_FW_IQ_CMD_FL0FETCHRO) & M_FW_IQ_CMD_FL0FETCHRO)
+#define F_FW_IQ_CMD_FL0FETCHRO V_FW_IQ_CMD_FL0FETCHRO(1U)
+
+#define S_FW_IQ_CMD_FL0HOSTFCMODE 4
+#define M_FW_IQ_CMD_FL0HOSTFCMODE 0x3
+#define V_FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << S_FW_IQ_CMD_FL0HOSTFCMODE)
+#define G_FW_IQ_CMD_FL0HOSTFCMODE(x) \
+ (((x) >> S_FW_IQ_CMD_FL0HOSTFCMODE) & M_FW_IQ_CMD_FL0HOSTFCMODE)
+
+#define S_FW_IQ_CMD_FL0PADEN 2
+#define M_FW_IQ_CMD_FL0PADEN 0x1
+#define V_FW_IQ_CMD_FL0PADEN(x) ((x) << S_FW_IQ_CMD_FL0PADEN)
+#define G_FW_IQ_CMD_FL0PADEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0PADEN) & M_FW_IQ_CMD_FL0PADEN)
+#define F_FW_IQ_CMD_FL0PADEN V_FW_IQ_CMD_FL0PADEN(1U)
+
+#define S_FW_IQ_CMD_FL0PACKEN 1
+#define M_FW_IQ_CMD_FL0PACKEN 0x1
+#define V_FW_IQ_CMD_FL0PACKEN(x) ((x) << S_FW_IQ_CMD_FL0PACKEN)
+#define G_FW_IQ_CMD_FL0PACKEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0PACKEN) & M_FW_IQ_CMD_FL0PACKEN)
+#define F_FW_IQ_CMD_FL0PACKEN V_FW_IQ_CMD_FL0PACKEN(1U)
+
+#define S_FW_IQ_CMD_FL0CONGEN 0
+#define M_FW_IQ_CMD_FL0CONGEN 0x1
+#define V_FW_IQ_CMD_FL0CONGEN(x) ((x) << S_FW_IQ_CMD_FL0CONGEN)
+#define G_FW_IQ_CMD_FL0CONGEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CONGEN) & M_FW_IQ_CMD_FL0CONGEN)
+#define F_FW_IQ_CMD_FL0CONGEN V_FW_IQ_CMD_FL0CONGEN(1U)
+
+#define S_FW_IQ_CMD_FL0FBMIN 7
+#define M_FW_IQ_CMD_FL0FBMIN 0x7
+#define V_FW_IQ_CMD_FL0FBMIN(x) ((x) << S_FW_IQ_CMD_FL0FBMIN)
+#define G_FW_IQ_CMD_FL0FBMIN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0FBMIN) & M_FW_IQ_CMD_FL0FBMIN)
+
+#define S_FW_IQ_CMD_FL0FBMAX 4
+#define M_FW_IQ_CMD_FL0FBMAX 0x7
+#define V_FW_IQ_CMD_FL0FBMAX(x) ((x) << S_FW_IQ_CMD_FL0FBMAX)
+#define G_FW_IQ_CMD_FL0FBMAX(x) \
+ (((x) >> S_FW_IQ_CMD_FL0FBMAX) & M_FW_IQ_CMD_FL0FBMAX)
+
+struct fw_eq_eth_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 eqid_pkd;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ __be32 autoequiqe_to_viid;
+ __be32 r8_lo;
+ __be64 r9;
+};
+
+#define S_FW_EQ_ETH_CMD_PFN 8
+#define M_FW_EQ_ETH_CMD_PFN 0x7
+#define V_FW_EQ_ETH_CMD_PFN(x) ((x) << S_FW_EQ_ETH_CMD_PFN)
+#define G_FW_EQ_ETH_CMD_PFN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_PFN) & M_FW_EQ_ETH_CMD_PFN)
+
+#define S_FW_EQ_ETH_CMD_VFN 0
+#define M_FW_EQ_ETH_CMD_VFN 0xff
+#define V_FW_EQ_ETH_CMD_VFN(x) ((x) << S_FW_EQ_ETH_CMD_VFN)
+#define G_FW_EQ_ETH_CMD_VFN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_VFN) & M_FW_EQ_ETH_CMD_VFN)
+
+#define S_FW_EQ_ETH_CMD_ALLOC 31
+#define M_FW_EQ_ETH_CMD_ALLOC 0x1
+#define V_FW_EQ_ETH_CMD_ALLOC(x) ((x) << S_FW_EQ_ETH_CMD_ALLOC)
+#define G_FW_EQ_ETH_CMD_ALLOC(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_ALLOC) & M_FW_EQ_ETH_CMD_ALLOC)
+#define F_FW_EQ_ETH_CMD_ALLOC V_FW_EQ_ETH_CMD_ALLOC(1U)
+
+#define S_FW_EQ_ETH_CMD_FREE 30
+#define M_FW_EQ_ETH_CMD_FREE 0x1
+#define V_FW_EQ_ETH_CMD_FREE(x) ((x) << S_FW_EQ_ETH_CMD_FREE)
+#define G_FW_EQ_ETH_CMD_FREE(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FREE) & M_FW_EQ_ETH_CMD_FREE)
+#define F_FW_EQ_ETH_CMD_FREE V_FW_EQ_ETH_CMD_FREE(1U)
+
+#define S_FW_EQ_ETH_CMD_EQSTART 28
+#define M_FW_EQ_ETH_CMD_EQSTART 0x1
+#define V_FW_EQ_ETH_CMD_EQSTART(x) ((x) << S_FW_EQ_ETH_CMD_EQSTART)
+#define G_FW_EQ_ETH_CMD_EQSTART(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_EQSTART) & M_FW_EQ_ETH_CMD_EQSTART)
+#define F_FW_EQ_ETH_CMD_EQSTART V_FW_EQ_ETH_CMD_EQSTART(1U)
+
+#define S_FW_EQ_ETH_CMD_EQID 0
+#define M_FW_EQ_ETH_CMD_EQID 0xfffff
+#define V_FW_EQ_ETH_CMD_EQID(x) ((x) << S_FW_EQ_ETH_CMD_EQID)
+#define G_FW_EQ_ETH_CMD_EQID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID)
+
+#define S_FW_EQ_ETH_CMD_FETCHRO 22
+#define M_FW_EQ_ETH_CMD_FETCHRO 0x1
+#define V_FW_EQ_ETH_CMD_FETCHRO(x) ((x) << S_FW_EQ_ETH_CMD_FETCHRO)
+#define G_FW_EQ_ETH_CMD_FETCHRO(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FETCHRO) & M_FW_EQ_ETH_CMD_FETCHRO)
+#define F_FW_EQ_ETH_CMD_FETCHRO V_FW_EQ_ETH_CMD_FETCHRO(1U)
+
+#define S_FW_EQ_ETH_CMD_HOSTFCMODE 20
+#define M_FW_EQ_ETH_CMD_HOSTFCMODE 0x3
+#define V_FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_ETH_CMD_HOSTFCMODE)
+#define G_FW_EQ_ETH_CMD_HOSTFCMODE(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_HOSTFCMODE) & M_FW_EQ_ETH_CMD_HOSTFCMODE)
+
+#define S_FW_EQ_ETH_CMD_PCIECHN 16
+#define M_FW_EQ_ETH_CMD_PCIECHN 0x3
+#define V_FW_EQ_ETH_CMD_PCIECHN(x) ((x) << S_FW_EQ_ETH_CMD_PCIECHN)
+#define G_FW_EQ_ETH_CMD_PCIECHN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_PCIECHN) & M_FW_EQ_ETH_CMD_PCIECHN)
+
+#define S_FW_EQ_ETH_CMD_IQID 0
+#define M_FW_EQ_ETH_CMD_IQID 0xffff
+#define V_FW_EQ_ETH_CMD_IQID(x) ((x) << S_FW_EQ_ETH_CMD_IQID)
+#define G_FW_EQ_ETH_CMD_IQID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_IQID) & M_FW_EQ_ETH_CMD_IQID)
+
+#define S_FW_EQ_ETH_CMD_FBMIN 23
+#define M_FW_EQ_ETH_CMD_FBMIN 0x7
+#define V_FW_EQ_ETH_CMD_FBMIN(x) ((x) << S_FW_EQ_ETH_CMD_FBMIN)
+#define G_FW_EQ_ETH_CMD_FBMIN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FBMIN) & M_FW_EQ_ETH_CMD_FBMIN)
+
+#define S_FW_EQ_ETH_CMD_FBMAX 20
+#define M_FW_EQ_ETH_CMD_FBMAX 0x7
+#define V_FW_EQ_ETH_CMD_FBMAX(x) ((x) << S_FW_EQ_ETH_CMD_FBMAX)
+#define G_FW_EQ_ETH_CMD_FBMAX(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FBMAX) & M_FW_EQ_ETH_CMD_FBMAX)
+
+#define S_FW_EQ_ETH_CMD_CIDXFTHRESH 16
+#define M_FW_EQ_ETH_CMD_CIDXFTHRESH 0x7
+#define V_FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_ETH_CMD_CIDXFTHRESH)
+#define G_FW_EQ_ETH_CMD_CIDXFTHRESH(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_CIDXFTHRESH) & M_FW_EQ_ETH_CMD_CIDXFTHRESH)
+
+#define S_FW_EQ_ETH_CMD_EQSIZE 0
+#define M_FW_EQ_ETH_CMD_EQSIZE 0xffff
+#define V_FW_EQ_ETH_CMD_EQSIZE(x) ((x) << S_FW_EQ_ETH_CMD_EQSIZE)
+#define G_FW_EQ_ETH_CMD_EQSIZE(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_EQSIZE) & M_FW_EQ_ETH_CMD_EQSIZE)
+
+#define S_FW_EQ_ETH_CMD_AUTOEQUEQE 30
+#define M_FW_EQ_ETH_CMD_AUTOEQUEQE 0x1
+#define V_FW_EQ_ETH_CMD_AUTOEQUEQE(x) ((x) << S_FW_EQ_ETH_CMD_AUTOEQUEQE)
+#define G_FW_EQ_ETH_CMD_AUTOEQUEQE(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_AUTOEQUEQE) & M_FW_EQ_ETH_CMD_AUTOEQUEQE)
+#define F_FW_EQ_ETH_CMD_AUTOEQUEQE V_FW_EQ_ETH_CMD_AUTOEQUEQE(1U)
+
+#define S_FW_EQ_ETH_CMD_VIID 16
+#define M_FW_EQ_ETH_CMD_VIID 0xfff
+#define V_FW_EQ_ETH_CMD_VIID(x) ((x) << S_FW_EQ_ETH_CMD_VIID)
+#define G_FW_EQ_ETH_CMD_VIID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID)
+
+enum fw_vi_func {
+ FW_VI_FUNC_ETH,
+};
+
+struct fw_vi_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be16 type_to_viid;
+ __u8 mac[6];
+ __u8 portid_pkd;
+ __u8 nmac;
+ __u8 nmac0[6];
+ __be16 norss_rsssize;
+ __u8 nmac1[6];
+ __be16 idsiiq_pkd;
+ __u8 nmac2[6];
+ __be16 idseiq_pkd;
+ __u8 nmac3[6];
+ __be64 r9;
+ __be64 r10;
+};
+
+#define S_FW_VI_CMD_PFN 8
+#define M_FW_VI_CMD_PFN 0x7
+#define V_FW_VI_CMD_PFN(x) ((x) << S_FW_VI_CMD_PFN)
+#define G_FW_VI_CMD_PFN(x) (((x) >> S_FW_VI_CMD_PFN) & M_FW_VI_CMD_PFN)
+
+#define S_FW_VI_CMD_VFN 0
+#define M_FW_VI_CMD_VFN 0xff
+#define V_FW_VI_CMD_VFN(x) ((x) << S_FW_VI_CMD_VFN)
+#define G_FW_VI_CMD_VFN(x) (((x) >> S_FW_VI_CMD_VFN) & M_FW_VI_CMD_VFN)
+
+#define S_FW_VI_CMD_ALLOC 31
+#define M_FW_VI_CMD_ALLOC 0x1
+#define V_FW_VI_CMD_ALLOC(x) ((x) << S_FW_VI_CMD_ALLOC)
+#define G_FW_VI_CMD_ALLOC(x) \
+ (((x) >> S_FW_VI_CMD_ALLOC) & M_FW_VI_CMD_ALLOC)
+#define F_FW_VI_CMD_ALLOC V_FW_VI_CMD_ALLOC(1U)
+
+#define S_FW_VI_CMD_FREE 30
+#define M_FW_VI_CMD_FREE 0x1
+#define V_FW_VI_CMD_FREE(x) ((x) << S_FW_VI_CMD_FREE)
+#define G_FW_VI_CMD_FREE(x) (((x) >> S_FW_VI_CMD_FREE) & M_FW_VI_CMD_FREE)
+#define F_FW_VI_CMD_FREE V_FW_VI_CMD_FREE(1U)
+
+#define S_FW_VI_CMD_TYPE 15
+#define M_FW_VI_CMD_TYPE 0x1
+#define V_FW_VI_CMD_TYPE(x) ((x) << S_FW_VI_CMD_TYPE)
+#define G_FW_VI_CMD_TYPE(x) (((x) >> S_FW_VI_CMD_TYPE) & M_FW_VI_CMD_TYPE)
+#define F_FW_VI_CMD_TYPE V_FW_VI_CMD_TYPE(1U)
+
+#define S_FW_VI_CMD_FUNC 12
+#define M_FW_VI_CMD_FUNC 0x7
+#define V_FW_VI_CMD_FUNC(x) ((x) << S_FW_VI_CMD_FUNC)
+#define G_FW_VI_CMD_FUNC(x) (((x) >> S_FW_VI_CMD_FUNC) & M_FW_VI_CMD_FUNC)
+
+#define S_FW_VI_CMD_VIID 0
+#define M_FW_VI_CMD_VIID 0xfff
+#define V_FW_VI_CMD_VIID(x) ((x) << S_FW_VI_CMD_VIID)
+#define G_FW_VI_CMD_VIID(x) (((x) >> S_FW_VI_CMD_VIID) & M_FW_VI_CMD_VIID)
+
+#define S_FW_VI_CMD_PORTID 4
+#define M_FW_VI_CMD_PORTID 0xf
+#define V_FW_VI_CMD_PORTID(x) ((x) << S_FW_VI_CMD_PORTID)
+#define G_FW_VI_CMD_PORTID(x) \
+ (((x) >> S_FW_VI_CMD_PORTID) & M_FW_VI_CMD_PORTID)
+
+#define S_FW_VI_CMD_RSSSIZE 0
+#define M_FW_VI_CMD_RSSSIZE 0x7ff
+#define V_FW_VI_CMD_RSSSIZE(x) ((x) << S_FW_VI_CMD_RSSSIZE)
+#define G_FW_VI_CMD_RSSSIZE(x) \
+ (((x) >> S_FW_VI_CMD_RSSSIZE) & M_FW_VI_CMD_RSSSIZE)
+
+/* Special VI_MAC command index ids */
+#define FW_VI_MAC_ADD_MAC 0x3FF
+#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
+
+enum fw_vi_mac_smac {
+ FW_VI_MAC_MPS_TCAM_ENTRY,
+ FW_VI_MAC_SMT_AND_MPSTCAM
+};
+
+struct fw_vi_mac_cmd {
+ __be32 op_to_viid;
+ __be32 freemacs_to_len16;
+ union fw_vi_mac {
+ struct fw_vi_mac_exact {
+ __be16 valid_to_idx;
+ __u8 macaddr[6];
+ } exact[7];
+ struct fw_vi_mac_hash {
+ __be64 hashvec;
+ } hash;
+ } u;
+};
+
+#define S_FW_VI_MAC_CMD_VIID 0
+#define M_FW_VI_MAC_CMD_VIID 0xfff
+#define V_FW_VI_MAC_CMD_VIID(x) ((x) << S_FW_VI_MAC_CMD_VIID)
+#define G_FW_VI_MAC_CMD_VIID(x) \
+ (((x) >> S_FW_VI_MAC_CMD_VIID) & M_FW_VI_MAC_CMD_VIID)
+
+#define S_FW_VI_MAC_CMD_VALID 15
+#define M_FW_VI_MAC_CMD_VALID 0x1
+#define V_FW_VI_MAC_CMD_VALID(x) ((x) << S_FW_VI_MAC_CMD_VALID)
+#define G_FW_VI_MAC_CMD_VALID(x) \
+ (((x) >> S_FW_VI_MAC_CMD_VALID) & M_FW_VI_MAC_CMD_VALID)
+#define F_FW_VI_MAC_CMD_VALID V_FW_VI_MAC_CMD_VALID(1U)
+
+#define S_FW_VI_MAC_CMD_SMAC_RESULT 10
+#define M_FW_VI_MAC_CMD_SMAC_RESULT 0x3
+#define V_FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << S_FW_VI_MAC_CMD_SMAC_RESULT)
+#define G_FW_VI_MAC_CMD_SMAC_RESULT(x) \
+ (((x) >> S_FW_VI_MAC_CMD_SMAC_RESULT) & M_FW_VI_MAC_CMD_SMAC_RESULT)
+
+#define S_FW_VI_MAC_CMD_IDX 0
+#define M_FW_VI_MAC_CMD_IDX 0x3ff
+#define V_FW_VI_MAC_CMD_IDX(x) ((x) << S_FW_VI_MAC_CMD_IDX)
+#define G_FW_VI_MAC_CMD_IDX(x) \
+ (((x) >> S_FW_VI_MAC_CMD_IDX) & M_FW_VI_MAC_CMD_IDX)
+
+struct fw_vi_rxmode_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ __be32 mtu_to_vlanexen;
+ __be32 r4_lo;
+};
+
+#define S_FW_VI_RXMODE_CMD_VIID 0
+#define M_FW_VI_RXMODE_CMD_VIID 0xfff
+#define V_FW_VI_RXMODE_CMD_VIID(x) ((x) << S_FW_VI_RXMODE_CMD_VIID)
+#define G_FW_VI_RXMODE_CMD_VIID(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_VIID) & M_FW_VI_RXMODE_CMD_VIID)
+
+#define S_FW_VI_RXMODE_CMD_MTU 16
+#define M_FW_VI_RXMODE_CMD_MTU 0xffff
+#define V_FW_VI_RXMODE_CMD_MTU(x) ((x) << S_FW_VI_RXMODE_CMD_MTU)
+#define G_FW_VI_RXMODE_CMD_MTU(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_MTU) & M_FW_VI_RXMODE_CMD_MTU)
+
+#define S_FW_VI_RXMODE_CMD_PROMISCEN 14
+#define M_FW_VI_RXMODE_CMD_PROMISCEN 0x3
+#define V_FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << S_FW_VI_RXMODE_CMD_PROMISCEN)
+#define G_FW_VI_RXMODE_CMD_PROMISCEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_PROMISCEN) & M_FW_VI_RXMODE_CMD_PROMISCEN)
+
+#define S_FW_VI_RXMODE_CMD_ALLMULTIEN 12
+#define M_FW_VI_RXMODE_CMD_ALLMULTIEN 0x3
+#define V_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \
+ ((x) << S_FW_VI_RXMODE_CMD_ALLMULTIEN)
+#define G_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_ALLMULTIEN) & M_FW_VI_RXMODE_CMD_ALLMULTIEN)
+
+#define S_FW_VI_RXMODE_CMD_BROADCASTEN 10
+#define M_FW_VI_RXMODE_CMD_BROADCASTEN 0x3
+#define V_FW_VI_RXMODE_CMD_BROADCASTEN(x) \
+ ((x) << S_FW_VI_RXMODE_CMD_BROADCASTEN)
+#define G_FW_VI_RXMODE_CMD_BROADCASTEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_BROADCASTEN) & \
+ M_FW_VI_RXMODE_CMD_BROADCASTEN)
+
+#define S_FW_VI_RXMODE_CMD_VLANEXEN 8
+#define M_FW_VI_RXMODE_CMD_VLANEXEN 0x3
+#define V_FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << S_FW_VI_RXMODE_CMD_VLANEXEN)
+#define G_FW_VI_RXMODE_CMD_VLANEXEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_VLANEXEN) & M_FW_VI_RXMODE_CMD_VLANEXEN)
+
+struct fw_vi_enable_cmd {
+ __be32 op_to_viid;
+ __be32 ien_to_len16;
+ __be16 blinkdur;
+ __be16 r3;
+ __be32 r4;
+};
+
+#define S_FW_VI_ENABLE_CMD_VIID 0
+#define M_FW_VI_ENABLE_CMD_VIID 0xfff
+#define V_FW_VI_ENABLE_CMD_VIID(x) ((x) << S_FW_VI_ENABLE_CMD_VIID)
+#define G_FW_VI_ENABLE_CMD_VIID(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_VIID) & M_FW_VI_ENABLE_CMD_VIID)
+
+#define S_FW_VI_ENABLE_CMD_IEN 31
+#define M_FW_VI_ENABLE_CMD_IEN 0x1
+#define V_FW_VI_ENABLE_CMD_IEN(x) ((x) << S_FW_VI_ENABLE_CMD_IEN)
+#define G_FW_VI_ENABLE_CMD_IEN(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_IEN) & M_FW_VI_ENABLE_CMD_IEN)
+#define F_FW_VI_ENABLE_CMD_IEN V_FW_VI_ENABLE_CMD_IEN(1U)
+
+#define S_FW_VI_ENABLE_CMD_EEN 30
+#define M_FW_VI_ENABLE_CMD_EEN 0x1
+#define V_FW_VI_ENABLE_CMD_EEN(x) ((x) << S_FW_VI_ENABLE_CMD_EEN)
+#define G_FW_VI_ENABLE_CMD_EEN(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_EEN) & M_FW_VI_ENABLE_CMD_EEN)
+#define F_FW_VI_ENABLE_CMD_EEN V_FW_VI_ENABLE_CMD_EEN(1U)
+
+#define S_FW_VI_ENABLE_CMD_DCB_INFO 28
+#define M_FW_VI_ENABLE_CMD_DCB_INFO 0x1
+#define V_FW_VI_ENABLE_CMD_DCB_INFO(x) ((x) << S_FW_VI_ENABLE_CMD_DCB_INFO)
+#define G_FW_VI_ENABLE_CMD_DCB_INFO(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_DCB_INFO) & M_FW_VI_ENABLE_CMD_DCB_INFO)
+#define F_FW_VI_ENABLE_CMD_DCB_INFO V_FW_VI_ENABLE_CMD_DCB_INFO(1U)
+
+/* VI PF stats offset definitions */
+#define VI_PF_NUM_STATS 17
+enum fw_vi_stats_pf_index {
+ FW_VI_PF_STAT_TX_BCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_BCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_MCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_MCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_UCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_UCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_OFLD_BYTES_IX,
+ FW_VI_PF_STAT_TX_OFLD_FRAMES_IX,
+ FW_VI_PF_STAT_RX_BYTES_IX,
+ FW_VI_PF_STAT_RX_FRAMES_IX,
+ FW_VI_PF_STAT_RX_BCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_BCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_MCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_MCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_UCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_UCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_ERR_FRAMES_IX
+};
+
+struct fw_vi_stats_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ union fw_vi_stats {
+ struct fw_vi_stats_ctl {
+ __be16 nstats_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_vi_stats_pf {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_pf_bytes;
+ __be64 rx_pf_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } pf;
+ struct fw_vi_stats_vf {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_drop_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } vf;
+ } u;
+};
+
+/* port capabilities bitmap */
+enum fw_port_cap {
+ FW_PORT_CAP_SPEED_100M = 0x0001,
+ FW_PORT_CAP_SPEED_1G = 0x0002,
+ FW_PORT_CAP_SPEED_2_5G = 0x0004,
+ FW_PORT_CAP_SPEED_10G = 0x0008,
+ FW_PORT_CAP_SPEED_40G = 0x0010,
+ FW_PORT_CAP_SPEED_100G = 0x0020,
+ FW_PORT_CAP_FC_RX = 0x0040,
+ FW_PORT_CAP_FC_TX = 0x0080,
+ FW_PORT_CAP_ANEG = 0x0100,
+ FW_PORT_CAP_MDIX = 0x0200,
+ FW_PORT_CAP_MDIAUTO = 0x0400,
+ FW_PORT_CAP_FEC = 0x0800,
+ FW_PORT_CAP_TECHKR = 0x1000,
+ FW_PORT_CAP_TECHKX4 = 0x2000,
+ FW_PORT_CAP_802_3_PAUSE = 0x4000,
+ FW_PORT_CAP_802_3_ASM_DIR = 0x8000,
+};
+
+enum fw_port_mdi {
+ FW_PORT_CAP_MDI_AUTO,
+};
+
+#define S_FW_PORT_CAP_MDI 9
+#define M_FW_PORT_CAP_MDI 3
+#define V_FW_PORT_CAP_MDI(x) ((x) << S_FW_PORT_CAP_MDI)
+#define G_FW_PORT_CAP_MDI(x) (((x) >> S_FW_PORT_CAP_MDI) & M_FW_PORT_CAP_MDI)
+
+enum fw_port_action {
+ FW_PORT_ACTION_L1_CFG = 0x0001,
+ FW_PORT_ACTION_GET_PORT_INFO = 0x0003,
+};
+
+struct fw_port_cmd {
+ __be32 op_to_portid;
+ __be32 action_to_len16;
+ union fw_port {
+ struct fw_port_l1cfg {
+ __be32 rcap;
+ __be32 r;
+ } l1cfg;
+ struct fw_port_l2cfg {
+ __u8 ctlbf;
+ __u8 ovlan3_to_ivlan0;
+ __be16 ivlantype;
+ __be16 txipg_force_pinfo;
+ __be16 mtu;
+ __be16 ovlan0mask;
+ __be16 ovlan0type;
+ __be16 ovlan1mask;
+ __be16 ovlan1type;
+ __be16 ovlan2mask;
+ __be16 ovlan2type;
+ __be16 ovlan3mask;
+ __be16 ovlan3type;
+ } l2cfg;
+ struct fw_port_info {
+ __be32 lstatus_to_modtype;
+ __be16 pcap;
+ __be16 acap;
+ __be16 mtu;
+ __u8 cbllen;
+ __u8 auxlinfo;
+ __u8 dcbxdis_pkd;
+ __u8 r8_lo;
+ __be16 lpacap;
+ __be64 r9;
+ } info;
+ struct fw_port_diags {
+ __u8 diagop;
+ __u8 r[3];
+ __be32 diagval;
+ } diags;
+ union fw_port_dcb {
+ struct fw_port_dcb_pgid {
+ __u8 type;
+ __u8 apply_pkd;
+ __u8 r10_lo[2];
+ __be32 pgid;
+ __be64 r11;
+ } pgid;
+ struct fw_port_dcb_pgrate {
+ __u8 type;
+ __u8 apply_pkd;
+ __u8 r10_lo[5];
+ __u8 num_tcs_supported;
+ __u8 pgrate[8];
+ __u8 tsa[8];
+ } pgrate;
+ struct fw_port_dcb_priorate {
+ __u8 type;
+ __u8 apply_pkd;
+ __u8 r10_lo[6];
+ __u8 strict_priorate[8];
+ } priorate;
+ struct fw_port_dcb_pfc {
+ __u8 type;
+ __u8 pfcen;
+ __u8 r10[5];
+ __u8 max_pfc_tcs;
+ __be64 r11;
+ } pfc;
+ struct fw_port_app_priority {
+ __u8 type;
+ __u8 r10[2];
+ __u8 idx;
+ __u8 user_prio_map;
+ __u8 sel_field;
+ __be16 protocolid;
+ __be64 r12;
+ } app_priority;
+ struct fw_port_dcb_control {
+ __u8 type;
+ __u8 all_syncd_pkd;
+ __be16 dcb_version_to_app_state;
+ __be32 r11;
+ __be64 r12;
+ } control;
+ } dcb;
+ } u;
+};
+
+#define S_FW_PORT_CMD_PORTID 0
+#define M_FW_PORT_CMD_PORTID 0xf
+#define V_FW_PORT_CMD_PORTID(x) ((x) << S_FW_PORT_CMD_PORTID)
+#define G_FW_PORT_CMD_PORTID(x) \
+ (((x) >> S_FW_PORT_CMD_PORTID) & M_FW_PORT_CMD_PORTID)
+
+#define S_FW_PORT_CMD_ACTION 16
+#define M_FW_PORT_CMD_ACTION 0xffff
+#define V_FW_PORT_CMD_ACTION(x) ((x) << S_FW_PORT_CMD_ACTION)
+#define G_FW_PORT_CMD_ACTION(x) \
+ (((x) >> S_FW_PORT_CMD_ACTION) & M_FW_PORT_CMD_ACTION)
+
+#define S_FW_PORT_CMD_LSTATUS 31
+#define M_FW_PORT_CMD_LSTATUS 0x1
+#define V_FW_PORT_CMD_LSTATUS(x) ((x) << S_FW_PORT_CMD_LSTATUS)
+#define G_FW_PORT_CMD_LSTATUS(x) \
+ (((x) >> S_FW_PORT_CMD_LSTATUS) & M_FW_PORT_CMD_LSTATUS)
+#define F_FW_PORT_CMD_LSTATUS V_FW_PORT_CMD_LSTATUS(1U)
+
+#define S_FW_PORT_CMD_LSPEED 24
+#define M_FW_PORT_CMD_LSPEED 0x3f
+#define V_FW_PORT_CMD_LSPEED(x) ((x) << S_FW_PORT_CMD_LSPEED)
+#define G_FW_PORT_CMD_LSPEED(x) \
+ (((x) >> S_FW_PORT_CMD_LSPEED) & M_FW_PORT_CMD_LSPEED)
+
+#define S_FW_PORT_CMD_TXPAUSE 23
+#define M_FW_PORT_CMD_TXPAUSE 0x1
+#define V_FW_PORT_CMD_TXPAUSE(x) ((x) << S_FW_PORT_CMD_TXPAUSE)
+#define G_FW_PORT_CMD_TXPAUSE(x) \
+ (((x) >> S_FW_PORT_CMD_TXPAUSE) & M_FW_PORT_CMD_TXPAUSE)
+#define F_FW_PORT_CMD_TXPAUSE V_FW_PORT_CMD_TXPAUSE(1U)
+
+#define S_FW_PORT_CMD_RXPAUSE 22
+#define M_FW_PORT_CMD_RXPAUSE 0x1
+#define V_FW_PORT_CMD_RXPAUSE(x) ((x) << S_FW_PORT_CMD_RXPAUSE)
+#define G_FW_PORT_CMD_RXPAUSE(x) \
+ (((x) >> S_FW_PORT_CMD_RXPAUSE) & M_FW_PORT_CMD_RXPAUSE)
+#define F_FW_PORT_CMD_RXPAUSE V_FW_PORT_CMD_RXPAUSE(1U)
+
+#define S_FW_PORT_CMD_MDIOCAP 21
+#define M_FW_PORT_CMD_MDIOCAP 0x1
+#define V_FW_PORT_CMD_MDIOCAP(x) ((x) << S_FW_PORT_CMD_MDIOCAP)
+#define G_FW_PORT_CMD_MDIOCAP(x) \
+ (((x) >> S_FW_PORT_CMD_MDIOCAP) & M_FW_PORT_CMD_MDIOCAP)
+#define F_FW_PORT_CMD_MDIOCAP V_FW_PORT_CMD_MDIOCAP(1U)
+
+#define S_FW_PORT_CMD_MDIOADDR 16
+#define M_FW_PORT_CMD_MDIOADDR 0x1f
+#define V_FW_PORT_CMD_MDIOADDR(x) ((x) << S_FW_PORT_CMD_MDIOADDR)
+#define G_FW_PORT_CMD_MDIOADDR(x) \
+ (((x) >> S_FW_PORT_CMD_MDIOADDR) & M_FW_PORT_CMD_MDIOADDR)
+
+#define S_FW_PORT_CMD_PTYPE 8
+#define M_FW_PORT_CMD_PTYPE 0x1f
+#define V_FW_PORT_CMD_PTYPE(x) ((x) << S_FW_PORT_CMD_PTYPE)
+#define G_FW_PORT_CMD_PTYPE(x) \
+ (((x) >> S_FW_PORT_CMD_PTYPE) & M_FW_PORT_CMD_PTYPE)
+
+#define S_FW_PORT_CMD_LINKDNRC 5
+#define M_FW_PORT_CMD_LINKDNRC 0x7
+#define V_FW_PORT_CMD_LINKDNRC(x) ((x) << S_FW_PORT_CMD_LINKDNRC)
+#define G_FW_PORT_CMD_LINKDNRC(x) \
+ (((x) >> S_FW_PORT_CMD_LINKDNRC) & M_FW_PORT_CMD_LINKDNRC)
+
+#define S_FW_PORT_CMD_MODTYPE 0
+#define M_FW_PORT_CMD_MODTYPE 0x1f
+#define V_FW_PORT_CMD_MODTYPE(x) ((x) << S_FW_PORT_CMD_MODTYPE)
+#define G_FW_PORT_CMD_MODTYPE(x) \
+ (((x) >> S_FW_PORT_CMD_MODTYPE) & M_FW_PORT_CMD_MODTYPE)
+
+/*
+ * These are configured into the VPD and hence tools that generate
+ * VPD may use this enumeration.
+ * extPHY #lanes T4_I2C extI2C BP_Eq BP_ANEG Speed
+ *
+ * REMEMBER:
+ * Update the Common Code t4_hw.c:t4_get_port_type_description()
+ * with any new Firmware Port Technology Types!
+ */
+enum fw_port_type {
+ FW_PORT_TYPE_FIBER_XFI = 0, /* Y, 1, N, Y, N, N, 10G */
+ FW_PORT_TYPE_FIBER_XAUI = 1, /* Y, 4, N, Y, N, N, 10G */
+ FW_PORT_TYPE_BT_SGMII = 2, /* Y, 1, No, No, No, No, 1G/100M */
+ FW_PORT_TYPE_BT_XFI = 3, /* Y, 1, No, No, No, No, 10G */
+ FW_PORT_TYPE_BT_XAUI = 4, /* Y, 4, No, No, No, No, 10G/1G/100M? */
+ FW_PORT_TYPE_KX4 = 5, /* No, 4, No, No, Yes, Yes, 10G */
+ FW_PORT_TYPE_CX4 = 6, /* No, 4, No, No, No, No, 10G */
+ FW_PORT_TYPE_KX = 7, /* No, 1, No, No, Yes, No, 1G */
+ FW_PORT_TYPE_KR = 8, /* No, 1, No, No, Yes, Yes, 10G */
+ FW_PORT_TYPE_SFP = 9, /* No, 1, Yes, No, No, No, 10G */
+ FW_PORT_TYPE_BP_AP = 10,
+ /* No, 1, No, No, Yes, Yes, 10G, BP ANEG */
+ FW_PORT_TYPE_BP4_AP = 11,
+ /* No, 4, No, No, Yes, Yes, 10G, BP ANEG */
+ FW_PORT_TYPE_QSFP_10G = 12, /* No, 1, Yes, No, No, No, 10G */
+ FW_PORT_TYPE_QSA = 13, /* No, 1, Yes, No, No, No, 10G */
+ FW_PORT_TYPE_QSFP = 14, /* No, 4, Yes, No, No, No, 40G */
+ FW_PORT_TYPE_BP40_BA = 15,
+ /* No, 4, No, No, Yes, Yes, 40G/10G/1G, BP ANEG */
+
+ FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE
+};
+
+/* These are read from module's EEPROM and determined once the
+ * module is inserted.
+ */
+enum fw_port_module_type {
+ FW_PORT_MOD_TYPE_NA = 0x0,
+ FW_PORT_MOD_TYPE_LR = 0x1,
+ FW_PORT_MOD_TYPE_SR = 0x2,
+ FW_PORT_MOD_TYPE_ER = 0x3,
+ FW_PORT_MOD_TYPE_TWINAX_PASSIVE = 0x4,
+ FW_PORT_MOD_TYPE_TWINAX_ACTIVE = 0x5,
+ FW_PORT_MOD_TYPE_LRM = 0x6,
+ FW_PORT_MOD_TYPE_ERROR = M_FW_PORT_CMD_MODTYPE - 3,
+ FW_PORT_MOD_TYPE_UNKNOWN = M_FW_PORT_CMD_MODTYPE - 2,
+ FW_PORT_MOD_TYPE_NOTSUPPORTED = M_FW_PORT_CMD_MODTYPE - 1,
+ FW_PORT_MOD_TYPE_NONE = M_FW_PORT_CMD_MODTYPE
+};
+
+/* used by FW; tools may also use this to generate VPD */
+enum fw_port_mod_sub_type {
+ FW_PORT_MOD_SUB_TYPE_NA,
+ FW_PORT_MOD_SUB_TYPE_MV88E114X = 0x1,
+ FW_PORT_MOD_SUB_TYPE_TN8022 = 0x2,
+ FW_PORT_MOD_SUB_TYPE_AQ1202 = 0x3,
+ FW_PORT_MOD_SUB_TYPE_88x3120 = 0x4,
+ FW_PORT_MOD_SUB_TYPE_BCM84834 = 0x5,
+ FW_PORT_MOD_SUB_TYPE_BCM5482 = 0x6,
+ FW_PORT_MOD_SUB_TYPE_BCM84856 = 0x7,
+ FW_PORT_MOD_SUB_TYPE_BT_VSC8634 = 0x8,
+
+ /*
+ * The following will never be in the VPD. They are TWINAX cable
+ * lengths decoded from SFP+ module i2c PROMs. These should almost
+ * certainly go somewhere else ...
+ */
+ FW_PORT_MOD_SUB_TYPE_TWINAX_1 = 0x9,
+ FW_PORT_MOD_SUB_TYPE_TWINAX_3 = 0xA,
+ FW_PORT_MOD_SUB_TYPE_TWINAX_5 = 0xB,
+ FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC,
+};
+
+/* link down reason codes (3b) */
+enum fw_port_link_dn_rc {
+ FW_PORT_LINK_DN_RC_NONE,
+ FW_PORT_LINK_DN_RC_REMFLT, /* Remote fault detected */
+ FW_PORT_LINK_DN_ANEG_F, /* Auto-negotiation fault */
+ FW_PORT_LINK_DN_RESERVED3,
+ FW_PORT_LINK_DN_OVERHEAT, /* Port overheated */
+ FW_PORT_LINK_DN_UNKNOWN, /* Unable to determine reason */
+ FW_PORT_LINK_DN_RX_LOS, /* No RX signal detected */
+ FW_PORT_LINK_DN_RESERVED7
+};
+
+/* port stats */
+#define FW_NUM_PORT_STATS 50
+#define FW_NUM_PORT_TX_STATS 23
+#define FW_NUM_PORT_RX_STATS 27
+
+enum fw_port_stats_tx_index {
+ FW_STAT_TX_PORT_BYTES_IX,
+ FW_STAT_TX_PORT_FRAMES_IX,
+ FW_STAT_TX_PORT_BCAST_IX,
+ FW_STAT_TX_PORT_MCAST_IX,
+ FW_STAT_TX_PORT_UCAST_IX,
+ FW_STAT_TX_PORT_ERROR_IX,
+ FW_STAT_TX_PORT_64B_IX,
+ FW_STAT_TX_PORT_65B_127B_IX,
+ FW_STAT_TX_PORT_128B_255B_IX,
+ FW_STAT_TX_PORT_256B_511B_IX,
+ FW_STAT_TX_PORT_512B_1023B_IX,
+ FW_STAT_TX_PORT_1024B_1518B_IX,
+ FW_STAT_TX_PORT_1519B_MAX_IX,
+ FW_STAT_TX_PORT_DROP_IX,
+ FW_STAT_TX_PORT_PAUSE_IX,
+ FW_STAT_TX_PORT_PPP0_IX,
+ FW_STAT_TX_PORT_PPP1_IX,
+ FW_STAT_TX_PORT_PPP2_IX,
+ FW_STAT_TX_PORT_PPP3_IX,
+ FW_STAT_TX_PORT_PPP4_IX,
+ FW_STAT_TX_PORT_PPP5_IX,
+ FW_STAT_TX_PORT_PPP6_IX,
+ FW_STAT_TX_PORT_PPP7_IX
+};
+
+enum fw_port_stat_rx_index {
+ FW_STAT_RX_PORT_BYTES_IX,
+ FW_STAT_RX_PORT_FRAMES_IX,
+ FW_STAT_RX_PORT_BCAST_IX,
+ FW_STAT_RX_PORT_MCAST_IX,
+ FW_STAT_RX_PORT_UCAST_IX,
+ FW_STAT_RX_PORT_MTU_ERROR_IX,
+ FW_STAT_RX_PORT_MTU_CRC_ERROR_IX,
+ FW_STAT_RX_PORT_CRC_ERROR_IX,
+ FW_STAT_RX_PORT_LEN_ERROR_IX,
+ FW_STAT_RX_PORT_SYM_ERROR_IX,
+ FW_STAT_RX_PORT_64B_IX,
+ FW_STAT_RX_PORT_65B_127B_IX,
+ FW_STAT_RX_PORT_128B_255B_IX,
+ FW_STAT_RX_PORT_256B_511B_IX,
+ FW_STAT_RX_PORT_512B_1023B_IX,
+ FW_STAT_RX_PORT_1024B_1518B_IX,
+ FW_STAT_RX_PORT_1519B_MAX_IX,
+ FW_STAT_RX_PORT_PAUSE_IX,
+ FW_STAT_RX_PORT_PPP0_IX,
+ FW_STAT_RX_PORT_PPP1_IX,
+ FW_STAT_RX_PORT_PPP2_IX,
+ FW_STAT_RX_PORT_PPP3_IX,
+ FW_STAT_RX_PORT_PPP4_IX,
+ FW_STAT_RX_PORT_PPP5_IX,
+ FW_STAT_RX_PORT_PPP6_IX,
+ FW_STAT_RX_PORT_PPP7_IX,
+ FW_STAT_RX_PORT_LESS_64B_IX
+};
+
+struct fw_port_stats_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ union fw_port_stats {
+ struct fw_port_stats_ctl {
+ __u8 nstats_bg_bm;
+ __u8 tx_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_port_stats_all {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 tx_bcast;
+ __be64 tx_mcast;
+ __be64 tx_ucast;
+ __be64 tx_error;
+ __be64 tx_64b;
+ __be64 tx_65b_127b;
+ __be64 tx_128b_255b;
+ __be64 tx_256b_511b;
+ __be64 tx_512b_1023b;
+ __be64 tx_1024b_1518b;
+ __be64 tx_1519b_max;
+ __be64 tx_drop;
+ __be64 tx_pause;
+ __be64 tx_ppp0;
+ __be64 tx_ppp1;
+ __be64 tx_ppp2;
+ __be64 tx_ppp3;
+ __be64 tx_ppp4;
+ __be64 tx_ppp5;
+ __be64 tx_ppp6;
+ __be64 tx_ppp7;
+ __be64 rx_bytes;
+ __be64 rx_frames;
+ __be64 rx_bcast;
+ __be64 rx_mcast;
+ __be64 rx_ucast;
+ __be64 rx_mtu_error;
+ __be64 rx_mtu_crc_error;
+ __be64 rx_crc_error;
+ __be64 rx_len_error;
+ __be64 rx_sym_error;
+ __be64 rx_64b;
+ __be64 rx_65b_127b;
+ __be64 rx_128b_255b;
+ __be64 rx_256b_511b;
+ __be64 rx_512b_1023b;
+ __be64 rx_1024b_1518b;
+ __be64 rx_1519b_max;
+ __be64 rx_pause;
+ __be64 rx_ppp0;
+ __be64 rx_ppp1;
+ __be64 rx_ppp2;
+ __be64 rx_ppp3;
+ __be64 rx_ppp4;
+ __be64 rx_ppp5;
+ __be64 rx_ppp6;
+ __be64 rx_ppp7;
+ __be64 rx_less_64b;
+ __be64 rx_bg_drop;
+ __be64 rx_bg_trunc;
+ } all;
+ } u;
+};
+
+struct fw_rss_ind_tbl_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ __be16 niqid;
+ __be16 startidx;
+ __be32 r3;
+ __be32 iq0_to_iq2;
+ __be32 iq3_to_iq5;
+ __be32 iq6_to_iq8;
+ __be32 iq9_to_iq11;
+ __be32 iq12_to_iq14;
+ __be32 iq15_to_iq17;
+ __be32 iq18_to_iq20;
+ __be32 iq21_to_iq23;
+ __be32 iq24_to_iq26;
+ __be32 iq27_to_iq29;
+ __be32 iq30_iq31;
+ __be32 r15_lo;
+};
+
+#define S_FW_RSS_IND_TBL_CMD_VIID 0
+#define M_FW_RSS_IND_TBL_CMD_VIID 0xfff
+#define V_FW_RSS_IND_TBL_CMD_VIID(x) ((x) << S_FW_RSS_IND_TBL_CMD_VIID)
+#define G_FW_RSS_IND_TBL_CMD_VIID(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_VIID) & M_FW_RSS_IND_TBL_CMD_VIID)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ0 20
+#define M_FW_RSS_IND_TBL_CMD_IQ0 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ0)
+#define G_FW_RSS_IND_TBL_CMD_IQ0(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ0) & M_FW_RSS_IND_TBL_CMD_IQ0)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ1 10
+#define M_FW_RSS_IND_TBL_CMD_IQ1 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ1)
+#define G_FW_RSS_IND_TBL_CMD_IQ1(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ1) & M_FW_RSS_IND_TBL_CMD_IQ1)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ2 0
+#define M_FW_RSS_IND_TBL_CMD_IQ2 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ2)
+#define G_FW_RSS_IND_TBL_CMD_IQ2(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ2) & M_FW_RSS_IND_TBL_CMD_IQ2)
+
+struct fw_rss_vi_config_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ union fw_rss_vi_config {
+ struct fw_rss_vi_config_manual {
+ __be64 r3;
+ __be64 r4;
+ __be64 r5;
+ } manual;
+ struct fw_rss_vi_config_basicvirtual {
+ __be32 r6;
+ __be32 defaultq_to_udpen;
+ __be64 r9;
+ __be64 r10;
+ } basicvirtual;
+ } u;
+};
+
+#define S_FW_RSS_VI_CONFIG_CMD_VIID 0
+#define M_FW_RSS_VI_CONFIG_CMD_VIID 0xfff
+#define V_FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_VIID)
+#define G_FW_RSS_VI_CONFIG_CMD_VIID(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_VIID) & M_FW_RSS_VI_CONFIG_CMD_VIID)
+
+#define S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 16
+#define M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 0x3ff
+#define V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ)
+#define G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) & \
+ M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 4
+#define M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 3
+#define M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 2
+#define M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 1
+#define M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_UDPEN 0
+#define M_FW_RSS_VI_CONFIG_CMD_UDPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_UDPEN(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_UDPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_UDPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_UDPEN) & M_FW_RSS_VI_CONFIG_CMD_UDPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_UDPEN V_FW_RSS_VI_CONFIG_CMD_UDPEN(1U)
+
+/******************************************************************************
+ * D E B U G C O M M A N D S
+ ******************************************************/
+
+struct fw_debug_cmd {
+ __be32 op_type;
+ __be32 len16_pkd;
+ union fw_debug {
+ struct fw_debug_assert {
+ __be32 fcid;
+ __be32 line;
+ __be32 x;
+ __be32 y;
+ __u8 filename_0_7[8];
+ __u8 filename_8_15[8];
+ __be64 r3;
+ } assert;
+ struct fw_debug_prt {
+ __be16 dprtstridx;
+ __be16 r3[3];
+ __be32 dprtstrparam0;
+ __be32 dprtstrparam1;
+ __be32 dprtstrparam2;
+ __be32 dprtstrparam3;
+ } prt;
+ } u;
+};
+
+#define S_FW_DEBUG_CMD_TYPE 0
+#define M_FW_DEBUG_CMD_TYPE 0xff
+#define V_FW_DEBUG_CMD_TYPE(x) ((x) << S_FW_DEBUG_CMD_TYPE)
+#define G_FW_DEBUG_CMD_TYPE(x) \
+ (((x) >> S_FW_DEBUG_CMD_TYPE) & M_FW_DEBUG_CMD_TYPE)
+
+/******************************************************************************
+ * P C I E F W R E G I S T E R
+ **************************************/
+
+/*
+ * Register definitions for the PCIE_FW register which the firmware uses
+ * to retain status across RESETs. Host Software should treat this
+ * register as READ-ONLY and use it only to track firmware
+ * initialization/error state, etc.
+ */
+#define S_PCIE_FW_ERR 31
+#define M_PCIE_FW_ERR 0x1
+#define V_PCIE_FW_ERR(x) ((x) << S_PCIE_FW_ERR)
+#define G_PCIE_FW_ERR(x) (((x) >> S_PCIE_FW_ERR) & M_PCIE_FW_ERR)
+#define F_PCIE_FW_ERR V_PCIE_FW_ERR(1U)
+
+#define S_PCIE_FW_INIT 30
+#define M_PCIE_FW_INIT 0x1
+#define V_PCIE_FW_INIT(x) ((x) << S_PCIE_FW_INIT)
+#define G_PCIE_FW_INIT(x) (((x) >> S_PCIE_FW_INIT) & M_PCIE_FW_INIT)
+#define F_PCIE_FW_INIT V_PCIE_FW_INIT(1U)
+
+#define S_PCIE_FW_HALT 29
+#define M_PCIE_FW_HALT 0x1
+#define V_PCIE_FW_HALT(x) ((x) << S_PCIE_FW_HALT)
+#define G_PCIE_FW_HALT(x) (((x) >> S_PCIE_FW_HALT) & M_PCIE_FW_HALT)
+#define F_PCIE_FW_HALT V_PCIE_FW_HALT(1U)
+
+#define S_PCIE_FW_EVAL 24
+#define M_PCIE_FW_EVAL 0x7
+#define V_PCIE_FW_EVAL(x) ((x) << S_PCIE_FW_EVAL)
+#define G_PCIE_FW_EVAL(x) (((x) >> S_PCIE_FW_EVAL) & M_PCIE_FW_EVAL)
+
+#define S_PCIE_FW_MASTER_VLD 15
+#define M_PCIE_FW_MASTER_VLD 0x1
+#define V_PCIE_FW_MASTER_VLD(x) ((x) << S_PCIE_FW_MASTER_VLD)
+#define G_PCIE_FW_MASTER_VLD(x) \
+ (((x) >> S_PCIE_FW_MASTER_VLD) & M_PCIE_FW_MASTER_VLD)
+#define F_PCIE_FW_MASTER_VLD V_PCIE_FW_MASTER_VLD(1U)
+
+#define S_PCIE_FW_MASTER 12
+#define M_PCIE_FW_MASTER 0x7
+#define V_PCIE_FW_MASTER(x) ((x) << S_PCIE_FW_MASTER)
+#define G_PCIE_FW_MASTER(x) (((x) >> S_PCIE_FW_MASTER) & M_PCIE_FW_MASTER)
+
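Per the comment above, host code only ever decodes PCIE_FW. A hedged sketch of such a decode; read_pcie_fw() is a stand-in for whatever register-read routine the driver actually uses (e.g. a t4_read_reg() wrapper) and is not defined in this patch:

    #include <stdint.h>
    #include <stdio.h>

    extern uint32_t read_pcie_fw(void);    /* hypothetical accessor */

    static void report_fw_state(void)
    {
        uint32_t pcie_fw = read_pcie_fw();

        if (pcie_fw & F_PCIE_FW_ERR)
            printf("fw error, eval code %u\n", G_PCIE_FW_EVAL(pcie_fw));
        else if (pcie_fw & F_PCIE_FW_INIT)
            printf("fw initialization complete\n");

        if (pcie_fw & F_PCIE_FW_MASTER_VLD)
            printf("master PF is %u\n", G_PCIE_FW_MASTER(pcie_fw));
    }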
+/******************************************************************************
+ * B I N A R Y H E A D E R F O R M A T
+ **********************************************/
+
+/*
+ * firmware binary header format
+ */
+struct fw_hdr {
+ __u8 ver;
+ __u8 chip; /* terminator chip family */
+ __be16 len512; /* bin length in units of 512-bytes */
+ __be32 fw_ver; /* firmware version */
+ __be32 tp_microcode_ver; /* tcp processor microcode version */
+ __u8 intfver_nic;
+ __u8 intfver_vnic;
+ __u8 intfver_ofld;
+ __u8 intfver_ri;
+ __u8 intfver_iscsipdu;
+ __u8 intfver_iscsi;
+ __u8 intfver_fcoepdu;
+ __u8 intfver_fcoe;
+ __u32 reserved2;
+ __u32 reserved3;
+ __u32 magic; /* runtime or bootstrap fw */
+ __be32 flags;
+ __be32 reserved6[23];
+};
+
+#define S_FW_HDR_FW_VER_MAJOR 24
+#define M_FW_HDR_FW_VER_MAJOR 0xff
+#define V_FW_HDR_FW_VER_MAJOR(x) \
+ ((x) << S_FW_HDR_FW_VER_MAJOR)
+#define G_FW_HDR_FW_VER_MAJOR(x) \
+ (((x) >> S_FW_HDR_FW_VER_MAJOR) & M_FW_HDR_FW_VER_MAJOR)
+
+#define S_FW_HDR_FW_VER_MINOR 16
+#define M_FW_HDR_FW_VER_MINOR 0xff
+#define V_FW_HDR_FW_VER_MINOR(x) \
+ ((x) << S_FW_HDR_FW_VER_MINOR)
+#define G_FW_HDR_FW_VER_MINOR(x) \
+ (((x) >> S_FW_HDR_FW_VER_MINOR) & M_FW_HDR_FW_VER_MINOR)
+
+#define S_FW_HDR_FW_VER_MICRO 8
+#define M_FW_HDR_FW_VER_MICRO 0xff
+#define V_FW_HDR_FW_VER_MICRO(x) \
+ ((x) << S_FW_HDR_FW_VER_MICRO)
+#define G_FW_HDR_FW_VER_MICRO(x) \
+ (((x) >> S_FW_HDR_FW_VER_MICRO) & M_FW_HDR_FW_VER_MICRO)
+
+#define S_FW_HDR_FW_VER_BUILD 0
+#define M_FW_HDR_FW_VER_BUILD 0xff
+#define V_FW_HDR_FW_VER_BUILD(x) \
+ ((x) << S_FW_HDR_FW_VER_BUILD)
+#define G_FW_HDR_FW_VER_BUILD(x) \
+ (((x) >> S_FW_HDR_FW_VER_BUILD) & M_FW_HDR_FW_VER_BUILD)
+
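The fw_ver word in struct fw_hdr packs major/minor/micro/build as one byte each; decoding it is a direct application of the getters above. A sketch, assuming the struct and macros are in scope (be32_to_cpu() matches the mapping cxgbe_compat.h introduces later in this patch):

    #include <stdint.h>
    #include <stdio.h>

    static void print_fw_version(const struct fw_hdr *hdr)
    {
        uint32_t v = be32_to_cpu(hdr->fw_ver);

        printf("firmware %u.%u.%u.%u\n",
               G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
               G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v));
    }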
+#endif /* _T4FW_INTERFACE_H_ */
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_interrupts.c b/src/dpdk22/drivers/net/cxgbe/cxgbe.h
index cb7d4f13..0201c990 100755..100644
--- a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_interrupts.c
+++ b/src/dpdk22/drivers/net/cxgbe/cxgbe.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014-2015 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -14,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Intel Corporation nor the names of its
+ * * Neither the name of Chelsio Communications nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -31,41 +31,33 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <rte_common.h>
-#include <rte_interrupts.h>
-#include "eal_private.h"
+#ifndef _CXGBE_H_
+#define _CXGBE_H_
-int
-rte_intr_callback_register(struct rte_intr_handle *intr_handle __rte_unused,
- rte_intr_callback_fn cb __rte_unused,
- void *cb_arg __rte_unused)
-{
- return -ENOTSUP;
-}
+#include "common.h"
+#include "t4_regs.h"
-int
-rte_intr_callback_unregister(struct rte_intr_handle *intr_handle __rte_unused,
- rte_intr_callback_fn cb_fn __rte_unused,
- void *cb_arg __rte_unused)
-{
- return -ENOTSUP;
-}
+#define CXGBE_MIN_RING_DESC_SIZE 128 /* Min TX/RX descriptor ring size */
+#define CXGBE_MAX_RING_DESC_SIZE 4096 /* Max TX/RX descriptor ring size */
-int
-rte_intr_enable(struct rte_intr_handle *intr_handle __rte_unused)
-{
- return -ENOTSUP;
-}
+#define CXGBE_DEFAULT_TX_DESC_SIZE 1024 /* Default TX ring size */
+#define CXGBE_DEFAULT_RX_DESC_SIZE 1024 /* Default RX ring size */
-int
-rte_intr_disable(struct rte_intr_handle *intr_handle __rte_unused)
-{
- return -ENOTSUP;
-}
+#define CXGBE_MIN_RX_BUFSIZE ETHER_MIN_MTU /* min buf size */
+#define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN) /* max pkt */
-int
-rte_eal_intr_init(void)
-{
- return 0;
-}
+int cxgbe_probe(struct adapter *adapter);
+int cxgbe_up(struct adapter *adap);
+int cxgbe_down(struct port_info *pi);
+void cxgbe_close(struct adapter *adapter);
+void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats);
+void cxgbe_stats_reset(struct port_info *pi);
+int link_start(struct port_info *pi);
+void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us,
+ unsigned int cnt, unsigned int size, unsigned int iqe_size);
+int setup_sge_fwevtq(struct adapter *adapter);
+void cfg_queues(struct rte_eth_dev *eth_dev);
+int cfg_queue_count(struct rte_eth_dev *eth_dev);
+int setup_rss(struct port_info *pi);
+#endif /* _CXGBE_H_ */
diff --git a/src/dpdk22/drivers/net/cxgbe/cxgbe_compat.h b/src/dpdk22/drivers/net/cxgbe/cxgbe_compat.h
new file mode 100644
index 00000000..e68f8f59
--- /dev/null
+++ b/src/dpdk22/drivers/net/cxgbe/cxgbe_compat.h
@@ -0,0 +1,266 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CXGBE_COMPAT_H_
+#define _CXGBE_COMPAT_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_memcpy.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#define dev_printf(level, fmt, args...) \
+ RTE_LOG(level, PMD, "rte_cxgbe_pmd: " fmt, ## args)
+
+#define dev_err(x, args...) dev_printf(ERR, args)
+#define dev_info(x, args...) dev_printf(INFO, args)
+#define dev_warn(x, args...) dev_printf(WARNING, args)
+
+#ifdef RTE_LIBRTE_CXGBE_DEBUG
+#define dev_debug(x, args...) dev_printf(DEBUG, args)
+#else
+#define dev_debug(x, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_CXGBE_DEBUG_REG
+#define CXGBE_DEBUG_REG(x, args...) dev_printf(DEBUG, "REG:" args)
+#else
+#define CXGBE_DEBUG_REG(x, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_CXGBE_DEBUG_MBOX
+#define CXGBE_DEBUG_MBOX(x, args...) dev_printf(DEBUG, "MBOX:" args)
+#else
+#define CXGBE_DEBUG_MBOX(x, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_CXGBE_DEBUG_TX
+#define CXGBE_DEBUG_TX(x, args...) dev_printf(DEBUG, "TX:" args)
+#else
+#define CXGBE_DEBUG_TX(x, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_CXGBE_DEBUG_RX
+#define CXGBE_DEBUG_RX(x, args...) dev_printf(DEBUG, "RX:" args)
+#else
+#define CXGBE_DEBUG_RX(x, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_CXGBE_DEBUG
+#define CXGBE_FUNC_TRACE() \
+ RTE_LOG(DEBUG, PMD, "CXGBE trace: %s\n", __func__)
+#else
+#define CXGBE_FUNC_TRACE() do { } while (0)
+#endif
+
+#define pr_err(y, args...) dev_err(0, y, ##args)
+#define pr_warn(y, args...) dev_warn(0, y, ##args)
+#define pr_info(y, args...) dev_info(0, y, ##args)
+#define BUG() pr_err("BUG at %s:%d", __func__, __LINE__)
+
+#define ASSERT(x) do {\
+ if (!(x)) \
+ rte_panic("CXGBE: x"); \
+} while (0)
+#define BUG_ON(x) ASSERT(!(x))
+
+#ifndef WARN_ON
+#define WARN_ON(x) do { \
+ int ret = !!(x); \
+ if (unlikely(ret)) \
+ pr_warn("WARN_ON: \"" #x "\" at %s:%d\n", __func__, __LINE__); \
+} while (0)
+#endif
+
+#define __iomem
+
+#ifndef BIT
+#define BIT(n) (1 << (n))
+#endif
+
+#define L1_CACHE_SHIFT 6
+#define L1_CACHE_BYTES BIT(L1_CACHE_SHIFT)
+
+#define PAGE_SHIFT 12
+#define CXGBE_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
+#define PTR_ALIGN(p, a) ((typeof(p))CXGBE_ALIGN((unsigned long)(p), (a)))
+
+#define VLAN_HLEN 4
+
+#define rmb() rte_rmb() /* dpdk rte provided rmb */
+#define wmb() rte_wmb() /* dpdk rte provided wmb */
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+typedef int bool;
+typedef uint64_t dma_addr_t;
+
+#ifndef __le16
+#define __le16 uint16_t
+#endif
+#ifndef __le32
+#define __le32 uint32_t
+#endif
+#ifndef __le64
+#define __le64 uint64_t
+#endif
+#ifndef __be16
+#define __be16 uint16_t
+#endif
+#ifndef __be32
+#define __be32 uint32_t
+#endif
+#ifndef __be64
+#define __be64 uint64_t
+#endif
+#ifndef __u8
+#define __u8 uint8_t
+#endif
+#ifndef __u16
+#define __u16 uint16_t
+#endif
+#ifndef __u32
+#define __u32 uint32_t
+#endif
+#ifndef __u64
+#define __u64 uint64_t
+#endif
+
+#define FALSE 0
+#define TRUE 1
+#define false 0
+#define true 1
+
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
+
+/*
+ * round up val _p to the next multiple of _s, where _s is a power of 2
+ */
+#define cxgbe_roundup(_p, _s) (((unsigned long)(_p) + (_s - 1)) & ~(_s - 1))
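The mask trick only works when _s is a power of two; for example, cxgbe_roundup(100, 64) evaluates to (100 + 63) & ~63 == 128, while a non-power-of-two _s would give a wrong result.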
+
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
+
+#define cpu_to_be16(o) rte_cpu_to_be_16(o)
+#define cpu_to_be32(o) rte_cpu_to_be_32(o)
+#define cpu_to_be64(o) rte_cpu_to_be_64(o)
+#define cpu_to_le32(o) rte_cpu_to_le_32(o)
+#define be16_to_cpu(o) rte_be_to_cpu_16(o)
+#define be32_to_cpu(o) rte_be_to_cpu_32(o)
+#define be64_to_cpu(o) rte_be_to_cpu_64(o)
+#define le32_to_cpu(o) rte_le_to_cpu_32(o)
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define DELAY(x) rte_delay_us(x)
+#define udelay(x) DELAY(x)
+#define msleep(x) DELAY(1000 * (x))
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+static inline uint8_t hweight32(uint32_t word32)
+{
+ uint32_t res = word32 - ((word32 >> 1) & 0x55555555);
+
+ res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+ res = (res + (res >> 4)) & 0x0F0F0F0F;
+ res = res + (res >> 8);
+ return (res + (res >> 16)) & 0x000000FF;
+} /* hweight32 */
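hweight32() is the classic branch-free SWAR population count: each step sums adjacent 1-, 2-, then 4-bit groups in parallel, then folds the byte sums together, so e.g. hweight32(0xF0F0F0F0) returns 16.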
+
+/**
+ * cxgbe_fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note cxgbe_fls(0) = 0, cxgbe_fls(1) = 1, cxgbe_fls(0x80000000) = 32.
+ */
+static inline int cxgbe_fls(int x)
+{
+ return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
+}
+
+static inline unsigned long ilog2(unsigned long n)
+{
+ unsigned int e = 0;
+
+ while (n) {
+ if (n & ~((1 << 8) - 1)) {
+ e += 8;
+ n >>= 8;
+ continue;
+ }
+
+ if (n & ~((1 << 4) - 1)) {
+ e += 4;
+ n >>= 4;
+ }
+
+ for (;;) {
+ n >>= 1;
+ if (n == 0)
+ break;
+ e++;
+ }
+ }
+
+ return e;
+}
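As written, ilog2() returns floor(log2(n)) for n > 0 and 0 for n == 0; e.g. ilog2(4096) == 12, consistent with PAGE_SHIFT above.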
+
+static inline void writel(unsigned int val, volatile void __iomem *addr)
+{
+ *(volatile unsigned int *)addr = val;
+}
+
+static inline void writeq(u64 val, volatile void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val >> 32, (void *)((uintptr_t)addr + 4));
+}
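Note that writeq() emulates the 64-bit store as two 32-bit stores, low word first; this assumes a little-endian register pair and is not atomic from the device's point of view.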
+
+#endif /* _CXGBE_COMPAT_H_ */
diff --git a/src/dpdk22/drivers/net/cxgbe/cxgbe_ethdev.c b/src/dpdk22/drivers/net/cxgbe/cxgbe_ethdev.c
new file mode 100644
index 00000000..97ef152e
--- /dev/null
+++ b/src/dpdk22/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -0,0 +1,879 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+
+#include "cxgbe.h"
+
+/*
+ * Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ static struct rte_pci_id cxgb4_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+
+#define PCI_VENDOR_ID_CHELSIO 0x1425
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
+ { .vendor_id = 0, } \
+ }
+
+/*
+ * ... and the PCI ID Table itself ...
+ */
+#include "t4_pci_id_tbl.h"
+
+static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
+ uint16_t pkts_sent, pkts_remain;
+ uint16_t total_sent = 0;
+ int ret = 0;
+
+ CXGBE_DEBUG_TX(adapter, "%s: txq = %p; tx_pkts = %p; nb_pkts = %d\n",
+ __func__, txq, tx_pkts, nb_pkts);
+
+ t4_os_lock(&txq->txq_lock);
+ /* free up desc from already completed tx */
+ reclaim_completed_tx(&txq->q);
+ while (total_sent < nb_pkts) {
+ pkts_remain = nb_pkts - total_sent;
+
+ for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
+ ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent]);
+ if (ret < 0)
+ break;
+ }
+ if (!pkts_sent)
+ break;
+ total_sent += pkts_sent;
+ /* reclaim as much as possible */
+ reclaim_completed_tx(&txq->q);
+ }
+
+ t4_os_unlock(&txq->txq_lock);
+ return total_sent;
+}
+
+static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
+ unsigned int work_done;
+
+ CXGBE_DEBUG_RX(adapter, "%s: rxq->rspq.cntxt_id = %u; nb_pkts = %d\n",
+ __func__, rxq->rspq.cntxt_id, nb_pkts);
+
+ if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
+ dev_err(adapter, "error in cxgbe poll\n");
+
+ CXGBE_DEBUG_RX(adapter, "%s: work_done = %u\n", __func__, work_done);
+ return work_done;
+}
+
+static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *device_info)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;
+
+ static const struct rte_eth_desc_lim cxgbe_desc_lim = {
+ .nb_max = CXGBE_MAX_RING_DESC_SIZE,
+ .nb_min = CXGBE_MIN_RING_DESC_SIZE,
+ .nb_align = 1,
+ };
+
+ device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
+ device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
+ device_info->max_rx_queues = max_queues;
+ device_info->max_tx_queues = max_queues;
+ device_info->max_mac_addrs = 1;
+ /* XXX: For now we support one MAC/port */
+ device_info->max_vfs = adapter->params.arch.vfcount;
+ device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */
+
+ device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+
+ device_info->reta_size = pi->rss_size;
+
+ device_info->rx_desc_lim = cxgbe_desc_lim;
+ device_info->tx_desc_lim = cxgbe_desc_lim;
+}
+
+static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+
+ t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ 1, -1, 1, -1, false);
+}
+
+static void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+
+ t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ 0, -1, 1, -1, false);
+}
+
+static void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+
+ /* TODO: address filters ?? */
+
+ t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ -1, 1, 1, -1, false);
+}
+
+static void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+
+ /* TODO: address filters ?? */
+
+ t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ -1, 0, 1, -1, false);
+}
+
+static int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
+ __rte_unused int wait_to_complete)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct rte_eth_link *old_link = &eth_dev->data->dev_link;
+ unsigned int work_done, budget = 4;
+
+ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+ if (old_link->link_status == pi->link_cfg.link_ok)
+ return -1; /* link not changed */
+
+ eth_dev->data->dev_link.link_status = pi->link_cfg.link_ok;
+ eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ eth_dev->data->dev_link.link_speed = pi->link_cfg.speed;
+
+ /* link has changed */
+ return 0;
+}
+
+static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct rte_eth_dev_info dev_info;
+ int err;
+ uint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ cxgbe_dev_info_get(eth_dev, &dev_info);
+
+ /* Must accommodate at least ETHER_MIN_MTU */
+ if ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen))
+ return -EINVAL;
+
+ /* set to jumbo mode if needed */
+ if (new_mtu > ETHER_MAX_LEN)
+ eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+ err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
+ -1, -1, true);
+ if (!err)
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;
+
+ return err;
+}
+
+static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id);
+static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id);
+static void cxgbe_dev_tx_queue_release(void *q);
+static void cxgbe_dev_rx_queue_release(void *q);
+
+/*
+ * Stop device.
+ */
+static void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ int i, dev_down = 0;
+
+ CXGBE_FUNC_TRACE();
+
+ if (!(adapter->flags & FULL_INIT_DONE))
+ return;
+
+ cxgbe_down(pi);
+
+ /*
+ * We clear queues only if both tx and rx path of the port
+ * have been disabled
+ */
+ t4_sge_eth_clear_queues(pi);
+
+ /* See if all ports are down */
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ /*
+ * Skip first port of the adapter since it will be closed
+ * by DPDK
+ */
+ if (i == 0)
+ continue;
+ dev_down += (pi->eth_dev->data->dev_started == 0) ? 1 : 0;
+ }
+
+ /* If rest of the ports are stopped, then free up resources */
+ if (dev_down == (adapter->params.nports - 1))
+ cxgbe_close(adapter);
+}
+
+/* Start the device.
+ * It returns 0 on success.
+ */
+static int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ int err = 0, i;
+
+ CXGBE_FUNC_TRACE();
+
+ /*
+ * If we don't have a connection to the firmware there's nothing we
+ * can do.
+ */
+ if (!(adapter->flags & FW_OK)) {
+ err = -ENXIO;
+ goto out;
+ }
+
+ if (!(adapter->flags & FULL_INIT_DONE)) {
+ err = cxgbe_up(adapter);
+ if (err < 0)
+ goto out;
+ }
+
+ err = setup_rss(pi);
+ if (err)
+ goto out;
+
+ for (i = 0; i < pi->n_tx_qsets; i++) {
+ err = cxgbe_dev_tx_queue_start(eth_dev, i);
+ if (err)
+ goto out;
+ }
+
+ for (i = 0; i < pi->n_rx_qsets; i++) {
+ err = cxgbe_dev_rx_queue_start(eth_dev, i);
+ if (err)
+ goto out;
+ }
+
+ err = link_start(pi);
+ if (err)
+ goto out;
+
+out:
+ return err;
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+static void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+
+ CXGBE_FUNC_TRACE();
+
+ if (!(adapter->flags & FULL_INIT_DONE))
+ return;
+
+ cxgbe_down(pi);
+
+ /*
+ * We clear queues only if both tx and rx path of the port
+ * have been disabled
+ */
+ t4_sge_eth_clear_queues(pi);
+}
+
+static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ int err;
+
+ CXGBE_FUNC_TRACE();
+
+ if (!(adapter->flags & FW_QUEUE_BOUND)) {
+ err = setup_sge_fwevtq(adapter);
+ if (err)
+ return err;
+ adapter->flags |= FW_QUEUE_BOUND;
+ }
+
+ err = cfg_queue_count(eth_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id)
+{
+ int ret;
+ struct sge_eth_txq *txq = (struct sge_eth_txq *)
+ (eth_dev->data->tx_queues[tx_queue_id]);
+
+ dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
+
+ ret = t4_sge_eth_txq_start(txq);
+ if (ret == 0)
+ eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return ret;
+}
+
+static int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id)
+{
+ int ret;
+ struct sge_eth_txq *txq = (struct sge_eth_txq *)
+ (eth_dev->data->tx_queues[tx_queue_id]);
+
+ dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
+
+ ret = t4_sge_eth_txq_stop(txq);
+ if (ret == 0)
+ eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return ret;
+}
+
+static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
+ int err = 0;
+ unsigned int temp_nb_desc;
+
+ RTE_SET_USED(tx_conf);
+
+ dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
+ __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
+ socket_id, pi->first_qset);
+
+ /* Free up the existing queue */
+ if (eth_dev->data->tx_queues[queue_idx]) {
+ cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
+ eth_dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ eth_dev->data->tx_queues[queue_idx] = (void *)txq;
+
+ /* Sanity Checking
+ *
+ * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and <= CXGBE_MAX_RING_DESC_SIZE
+ */
+ temp_nb_desc = nb_desc;
+ if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
+ dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
+ __func__, CXGBE_MIN_RING_DESC_SIZE,
+ CXGBE_DEFAULT_TX_DESC_SIZE);
+ temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
+ } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
+ dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
+ __func__, CXGBE_MIN_RING_DESC_SIZE,
+ CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
+ return -(EINVAL);
+ }
+
+ txq->q.size = temp_nb_desc;
+
+ err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
+ s->fw_evtq.cntxt_id, socket_id);
+
+ dev_debug(adapter, "%s: txq->q.cntxt_id= %d err = %d\n",
+ __func__, txq->q.cntxt_id, err);
+
+ return err;
+}
+
+static void cxgbe_dev_tx_queue_release(void *q)
+{
+ struct sge_eth_txq *txq = (struct sge_eth_txq *)q;
+
+ if (txq) {
+ struct port_info *pi = (struct port_info *)
+ (txq->eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+
+ dev_debug(adap, "%s: pi->port_id = %d; tx_queue_id = %d\n",
+ __func__, pi->port_id, txq->q.cntxt_id);
+
+ t4_sge_eth_txq_release(adap, txq);
+ }
+}
+
+static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id)
+{
+ int ret;
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+ struct sge_rspq *q;
+
+ dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
+ __func__, pi->port_id, rx_queue_id);
+
+ q = eth_dev->data->rx_queues[rx_queue_id];
+
+ ret = t4_sge_eth_rxq_start(adap, q);
+ if (ret == 0)
+ eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return ret;
+}
+
+static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id)
+{
+ int ret;
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+ struct sge_rspq *q;
+
+ dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
+ __func__, pi->port_id, rx_queue_id);
+
+ q = eth_dev->data->rx_queues[rx_queue_id];
+ ret = t4_sge_eth_rxq_stop(adap, q);
+ if (ret == 0)
+ eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return ret;
+}
+
+static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
+ int err = 0;
+ int msi_idx = 0;
+ unsigned int temp_nb_desc;
+ struct rte_eth_dev_info dev_info;
+ unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ RTE_SET_USED(rx_conf);
+
+ dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
+ __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
+ socket_id, mp);
+
+ cxgbe_dev_info_get(eth_dev, &dev_info);
+
+ /* Must accommodate at least ETHER_MIN_MTU */
+ if ((pkt_len < dev_info.min_rx_bufsize) ||
+ (pkt_len > dev_info.max_rx_pktlen)) {
+ dev_err(adapter, "%s: max pkt len must be > %d and <= %d\n",
+ __func__, dev_info.min_rx_bufsize,
+ dev_info.max_rx_pktlen);
+ return -EINVAL;
+ }
+
+ /* Free up the existing queue */
+ if (eth_dev->data->rx_queues[queue_idx]) {
+ cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
+ eth_dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ eth_dev->data->rx_queues[queue_idx] = (void *)rxq;
+
+ /* Sanity Checking
+ *
+ * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and <= CXGBE_MAX_RING_DESC_SIZE
+ */
+ temp_nb_desc = nb_desc;
+ if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
+ dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
+ __func__, CXGBE_MIN_RING_DESC_SIZE,
+ CXGBE_DEFAULT_RX_DESC_SIZE);
+ temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
+ } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
+ dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
+ __func__, CXGBE_MIN_RING_DESC_SIZE,
+ CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
+ return -(EINVAL);
+ }
+
+ rxq->rspq.size = temp_nb_desc;
+ rxq->fl.size = temp_nb_desc;
+
+ /* Set to jumbo mode if necessary */
+ if (pkt_len > ETHER_MAX_LEN)
+ eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+ err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
+ &rxq->fl, t4_ethrx_handler,
+ t4_get_mps_bg_map(adapter, pi->tx_chan), mp,
+ queue_idx, socket_id);
+
+ dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n",
+ __func__, err, pi->port_id, rxq->rspq.cntxt_id);
+ return err;
+}
+
+static void cxgbe_dev_rx_queue_release(void *q)
+{
+ struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
+ struct sge_rspq *rq = &rxq->rspq;
+
+ if (rq) {
+ struct port_info *pi = (struct port_info *)
+ (rq->eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+
+ dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
+ __func__, pi->port_id, rxq->rspq.cntxt_id);
+
+ t4_sge_eth_rxq_release(adap, rxq);
+ }
+}
+
+/*
+ * Get port statistics.
+ */
+static void cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *eth_stats)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct port_stats ps;
+ unsigned int i;
+
+ cxgbe_stats_get(pi, &ps);
+
+ /* RX Stats */
+ eth_stats->ipackets = ps.rx_frames;
+ eth_stats->ibytes = ps.rx_octets;
+ eth_stats->imcasts = ps.rx_mcast_frames;
+ eth_stats->imissed = ps.rx_ovflow0 + ps.rx_ovflow1 +
+ ps.rx_ovflow2 + ps.rx_ovflow3 +
+ ps.rx_trunc0 + ps.rx_trunc1 +
+ ps.rx_trunc2 + ps.rx_trunc3;
+ eth_stats->ierrors = ps.rx_symbol_err + ps.rx_fcs_err +
+ ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
+ ps.rx_len_err + eth_stats->imissed;
+
+ /* TX Stats */
+ eth_stats->opackets = ps.tx_frames;
+ eth_stats->obytes = ps.tx_octets;
+ eth_stats->oerrors = ps.tx_error_frames;
+
+ for (i = 0; i < pi->n_rx_qsets; i++) {
+ struct sge_eth_rxq *rxq =
+ &s->ethrxq[pi->first_qset + i];
+
+ eth_stats->q_ipackets[i] = rxq->stats.pkts;
+ eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
+ }
+
+ for (i = 0; i < pi->n_tx_qsets; i++) {
+ struct sge_eth_txq *txq =
+ &s->ethtxq[pi->first_qset + i];
+
+ eth_stats->q_opackets[i] = txq->stats.pkts;
+ eth_stats->q_obytes[i] = txq->stats.tx_bytes;
+ eth_stats->q_errors[i] = txq->stats.mapping_err;
+ }
+}
+
+/*
+ * Reset port statistics.
+ */
+static void cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ unsigned int i;
+
+ cxgbe_stats_reset(pi);
+ for (i = 0; i < pi->n_rx_qsets; i++) {
+ struct sge_eth_rxq *rxq =
+ &s->ethrxq[pi->first_qset + i];
+
+ rxq->stats.pkts = 0;
+ rxq->stats.rx_bytes = 0;
+ }
+ for (i = 0; i < pi->n_tx_qsets; i++) {
+ struct sge_eth_txq *txq =
+ &s->ethtxq[pi->first_qset + i];
+
+ txq->stats.pkts = 0;
+ txq->stats.tx_bytes = 0;
+ txq->stats.mapping_err = 0;
+ }
+}
+
+static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct link_config *lc = &pi->link_cfg;
+ int rx_pause, tx_pause;
+
+ fc_conf->autoneg = lc->fc & PAUSE_AUTONEG;
+ rx_pause = lc->fc & PAUSE_RX;
+ tx_pause = lc->fc & PAUSE_TX;
+
+ if (rx_pause && tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+ return 0;
+}
+
+static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct link_config *lc = &pi->link_cfg;
+
+ if (lc->supported & FW_PORT_CAP_ANEG) {
+ if (fc_conf->autoneg)
+ lc->requested_fc |= PAUSE_AUTONEG;
+ else
+ lc->requested_fc &= ~PAUSE_AUTONEG;
+ }
+
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_RX_PAUSE))
+ lc->requested_fc |= PAUSE_RX;
+ else
+ lc->requested_fc &= ~PAUSE_RX;
+
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_TX_PAUSE))
+ lc->requested_fc |= PAUSE_TX;
+ else
+ lc->requested_fc &= ~PAUSE_TX;
+
+ return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
+ &pi->link_cfg);
+}
+
+static struct eth_dev_ops cxgbe_eth_dev_ops = {
+ .dev_start = cxgbe_dev_start,
+ .dev_stop = cxgbe_dev_stop,
+ .dev_close = cxgbe_dev_close,
+ .promiscuous_enable = cxgbe_dev_promiscuous_enable,
+ .promiscuous_disable = cxgbe_dev_promiscuous_disable,
+ .allmulticast_enable = cxgbe_dev_allmulticast_enable,
+ .allmulticast_disable = cxgbe_dev_allmulticast_disable,
+ .dev_configure = cxgbe_dev_configure,
+ .dev_infos_get = cxgbe_dev_info_get,
+ .link_update = cxgbe_dev_link_update,
+ .mtu_set = cxgbe_dev_mtu_set,
+ .tx_queue_setup = cxgbe_dev_tx_queue_setup,
+ .tx_queue_start = cxgbe_dev_tx_queue_start,
+ .tx_queue_stop = cxgbe_dev_tx_queue_stop,
+ .tx_queue_release = cxgbe_dev_tx_queue_release,
+ .rx_queue_setup = cxgbe_dev_rx_queue_setup,
+ .rx_queue_start = cxgbe_dev_rx_queue_start,
+ .rx_queue_stop = cxgbe_dev_rx_queue_stop,
+ .rx_queue_release = cxgbe_dev_rx_queue_release,
+ .stats_get = cxgbe_dev_stats_get,
+ .stats_reset = cxgbe_dev_stats_reset,
+ .flow_ctrl_get = cxgbe_flow_ctrl_get,
+ .flow_ctrl_set = cxgbe_flow_ctrl_set,
+};
+
+/*
+ * Initialize driver
+ * It returns 0 on success.
+ */
+static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = NULL;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ int err = 0;
+
+ CXGBE_FUNC_TRACE();
+
+ eth_dev->dev_ops = &cxgbe_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pci_dev = eth_dev->pci_dev;
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
+ adapter = rte_zmalloc(name, sizeof(*adapter), 0);
+ if (!adapter)
+ return -1;
+
+ adapter->use_unpacked_mode = 1;
+ adapter->regs = (void *)pci_dev->mem_resource[0].addr;
+ if (!adapter->regs) {
+ dev_err(adapter, "%s: cannot map device registers\n", __func__);
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ adapter->pdev = pci_dev;
+ adapter->eth_dev = eth_dev;
+ pi->adapter = adapter;
+
+ err = cxgbe_probe(adapter);
+ if (err)
+ dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
+ __func__, err);
+
+out_free_adapter:
+ return err;
+}
+
+static struct eth_driver rte_cxgbe_pmd = {
+ .pci_drv = {
+ .name = "rte_cxgbe_pmd",
+ .id_table = cxgb4_pci_tbl,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ },
+ .eth_dev_init = eth_cxgbe_dev_init,
+ .dev_private_size = sizeof(struct port_info),
+};
+
+/*
+ * Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register itself as the [Poll Mode] Driver of PCI CXGBE devices.
+ */
+static int rte_cxgbe_pmd_init(const char *name __rte_unused,
+ const char *params __rte_unused)
+{
+ CXGBE_FUNC_TRACE();
+
+ rte_eth_driver_register(&rte_cxgbe_pmd);
+ return 0;
+}
+
+static struct rte_driver rte_cxgbe_driver = {
+ .name = "cxgbe_driver",
+ .type = PMD_PDEV,
+ .init = rte_cxgbe_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_cxgbe_driver);
diff --git a/src/dpdk22/drivers/net/cxgbe/cxgbe_main.c b/src/dpdk22/drivers/net/cxgbe/cxgbe_main.c
new file mode 100644
index 00000000..aff23d0a
--- /dev/null
+++ b/src/dpdk22/drivers/net/cxgbe/cxgbe_main.c
@@ -0,0 +1,1216 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+
+#include "common.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "cxgbe.h"
+
+/*
+ * Response queue handler for the FW event queue.
+ */
+static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
+ __rte_unused const struct pkt_gl *gl)
+{
+ u8 opcode = ((const struct rss_header *)rsp)->opcode;
+
+ rsp++; /* skip RSS header */
+
+ /*
+ * FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
+ */
+ if (unlikely(opcode == CPL_FW4_MSG &&
+ ((const struct cpl_fw4_msg *)rsp)->type ==
+ FW_TYPE_RSSCPL)) {
+ rsp++;
+ opcode = ((const struct rss_header *)rsp)->opcode;
+ rsp++;
+ if (opcode != CPL_SGE_EGR_UPDATE) {
+ dev_err(q->adapter, "unexpected FW4/CPL %#x on FW event queue\n",
+ opcode);
+ goto out;
+ }
+ }
+
+ if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
+ /* do nothing */
+ } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
+ const struct cpl_fw6_msg *msg = (const void *)rsp;
+
+ t4_handle_fw_rpl(q->adapter, msg->data);
+ } else {
+ dev_err(q->adapter, "unexpected CPL %#x on FW event queue\n",
+ opcode);
+ }
+out:
+ return 0;
+}
+
+int setup_sge_fwevtq(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int err = 0;
+ int msi_idx = 0;
+
+ err = t4_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->eth_dev,
+ msi_idx, NULL, fwevtq_handler, -1, NULL, 0,
+ rte_socket_id());
+ return err;
+}
+
+static int closest_timer(const struct sge *s, int time)
+{
+ unsigned int i, match = 0;
+ int delta, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+ delta = time - s->timer_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+static int closest_thres(const struct sge *s, int thres)
+{
+ unsigned int i, match = 0;
+ int delta, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+ delta = thres - s->counter_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+/**
+ * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
+ * @q: the Rx queue
+ * @us: the hold-off time in us, or 0 to disable timer
+ * @cnt: the hold-off packet count, or 0 to disable counter
+ *
+ * Sets an Rx queue's interrupt hold-off time and packet count. At least
+ * one of the two needs to be enabled for the queue to generate interrupts.
+ */
+int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt)
+{
+ struct adapter *adap = q->adapter;
+ unsigned int timer_val;
+
+ if (cnt) {
+ int err;
+ u32 v, new_idx;
+
+ new_idx = closest_thres(&adap->sge, cnt);
+ if (q->desc && q->pktcnt_idx != new_idx) {
+ /* the queue has already been created, update it */
+ v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+ V_FW_PARAMS_PARAM_X(
+ FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+ V_FW_PARAMS_PARAM_YZ(q->cntxt_id);
+ err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+ &v, &new_idx);
+ if (err)
+ return err;
+ }
+ q->pktcnt_idx = new_idx;
+ }
+
+ timer_val = (us == 0) ? X_TIMERREG_RESTART_COUNTER :
+ closest_timer(&adap->sge, us);
+
+ if ((us | cnt) == 0)
+ q->intr_params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
+ else
+ q->intr_params = V_QINTR_TIMER_IDX(timer_val) |
+ V_QINTR_CNT_EN(cnt > 0);
+ return 0;
+}
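+
+/*
+ * Illustrative use of the hold-off parameters above (values are
+ * examples only): with us = 5 and cnt = 8, the queue raises an
+ * interrupt roughly 5us after the first unhandled response or once 8
+ * responses accumulate, whichever comes first. Both values are snapped
+ * to the nearest entries of the adapter's timer_val[]/counter_val[]
+ * tables by closest_timer()/closest_thres() above, so the effective
+ * values may differ slightly from those requested:
+ *
+ *	cxgb4_set_rspq_intr_params(q, 5, 8);	~5us timer, ~8-pkt counter
+ *	cxgb4_set_rspq_intr_params(q, 0, 0);	no interrupts, CIDX updates only
+ */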
+
+static inline bool is_x_1g_port(const struct link_config *lc)
+{
+ return ((lc->supported & FW_PORT_CAP_SPEED_1G) != 0);
+}
+
+static inline bool is_x_10g_port(const struct link_config *lc)
+{
+ return ((lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
+ (lc->supported & FW_PORT_CAP_SPEED_40G) != 0 ||
+ (lc->supported & FW_PORT_CAP_SPEED_100G) != 0);
+}
+
+inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
+ unsigned int us, unsigned int cnt,
+ unsigned int size, unsigned int iqe_size)
+{
+ q->adapter = adap;
+ cxgb4_set_rspq_intr_params(q, us, cnt);
+ q->iqe_len = iqe_size;
+ q->size = size;
+}
+
+int cfg_queue_count(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+ struct sge *s = &adap->sge;
+ unsigned int max_queues = s->max_ethqsets / adap->params.nports;
+
+ if ((eth_dev->data->nb_rx_queues < 1) ||
+ (eth_dev->data->nb_tx_queues < 1))
+ return -EINVAL;
+
+ if ((eth_dev->data->nb_rx_queues > max_queues) ||
+ (eth_dev->data->nb_tx_queues > max_queues))
+ return -EINVAL;
+
+ if (eth_dev->data->nb_rx_queues > pi->rss_size)
+ return -EINVAL;
+
+ /* We must reconfigure RSS, since the config has changed */
+ pi->flags &= ~PORT_RSS_DONE;
+
+ pi->n_rx_qsets = eth_dev->data->nb_rx_queues;
+ pi->n_tx_qsets = eth_dev->data->nb_tx_queues;
+
+ return 0;
+}
+
+void cfg_queues(struct rte_eth_dev *eth_dev)
+{
+ struct rte_config *config = rte_eal_get_configuration();
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+ struct sge *s = &adap->sge;
+ unsigned int i, nb_ports = 0, qidx = 0;
+ unsigned int q_per_port = 0;
+
+ if (!(adap->flags & CFG_QUEUES)) {
+ for_each_port(adap, i) {
+ struct port_info *tpi = adap2pinfo(adap, i);
+
+ nb_ports += (is_x_10g_port(&tpi->link_cfg)) ||
+ is_x_1g_port(&tpi->link_cfg) ? 1 : 0;
+ }
+
+ /*
+ * By default, allocate up to one queue set per lcore for each 1G/10G port.
+ */
+ if (nb_ports)
+ q_per_port = (MAX_ETH_QSETS -
+ (adap->params.nports - nb_ports)) /
+ nb_ports;
+
+ if (q_per_port > config->lcore_count)
+ q_per_port = config->lcore_count;
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+
+ pi->first_qset = qidx;
+
+ /* Initially n_rx_qsets == n_tx_qsets */
+ pi->n_rx_qsets = (is_x_10g_port(&pi->link_cfg) ||
+ is_x_1g_port(&pi->link_cfg)) ?
+ q_per_port : 1;
+ pi->n_tx_qsets = pi->n_rx_qsets;
+
+ if (pi->n_rx_qsets > pi->rss_size)
+ pi->n_rx_qsets = pi->rss_size;
+
+ qidx += pi->n_rx_qsets;
+ }
+
+ s->max_ethqsets = qidx;
+
+ for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
+ struct sge_eth_rxq *r = &s->ethrxq[i];
+
+ init_rspq(adap, &r->rspq, 0, 0, 1024, 64);
+ r->usembufs = 1;
+ r->fl.size = (r->usembufs ? 1024 : 72);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
+ s->ethtxq[i].q.size = 1024;
+
+ init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64);
+ adap->flags |= CFG_QUEUES;
+ }
+}
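+
+/*
+ * Worked example of the distribution above (numbers are illustrative):
+ * with MAX_ETH_QSETS = 64, four ports that are all 1G/10G-capable and
+ * 8 lcores, q_per_port = (64 - 0) / 4 = 16, capped to the 8 available
+ * lcores; each port then gets 8 rx/tx queue sets, further capped by its
+ * RSS table size. Ports that are neither 1G nor 10G capable fall back
+ * to a single queue set, and one qset is reserved for each of them up
+ * front before the division.
+ */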
+
+void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats)
+{
+ t4_get_port_stats_offset(pi->adapter, pi->tx_chan, stats,
+ &pi->stats_base);
+}
+
+void cxgbe_stats_reset(struct port_info *pi)
+{
+ t4_clr_port_stats(pi->adapter, pi->tx_chan);
+}
+
+static void setup_memwin(struct adapter *adap)
+{
+ u32 mem_win0_base;
+
+ /* For T5, only relative offset inside the PCIe BAR is passed */
+ mem_win0_base = MEMWIN0_BASE;
+
+ /*
+ * Set up memory window for accessing adapter memory ranges. (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
+ MEMWIN_NIC),
+ mem_win0_base | V_BIR(0) |
+ V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
+ MEMWIN_NIC));
+}
+
+static int init_rss(struct adapter *adap)
+{
+ unsigned int i;
+ int err;
+
+ err = t4_init_rss_mode(adap, adap->mbox);
+ if (err)
+ return err;
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+
+ pi->rss = rte_zmalloc(NULL, pi->rss_size, 0);
+ if (!pi->rss)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void print_port_info(struct adapter *adap)
+{
+ int i;
+ char buf[80];
+ struct rte_pci_addr *loc = &adap->pdev->addr;
+
+ for_each_port(adap, i) {
+ const struct port_info *pi = &adap->port[i];
+ char *bufp = buf;
+
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
+ bufp += sprintf(bufp, "100/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
+ bufp += sprintf(bufp, "1000/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
+ bufp += sprintf(bufp, "10G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
+ bufp += sprintf(bufp, "40G/");
+ if (bufp != buf)
+ --bufp;
+ sprintf(bufp, "BASE-%s",
+ t4_get_port_type_description(
+ (enum fw_port_type)pi->port_type));
+
+ dev_info(adap,
+ " " PCI_PRI_FMT " Chelsio rev %d %s %s\n",
+ loc->domain, loc->bus, loc->devid, loc->function,
+ CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
+ (adap->flags & USING_MSIX) ? " MSI-X" :
+ (adap->flags & USING_MSI) ? " MSI" : "");
+ }
+}
+
+/*
+ * Tweak configuration based on system architecture, etc. Most of these have
+ * defaults assigned to them by Firmware Configuration Files (if we're using
+ * them) but need to be explicitly set if we're using hard-coded
+ * initialization. So these are essentially common tweaks/settings for
+ * Configuration Files and hard-coded initialization ...
+ */
+static int adap_init0_tweaks(struct adapter *adapter)
+{
+ u8 rx_dma_offset;
+
+ /*
+ * Fix up various Host-Dependent Parameters like Page Size, Cache
+ * Line Size, etc. The firmware default is for a 4KB Page Size and
+ * 64B Cache Line Size ...
+ */
+ t4_fixup_host_params_compat(adapter, CXGBE_PAGE_SIZE, L1_CACHE_BYTES,
+ T5_LAST_REV);
+
+ /*
+ * Keep the chip default offset to deliver Ingress packets into our
+ * DMA buffers to zero
+ */
+ rx_dma_offset = 0;
+ t4_set_reg_field(adapter, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
+ V_PKTSHIFT(rx_dma_offset));
+
+ t4_set_reg_field(adapter, A_SGE_FLM_CFG,
+ V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING,
+ V_CREDITCNT(3) | V_CREDITCNTPACKING(1));
+
+ t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U),
+ V_IDMAARBROUNDROBIN(1U));
+
+ /*
+ * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
+ * adds the pseudo header itself.
+ */
+ t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
+ F_CSUM_HAS_PSEUDO_HDR, 0);
+
+ return 0;
+}
+
+/*
+ * Attempt to initialize the adapter via a Firmware Configuration File.
+ */
+static int adap_init0_config(struct adapter *adapter, int reset)
+{
+ struct fw_caps_config_cmd caps_cmd;
+ unsigned long mtype = 0, maddr = 0;
+ u32 finiver, finicsum, cfcsum;
+ int ret;
+ int config_issued = 0;
+ int cfg_addr;
+ char config_name[20];
+
+ /*
+ * Reset device if necessary.
+ */
+ if (reset) {
+ ret = t4_fw_reset(adapter, adapter->mbox,
+ F_PIORSTMODE | F_PIORST);
+ if (ret < 0) {
+ dev_warn(adapter, "Firmware reset failed, error %d\n",
+ -ret);
+ goto bye;
+ }
+ }
+
+ cfg_addr = t4_flash_cfg_addr(adapter);
+ if (cfg_addr < 0) {
+ ret = cfg_addr;
+ dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n",
+ -ret);
+ goto bye;
+ }
+
+ strcpy(config_name, "On Flash");
+ mtype = FW_MEMTYPE_CF_FLASH;
+ maddr = cfg_addr;
+
+ /*
+ * Issue a Capability Configuration command to the firmware to get it
+ * to parse the Configuration File. We don't use t4_fw_config_file()
+ * because we want the ability to modify various features after we've
+ * processed the configuration file ...
+ */
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ caps_cmd.cfvalid_to_len16 =
+ cpu_to_be32(F_FW_CAPS_CONFIG_CMD_CFVALID |
+ V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
+ V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+ FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+ &caps_cmd);
+ /*
+ * If the CAPS_CONFIG failed with an ENOENT (for a Firmware
+ * Configuration File in FLASH), our last gasp effort is to use the
+ * Firmware Configuration File which is embedded in the firmware. A
+ * very few early versions of the firmware didn't have one embedded
+ * but we can ignore those.
+ */
+ if (ret == -ENOENT) {
+ dev_info(adapter, "%s: Going for embedded config in firmware..\n",
+ __func__);
+
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write =
+ cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
+ sizeof(caps_cmd), &caps_cmd);
+ strcpy(config_name, "Firmware Default");
+ }
+
+ config_issued = 1;
+ if (ret < 0)
+ goto bye;
+
+ finiver = be32_to_cpu(caps_cmd.finiver);
+ finicsum = be32_to_cpu(caps_cmd.finicsum);
+ cfcsum = be32_to_cpu(caps_cmd.cfcsum);
+ if (finicsum != cfcsum)
+ dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",
+ finicsum, cfcsum);
+
+ /*
+ * If we're a pure NIC driver then disable all offloading facilities.
+ * This will allow the firmware to optimize aspects of the hardware
+ * configuration which will result in improved performance.
+ */
+ caps_cmd.niccaps &= cpu_to_be16(~(FW_CAPS_CONFIG_NIC_HASHFILTER |
+ FW_CAPS_CONFIG_NIC_ETHOFLD));
+ caps_cmd.toecaps = 0;
+ caps_cmd.iscsicaps = 0;
+ caps_cmd.rdmacaps = 0;
+ caps_cmd.fcoecaps = 0;
+
+ /*
+ * And now tell the firmware to use the configuration we just loaded.
+ */
+ caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+ NULL);
+ if (ret < 0) {
+ dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n",
+ -ret);
+ goto bye;
+ }
+
+ /*
+ * Tweak configuration based on system architecture, etc.
+ */
+ ret = adap_init0_tweaks(adapter);
+ if (ret < 0) {
+ dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret);
+ goto bye;
+ }
+
+ /*
+ * And finally tell the firmware to initialize itself using the
+ * parameters from the Configuration File.
+ */
+ ret = t4_fw_initialize(adapter, adapter->mbox);
+ if (ret < 0) {
+ dev_warn(adapter, "Initializing Firmware failed, error %d\n",
+ -ret);
+ goto bye;
+ }
+
+ /*
+ * Return successfully and note that we're operating with parameters
+ * from the Configuration File rather than from hard-wired
+ * initialization constants buried in the driver.
+ */
+ dev_info(adapter,
+ "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
+ config_name, finiver, cfcsum);
+
+ return 0;
+
+ /*
+ * Something bad happened. Return the error ... (If the "error"
+ * is that there's no Configuration File on the adapter we don't
+ * want to issue a warning since this is fairly common.)
+ */
+bye:
+ if (config_issued && ret != -ENOENT)
+ dev_warn(adapter, "\"%s\" configuration file error %d\n",
+ config_name, -ret);
+
+ dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret);
+ return ret;
+}
+
+static int adap_init0(struct adapter *adap)
+{
+ int ret = 0;
+ u32 v, port_vec;
+ enum dev_state state;
+ u32 params[7], val[7];
+ int reset = 1;
+ int mbox = adap->mbox;
+
+ /*
+ * Contact FW, advertising Master capability.
+ */
+ ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
+ if (ret < 0) {
+ dev_err(adap, "%s: could not connect to FW, error %d\n",
+ __func__, -ret);
+ goto bye;
+ }
+
+ CXGBE_DEBUG_MBOX(adap, "%s: adap->mbox = %d; ret = %d\n", __func__,
+ adap->mbox, ret);
+
+ if (ret == mbox)
+ adap->flags |= MASTER_PF;
+
+ if (state == DEV_STATE_INIT) {
+ /*
+ * Force halt and reset FW because a previous instance may have
+ * exited abnormally without properly shutting down
+ */
+ ret = t4_fw_halt(adap, adap->mbox, reset);
+ if (ret < 0) {
+ dev_err(adap, "Failed to halt. Exit.\n");
+ goto bye;
+ }
+
+ ret = t4_fw_restart(adap, adap->mbox, reset);
+ if (ret < 0) {
+ dev_err(adap, "Failed to restart. Exit.\n");
+ goto bye;
+ }
+ state = (enum dev_state)((unsigned)state & ~DEV_STATE_INIT);
+ }
+
+ t4_get_fw_version(adap, &adap->params.fw_vers);
+ t4_get_tp_version(adap, &adap->params.tp_vers);
+
+ dev_info(adap, "fw: %u.%u.%u.%u, TP: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adap->params.fw_vers),
+ G_FW_HDR_FW_VER_MINOR(adap->params.fw_vers),
+ G_FW_HDR_FW_VER_MICRO(adap->params.fw_vers),
+ G_FW_HDR_FW_VER_BUILD(adap->params.fw_vers),
+ G_FW_HDR_FW_VER_MAJOR(adap->params.tp_vers),
+ G_FW_HDR_FW_VER_MINOR(adap->params.tp_vers),
+ G_FW_HDR_FW_VER_MICRO(adap->params.tp_vers),
+ G_FW_HDR_FW_VER_BUILD(adap->params.tp_vers));
+
+ ret = t4_get_core_clock(adap, &adap->params.vpd);
+ if (ret < 0) {
+ dev_err(adap, "%s: could not get core clock, error %d\n",
+ __func__, -ret);
+ goto bye;
+ }
+
+ /*
+ * Find out what ports are available to us. Note that we need to do
+ * this before calling adap_init0_config() since it needs nports
+ * and portvec ...
+ */
+ v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
+ if (ret < 0) {
+ dev_err(adap, "%s: failure in t4_queury_params; error = %d\n",
+ __func__, ret);
+ goto bye;
+ }
+
+ adap->params.nports = hweight32(port_vec);
+ adap->params.portvec = port_vec;
+
+ dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
+ adap->params.nports);
+
+ /*
+ * If the firmware is initialized already (and we're not forcing a
+ * master initialization), note that we're living with existing
+ * adapter parameters. Otherwise, it's time to try initializing the
+ * adapter ...
+ */
+ if (state == DEV_STATE_INIT) {
+ dev_info(adap, "Coming up as %s: Adapter already initialized\n",
+ adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
+ } else {
+ dev_info(adap, "Coming up as MASTER: Initializing adapter\n");
+
+ ret = adap_init0_config(adap, reset);
+ if (ret == -ENOENT) {
+ dev_err(adap,
+ "No Configuration File present on adapter. Using hard-wired configuration parameters.\n");
+ goto bye;
+ }
+ }
+ if (ret < 0) {
+ dev_err(adap, "could not initialize adapter, error %d\n", -ret);
+ goto bye;
+ }
+
+ /*
+ * Give the SGE code a chance to pull in anything that it needs ...
+ * Note that this must be called after we retrieve our VPD parameters
+ * in order to know how to convert core ticks to seconds, etc.
+ */
+ ret = t4_sge_init(adap);
+ if (ret < 0) {
+ dev_err(adap, "t4_sge_init failed with error %d\n",
+ -ret);
+ goto bye;
+ }
+
+ /*
+ * Grab some of our basic fundamental operating parameters.
+ */
+#define FW_PARAM_DEV(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
+ V_FW_PARAMS_PARAM_Y(0) | \
+ V_FW_PARAMS_PARAM_Z(0))
+
+ /* If we're running on newer firmware, let it know that we're
+ * prepared to deal with encapsulated CPL messages. Older
+ * firmware won't understand this and we'll just get
+ * unencapsulated messages ...
+ */
+ params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
+ val[0] = 1;
+ (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
+
+ /*
+ * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
+ * capability. Earlier versions of the firmware didn't have the
+ * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
+ * permission to use ULPTX MEMWRITE DSGL.
+ */
+ if (is_t4(adap->params.chip)) {
+ adap->params.ulptx_memwrite_dsgl = false;
+ } else {
+ params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ 1, params, val);
+ adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
+ }
+
+ /*
+ * The MTU/MSS Table is initialized by now, so load their values. If
+ * we're initializing the adapter, then we'll make any modifications
+ * we want to the MTU/MSS Table and also initialize the congestion
+ * parameters.
+ */
+ t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
+ if (state != DEV_STATE_INIT) {
+ int i;
+
+ /*
+ * The default MTU Table contains values 1492 and 1500.
+ * However, for TCP, it's better to have two values which are
+ * a multiple of 8 +/- 4 bytes apart near this popular MTU.
+ * This allows us to have a TCP Data Payload which is a
+ * multiple of 8 regardless of what combination of TCP Options
+ * are in use (always a multiple of 4 bytes) which is
+ * important for performance reasons. For instance, if no
+ * options are in use, then we have a 20-byte IP header and a
+ * 20-byte TCP header. In this case, a 1500-byte MTU would
+ * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
+ * which is not a multiple of 8. So using an MTU of 1488 in
+ * this case results in a TCP Data Payload of 1448 bytes which
+ * is a multiple of 8. On the other hand, if 12-byte TCP Time
+ * Stamps have been negotiated, then an MTU of 1500 bytes
+ * results in a TCP Data Payload of 1448 bytes which, as
+ * above, is a multiple of 8 bytes ...
+ */
+ for (i = 0; i < NMTUS; i++)
+ if (adap->params.mtus[i] == 1492) {
+ adap->params.mtus[i] = 1488;
+ break;
+ }
+
+ t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+ adap->params.b_wnd);
+ }
+ t4_init_sge_params(adap);
+ t4_init_tp_params(adap);
+
+ adap->params.drv_memwin = MEMWIN_NIC;
+ adap->flags |= FW_OK;
+ dev_debug(adap, "%s: returning zero..\n", __func__);
+ return 0;
+
+ /*
+ * Something bad happened. If a command timed out or failed with EIO
+ * FW does not operate within its spec or something catastrophic
+ * happened to HW/FW, stop issuing commands.
+ */
+bye:
+ if (ret != -ETIMEDOUT && ret != -EIO)
+ t4_fw_bye(adap, adap->mbox);
+ return ret;
+}
+
+/**
+ * t4_os_portmod_changed - handle port module changes
+ * @adap: the adapter associated with the module change
+ * @port_id: the port index whose module status has changed
+ *
+ * This is the OS-dependent handler for port module changes. It is
+ * invoked when a port module is removed or inserted for any OS-specific
+ * processing.
+ */
+void t4_os_portmod_changed(const struct adapter *adap, int port_id)
+{
+ static const char * const mod_str[] = {
+ NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
+ };
+
+ const struct port_info *pi = &adap->port[port_id];
+
+ if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+ dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
+ else if (pi->mod_type < ARRAY_SIZE(mod_str))
+ dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
+ mod_str[pi->mod_type]);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+ dev_info(adap, "Port%d: unsupported optical port module inserted\n",
+ pi->port_id);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+ dev_info(adap, "Port%d: unknown port module inserted, forcing TWINAX\n",
+ pi->port_id);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+ dev_info(adap, "Port%d: transceiver module error\n",
+ pi->port_id);
+ else
+ dev_info(adap, "Port%d: unknown module type %d inserted\n",
+ pi->port_id, pi->mod_type);
+}
+
+/**
+ * link_start - enable a port
+ * @pi: the port to enable
+ *
+ * Performs the MAC and PHY actions needed to enable a port.
+ */
+int link_start(struct port_info *pi)
+{
+ struct adapter *adapter = pi->adapter;
+ int ret;
+ unsigned int mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ /*
+ * We do not set address filters and promiscuity here, the stack does
+ * that step explicitly.
+ */
+ ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1,
+ -1, 1, true);
+ if (ret == 0) {
+ ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
+ pi->xact_addr_filt,
+ (u8 *)&pi->eth_dev->data->mac_addrs[0],
+ true, true);
+ if (ret >= 0) {
+ pi->xact_addr_filt = ret;
+ ret = 0;
+ }
+ }
+ if (ret == 0)
+ ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
+ &pi->link_cfg);
+ if (ret == 0) {
+ /*
+ * Enabling a Virtual Interface can result in an interrupt
+ * during the processing of the VI Enable command and, in some
+ * paths, result in an attempt to issue another command in the
+ * interrupt context. Thus, we disable interrupts during the
+ * course of the VI Enable command ...
+ */
+ ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
+ true, true, false);
+ }
+ return ret;
+}
+
+/**
+ * cxgb4_write_rss - write the RSS table for a given port
+ * @pi: the port
+ * @queues: array of queue indices for RSS
+ *
+ * Sets up the portion of the HW RSS table for the port's VI to distribute
+ * packets to the Rx queues in @queues.
+ */
+int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
+{
+ u16 *rss;
+ int i, err;
+ struct adapter *adapter = pi->adapter;
+ const struct sge_eth_rxq *rxq;
+
+ /* Should never be called before setting up sge eth rx queues */
+ BUG_ON(!(adapter->flags & FULL_INIT_DONE));
+
+ rxq = &adapter->sge.ethrxq[pi->first_qset];
+ rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
+ if (!rss)
+ return -ENOMEM;
+
+ /* map the queue indices to queue ids */
+ for (i = 0; i < pi->rss_size; i++, queues++)
+ rss[i] = rxq[*queues].rspq.abs_id;
+
+ err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
+ pi->rss_size, rss, pi->rss_size);
+ /*
+ * If Tunnel All Lookup isn't specified in the global RSS
+ * Configuration, then we need to specify a default Ingress
+ * Queue for any ingress packets which aren't hashed. We'll
+ * use our first ingress queue ...
+ */
+ if (!err)
+ err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
+ F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_UDPEN,
+ rss[0]);
+ rte_free(rss);
+ return err;
+}
+
+/**
+ * setup_rss - configure RSS
+ * @adapter: the adapter
+ *
+ * Sets up RSS to distribute packets to multiple receive queues. We
+ * configure the RSS CPU lookup table to distribute to the number of HW
+ * receive queues, and the response queue lookup table to narrow that
+ * down to the response queues actually configured for each port.
+ * We always configure the RSS mapping for all ports since the mapping
+ * table has plenty of entries.
+ */
+int setup_rss(struct port_info *pi)
+{
+ int j, err;
+ struct adapter *adapter = pi->adapter;
+
+ dev_debug(adapter, "%s: pi->rss_size = %u; pi->n_rx_qsets = %u\n",
+ __func__, pi->rss_size, pi->n_rx_qsets);
+
+ if (!(pi->flags & PORT_RSS_DONE)) {
+ if (adapter->flags & FULL_INIT_DONE) {
+ /* Fill default values with equal distribution */
+ for (j = 0; j < pi->rss_size; j++)
+ pi->rss[j] = j % pi->n_rx_qsets;
+
+ err = cxgb4_write_rss(pi, pi->rss);
+ if (err)
+ return err;
+ pi->flags |= PORT_RSS_DONE;
+ }
+ }
+ return 0;
+}
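+
+/*
+ * For example (sizes are port-dependent): a port with pi->rss_size = 64
+ * and pi->n_rx_qsets = 4 gets its indirection table filled with the
+ * repeating pattern 0, 1, 2, 3, ...; cxgb4_write_rss() then translates
+ * each index into the absolute response queue id (rxq[idx].rspq.abs_id)
+ * that the hardware consults when hashing ingress packets.
+ */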
+
+/*
+ * Enable NAPI scheduling and interrupt generation for all Rx queues.
+ */
+static void enable_rx(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+ struct sge_rspq *q = &s->fw_evtq;
+ int i, j;
+
+ /* 0-increment GTS to start the timer and enable interrupts */
+ t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS),
+ V_SEINTARM(q->intr_params) |
+ V_INGRESSQID(q->cntxt_id));
+
+ for_each_port(adap, i) {
+ const struct port_info *pi = &adap->port[i];
+ struct rte_eth_dev *eth_dev = pi->eth_dev;
+
+ for (j = 0; j < eth_dev->data->nb_rx_queues; j++) {
+ q = eth_dev->data->rx_queues[j];
+
+ /*
+ * 0-increment GTS to start the timer and enable
+ * interrupts
+ */
+ t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS),
+ V_SEINTARM(q->intr_params) |
+ V_INGRESSQID(q->cntxt_id));
+ }
+ }
+}
+
+/**
+ * cxgb_up - enable the adapter
+ * @adap: adapter being enabled
+ *
+ * Called when the first port is enabled, this function performs the
+ * actions necessary to make an adapter operational, such as completing
+ * the initialization of HW modules, and enabling interrupts.
+ */
+int cxgbe_up(struct adapter *adap)
+{
+ enable_rx(adap);
+ t4_sge_tx_monitor_start(adap);
+ t4_intr_enable(adap);
+ adap->flags |= FULL_INIT_DONE;
+
+ /* TODO: deadman watchdog ?? */
+ return 0;
+}
+
+/*
+ * Close the port
+ */
+int cxgbe_down(struct port_info *pi)
+{
+ struct adapter *adapter = pi->adapter;
+ int err = 0;
+
+ err = t4_enable_vi(adapter, adapter->mbox, pi->viid, false, false);
+ if (err) {
+ dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);
+ return err;
+ }
+
+ t4_reset_link_config(adapter, pi->port_id);
+ return 0;
+}
+
+/*
+ * Release resources when all the ports have been stopped.
+ */
+void cxgbe_close(struct adapter *adapter)
+{
+ struct port_info *pi;
+ int i;
+
+ if (adapter->flags & FULL_INIT_DONE) {
+ t4_intr_disable(adapter);
+ t4_sge_tx_monitor_stop(adapter);
+ t4_free_sge_resources(adapter);
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ if (pi->viid != 0)
+ t4_free_vi(adapter, adapter->mbox,
+ adapter->pf, 0, pi->viid);
+ rte_free(pi->eth_dev->data->mac_addrs);
+ }
+ adapter->flags &= ~FULL_INIT_DONE;
+ }
+
+ if (adapter->flags & FW_OK)
+ t4_fw_bye(adapter, adapter->mbox);
+}
+
+int cxgbe_probe(struct adapter *adapter)
+{
+ struct port_info *pi;
+ int func, i;
+ int err = 0;
+
+ func = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
+ adapter->mbox = func;
+ adapter->pf = func;
+
+ t4_os_lock_init(&adapter->mbox_lock);
+ TAILQ_INIT(&adapter->mbox_list);
+
+ err = t4_prep_adapter(adapter);
+ if (err)
+ return err;
+
+ setup_memwin(adapter);
+ err = adap_init0(adapter);
+ if (err) {
+ dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
+ __func__, err);
+ goto out_free;
+ }
+
+ if (!is_t4(adapter->params.chip)) {
+ /*
+ * The userspace doorbell BAR is split evenly into doorbell
+ * regions, each associated with an egress queue. If this
+ * per-queue region is large enough (at least UDBS_SEG_SIZE)
+ * then it can be used to submit a tx work request with an
+ * implied doorbell. Enable write combining on the BAR if
+ * there is room for such work requests.
+ */
+ int s_qpp, qpp, num_seg;
+
+ s_qpp = (S_QUEUESPERPAGEPF0 +
+ (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) *
+ adapter->pf);
+ qpp = 1 << ((t4_read_reg(adapter,
+ A_SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp)
+ & M_QUEUESPERPAGEPF0);
+ num_seg = CXGBE_PAGE_SIZE / UDBS_SEG_SIZE;
+ if (qpp > num_seg)
+ dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n");
+
+ adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
+ if (!adapter->bar2) {
+ dev_err(adapter, "cannot map device bar2 region\n");
+ err = -ENOMEM;
+ goto out_free;
+ }
+ t4_write_reg(adapter, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) |
+ V_STATMODE(0));
+ }
+
+ for_each_port(adapter, i) {
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev_data *data = NULL;
+ const unsigned int numa_node = rte_socket_id();
+
+ pi = &adapter->port[i];
+ pi->adapter = adapter;
+ pi->xact_addr_filt = -1;
+ pi->port_id = i;
+
+ snprintf(name, sizeof(name), "cxgbe%d",
+ adapter->eth_dev->data->port_id + i);
+
+ if (i == 0) {
+ /* First port is already allocated by DPDK */
+ pi->eth_dev = adapter->eth_dev;
+ goto allocate_mac;
+ }
+
+ /*
+ * now do all data allocation - for eth_dev structure,
+ * and internal (private) data for the remaining ports
+ */
+
+ /* reserve an ethdev entry */
+ pi->eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_PCI);
+ if (!pi->eth_dev)
+ goto out_free;
+
+ data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+ if (!data)
+ goto out_free;
+
+ data->port_id = adapter->eth_dev->data->port_id + i;
+
+ pi->eth_dev->data = data;
+
+allocate_mac:
+ pi->eth_dev->pci_dev = adapter->pdev;
+ pi->eth_dev->data->dev_private = pi;
+ pi->eth_dev->driver = adapter->eth_dev->driver;
+ pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
+ pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
+ pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
+ TAILQ_INIT(&pi->eth_dev->link_intr_cbs);
+
+ pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
+ ETHER_ADDR_LEN, 0);
+ if (!pi->eth_dev->data->mac_addrs) {
+ dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
+ __func__);
+ err = -1;
+ goto out_free;
+ }
+ }
+
+ if (adapter->flags & FW_OK) {
+ err = t4_port_init(adapter, adapter->mbox, adapter->pf, 0);
+ if (err) {
+ dev_err(adapter, "%s: t4_port_init failed with err %d\n",
+ __func__, err);
+ goto out_free;
+ }
+ }
+
+ cfg_queues(adapter->eth_dev);
+
+ print_port_info(adapter);
+
+ err = init_rss(adapter);
+ if (err)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ if (pi->viid != 0)
+ t4_free_vi(adapter, adapter->mbox, adapter->pf,
+ 0, pi->viid);
+ /* Skip first port since it'll be de-allocated by DPDK */
+ if (i == 0)
+ continue;
+ if (pi->eth_dev->data)
+ rte_free(pi->eth_dev->data);
+ }
+
+ if (adapter->flags & FW_OK)
+ t4_fw_bye(adapter, adapter->mbox);
+ return -err;
+}
diff --git a/src/dpdk22/drivers/net/cxgbe/sge.c b/src/dpdk22/drivers/net/cxgbe/sge.c
new file mode 100644
index 00000000..3c62d03e
--- /dev/null
+++ b/src/dpdk22/drivers/net/cxgbe/sge.c
@@ -0,0 +1,2258 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+
+#include "common.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "cxgbe.h"
+
+static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
+ struct sge_eth_txq *txq);
+
+/*
+ * Max number of Rx buffers we replenish at a time.
+ */
+#define MAX_RX_REFILL 64U
+
+#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 256
+
+/*
+ * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
+ * per mbuf buffer). We currently only support two sizes for 1500- and
+ * 9000-byte MTUs. We could easily support more but there doesn't seem to be
+ * much need for that ...
+ */
+#define FL_MTU_SMALL 1500
+#define FL_MTU_LARGE 9000
+
+static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
+ unsigned int mtu)
+{
+ struct sge *s = &adapter->sge;
+
+ return CXGBE_ALIGN(s->pktshift + ETHER_HDR_LEN + VLAN_HLEN + mtu,
+ s->fl_align);
+}
+
+#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
+#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
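+
+/*
+ * As a rough example (the real values depend on the SGE parameters
+ * negotiated at init time): with pktshift = 0 and fl_align = 64,
+ * FL_MTU_SMALL_BUFSIZE = CXGBE_ALIGN(0 + 14 + 4 + 1500, 64) = 1536 and
+ * FL_MTU_LARGE_BUFSIZE = CXGBE_ALIGN(9018, 64) = 9024 bytes.
+ */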
+
+/*
+ * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
+ * these to specify the buffer size as an index into the SGE Free List Buffer
+ * Size register array. We also use bit 4, when the buffer has been unmapped
+ * for DMA, but this is of course never sent to the hardware and is only used
+ * to prevent double unmappings. All of the above requires that the Free List
+ * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
+ * 32-byte aligned or a power of 2 greater in alignment. Since the SGE's minimal
+ * Free List Buffer alignment is 32 bytes, this works out for us ...
+ */
+enum {
+ RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
+ RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */
+ RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
+
+ /*
+ * XXX We shouldn't depend on being able to use these indices.
+ * XXX Especially when some other Master PF has initialized the
+ * XXX adapter or we use the Firmware Configuration File. We
+ * XXX should really search through the Host Buffer Size register
+ * XXX array for the appropriately sized buffer indices.
+ */
+ RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
+ RX_LARGE_PG_BUF = 0x1, /* large (>= PAGE_SIZE) page buffer */
+
+ RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
+ RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
+};
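+
+/*
+ * A minimal sketch of the encoding described above: the size index is
+ * OR'd into the low bits of the (sufficiently aligned) DMA address when
+ * a descriptor is written, and masked back out when the buffer size or
+ * the raw address is needed:
+ *
+ *	mapping = dma_addr | RX_LARGE_MTU_BUF;		    store size index
+ *	idx  = sd->dma_addr & RX_BUF_SIZE;		    recover size index
+ *	addr = sd->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;    recover address
+ *
+ * refill_fl_usembufs() and get_buf_size() below follow this scheme.
+ */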
+
+/**
+ * txq_avail - return the number of available slots in a Tx queue
+ * @q: the Tx queue
+ *
+ * Returns the number of descriptors in a Tx queue available to write new
+ * packets.
+ */
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
+
+static int map_mbuf(struct rte_mbuf *mbuf, dma_addr_t *addr)
+{
+ struct rte_mbuf *m = mbuf;
+
+ for (; m; m = m->next, addr++) {
+ *addr = m->buf_physaddr + rte_pktmbuf_headroom(m);
+ if (*addr == 0)
+ goto out_err;
+ }
+ return 0;
+
+out_err:
+ return -ENOMEM;
+}
+
+/**
+ * free_tx_desc - reclaims Tx descriptors and their buffers
+ * @q: the Tx queue to reclaim descriptors from
+ * @n: the number of descriptors to reclaim
+ *
+ * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
+ * Tx buffers. Called with the Tx queue lock held.
+ */
+static void free_tx_desc(struct sge_txq *q, unsigned int n)
+{
+ struct tx_sw_desc *d;
+ unsigned int cidx = 0;
+
+ d = &q->sdesc[cidx];
+ while (n--) {
+ if (d->mbuf) { /* an SGL is present */
+ rte_pktmbuf_free(d->mbuf);
+ d->mbuf = NULL;
+ }
+ if (d->coalesce.idx) {
+ int i;
+
+ for (i = 0; i < d->coalesce.idx; i++) {
+ rte_pktmbuf_free(d->coalesce.mbuf[i]);
+ d->coalesce.mbuf[i] = NULL;
+ }
+ d->coalesce.idx = 0;
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ RTE_MBUF_PREFETCH_TO_FREE(&q->sdesc->mbuf->pool);
+ }
+}
+
+static void reclaim_tx_desc(struct sge_txq *q, unsigned int n)
+{
+ struct tx_sw_desc *d;
+ unsigned int cidx = q->cidx;
+
+ d = &q->sdesc[cidx];
+ while (n--) {
+ if (d->mbuf) { /* an SGL is present */
+ rte_pktmbuf_free(d->mbuf);
+ d->mbuf = NULL;
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ }
+ q->cidx = cidx;
+}
+
+/**
+ * fl_cap - return the capacity of a free-buffer list
+ * @fl: the FL
+ *
+ * Returns the capacity of a free-buffer list. The capacity is less than
+ * the size because one descriptor needs to be left unpopulated, otherwise
+ * HW will think the FL is empty.
+ */
+static inline unsigned int fl_cap(const struct sge_fl *fl)
+{
+ return fl->size - 8; /* 1 descriptor = 8 buffers */
+}
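+
+/* For example, a free list created with fl->size = 1024 buffer slots
+ * advertises a capacity of 1024 - 8 = 1016 buffers here, leaving one
+ * full descriptor's worth (8 buffers) unpopulated so the hardware never
+ * mistakes a full ring for an empty one.
+ */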
+
+/**
+ * fl_starving - return whether a Free List is starving.
+ * @adapter: pointer to the adapter
+ * @fl: the Free List
+ *
+ * Tests specified Free List to see whether the number of buffers
+ * available to the hardware has fallen below our "starvation"
+ * threshold.
+ */
+static inline bool fl_starving(const struct adapter *adapter,
+ const struct sge_fl *fl)
+{
+ const struct sge *s = &adapter->sge;
+
+ return fl->avail - fl->pend_cred <= s->fl_starve_thres;
+}
+
+static inline unsigned int get_buf_size(struct adapter *adapter,
+ const struct rx_sw_desc *d)
+{
+ unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
+ unsigned int buf_size = 0;
+
+ switch (rx_buf_size_idx) {
+ case RX_SMALL_MTU_BUF:
+ buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
+ break;
+
+ case RX_LARGE_MTU_BUF:
+ buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
+ break;
+
+ default:
+ BUG_ON(1);
+ /* NOT REACHED */
+ }
+
+ return buf_size;
+}
+
+/**
+ * free_rx_bufs - free the Rx buffers on an SGE free list
+ * @q: the SGE free list to free buffers from
+ * @n: how many buffers to free
+ *
+ * Release the next @n buffers on an SGE free-buffer Rx queue. The
+ * buffers must be made inaccessible to HW before calling this function.
+ */
+static void free_rx_bufs(struct sge_fl *q, int n)
+{
+ unsigned int cidx = q->cidx;
+ struct rx_sw_desc *d;
+
+ d = &q->sdesc[cidx];
+ while (n--) {
+ if (d->buf) {
+ rte_pktmbuf_free(d->buf);
+ d->buf = NULL;
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ q->avail--;
+ }
+ q->cidx = cidx;
+}
+
+/**
+ * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
+ * @q: the SGE free list
+ *
+ * Unmap the current buffer on an SGE free-buffer Rx queue. The
+ * buffer must be made inaccessible to HW before calling this function.
+ *
+ * This is similar to @free_rx_bufs above but does not free the buffer.
+ * Do note that the FL still loses any further access to the buffer.
+ */
+static void unmap_rx_buf(struct sge_fl *q)
+{
+ if (++q->cidx == q->size)
+ q->cidx = 0;
+ q->avail--;
+}
+
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
+ if (q->pend_cred >= 64) {
+ u32 val = adap->params.arch.sge_fl_db;
+
+ if (is_t4(adap->params.chip))
+ val |= V_PIDX(q->pend_cred / 8);
+ else
+ val |= V_PIDX_T5(q->pend_cred / 8);
+
+ /*
+ * Make sure all memory writes to the Free List queue are
+ * committed before we tell the hardware about them.
+ */
+ wmb();
+
+ /*
+ * If we don't have access to the new User Doorbell (T5+), use
+ * the old doorbell mechanism; otherwise use the new BAR2
+ * mechanism.
+ */
+ if (unlikely(!q->bar2_addr)) {
+ t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+ val | V_QID(q->cntxt_id));
+ } else {
+ writel(val | V_QID(q->bar2_qid),
+ (void *)((uintptr_t)q->bar2_addr +
+ SGE_UDB_KDOORBELL));
+
+ /*
+ * This Write memory Barrier will force the write to
+ * the User Doorbell area to be flushed.
+ */
+ wmb();
+ }
+ q->pend_cred &= 7;
+ }
+}
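+
+/*
+ * Worked example (hypothetical value): with q->pend_cred = 70, the
+ * doorbell is rung with a PIDX increment of 70 / 8 = 8 units (one unit
+ * per 8-buffer descriptor, i.e. 64 buffers handed to HW) and the
+ * remaining 70 & 7 = 6 credits are carried over to the next call.
+ */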
+
+static inline void set_rx_sw_desc(struct rx_sw_desc *sd, void *buf,
+ dma_addr_t mapping)
+{
+ sd->buf = buf;
+ sd->dma_addr = mapping; /* includes size low bits */
+}
+
+/**
+ * refill_fl_usembufs - refill an SGE Rx buffer ring with mbufs
+ * @adap: the adapter
+ * @q: the ring to refill
+ * @n: the number of new buffers to allocate
+ *
+ * (Re)populate an SGE free-buffer queue with up to @n new packet buffers
+ * allocated from the Rx queue's mempool. The caller must assure that
+ * @n does not exceed the queue's capacity. If the queue is found
+ * critically low afterwards, its starvation counter is bumped.
+ *
+ * Returns the number of buffers allocated.
+ */
+static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
+ int n)
+{
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, fl);
+ unsigned int cred = q->avail;
+ __be64 *d = &q->desc[q->pidx];
+ struct rx_sw_desc *sd = &q->sdesc[q->pidx];
+ unsigned int buf_size_idx = RX_SMALL_MTU_BUF;
+ struct rte_mbuf *buf_bulk[n];
+ int ret, i;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.jumbo_frame;
+
+ /* Use jumbo mtu buffers iff mbuf data room size can fit jumbo data. */
+ mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
+ if (jumbo_en &&
+ ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
+ buf_size_idx = RX_LARGE_MTU_BUF;
+
+ ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);
+ if (unlikely(ret != 0)) {
+ dev_debug(adap, "%s: failed to allocate fl entries in bulk ..\n",
+ __func__);
+ q->alloc_failed++;
+ rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
+ goto out;
+ }
+
+ for (i = 0; i < n; i++) {
+ struct rte_mbuf *mbuf = buf_bulk[i];
+ dma_addr_t mapping;
+
+ if (!mbuf) {
+ dev_debug(adap, "%s: mbuf alloc failed\n", __func__);
+ q->alloc_failed++;
+ rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
+ goto out;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->next = NULL;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->rspq.port_id;
+
+ mapping = (dma_addr_t)(mbuf->buf_physaddr + mbuf->data_off);
+ mapping |= buf_size_idx;
+ *d++ = cpu_to_be64(mapping);
+ set_rx_sw_desc(sd, mbuf, mapping);
+ sd++;
+
+ q->avail++;
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ sd = q->sdesc;
+ d = q->desc;
+ }
+ }
+
+out: cred = q->avail - cred;
+ q->pend_cred += cred;
+ ring_fl_db(adap, q);
+
+ if (unlikely(fl_starving(adap, q))) {
+ /*
+ * Make sure data has been written to free list
+ */
+ wmb();
+ q->low++;
+ }
+
+ return cred;
+}
+
+/**
+ * refill_fl - refill an SGE Rx buffer ring with mbufs
+ * @adap: the adapter
+ * @q: the ring to refill
+ * @n: the number of new buffers to allocate
+ *
+ * (Re)populate an SGE free-buffer queue with up to @n new packet buffers
+ * allocated from the Rx queue's mempool. The caller must assure that
+ * @n does not exceed the queue's capacity. Returns the number of buffers
+ * allocated.
+ */
+static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n)
+{
+ return refill_fl_usembufs(adap, q, n);
+}
+
+static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
+{
+ refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail));
+}
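+
+/*
+ * Worked example (hypothetical values): with fl_cap(fl) = 1016 and
+ * fl->avail = 900, __refill_fl() tops the FL up by
+ * min(MAX_RX_REFILL, 116) fresh mbufs from the queue's mempool.
+ */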
+
+/*
+ * Return the number of reclaimable descriptors in a Tx queue.
+ */
+static inline int reclaimable(const struct sge_txq *q)
+{
+ int hw_cidx = ntohs(q->stat->cidx);
+
+ hw_cidx -= q->cidx;
+ if (hw_cidx < 0)
+ return hw_cidx + q->size;
+ return hw_cidx;
+}
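+
+/*
+ * Worked example (hypothetical indices): with the status page reporting
+ * hw_cidx = 10, q->cidx = 1000 and q->size = 1024, the difference wraps:
+ * 10 - 1000 = -990, so -990 + 1024 = 34 descriptors are reclaimable.
+ */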
+
+/**
+ * reclaim_completed_tx - reclaims completed Tx descriptors
+ * @q: the Tx queue to reclaim completed descriptors from
+ *
+ * Reclaims Tx descriptors that the SGE has indicated it has processed.
+ */
+void reclaim_completed_tx(struct sge_txq *q)
+{
+ unsigned int avail = reclaimable(q);
+
+ do {
+ /* reclaim as much as possible */
+ reclaim_tx_desc(q, avail);
+ q->in_use -= avail;
+ avail = reclaimable(q);
+ } while (avail);
+}
+
+/**
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ *
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ /*
+ * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
+ * addresses. The DSGL Work Request starts off with a 32-bit DSGL
+ * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
+ * repeated sequences of { Length[i], Length[i+1], Address[i],
+ * Address[i+1] } (this ensures that all addresses are on 64-bit
+ * boundaries). If N is even, then Length[N+1] should be set to 0 and
+ * Address[N+1] is omitted.
+ *
+ * The following calculation incorporates all of the above. It's
+ * somewhat hard to follow but, briefly: the "+2" accounts for the
+ * first two flits which include the DSGL header, Length0 and
+ * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
+ * flits for every pair of the remaining N) +1 if (n-1) is odd; and
+ * finally the "+((n-1)&1)" adds the one remaining flit needed if
+ * (n-1) is odd ...
+ */
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
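+
+/*
+ * Worked example: sgl_len(3) computes n = 2 and returns
+ * (3 * 2) / 2 + (2 & 1) + 2 = 5 flits, matching the layout above:
+ * one flit for the DSGL header + Length0, one for Address0, and three
+ * for the remaining pair { Length1, Length2, Address1, Address2 }.
+ */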
+
+/**
+ * flits_to_desc - returns the num of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Returns the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ return DIV_ROUND_UP(n, 8);
+}
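+
+/*
+ * Worked example: a Tx descriptor holds 8 flits, so flits_to_desc(5)
+ * returns 1 and flits_to_desc(9) returns 2.
+ */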
+
+/**
+ * is_eth_imm - can an Ethernet packet be sent as immediate data?
+ * @m: the packet
+ *
+ * Returns whether an Ethernet packet is small enough to fit as
+ * immediate data. Return value corresponds to the headroom required.
+ */
+static inline int is_eth_imm(const struct rte_mbuf *m)
+{
+ unsigned int hdrlen = (m->ol_flags & PKT_TX_TCP_SEG) ?
+ sizeof(struct cpl_tx_pkt_lso_core) : 0;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (m->pkt_len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+
+ return 0;
+}
+
+/**
+ * calc_tx_flits - calculate the number of flits for a packet Tx WR
+ * @m: the packet
+ *
+ * Returns the number of flits needed for a Tx WR for the given Ethernet
+ * packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_flits(const struct rte_mbuf *m)
+{
+ unsigned int flits;
+ int hdrlen;
+
+ /*
+ * If the mbuf is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the mbuf data in the Work Request.
+ */
+
+ hdrlen = is_eth_imm(m);
+ if (hdrlen)
+ return DIV_ROUND_UP(m->pkt_len + hdrlen, sizeof(__be64));
+
+ /*
+ * Otherwise, we're going to have to construct a Scatter gather list
+ * of the mbuf body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+ * with an embedded TX Packet Write CPL message.
+ */
+ flits = sgl_len(m->nb_segs);
+ if (m->tso_segsz)
+ flits += (sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ else
+ flits += (sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
+
+/**
+ * write_sgl - populate a scatter/gather list for a packet
+ * @mbuf: the packet
+ * @q: the Tx queue we are writing into
+ * @sgl: starting location for writing the SGL
+ * @end: points right after the end of the SGL
+ * @start: start offset into mbuf main-body data to include in the SGL
+ * @addr: address of mapped region
+ *
+ * Generates a scatter/gather list for the buffers that make up a packet.
+ * The caller must provide adequate space for the SGL that will be written.
+ * The SGL includes all of the packet's page fragments and the data in its
+ * main body except for the first @start bytes. @sgl must be 16-byte
+ * aligned and within a Tx descriptor with available space. @end points
+ * right after the end of the SGL but does not account for any potential
+ * wrap around, i.e., @end > @sgl.
+ */
+static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr)
+{
+ unsigned int i, len;
+ struct ulptx_sge_pair *to;
+ struct rte_mbuf *m = mbuf;
+ unsigned int nfrags = m->nb_segs;
+ struct ulptx_sge_pair buf[nfrags / 2];
+
+ len = m->data_len - start;
+ sgl->len0 = htonl(len);
+ sgl->addr0 = rte_cpu_to_be_64(addr[0]);
+
+ sgl->cmd_nsge = htonl(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
+ V_ULPTX_NSGE(nfrags));
+ if (likely(--nfrags == 0))
+ return;
+ /*
+ * Most of the complexity below deals with the possibility we hit the
+ * end of the queue in the middle of writing the SGL. For this case
+ * only we create the SGL in a temporary buffer and then copy it.
+ */
+ to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
+
+ for (i = 0; nfrags >= 2; nfrags -= 2, to++) {
+ m = m->next;
+ to->len[0] = rte_cpu_to_be_32(m->data_len);
+ to->addr[0] = rte_cpu_to_be_64(addr[++i]);
+ m = m->next;
+ to->len[1] = rte_cpu_to_be_32(m->data_len);
+ to->addr[1] = rte_cpu_to_be_64(addr[++i]);
+ }
+ if (nfrags) {
+ m = m->next;
+ to->len[0] = rte_cpu_to_be_32(m->data_len);
+ to->len[1] = rte_cpu_to_be_32(0);
+ to->addr[0] = rte_cpu_to_be_64(addr[i + 1]);
+ }
+ if (unlikely((u8 *)end > (u8 *)q->stat)) {
+ unsigned int part0 = RTE_PTR_DIFF((u8 *)q->stat,
+ (u8 *)sgl->sge);
+ unsigned int part1;
+
+ if (likely(part0))
+ memcpy(sgl->sge, buf, part0);
+ part1 = RTE_PTR_DIFF((u8 *)end, (u8 *)q->stat);
+ rte_memcpy(q->desc, RTE_PTR_ADD((u8 *)buf, part0), part1);
+ end = RTE_PTR_ADD((void *)q->desc, part1);
+ }
+ if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
+ *(u64 *)end = 0;
+}
+
+#define IDXDIFF(head, tail, wrap) \
+ ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
+
+#define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size)
+#define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size)
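+
+/*
+ * Worked example: IDXDIFF(2, 1020, 1024) = 1024 - 1020 + 2 = 6, i.e. the
+ * head index has advanced 6 slots past the tail across the ring wrap.
+ */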
+
+/**
+ * ring_tx_db - ring a Tx queue's doorbell
+ * @adap: the adapter
+ * @q: the Tx queue
+ *
+ * Rings the doorbell for a Tx queue. The number of new descriptors to
+ * hand to HW is derived from the queue's pidx/dbidx difference.
+ */
+static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q)
+{
+ int n = Q_IDXDIFF(q, dbidx);
+
+ /*
+ * Make sure that all writes to the TX Descriptors are committed
+ * before we tell the hardware about them.
+ */
+ rte_wmb();
+
+ /*
+ * If we don't have access to the new User Doorbell (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(!q->bar2_addr)) {
+ u32 val = V_PIDX(n);
+
+ /*
+ * For T4 we need to participate in the Doorbell Recovery
+ * mechanism.
+ */
+ if (!q->db_disabled)
+ t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+ V_QID(q->cntxt_id) | val);
+ else
+ q->db_pidx_inc += n;
+ q->db_pidx = q->pidx;
+ } else {
+ u32 val = V_PIDX_T5(n);
+
+ /*
+ * T4 and later chips share the same PIDX field offset within
+ * the doorbell, but T5 and later shrank the field in order to
+ * gain a bit for Doorbell Priority. The field was absurdly
+ * large in the first place (14 bits) so we just use the T5
+ * and later limits and warn if a Queue ID is too large.
+ */
+ WARN_ON(val & F_DBPRIO);
+
+ writel(val | V_QID(q->bar2_qid),
+ (void *)((uintptr_t)q->bar2_addr + SGE_UDB_KDOORBELL));
+
+ /*
+ * This Write Memory Barrier will force the write to the User
+ * Doorbell area to be flushed. This is needed to prevent
+ * writes on different CPUs for the same queue from hitting
+ * the adapter out of order. This is required when some Work
+ * Requests take the Write Combine Gather Buffer path (user
+ * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
+ * take the traditional path where we simply increment the
+ * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
+ * hardware DMA read the actual Work Request.
+ */
+ rte_wmb();
+ }
+ q->dbidx = q->pidx;
+}
+
+/*
+ * Figure out what HW csum a packet wants and return the appropriate control
+ * bits.
+ */
+static u64 hwcsum(enum chip_type chip, const struct rte_mbuf *m)
+{
+ int csum_type;
+
+ if (m->ol_flags & PKT_TX_IP_CKSUM) {
+ switch (m->ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ csum_type = TX_CSUM_TCPIP;
+ break;
+ case PKT_TX_UDP_CKSUM:
+ csum_type = TX_CSUM_UDPIP;
+ break;
+ default:
+ goto nocsum;
+ }
+ } else {
+ goto nocsum;
+ }
+
+ if (likely(csum_type >= TX_CSUM_TCPIP)) {
+ int hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len);
+ int eth_hdr_len = m->l2_len;
+
+ if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
+ hdr_len |= V_TXPKT_ETHHDR_LEN(eth_hdr_len);
+ else
+ hdr_len |= V_T6_TXPKT_ETHHDR_LEN(eth_hdr_len);
+ return V_TXPKT_CSUM_TYPE(csum_type) | hdr_len;
+ }
+nocsum:
+ /*
+ * unknown protocol, disable HW csum
+ * and hope a bad packet is detected
+ */
+ return F_TXPKT_L4CSUM_DIS;
+}
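+
+/*
+ * Worked example (hypothetical mbuf): for ol_flags containing
+ * PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM with l2_len = 14 and l3_len = 20,
+ * this returns V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) with the Ethernet and IP
+ * header lengths encoded; any other flag combination falls through to
+ * F_TXPKT_L4CSUM_DIS.
+ */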
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+#define MAX_COALESCE_LEN 64000
+
+static inline int wraps_around(struct sge_txq *q, int ndesc)
+{
+ return (q->pidx + ndesc) > q->size ? 1 : 0;
+}
+
+static void tx_timer_cb(void *data)
+{
+ struct adapter *adap = (struct adapter *)data;
+ struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
+ int i;
+ unsigned int coal_idx;
+
+ /* monitor any pending tx */
+ for (i = 0; i < adap->sge.max_ethqsets; i++, txq++) {
+ if (t4_os_trylock(&txq->txq_lock)) {
+ coal_idx = txq->q.coalesce.idx;
+ if (coal_idx) {
+ if (coal_idx == txq->q.last_coal_idx &&
+ txq->q.pidx == txq->q.last_pidx) {
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ } else {
+ txq->q.last_coal_idx = coal_idx;
+ txq->q.last_pidx = txq->q.pidx;
+ }
+ }
+ t4_os_unlock(&txq->txq_lock);
+ }
+ }
+ rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
+}
+
+/**
+ * ship_tx_pkt_coalesce_wr - finalizes and ships a coalesce WR
+ * @adap: adapter structure
+ * @txq: tx queue
+ *
+ * Writes the different fields of the pkts WR and sends it.
+ */
+static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
+ struct sge_eth_txq *txq)
+{
+ u32 wr_mid;
+ struct sge_txq *q = &txq->q;
+ struct fw_eth_tx_pkts_wr *wr;
+ unsigned int ndesc;
+
+ /* fill the pkts WR header */
+ wr = (void *)&q->desc[q->pidx];
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
+
+ wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
+ ndesc = flits_to_desc(q->coalesce.flits);
+ wr->equiq_to_len16 = htonl(wr_mid);
+ wr->plen = cpu_to_be16(q->coalesce.len);
+ wr->npkt = q->coalesce.idx;
+ wr->r3 = 0;
+ wr->type = q->coalesce.type;
+
+ /* zero out coalesce structure members */
+ q->coalesce.idx = 0;
+ q->coalesce.flits = 0;
+ q->coalesce.len = 0;
+
+ txq_advance(q, ndesc);
+ txq->stats.coal_wr++;
+ txq->stats.coal_pkts += wr->npkt;
+
+ if (Q_IDXDIFF(q, equeidx) >= q->size / 2) {
+ q->equeidx = q->pidx;
+ wr_mid |= F_FW_WR_EQUEQ;
+ wr->equiq_to_len16 = htonl(wr_mid);
+ }
+ ring_tx_db(adap, q);
+}
+
+/**
+ * should_tx_packet_coalesce - decides whether to coalesce an mbuf or not
+ * @txq: tx queue where the mbuf is sent
+ * @mbuf: mbuf to be sent
+ * @nflits: return value for number of flits needed
+ * @adap: adapter structure
+ *
+ * This function decides if a packet should be coalesced or not.
+ */
+static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
+ struct rte_mbuf *mbuf,
+ unsigned int *nflits,
+ struct adapter *adap)
+{
+ struct sge_txq *q = &txq->q;
+ unsigned int flits, ndesc;
+ unsigned char type = 0;
+ int credits, hw_cidx = ntohs(q->stat->cidx);
+ int in_use = q->pidx - hw_cidx + flits_to_desc(q->coalesce.flits);
+
+ /* use coal WR type 1 when no frags are present */
+ type = (mbuf->nb_segs == 1) ? 1 : 0;
+
+ if (in_use < 0)
+ in_use += q->size;
+
+ if (unlikely(type != q->coalesce.type && q->coalesce.idx))
+ ship_tx_pkt_coalesce_wr(adap, txq);
+
+ /* calculate the number of flits required for coalescing this packet
+ * without the 2 flits of the WR header. These are added further down
+ * if we are just starting a new PKTS WR. sgl_len() doesn't account for
+ * the possible 16-byte alignment of ULP TX commands, so we do that here.
+ */
+ flits = (sgl_len(mbuf->nb_segs) + 1) & ~1U;
+ if (type == 0)
+ flits += (sizeof(struct ulp_txpkt) +
+ sizeof(struct ulptx_idata)) / sizeof(__be64);
+ flits += sizeof(struct cpl_tx_pkt_core) / sizeof(__be64);
+ *nflits = flits;
+
+ /* If coalescing is on, the mbuf is added to a pkts WR */
+ if (q->coalesce.idx) {
+ ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8);
+ credits = txq_avail(q) - ndesc;
+
+ /* If we are out of credits or would wrap around, ship the
+ * already coalesced mbufs and let the non-coalesce pass
+ * handle this mbuf.
+ */
+ if (unlikely(credits < 0 || wraps_around(q, ndesc))) {
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ return 0;
+ }
+
+ /* If the max coalesce len or the max WR len is reached
+ * ship the WR and keep coalescing on.
+ */
+ if (unlikely((q->coalesce.len + mbuf->pkt_len >
+ MAX_COALESCE_LEN) ||
+ (q->coalesce.flits + flits >
+ q->coalesce.max))) {
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ goto new;
+ }
+ return 1;
+ }
+
+new:
+ /* start a new pkts WR, the WR header is not filled below */
+ flits += sizeof(struct fw_eth_tx_pkts_wr) / sizeof(__be64);
+ ndesc = flits_to_desc(q->coalesce.flits + flits);
+ credits = txq_avail(q) - ndesc;
+
+ if (unlikely(credits < 0 || wraps_around(q, ndesc)))
+ return 0;
+ q->coalesce.flits += 2;
+ q->coalesce.type = type;
+ q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
+ 2 * sizeof(__be64);
+ return 1;
+}
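+
+/*
+ * Worked example (assuming sizeof(struct cpl_tx_pkt_core) == 16): a
+ * single-segment mbuf uses coalesce type 1, so
+ * flits = (sgl_len(1) + 1) & ~1U = 2, plus 2 flits for the CPL message,
+ * giving 4 flits per packet on top of the one-time 2-flit WR header.
+ */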
+
+/**
+ * tx_do_packet_coalesce - add an mbuf to a coalesce WR
+ * @txq: sge_eth_txq used to send the mbuf
+ * @mbuf: mbuf to be sent
+ * @flits: flits needed for this mbuf
+ * @adap: adapter structure
+ * @pi: port_info structure
+ * @addr: mapped address of the mbuf
+ *
+ * Adds an mbuf to be sent as part of a coalesce WR by filling a
+ * ulp_tx_pkt command, ulp_tx_sc_imm command, cpl message and
+ * ulp_tx_sc_dsgl command.
+ */
+static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
+ struct rte_mbuf *mbuf,
+ int flits, struct adapter *adap,
+ const struct port_info *pi,
+ dma_addr_t *addr)
+{
+ u64 cntrl, *end;
+ struct sge_txq *q = &txq->q;
+ struct ulp_txpkt *mc;
+ struct ulptx_idata *sc_imm;
+ struct cpl_tx_pkt_core *cpl;
+ struct tx_sw_desc *sd;
+ unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;
+
+ if (q->coalesce.type == 0) {
+ mc = (struct ulp_txpkt *)q->coalesce.ptr;
+ mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) |
+ V_ULP_TXPKT_FID(adap->sge.fw_evtq.cntxt_id) |
+ F_ULP_TXPKT_RO);
+ mc->len = htonl(DIV_ROUND_UP(flits, 2));
+ sc_imm = (struct ulptx_idata *)(mc + 1);
+ sc_imm->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+ F_ULP_TX_SC_MORE);
+ sc_imm->len = htonl(sizeof(*cpl));
+ end = (u64 *)mc + flits;
+ cpl = (struct cpl_tx_pkt_core *)(sc_imm + 1);
+ } else {
+ end = (u64 *)q->coalesce.ptr + flits;
+ cpl = (struct cpl_tx_pkt_core *)q->coalesce.ptr;
+ }
+
+ /* update coalesce structure for this txq */
+ q->coalesce.flits += flits;
+ q->coalesce.ptr += flits * sizeof(__be64);
+ q->coalesce.len += mbuf->pkt_len;
+
+ /* fill the cpl message, same as in t4_eth_xmit, this should be kept
+ * similar to t4_eth_xmit
+ */
+ if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
+ cntrl = hwcsum(adap->params.chip, mbuf) |
+ F_TXPKT_IPCSUM_DIS;
+ txq->stats.tx_cso++;
+ } else {
+ cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
+ }
+
+ if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ txq->stats.vlan_ins++;
+ cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
+ }
+
+ cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
+ V_TXPKT_INTF(pi->tx_chan) |
+ V_TXPKT_PF(adap->pf));
+ cpl->pack = htons(0);
+ cpl->len = htons(len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+ write_sgl(mbuf, q, (struct ulptx_sgl *)(cpl + 1), end, 0, addr);
+ txq->stats.pkts++;
+ txq->stats.tx_bytes += len;
+
+ sd = &q->sdesc[q->pidx + (idx >> 1)];
+ if (!(idx & 1)) {
+ if (sd->coalesce.idx) {
+ int i;
+
+ for (i = 0; i < sd->coalesce.idx; i++) {
+ rte_pktmbuf_free(sd->coalesce.mbuf[i]);
+ sd->coalesce.mbuf[i] = NULL;
+ }
+ }
+ }
+
+ /* Store pointers to the mbuf and the sgl used in free_tx_desc.
+ * Each tx desc can hold two such pointers, matching the value
+ * of ETH_COALESCE_PKT_PER_DESC.
+ */
+ sd->coalesce.mbuf[idx & 1] = mbuf;
+ sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1);
+ sd->coalesce.idx = (idx & 1) + 1;
+
+ /* send the coalesced work request if the max is reached */
+ if (++q->coalesce.idx == ETH_COALESCE_PKT_NUM)
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ return 0;
+}
+
+/**
+ * t4_eth_xmit - add a packet to an Ethernet Tx queue
+ * @txq: the egress queue
+ * @mbuf: the packet
+ *
+ * Add a packet to an SGE Ethernet Tx queue.
+ */
+int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf)
+{
+ const struct port_info *pi;
+ struct cpl_tx_pkt_lso_core *lso;
+ struct adapter *adap;
+ struct rte_mbuf *m = mbuf;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ struct tx_sw_desc *d;
+ dma_addr_t addr[m->nb_segs];
+ unsigned int flits, ndesc, cflits;
+ int l3hdr_len, l4hdr_len, eth_xtra_len;
+ int len, last_desc;
+ int credits;
+ u32 wr_mid;
+ u64 cntrl, *end;
+ bool v6;
+ u32 max_pkt_len = txq->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ /* Reject xmit if queue is stopped */
+ if (unlikely(txq->flags & EQ_STOPPED))
+ return -(EBUSY);
+
+ /*
+ * The chip min packet length is 10 octets but play safe and reject
+ * anything shorter than an Ethernet header.
+ */
+ if (unlikely(m->pkt_len < ETHER_HDR_LEN)) {
+out_free:
+ rte_pktmbuf_free(m);
+ return 0;
+ }
+
+ if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
+ (unlikely(m->pkt_len > max_pkt_len)))
+ goto out_free;
+
+ pi = (struct port_info *)txq->eth_dev->data->dev_private;
+ adap = pi->adapter;
+
+ cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
+ /* align the end of coalesce WR to a 512 byte boundary */
+ txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
+
+ if (!((m->ol_flags & PKT_TX_TCP_SEG) || (m->pkt_len > ETHER_MAX_LEN))) {
+ if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
+ if (unlikely(map_mbuf(mbuf, addr) < 0)) {
+ dev_warn(adap, "%s: mapping err for coalesce\n",
+ __func__);
+ txq->stats.mapping_err++;
+ goto out_free;
+ }
+ rte_prefetch0((volatile void *)addr);
+ return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
+ pi, addr);
+ } else {
+ return -EBUSY;
+ }
+ }
+
+ if (txq->q.coalesce.idx)
+ ship_tx_pkt_coalesce_wr(adap, txq);
+
+ flits = calc_tx_flits(m);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&txq->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ dev_debug(adap, "%s: Tx ring %u full; credits = %d\n",
+ __func__, txq->q.cntxt_id, credits);
+ return -EBUSY;
+ }
+
+ if (unlikely(map_mbuf(m, addr) < 0)) {
+ txq->stats.mapping_err++;
+ goto out_free;
+ }
+
+ wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
+ if (Q_IDXDIFF(&txq->q, equeidx) >= 64) {
+ txq->q.equeidx = txq->q.pidx;
+ wr_mid |= F_FW_WR_EQUEQ;
+ }
+
+ wr = (void *)&txq->q.desc[txq->q.pidx];
+ wr->equiq_to_len16 = htonl(wr_mid);
+ wr->r3 = rte_cpu_to_be_64(0);
+ end = (u64 *)wr + flits;
+
+ len = 0;
+ len += sizeof(*cpl);
+
+ /* Coalescing skipped and we send through normal path */
+ if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+ wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ V_FW_WR_IMMDLEN(len));
+ cpl = (void *)(wr + 1);
+ if (m->ol_flags & PKT_TX_IP_CKSUM) {
+ cntrl = hwcsum(adap->params.chip, m) |
+ F_TXPKT_IPCSUM_DIS;
+ txq->stats.tx_cso++;
+ }
+ } else {
+ lso = (void *)(wr + 1);
+ v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
+ l3hdr_len = m->l3_len;
+ l4hdr_len = m->l4_len;
+ eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
+ len += sizeof(*lso);
+ wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ V_FW_WR_IMMDLEN(len));
+ lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
+ F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
+ V_LSO_IPV6(v6) |
+ V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
+ V_LSO_IPHDR_LEN(l3hdr_len / 4) |
+ V_LSO_TCPHDR_LEN(l4hdr_len / 4));
+ lso->ipid_ofst = htons(0);
+ lso->mss = htons(m->tso_segsz);
+ lso->seqno_offset = htonl(0);
+ if (is_t4(adap->params.chip))
+ lso->len = htonl(m->pkt_len);
+ else
+ lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
+ cpl = (void *)(lso + 1);
+ cntrl = V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ V_TXPKT_IPHDR_LEN(l3hdr_len) |
+ V_TXPKT_ETHHDR_LEN(eth_xtra_len);
+ txq->stats.tso++;
+ txq->stats.tx_cso += m->tso_segsz;
+ }
+
+ if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ txq->stats.vlan_ins++;
+ cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
+ }
+
+ cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
+ V_TXPKT_INTF(pi->tx_chan) |
+ V_TXPKT_PF(adap->pf));
+ cpl->pack = htons(0);
+ cpl->len = htons(m->pkt_len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ txq->stats.pkts++;
+ txq->stats.tx_bytes += m->pkt_len;
+ last_desc = txq->q.pidx + ndesc - 1;
+ if (last_desc >= (int)txq->q.size)
+ last_desc -= txq->q.size;
+
+ d = &txq->q.sdesc[last_desc];
+ if (d->coalesce.idx) {
+ int i;
+
+ for (i = 0; i < d->coalesce.idx; i++) {
+ rte_pktmbuf_free(d->coalesce.mbuf[i]);
+ d->coalesce.mbuf[i] = NULL;
+ }
+ d->coalesce.idx = 0;
+ }
+ write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
+ addr);
+ txq->q.sdesc[last_desc].mbuf = m;
+ txq->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
+ txq_advance(&txq->q, ndesc);
+ ring_tx_db(adap, &txq->q);
+ return 0;
+}
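+
+/*
+ * Sketch of a typical caller (hedged; the actual burst function lives in
+ * the ethdev layer, and the lock helper name is assumed): a Tx burst
+ * handler feeds mbufs in one at a time under the queue lock and stops on
+ * the first error, e.g.
+ *
+ *	t4_os_lock(&txq->txq_lock);
+ *	for (sent = 0; sent < nb_pkts; sent++)
+ *		if (t4_eth_xmit(txq, tx_pkts[sent]) < 0)
+ *			break;
+ *	t4_os_unlock(&txq->txq_lock);
+ */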
+
+/**
+ * alloc_ring - allocate resources for an SGE descriptor ring
+ * @nelem: the number of descriptors
+ * @elem_size: the size of each descriptor
+ * @sw_size: the size of the SW state associated with each ring element
+ * @phys: the physical address of the allocated ring
+ * @metadata: address of the array holding the SW state for the ring
+ * @stat_size: extra space in HW ring for status information
+ * @queue_id: the queue index (currently unused)
+ * @socket_id: preferred socket for memory allocations
+ * @z_name: memzone name for the HW ring
+ * @z_name_sw: memzone name for the SW ring
+ *
+ * Allocates resources for an SGE descriptor ring, such as Tx queues,
+ * free buffer lists, or response queues. Each SGE ring requires
+ * space for its HW descriptors plus, optionally, space for the SW state
+ * associated with each HW entry (the metadata). The function returns
+ * three values: the virtual address for the HW ring (the return value
+ * of the function), the bus address of the HW ring, and the address
+ * of the SW ring.
+ */
+static void *alloc_ring(size_t nelem, size_t elem_size,
+ size_t sw_size, dma_addr_t *phys, void *metadata,
+ size_t stat_size, __rte_unused uint16_t queue_id,
+ int socket_id, const char *z_name,
+ const char *z_name_sw)
+{
+ size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size;
+ const struct rte_memzone *tz;
+ void *s = NULL;
+
+ dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; "
+ "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;"
+ " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size,
+ stat_size, queue_id, socket_id, z_name, z_name_sw);
+
+ tz = rte_memzone_lookup(z_name);
+ if (tz) {
+ dev_debug(adapter, "%s: tz exists...returning existing..\n",
+ __func__);
+ goto alloc_sw_ring;
+ }
+
+ /*
+ * Allocate TX/RX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ tz = rte_memzone_reserve_aligned(z_name, len, socket_id, 0, 4096);
+ if (!tz)
+ return NULL;
+
+alloc_sw_ring:
+ memset(tz->addr, 0, len);
+ if (sw_size) {
+ s = rte_zmalloc_socket(z_name_sw, nelem * sw_size,
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (!s) {
+ dev_err(adapter, "%s: failed to get sw_ring memory\n",
+ __func__);
+ return NULL;
+ }
+ }
+ if (metadata)
+ *(void **)metadata = s;
+
+ *phys = (uint64_t)tz->phys_addr;
+ return tz->addr;
+}
+
+/**
+ * t4_pktgl_to_mbuf_usembufs - build an mbuf from a packet gather list
+ * @gl: the gather list
+ *
+ * Builds an mbuf from the given packet gather list. Returns the mbuf or
+ * %NULL if the gather list has more than one fragment.
+ */
+static struct rte_mbuf *t4_pktgl_to_mbuf_usembufs(const struct pkt_gl *gl)
+{
+ /*
+ * If there's only one mbuf fragment, just return that.
+ */
+ if (likely(gl->nfrags == 1))
+ return gl->mbufs[0];
+
+ return NULL;
+}
+
+/**
+ * t4_pktgl_to_mbuf - build an mbuf from a packet gather list
+ * @gl: the gather list
+ *
+ * Builds an mbuf from the given packet gather list. Returns the mbuf or
+ * %NULL if the mbuf cannot be built from the gather list.
+ */
+static struct rte_mbuf *t4_pktgl_to_mbuf(const struct pkt_gl *gl)
+{
+ return t4_pktgl_to_mbuf_usembufs(gl);
+}
+
+#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
+ ((dma_addr_t) ((mb)->buf_physaddr + (mb)->data_off))
+
+/**
+ * t4_ethrx_handler - process an ingress ethernet packet
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the RX_PKT message
+ * @si: the gather list of packet fragments
+ *
+ * Process an ingress ethernet packet and deliver it to the stack.
+ */
+int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *si)
+{
+ struct rte_mbuf *mbuf;
+ const struct cpl_rx_pkt *pkt;
+ const struct rss_header *rss_hdr;
+ bool csum_ok;
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+
+ rss_hdr = (const void *)rsp;
+ pkt = (const void *)&rsp[1];
+ csum_ok = pkt->csum_calc && !pkt->err_vec;
+
+ mbuf = t4_pktgl_to_mbuf(si);
+ if (unlikely(!mbuf)) {
+ rxq->stats.rx_drops++;
+ return 0;
+ }
+
+ mbuf->port = pkt->iff;
+ if (pkt->l2info & htonl(F_RXF_IP)) {
+ mbuf->packet_type = RTE_PTYPE_L3_IPV4;
+ if (unlikely(!csum_ok))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if ((pkt->l2info & htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else if (pkt->l2info & htonl(F_RXF_IP6)) {
+ mbuf->packet_type = RTE_PTYPE_L3_IPV6;
+ }
+
+ if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
+ mbuf->ol_flags |= PKT_RX_RSS_HASH;
+ mbuf->hash.rss = ntohl(rss_hdr->hash_val);
+ }
+
+ if (pkt->vlan_ex) {
+ mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+ mbuf->vlan_tci = ntohs(pkt->vlan);
+ }
+ rxq->stats.pkts++;
+ rxq->stats.rx_bytes += mbuf->pkt_len;
+
+ return 0;
+}
+
+/**
+ * is_new_response - check if a response is newly written
+ * @r: the response descriptor
+ * @q: the response queue
+ *
+ * Returns true if a response descriptor contains a yet unprocessed
+ * response.
+ */
+static inline bool is_new_response(const struct rsp_ctrl *r,
+ const struct sge_rspq *q)
+{
+ return (r->u.type_gen >> S_RSPD_GEN) == q->gen;
+}
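+
+/*
+ * Worked example: the queue starts with q->gen = 1 and HW writes each
+ * entry with the current generation in its type_gen field. Once the
+ * driver's cidx wraps past the end of the ring, rspq_next() below flips
+ * q->gen to 0, so stale entries still carrying generation 1 no longer
+ * compare as new.
+ */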
+
+#define CXGB4_MSG_AN ((void *)1)
+
+/**
+ * rspq_next - advance to the next entry in a response queue
+ * @q: the queue
+ *
+ * Updates the state of a response queue to advance it to the next entry.
+ */
+static inline void rspq_next(struct sge_rspq *q)
+{
+ q->cur_desc = (const __be64 *)((const char *)q->cur_desc + q->iqe_len);
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->gen ^= 1;
+ q->cur_desc = q->desc;
+ }
+}
+
+/**
+ * process_responses - process responses from an SGE response queue
+ * @q: the ingress queue to process
+ * @budget: how many responses can be processed in this round
+ * @rx_pkts: array in which to return the received packets
+ *
+ * Process responses from an SGE response queue up to the supplied budget.
+ * Responses include received packets as well as control messages from FW
+ * or HW.
+ *
+ * Additionally choose the interrupt holdoff time for the next interrupt
+ * on this queue. If the system is under memory shortage use a fairly
+ * long delay to help recovery.
+ */
+static int process_responses(struct sge_rspq *q, int budget,
+ struct rte_mbuf **rx_pkts)
+{
+ int ret = 0, rsp_type;
+ int budget_left = budget;
+ const struct rsp_ctrl *rc;
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+
+ while (likely(budget_left)) {
+ rc = (const struct rsp_ctrl *)
+ ((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc)));
+
+ if (!is_new_response(rc, q))
+ break;
+
+ /*
+ * Ensure response has been read
+ */
+ rmb();
+ rsp_type = G_RSPD_TYPE(rc->u.type_gen);
+
+ if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
+ const struct rx_sw_desc *rsd =
+ &rxq->fl.sdesc[rxq->fl.cidx];
+ const struct rss_header *rss_hdr =
+ (const void *)q->cur_desc;
+ const struct cpl_rx_pkt *cpl =
+ (const void *)&q->cur_desc[1];
+ bool csum_ok = cpl->csum_calc && !cpl->err_vec;
+ struct rte_mbuf *pkt, *npkt;
+ u32 len, bufsz;
+
+ len = ntohl(rc->pldbuflen_qid);
+ BUG_ON(!(len & F_RSPD_NEWBUF));
+ pkt = rsd->buf;
+ npkt = pkt;
+ len = G_RSPD_LEN(len);
+ pkt->pkt_len = len;
+
+ /* Chain additional mbufs to cover the full packet len if necessary */
+ while (len) {
+ struct rte_mbuf *new_pkt = rsd->buf;
+
+ bufsz = min(get_buf_size(q->adapter, rsd), len);
+ new_pkt->data_len = bufsz;
+ unmap_rx_buf(&rxq->fl);
+ len -= bufsz;
+ npkt->next = new_pkt;
+ npkt = new_pkt;
+ pkt->nb_segs++;
+ rsd = &rxq->fl.sdesc[rxq->fl.cidx];
+ }
+ npkt->next = NULL;
+ pkt->nb_segs--;
+
+ if (cpl->l2info & htonl(F_RXF_IP)) {
+ pkt->packet_type = RTE_PTYPE_L3_IPV4;
+ if (unlikely(!csum_ok))
+ pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if ((cpl->l2info &
+ htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
+ pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else if (cpl->l2info & htonl(F_RXF_IP6)) {
+ pkt->packet_type = RTE_PTYPE_L3_IPV6;
+ }
+
+ if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
+ pkt->ol_flags |= PKT_RX_RSS_HASH;
+ pkt->hash.rss = ntohl(rss_hdr->hash_val);
+ }
+
+ if (cpl->vlan_ex) {
+ pkt->ol_flags |= PKT_RX_VLAN_PKT;
+ pkt->vlan_tci = ntohs(cpl->vlan);
+ }
+ rxq->stats.pkts++;
+ rxq->stats.rx_bytes += pkt->pkt_len;
+ rx_pkts[budget - budget_left] = pkt;
+ } else if (likely(rsp_type == X_RSPD_TYPE_CPL)) {
+ ret = q->handler(q, q->cur_desc, NULL);
+ } else {
+ ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
+ }
+
+ if (unlikely(ret)) {
+ /* couldn't process descriptor, back off for recovery */
+ q->next_intr_params = V_QINTR_TIMER_IDX(NOMEM_TMR_IDX);
+ break;
+ }
+
+ rspq_next(q);
+ budget_left--;
+
+ if (R_IDXDIFF(q, gts_idx) >= 64) {
+ unsigned int cidx_inc = R_IDXDIFF(q, gts_idx);
+ unsigned int params;
+ u32 val;
+
+ if (fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
+ __refill_fl(q->adapter, &rxq->fl);
+ params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
+ q->next_intr_params = params;
+ val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);
+
+ if (unlikely(!q->bar2_addr))
+ t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),
+ val |
+ V_INGRESSQID((u32)q->cntxt_id));
+ else {
+ writel(val | V_INGRESSQID(q->bar2_qid),
+ (void *)((uintptr_t)q->bar2_addr +
+ SGE_UDB_GTS));
+ /*
+ * This Write memory Barrier will force the
+ * write to the User Doorbell area to be
+ * flushed.
+ */
+ wmb();
+ }
+ q->gts_idx = q->cidx;
+ }
+ }
+
+ /*
+ * If this is a Response Queue with an associated Free List and
+ * there's room for another chunk of new Free List buffer pointers,
+ * refill the Free List.
+ */
+
+ if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
+ __refill_fl(q->adapter, &rxq->fl);
+
+ return budget - budget_left;
+}
+
+int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
+ unsigned int budget, unsigned int *work_done)
+{
+ int err = 0;
+
+ *work_done = process_responses(q, budget, rx_pkts);
+ return err;
+}
+
+/**
+ * bar2_address - return the BAR2 address for an SGE Queue's Registers
+ * @adapter: the adapter
+ * @qid: the SGE Queue ID
+ * @qtype: the SGE Queue Type (Egress or Ingress)
+ * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ * Returns the BAR2 address for the SGE Queue Registers associated with
+ * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
+ * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
+ * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
+ * Registers are supported (e.g. the Write Combining Doorbell Buffer).
+ */
+static void __iomem *bar2_address(struct adapter *adapter, unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ unsigned int *pbar2_qid)
+{
+ u64 bar2_qoffset;
+ int ret;
+
+ ret = t4_bar2_sge_qregs(adapter, qid, qtype, &bar2_qoffset, pbar2_qid);
+ if (ret)
+ return NULL;
+
+ return adapter->bar2 + bar2_qoffset;
+}
+
+int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq)
+{
+ struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
+ unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
+
+ return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0,
+ rq->cntxt_id, fl_id, 0xffff);
+}
+
+int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq)
+{
+ struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
+ unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
+
+ return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0,
+ rq->cntxt_id, fl_id, 0xffff);
+}
+
+/*
+ * @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
+ * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
+ */
+int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+ struct rte_eth_dev *eth_dev, int intr_idx,
+ struct sge_fl *fl, rspq_handler_t hnd, int cong,
+ struct rte_mempool *mp, int queue_id, int socket_id)
+{
+ int ret, flsz = 0;
+ struct fw_iq_cmd c;
+ struct sge *s = &adap->sge;
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ char z_name_sw[RTE_MEMZONE_NAMESIZE];
+ unsigned int nb_refill;
+
+ /* Size needs to be multiple of 16, including status entry. */
+ iq->size = cxgbe_roundup(iq->size, 16);
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ eth_dev->driver->pci_drv.name, fwevtq ? "fwq_ring" : "rx_ring",
+ eth_dev->data->port_id, queue_id);
+ snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+ iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0,
+ queue_id, socket_id, z_name, z_name_sw);
+ if (!iq->desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+ V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
+ (sizeof(c) / 16));
+ c.type_to_iqandstindex =
+ htonl(V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
+ V_FW_IQ_CMD_IQASYNCH(fwevtq) |
+ V_FW_IQ_CMD_VIID(pi->viid) |
+ V_FW_IQ_CMD_IQANDST(intr_idx < 0) |
+ V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT) |
+ V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
+ -intr_idx - 1));
+ c.iqdroprss_to_iqesize =
+ htons(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ F_FW_IQ_CMD_IQGTSMODE |
+ V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
+ V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
+ c.iqsize = htons(iq->size);
+ c.iqaddr = cpu_to_be64(iq->phys_addr);
+ if (cong >= 0)
+ c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN);
+
+ if (fl) {
+ struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
+ fl);
+ enum chip_type chip = (enum chip_type)CHELSIO_CHIP_VERSION(
+ adap->params.chip);
+
+ /*
+ * Allocate the ring for the hardware free list (with space
+ * for its status page) along with the associated software
+ * descriptor ring. The free list size needs to be a multiple
+ * of the Egress Queue Unit and at least 2 Egress Units larger
+ * than the SGE's Egress Congestion Threshold
+ * (fl_starve_thres - 1).
+ */
+ if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
+ fl->size = s->fl_starve_thres - 1 + 2 * 8;
+ fl->size = cxgbe_roundup(fl->size, 8);
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ eth_dev->driver->pci_drv.name,
+ fwevtq ? "fwq_ring" : "fl_ring",
+ eth_dev->data->port_id, queue_id);
+ snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+ fl->desc = alloc_ring(fl->size, sizeof(__be64),
+ sizeof(struct rx_sw_desc),
+ &fl->addr, &fl->sdesc, s->stat_len,
+ queue_id, socket_id, z_name, z_name_sw);
+
+ if (!fl->desc)
+ goto fl_nomem;
+
+ flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
+ c.iqns_to_fl0congen |=
+ htonl(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
+ (unlikely(rxq->usembufs) ?
+ 0 : F_FW_IQ_CMD_FL0PACKEN) |
+ F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
+ F_FW_IQ_CMD_FL0PADEN);
+ if (cong >= 0)
+ c.iqns_to_fl0congen |=
+ htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
+ F_FW_IQ_CMD_FL0CONGCIF |
+ F_FW_IQ_CMD_FL0CONGEN);
+
+ /* In T6, for egress queue type FL there is internal overhead
+ * of 16B for header going into FLM module.
+ * Hence maximum allowed burst size will be 448 bytes.
+ */
+ c.fl0dcaen_to_fl0cidxfthresh =
+ htons(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_128B) |
+ V_FW_IQ_CMD_FL0FBMAX((chip <= CHELSIO_T5) ?
+ X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
+ c.fl0size = htons(flsz);
+ c.fl0addr = cpu_to_be64(fl->addr);
+ }
+
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (ret)
+ goto err;
+
+ iq->cur_desc = iq->desc;
+ iq->cidx = 0;
+ iq->gts_idx = 0;
+ iq->gen = 1;
+ iq->next_intr_params = iq->intr_params;
+ iq->cntxt_id = ntohs(c.iqid);
+ iq->abs_id = ntohs(c.physiqid);
+ iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS,
+ &iq->bar2_qid);
+ iq->size--; /* subtract status entry */
+ iq->eth_dev = eth_dev;
+ iq->handler = hnd;
+ iq->port_id = pi->port_id;
+ iq->mb_pool = mp;
+
+ /* set offset to -1 to distinguish ingress queues without FL */
+ iq->offset = fl ? 0 : -1;
+
+ if (fl) {
+ fl->cntxt_id = ntohs(c.fl0id);
+ fl->avail = 0;
+ fl->pend_cred = 0;
+ fl->pidx = 0;
+ fl->cidx = 0;
+ fl->alloc_failed = 0;
+
+ /*
+ * Note, we must initialize the BAR2 Free List User Doorbell
+ * information before refilling the Free List!
+ */
+ fl->bar2_addr = bar2_address(adap, fl->cntxt_id,
+ T4_BAR2_QTYPE_EGRESS,
+ &fl->bar2_qid);
+
+ nb_refill = refill_fl(adap, fl, fl_cap(fl));
+ if (nb_refill != fl_cap(fl)) {
+ ret = -ENOMEM;
+ dev_err(adap, "%s: mbuf alloc failed with error: %d\n",
+ __func__, ret);
+ goto refill_fl_err;
+ }
+ }
+
+ /*
+ * For T5 and later we attempt to set up the Congestion Manager values
+ * of the new RX Ethernet Queue. This should really be handled by
+ * firmware because it's more complex than any host driver wants to
+ * get involved with and it's different per chip and this is almost
+ * certainly wrong. Firmware would be wrong as well, but it would be
+ * a lot easier to fix in one place ... For now we do something very
+ * simple (and hopefully less wrong).
+ */
+ if (!is_t4(adap->params.chip) && cong >= 0) {
+ u32 param, val;
+ int i;
+
+ param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
+ V_FW_PARAMS_PARAM_YZ(iq->cntxt_id));
+ if (cong == 0) {
+ val = V_CONMCTXT_CNGTPMODE(X_CONMCTXT_CNGTPMODE_QUEUE);
+ } else {
+ val = V_CONMCTXT_CNGTPMODE(
+ X_CONMCTXT_CNGTPMODE_CHANNEL);
+ for (i = 0; i < 4; i++) {
+ if (cong & (1 << i))
+ val |= V_CONMCTXT_CNGCHMAP(1 <<
+ (i << 2));
+ }
+ }
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret)
+ dev_warn(adap->pdev_dev, "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n",
+ iq->cntxt_id, -ret);
+ }
+
+ return 0;
+
+refill_fl_err:
+ t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
+ iq->cntxt_id, fl->cntxt_id, 0xffff);
+fl_nomem:
+ ret = -ENOMEM;
+err:
+ iq->cntxt_id = 0;
+ iq->abs_id = 0;
+ if (iq->desc)
+ iq->desc = NULL;
+
+ if (fl && fl->desc) {
+ rte_free(fl->sdesc);
+ fl->cntxt_id = 0;
+ fl->sdesc = NULL;
+ fl->desc = NULL;
+ }
+ return ret;
+}
+
+static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
+{
+ q->cntxt_id = id;
+ q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS,
+ &q->bar2_qid);
+ q->cidx = 0;
+ q->pidx = 0;
+ q->dbidx = 0;
+ q->in_use = 0;
+ q->equeidx = 0;
+ q->coalesce.idx = 0;
+ q->coalesce.len = 0;
+ q->coalesce.flits = 0;
+ q->last_coal_idx = 0;
+ q->last_pidx = 0;
+ q->stat = (void *)&q->desc[q->size];
+}
+
+int t4_sge_eth_txq_start(struct sge_eth_txq *txq)
+{
+ /*
+ * TODO: For flow-control, queue may be stopped waiting to reclaim
+ * credits.
+ * Ensure queue is in EQ_STOPPED state before starting it.
+ */
+ if (!(txq->flags & EQ_STOPPED))
+ return -(EBUSY);
+
+ txq->flags &= ~EQ_STOPPED;
+
+ return 0;
+}
+
+int t4_sge_eth_txq_stop(struct sge_eth_txq *txq)
+{
+ txq->flags |= EQ_STOPPED;
+
+ return 0;
+}
+
+int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id)
+{
+ int ret, nentries;
+ struct fw_eq_eth_cmd c;
+ struct sge *s = &adap->sge;
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ char z_name_sw[RTE_MEMZONE_NAMESIZE];
+
+ /* Add status entries */
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ eth_dev->driver->pci_drv.name, "tx_ring",
+ eth_dev->data->port_id, queue_id);
+ snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+ txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
+ sizeof(struct tx_sw_desc), &txq->q.phys_addr,
+ &txq->q.sdesc, s->stat_len, queue_id,
+ socket_id, z_name, z_name_sw);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+ V_FW_EQ_ETH_CMD_PFN(adap->pf) |
+ V_FW_EQ_ETH_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC |
+ F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16));
+ c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE |
+ V_FW_EQ_ETH_CMD_VIID(pi->viid));
+ c.fetchszm_to_iqid =
+ htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
+ V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
+ F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid));
+ c.dcaen_to_eqsize =
+ htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
+ V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+ V_FW_EQ_ETH_CMD_EQSIZE(nentries));
+ c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (ret) {
+ rte_free(txq->q.sdesc);
+ txq->q.sdesc = NULL;
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)));
+ txq->stats.tso = 0;
+ txq->stats.pkts = 0;
+ txq->stats.tx_cso = 0;
+ txq->stats.coal_wr = 0;
+ txq->stats.vlan_ins = 0;
+ txq->stats.tx_bytes = 0;
+ txq->stats.coal_pkts = 0;
+ txq->stats.mapping_err = 0;
+ txq->flags |= EQ_STOPPED;
+ txq->eth_dev = eth_dev;
+ t4_os_lock_init(&txq->txq_lock);
+ return 0;
+}
+
+static void free_txq(struct sge_txq *q)
+{
+ q->cntxt_id = 0;
+ q->sdesc = NULL;
+ q->desc = NULL;
+}
+
+static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
+ struct sge_fl *fl)
+{
+ unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
+
+ t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
+ rq->cntxt_id, fl_id, 0xffff);
+ rq->cntxt_id = 0;
+ rq->abs_id = 0;
+ rq->desc = NULL;
+
+ if (fl) {
+ free_rx_bufs(fl, fl->avail);
+ rte_free(fl->sdesc);
+ fl->sdesc = NULL;
+ fl->cntxt_id = 0;
+ fl->desc = NULL;
+ }
+}
+
+/*
+ * Clear all queues of the port
+ *
+ * Note: This function must only be called after rx and tx path
+ * of the port have been disabled.
+ */
+void t4_sge_eth_clear_queues(struct port_info *pi)
+{
+ int i;
+ struct adapter *adap = pi->adapter;
+ struct sge_eth_rxq *rxq = &adap->sge.ethrxq[pi->first_qset];
+ struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
+
+ for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
+ if (rxq->rspq.desc)
+ t4_sge_eth_rxq_stop(adap, &rxq->rspq);
+ }
+ for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
+ if (txq->q.desc) {
+ struct sge_txq *q = &txq->q;
+
+ t4_sge_eth_txq_stop(txq);
+ reclaim_completed_tx(q);
+ free_tx_desc(q, q->size);
+ q->equeidx = q->pidx;
+ }
+ }
+}
+
+void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq)
+{
+ if (rxq->rspq.desc) {
+ t4_sge_eth_rxq_stop(adap, &rxq->rspq);
+ free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ? &rxq->fl : NULL);
+ }
+}
+
+void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq)
+{
+ if (txq->q.desc) {
+ t4_sge_eth_txq_stop(txq);
+ reclaim_completed_tx(&txq->q);
+ t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, txq->q.cntxt_id);
+ free_tx_desc(&txq->q, txq->q.size);
+ rte_free(txq->q.sdesc);
+ free_txq(&txq->q);
+ }
+}
+
+void t4_sge_tx_monitor_start(struct adapter *adap)
+{
+ rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
+}
+
+void t4_sge_tx_monitor_stop(struct adapter *adap)
+{
+ rte_eal_alarm_cancel(tx_timer_cb, (void *)adap);
+}
+
+/**
+ * t4_free_sge_resources - free SGE resources
+ * @adap: the adapter
+ *
+ * Frees resources used by the SGE queue sets.
+ */
+void t4_free_sge_resources(struct adapter *adap)
+{
+ int i;
+ struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
+ struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
+
+ /* clean up Ethernet Tx/Rx queues */
+ for (i = 0; i < adap->sge.max_ethqsets; i++, rxq++, txq++) {
+ /* Free only the queues allocated */
+ if (rxq->rspq.desc) {
+ t4_sge_eth_rxq_release(adap, rxq);
+ rxq->rspq.eth_dev = NULL;
+ }
+ if (txq->q.desc) {
+ t4_sge_eth_txq_release(adap, txq);
+ txq->eth_dev = NULL;
+ }
+ }
+
+ if (adap->sge.fw_evtq.desc)
+ free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
+}
+
+/**
+ * t4_sge_init - initialize SGE
+ * @adap: the adapter
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queues here, instead the driver
+ * top-level must request those individually.
+ *
+ * Called in two different modes:
+ *
+ * 1. Perform actual hardware initialization and record hard-coded
+ * parameters which were used. This gets used when we're the
+ * Master PF and the Firmware Configuration File support didn't
+ * work for some reason.
+ *
+ * 2. We're not the Master PF or initialization was performed with
+ * a Firmware Configuration File. In this case we need to grab
+ * any of the SGE operating parameters that we need to have in
+ * order to do our job and make sure we can live with them ...
+ */
+static int t4_sge_init_soft(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+ u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
+ u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
+ u32 ingress_rx_threshold;
+
+ /*
+ * Verify that CPL messages are going to the Ingress Queue for
+ * process_responses() and that only packet data is going to the
+ * Free Lists.
+ */
+ if ((t4_read_reg(adap, A_SGE_CONTROL) & F_RXPKTCPLMODE) !=
+ V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+ dev_err(adap, "bad SGE CPL MODE\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Validate the Host Buffer Register Array indices that we want to
+ * use ...
+ *
+ * XXX Note that we should really read through the Host Buffer Size
+ * XXX register array and find the indices of the Buffer Sizes which
+ * XXX meet our needs!
+ */
+#define READ_FL_BUF(x) \
+ t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE0 + (x) * sizeof(u32))
+
+ fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
+ fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
+ fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
+ fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
+
+ /*
+ * We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+#undef READ_FL_BUF
+
+ /*
+ * The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != CXGBE_PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
+ dev_err(adap, "bad SGE FL page buffer sizes [%d, %d]\n",
+ fl_small_pg, fl_large_pg);
+ return -EINVAL;
+ }
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+
+ if (adap->use_unpacked_mode) {
+ int err = 0;
+
+ if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap)) {
+ dev_err(adap, "bad SGE FL small MTU %d\n",
+ fl_small_mtu);
+ err = -EINVAL;
+ }
+ if (fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
+ dev_err(adap, "bad SGE FL large MTU %d\n",
+ fl_large_mtu);
+ err = -EINVAL;
+ }
+ if (err)
+ return err;
+ }
+
+ /*
+ * Retrieve our RX interrupt holdoff timer values and counter
+ * threshold values from the SGE parameters.
+ */
+ timer_value_0_and_1 = t4_read_reg(adap, A_SGE_TIMER_VALUE_0_AND_1);
+ timer_value_2_and_3 = t4_read_reg(adap, A_SGE_TIMER_VALUE_2_AND_3);
+ timer_value_4_and_5 = t4_read_reg(adap, A_SGE_TIMER_VALUE_4_AND_5);
+ s->timer_val[0] = core_ticks_to_us(adap,
+ G_TIMERVALUE0(timer_value_0_and_1));
+ s->timer_val[1] = core_ticks_to_us(adap,
+ G_TIMERVALUE1(timer_value_0_and_1));
+ s->timer_val[2] = core_ticks_to_us(adap,
+ G_TIMERVALUE2(timer_value_2_and_3));
+ s->timer_val[3] = core_ticks_to_us(adap,
+ G_TIMERVALUE3(timer_value_2_and_3));
+ s->timer_val[4] = core_ticks_to_us(adap,
+ G_TIMERVALUE4(timer_value_4_and_5));
+ s->timer_val[5] = core_ticks_to_us(adap,
+ G_TIMERVALUE5(timer_value_4_and_5));
+
+ ingress_rx_threshold = t4_read_reg(adap, A_SGE_INGRESS_RX_THRESHOLD);
+ s->counter_val[0] = G_THRESHOLD_0(ingress_rx_threshold);
+ s->counter_val[1] = G_THRESHOLD_1(ingress_rx_threshold);
+ s->counter_val[2] = G_THRESHOLD_2(ingress_rx_threshold);
+ s->counter_val[3] = G_THRESHOLD_3(ingress_rx_threshold);
+
+ return 0;
+}
+
+int t4_sge_init(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+ u32 sge_control, sge_control2, sge_conm_ctrl;
+ unsigned int ingpadboundary, ingpackboundary;
+ int ret, egress_threshold;
+
+ /*
+ * Ingress Padding Boundary and Egress Status Page Size are set up by
+ * t4_fixup_host_params().
+ */
+ sge_control = t4_read_reg(adap, A_SGE_CONTROL);
+ s->pktshift = G_PKTSHIFT(sge_control);
+ s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64;
+
+ /*
+ * T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications.
+ */
+ ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) +
+ X_INGPADBOUNDARY_SHIFT);
+ s->fl_align = ingpadboundary;
+
+ if (!is_t4(adap->params.chip) && !adap->use_unpacked_mode) {
+ /*
+ * T5 has a weird interpretation of one of the PCIe Packing
+ * Boundary values. No idea why ...
+ */
+ sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
+ ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
+ if (ingpackboundary == X_INGPACKBOUNDARY_16B)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ X_INGPACKBOUNDARY_SHIFT);
+
+ s->fl_align = max(ingpadboundary, ingpackboundary);
+ }
+
+ ret = t4_sge_init_soft(adap);
+ if (ret < 0) {
+ dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n",
+ __func__, -ret);
+ return ret;
+ }
+
+ /*
+ * A FL with <= fl_starve_thres buffers is starving and a periodic
+ * timer will attempt to refill it. This needs to be larger than the
+ * SGE's Egress Congestion Threshold. If it isn't, then we can get
+ * stuck waiting for new packets while the SGE is waiting for us to
+ * give it more Free List entries. (Note that the SGE's Egress
+ * Congestion Threshold is in units of 2 Free List pointers.) For T4,
+ * there was only a single field to control this. For T5 there's the
+ * original field which now only applies to Unpacked Mode Free List
+ * buffers and a new field which only applies to Packed Mode Free List
+ * buffers.
+ */
+ sge_conm_ctrl = t4_read_reg(adap, A_SGE_CONM_CTRL);
+ if (is_t4(adap->params.chip) || adap->use_unpacked_mode)
+ egress_threshold = G_EGRTHRESHOLD(sge_conm_ctrl);
+ else
+ egress_threshold = G_EGRTHRESHOLDPACKING(sge_conm_ctrl);
+ s->fl_starve_thres = 2 * egress_threshold + 1;
+
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/README b/src/dpdk22/drivers/net/e1000/base/README
index 851e54e1..8d48135a 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/README
+++ b/src/dpdk22/drivers/net/e1000/base/README
@@ -1,7 +1,7 @@
..
BSD LICENSE
- Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -31,9 +31,14 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This directory contains source code of FreeBSD em & igb drivers of version
-cid-shared-code.2014.04.21 released by LAD. The sub-directory of lad/
+cid-shared-code.2015.10.09 released by ND. The base/ sub-directory
contains the original source package.
-Few changes to the original FreeBSD sources were made to:
-- Adopt it for PMD usage mode:
- e1000_osdep.c
- e1000_osdep.h
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ e1000_osdep.c
+ e1000_osdep.h
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_80003es2lan.c b/src/dpdk22/drivers/net/e1000/base/e1000_80003es2lan.c
index 72692d9e..5ac925e4 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_80003es2lan.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_80003es2lan.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -852,11 +852,15 @@ STATIC s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
/* Disable IBIST slave mode (far-end loopback) */
ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_INBAND_PARAM, &kum_reg_data);
- if (ret_val)
- return ret_val;
- kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
- e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- kum_reg_data);
+ if (!ret_val) {
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+ if (ret_val)
+ DEBUGOUT("Error disabling far-end loopback\n");
+ } else
+ DEBUGOUT("Error disabling far-end loopback\n");
ret_val = e1000_get_auto_rd_done_generic(hw);
if (ret_val)
@@ -912,11 +916,18 @@ STATIC s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
return ret_val;
/* Disable IBIST slave mode (far-end loopback) */
- e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- &kum_reg_data);
- kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
- e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- kum_reg_data);
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ if (!ret_val) {
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+ if (ret_val)
+ DEBUGOUT("Error disabling far-end loopback\n");
+ } else
+ DEBUGOUT("Error disabling far-end loopback\n");
/* Set the transmit descriptor write-back policy */
reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0));
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_80003es2lan.h b/src/dpdk22/drivers/net/e1000/base/e1000_80003es2lan.h
index f5fe9677..93ec19be 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_80003es2lan.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_80003es2lan.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82540.c b/src/dpdk22/drivers/net/e1000/base/e1000_82540.c
index fc1fa946..7de7b7ba 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82540.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_82540.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82541.c b/src/dpdk22/drivers/net/e1000/base/e1000_82541.c
index 952aea28..9cdb91c9 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82541.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_82541.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82541.h b/src/dpdk22/drivers/net/e1000/base/e1000_82541.h
index 0f50f556..e0bee7ce 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82541.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_82541.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82542.c b/src/dpdk22/drivers/net/e1000/base/e1000_82542.c
index afea4697..4f1183af 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82542.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_82542.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,7 @@ STATIC s32 e1000_init_hw_82542(struct e1000_hw *hw);
STATIC s32 e1000_setup_link_82542(struct e1000_hw *hw);
STATIC s32 e1000_led_on_82542(struct e1000_hw *hw);
STATIC s32 e1000_led_off_82542(struct e1000_hw *hw);
-STATIC void e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index);
+STATIC int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index);
STATIC void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw);
STATIC s32 e1000_read_mac_addr_82542(struct e1000_hw *hw);
@@ -410,7 +410,7 @@ STATIC s32 e1000_led_off_82542(struct e1000_hw *hw)
* Sets the receive address array register at index to the address passed
* in by addr.
**/
-STATIC void e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index)
+STATIC int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
@@ -431,6 +431,8 @@ STATIC void e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index)
E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
+
+ return E1000_SUCCESS;
}
/**
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82543.c b/src/dpdk22/drivers/net/e1000/base/e1000_82543.c
index 36335ba2..fc96199d 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82543.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_82543.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82543.h b/src/dpdk22/drivers/net/e1000/base/e1000_82543.h
index 51056dbc..4eb3f624 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82543.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_82543.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82571.c b/src/dpdk22/drivers/net/e1000/base/e1000_82571.c
index 8ae1cb12..7c279dbb 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82571.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_82571.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -1452,10 +1452,14 @@ STATIC void e1000_clear_vfta_82571(struct e1000_hw *hw)
STATIC bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
{
u16 data;
+ s32 ret_val;
DEBUGFUNC("e1000_check_mng_mode_82574");
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+ if (ret_val)
+ return false;
+
return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
}
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82571.h b/src/dpdk22/drivers/net/e1000/base/e1000_82571.h
index bdf64469..c8037b61 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82571.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_82571.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82575.c b/src/dpdk22/drivers/net/e1000/base/e1000_82575.c
index 25fa6727..723885d7 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82575.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_82575.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -277,6 +277,11 @@ STATIC s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
if (ret_val)
goto out;
}
+ if (phy->id == M88E1543_E_PHY_ID) {
+ ret_val = e1000_initialize_M88E1543_phy(hw);
+ if (ret_val)
+ goto out;
+ }
break;
case IGP03E1000_E_PHY_ID:
case IGP04E1000_E_PHY_ID:
@@ -891,7 +896,6 @@ out:
STATIC s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = E1000_SUCCESS;
u32 data;
DEBUGFUNC("e1000_set_d0_lplu_state_82580");
@@ -919,7 +923,7 @@ STATIC s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
}
E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
- return ret_val;
+ return E1000_SUCCESS;
}
/**
@@ -939,7 +943,6 @@ STATIC s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = E1000_SUCCESS;
u32 data;
DEBUGFUNC("e1000_set_d3_lplu_state_82580");
@@ -967,7 +970,7 @@ s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
}
E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
- return ret_val;
+ return E1000_SUCCESS;
}
/**
@@ -981,7 +984,7 @@ s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
**/
STATIC s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
{
- s32 ret_val;
+ s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_acquire_nvm_82575");
@@ -1003,6 +1006,7 @@ STATIC s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
}
}
+
if (hw->mac.type == e1000_82580) {
u32 eecd = E1000_READ_REG(hw, E1000_EECD);
if (eecd & E1000_EECD_BLOCKED) {
@@ -1013,7 +1017,6 @@ STATIC s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
}
}
-
ret_val = e1000_acquire_nvm_generic(hw);
if (ret_val)
e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
@@ -1052,7 +1055,7 @@ STATIC s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
u32 swmask = mask;
u32 fwmask = mask << 16;
s32 ret_val = E1000_SUCCESS;
- s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+ s32 i = 0, timeout = 200;
DEBUGFUNC("e1000_acquire_swfw_sync_82575");
@@ -1127,7 +1130,6 @@ STATIC void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
STATIC s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
{
s32 timeout = PHY_CFG_TIMEOUT;
- s32 ret_val = E1000_SUCCESS;
u32 mask = E1000_NVM_CFG_DONE_PORT_0;
DEBUGFUNC("e1000_get_cfg_done_82575");
@@ -1152,7 +1154,7 @@ STATIC s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
(hw->phy.type == e1000_phy_igp_3))
e1000_phy_init_script_igp3(hw);
- return ret_val;
+ return E1000_SUCCESS;
}
/**
@@ -1237,7 +1239,7 @@ STATIC s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
DEBUGFUNC("e1000_check_for_link_media_swap");
- /* Check the copper medium. */
+ /* Check for copper. */
ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
if (ret_val)
return ret_val;
@@ -1249,7 +1251,7 @@ STATIC s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
if (data & E1000_M88E1112_STATUS_LINK)
port = E1000_MEDIA_PORT_COPPER;
- /* Check the other medium. */
+ /* Check for other. */
ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
if (ret_val)
return ret_val;
@@ -1258,11 +1260,6 @@ STATIC s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* reset page to 0 */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
- if (ret_val)
- return ret_val;
-
if (data & E1000_M88E1112_STATUS_LINK)
port = E1000_MEDIA_PORT_OTHER;
@@ -1270,8 +1267,20 @@ STATIC s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
if (port && (hw->dev_spec._82575.media_port != port)) {
hw->dev_spec._82575.media_port = port;
hw->dev_spec._82575.media_changed = true;
+ }
+
+ if (port == E1000_MEDIA_PORT_COPPER) {
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+ e1000_check_for_link_82575(hw);
} else {
- ret_val = e1000_check_for_link_82575(hw);
+ e1000_check_for_link_82575(hw);
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
}
return E1000_SUCCESS;
@@ -2128,7 +2137,7 @@ STATIC void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
* e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable
* @hw: pointer to the HW structure
*
- * After rx enable if managability is enabled then there is likely some
+ * After Rx enable, if manageability is enabled then there is likely some
* bad data at the start of the fifo and possibly in the DMA fifo. This
* function clears the fifos and flushes any packets that came in as rx was
* being enabled.
@@ -2138,7 +2147,13 @@ void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
int i, ms_wait;
- DEBUGFUNC("e1000_rx_fifo_workaround_82575");
+ DEBUGFUNC("e1000_rx_fifo_flush_82575");
+
+ /* disable IPv6 options as per hardware errata */
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+ rfctl |= E1000_RFCTL_IPV6_EX_DIS;
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+
if (hw->mac.type != e1000_82575 ||
!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
return;
@@ -2166,7 +2181,6 @@ void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
* incoming packets are rejected. Set enable and wait 2ms so that
* any packet that was coming in as RCTL.EN was set is flushed
*/
- rfctl = E1000_READ_REG(hw, E1000_RFCTL);
E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
rlpml = E1000_READ_REG(hw, E1000_RLPML);
@@ -2490,11 +2504,17 @@ STATIC s32 e1000_reset_hw_82580(struct e1000_hw *hw)
ctrl |= E1000_CTRL_RST;
E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
- E1000_WRITE_FLUSH(hw);
- /* Add delay to insure DEV_RST has time to complete */
- if (global_device_reset)
- msec_delay(5);
+ switch (hw->device_id) {
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ break;
+ default:
+ E1000_WRITE_FLUSH(hw);
+ break;
+ }
+
+ /* Add delay to ensure DEV_RST or RST has time to complete */
+ msec_delay(5);
ret_val = e1000_get_auto_rd_done_generic(hw);
if (ret_val) {
@@ -2802,7 +2822,7 @@ s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
* e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY
* @hw: pointer to the HW structure
*
- * Initialize Marverl 1512 to work correctly with Avoton.
+ * Initialize Marvell 1512 to work correctly with Avoton.
**/
s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw)
{
@@ -2888,15 +2908,115 @@ out:
}
/**
+ * e1000_initialize_M88E1543_phy - Initialize M88E1543 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initialize Marvell 1543 to work correctly with Avoton.
+ **/
+s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_initialize_M88E1543_phy");
+
+ /* Check that this is the correct PHY. */
+ if (phy->id != M88E1543_E_PHY_ID)
+ goto out;
+
+ /* Switch to PHY page 0xFF. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0xFB. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0xC00D);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0x12. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
+ if (ret_val)
+ goto out;
+
+ /* Change mode to SGMII-to-Copper */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 1. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1);
+ if (ret_val)
+ goto out;
+
+ /* Change mode to 1000BASE-X/SGMII and autoneg enable; reset */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140);
+ if (ret_val)
+ goto out;
+
+ /* Return the PHY to page 0. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ msec_delay(1000);
+out:
+ return ret_val;
+}
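
The initialization above is a long run of page-switched PHY writes. A hedged sketch of the same pattern in table-driven form (the helper and table type below are hypothetical, not part of the driver, and re-select the page before every write for simplicity):

/* Hypothetical table-driven variant of the paged writes above. */
struct phy_page_write {
	u16 page;	/* value for E1000_M88E1543_PAGE_ADDR */
	u32 reg;	/* register on that page */
	u16 val;	/* value to write */
};

static s32 phy_write_table(struct e1000_hw *hw,
			   const struct phy_page_write *t, int n)
{
	s32 ret_val;
	int i;

	for (i = 0; i < n; i++) {
		ret_val = hw->phy.ops.write_reg(hw,
						E1000_M88E1543_PAGE_ADDR,
						t[i].page);
		if (!ret_val)
			ret_val = hw->phy.ops.write_reg(hw, t[i].reg,
							t[i].val);
		if (ret_val)
			return ret_val;
	}
	return E1000_SUCCESS;
}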
+
+/**
* e1000_set_eee_i350 - Enable/disable EEE support
* @hw: pointer to the HW structure
+ * @adv1G: boolean flag enabling 1G EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
*
* Enable/disable EEE based on setting in dev_spec structure.
*
**/
-s32 e1000_set_eee_i350(struct e1000_hw *hw)
+s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
{
- s32 ret_val = E1000_SUCCESS;
u32 ipcnfg, eeer;
DEBUGFUNC("e1000_set_eee_i350");
@@ -2911,7 +3031,16 @@ s32 e1000_set_eee_i350(struct e1000_hw *hw)
if (!(hw->dev_spec._82575.eee_disable)) {
u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU);
- ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
+ if (adv100M)
+ ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
+ else
+ ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
+
+ if (adv1G)
+ ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
+ else
+ ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
+
eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
E1000_EEER_LPI_FC);
@@ -2929,17 +3058,19 @@ s32 e1000_set_eee_i350(struct e1000_hw *hw)
E1000_READ_REG(hw, E1000_EEER);
out:
- return ret_val;
+ return E1000_SUCCESS;
}
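
With the new signature, callers pick which EEE speeds to advertise instead of always getting both; passing true for both flags reproduces the old behaviour. A minimal usage sketch:

/* Advertise EEE at 1G only, leaving 100M advertisement off. */
s32 ret = e1000_set_eee_i350(hw, true /* adv1G */, false /* adv100M */);
if (ret != E1000_SUCCESS)
	DEBUGOUT("Failed to configure EEE advertisement\n");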
/**
* e1000_set_eee_i354 - Enable/disable EEE support
* @hw: pointer to the HW structure
+ * @adv1G: boolean flag enabling 1G EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
*
* Enable/disable EEE legacy mode based on setting in dev_spec structure.
*
**/
-s32 e1000_set_eee_i354(struct e1000_hw *hw)
+s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = E1000_SUCCESS;
@@ -2981,8 +3112,16 @@ s32 e1000_set_eee_i354(struct e1000_hw *hw)
if (ret_val)
goto out;
- phy_data |= E1000_EEE_ADV_100_SUPPORTED |
- E1000_EEE_ADV_1000_SUPPORTED;
+ if (adv100M)
+ phy_data |= E1000_EEE_ADV_100_SUPPORTED;
+ else
+ phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
+
+ if (adv1G)
+ phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
+ else
+ phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
+
ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
E1000_EEE_ADV_DEV_I354,
phy_data);
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82575.h b/src/dpdk22/drivers/net/e1000/base/e1000_82575.h
index 09b7bf2e..c4986841 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82575.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_82575.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -494,10 +494,11 @@ void e1000_rlpml_set_vf(struct e1000_hw *, u16);
s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type);
u16 e1000_rxpbs_adjust_82580(u32 data);
s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
-s32 e1000_set_eee_i350(struct e1000_hw *);
-s32 e1000_set_eee_i354(struct e1000_hw *);
+s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M);
+s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M);
s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *);
s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw);
+s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw);
/* I2C SDA and SCL timing parameters for standard mode */
#define E1000_I2C_T_HD_STA 4
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_api.c b/src/dpdk22/drivers/net/e1000/base/e1000_api.c
index a0645651..bbfcae88 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_api.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_api.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -292,6 +292,10 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_PCH_LPT_I217_V:
case E1000_DEV_ID_PCH_LPTLP_I218_LM:
case E1000_DEV_ID_PCH_LPTLP_I218_V:
+ case E1000_DEV_ID_PCH_I218_LM2:
+ case E1000_DEV_ID_PCH_I218_V2:
+ case E1000_DEV_ID_PCH_I218_LM3:
+ case E1000_DEV_ID_PCH_I218_V3:
mac->type = e1000_pch_lpt;
break;
case E1000_DEV_ID_82575EB_COPPER:
@@ -827,10 +831,12 @@ void e1000_config_collision_dist(struct e1000_hw *hw)
*
* Sets a Receive Address Register (RAR) to the specified address.
**/
-void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
if (hw->mac.ops.rar_set)
- hw->mac.ops.rar_set(hw, addr, index);
+ return hw->mac.ops.rar_set(hw, addr, index);
+
+ return E1000_SUCCESS;
}
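
Since e1000_rar_set() now forwards the rar_set op's status instead of returning void, callers can detect a failed or ME-locked RAR write. A minimal caller sketch (the address is illustrative):

u8 mac_addr[6] = { 0x00, 0x1b, 0x21, 0x0a, 0x0b, 0x0c }; /* example */

if (e1000_rar_set(hw, mac_addr, 0) != E1000_SUCCESS)
	DEBUGOUT("Failed to program RAR[0]\n");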
/**
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_api.h b/src/dpdk22/drivers/net/e1000/base/e1000_api.h
index 02b16da3..0bc471d9 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_api.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_api.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -68,7 +68,7 @@ s32 e1000_setup_link(struct e1000_hw *hw);
s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
s32 e1000_disable_pcie_master(struct e1000_hw *hw);
void e1000_config_collision_dist(struct e1000_hw *hw);
-void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count);
@@ -124,14 +124,14 @@ u32 e1000_translate_register_82542(u32 reg);
* TBI_ACCEPT macro definition:
*
* This macro requires:
- * adapter = a pointer to struct e1000_hw
+ * a = a pointer to struct e1000_hw
* status = the 8 bit status field of the Rx descriptor with EOP set
- * error = the 8 bit error field of the Rx descriptor with EOP set
+ * errors = the 8 bit error field of the Rx descriptor with EOP set
* length = the sum of all the length fields of the Rx descriptors that
* make up the current frame
* last_byte = the last byte of the frame DMAed by the hardware
- * max_frame_length = the maximum frame length we want to accept.
- * min_frame_length = the minimum frame length we want to accept.
+ * min_frame_size = the minimum frame length we want to accept.
+ * max_frame_size = the maximum frame length we want to accept.
*
* This macro is a conditional that should be used in the interrupt
* handler's Rx processing routine when RxErrors have been detected.
@@ -157,10 +157,10 @@ u32 e1000_translate_register_82542(u32 reg);
(((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
((last_byte) == CARRIER_EXTENSION) && \
(((status) & E1000_RXD_STAT_VP) ? \
- (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
- ((length) <= (max_frame_size + 1))) : \
- (((length) > min_frame_size) && \
- ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
+ (((length) > ((min_frame_size) - VLAN_TAG_SIZE)) && \
+ ((length) <= ((max_frame_size) + 1))) : \
+ (((length) > (min_frame_size)) && \
+ ((length) <= ((max_frame_size) + VLAN_TAG_SIZE + 1)))))
#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b))
#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */
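
A worked example of the corrected bounds, assuming min_frame_size = 64, max_frame_size = 1518 and VLAN_TAG_SIZE = 4 (all illustrative): a VLAN-tagged frame (STAT_VP set) is accepted for 60 < length <= 1519, an untagged one for 64 < length <= 1523. The parentheses added around the macro arguments matter when callers pass expressions rather than plain identifiers, since operator precedence could otherwise change the comparison:

/* Worked bounds for the TBI_ACCEPT length checks above:
 *   tagged:   length > 64 - 4   && length <= 1518 + 1
 *             ->  60 < length <= 1519
 *   untagged: length > 64       && length <= 1518 + 4 + 1
 *             ->  64 < length <= 1523
 */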
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_defines.h b/src/dpdk22/drivers/net/e1000/base/e1000_defines.h
index 278c5072..69aa1f23 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_defines.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_defines.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -197,6 +197,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
+#define E1000_RCTL_RDMTS_HEX 0x00010000
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
@@ -467,6 +468,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define ETHERNET_FCS_SIZE 4
#define MAX_JUMBO_FRAME_SIZE 0x3F00
+#define E1000_TX_PTR_GAP 0x1F
/* Extended Configuration Control and Size */
#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
@@ -845,6 +847,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */
#define E1000_M88E1543_EEE_CTRL_1 0x0
#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
+#define E1000_M88E1543_FIBER_CTRL 0x0 /* Fiber Control Register */
#define E1000_EEE_ADV_DEV_I354 7
#define E1000_EEE_ADV_ADDR_I354 60
#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
@@ -1466,6 +1469,9 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */
#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */
+#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
+#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+
/* Proxy Filter Control */
#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_hw.h b/src/dpdk22/drivers/net/e1000/base/e1000_hw.h
index 4dd92a30..e4e4f764 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_hw.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_hw.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -132,6 +132,10 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
+#define E1000_DEV_ID_PCH_I218_LM2 0x15A0
+#define E1000_DEV_ID_PCH_I218_V2 0x15A1
+#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */
#define E1000_DEV_ID_82576 0x10C9
#define E1000_DEV_ID_82576_FIBER 0x10E6
#define E1000_DEV_ID_82576_SERDES 0x10E7
@@ -695,7 +699,7 @@ struct e1000_mac_operations {
s32 (*setup_led)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
void (*config_collision_dist)(struct e1000_hw *);
- void (*rar_set)(struct e1000_hw *, u8*, u32);
+ int (*rar_set)(struct e1000_hw *, u8*, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
s32 (*validate_mdi_setting)(struct e1000_hw *);
s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
@@ -781,7 +785,7 @@ struct e1000_mac_info {
u16 uta_reg_count;
/* Maximum size of the MTA register table in all supported adapters */
- #define MAX_MTA_REG 128
+#define MAX_MTA_REG 128
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
@@ -931,7 +935,7 @@ struct e1000_shadow_ram {
#define E1000_SHADOW_RAM_WORDS 2048
-#if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
+#ifdef ULP_SUPPORT
/* I218 PHY Ultra Low Power (ULP) states */
enum e1000_ulp_state {
e1000_ulp_state_unknown,
@@ -939,7 +943,7 @@ enum e1000_ulp_state {
e1000_ulp_state_on,
};
-#endif /* NAHUM6LP_HW && ULP_SUPPORT */
+#endif /* ULP_SUPPORT */
struct e1000_dev_spec_ich8lan {
bool kmrn_lock_loss_workaround_enabled;
struct e1000_shadow_ram shadow_ram[E1000_SHADOW_RAM_WORDS];
@@ -948,7 +952,7 @@ struct e1000_dev_spec_ich8lan {
bool nvm_k1_enabled;
bool eee_disable;
u16 eee_lp_ability;
-#if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
+#ifdef ULP_SUPPORT
enum e1000_ulp_state ulp_state;
-#endif /* NAHUM6LP_HW && ULP_SUPPORT */
+#endif /* ULP_SUPPORT */
u16 lat_enc;
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_i210.c b/src/dpdk22/drivers/net/e1000/base/e1000_i210.c
index 1f5600d5..277331c4 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_i210.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_i210.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -925,7 +925,7 @@ s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
STATIC s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
{
s32 ret_val;
- u32 wuc, mdicnfg, ctrl_ext, reg_val;
+ u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
u16 nvm_word, phy_word, pci_word, tmp_nvm;
int i;
@@ -942,9 +942,9 @@ STATIC s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
nvm_word = E1000_INVM_DEFAULT_AL;
tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
- /* check current state */
- hw->phy.ops.read_reg(hw, (E1000_PHY_PLL_FREQ_PAGE |
- E1000_PHY_PLL_FREQ_REG), &phy_word);
+ /* check current state directly from internal PHY */
+ e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
+ E1000_PHY_PLL_FREQ_REG), &phy_word);
if ((phy_word & E1000_PHY_PLL_UNCONF)
!= E1000_PHY_PLL_UNCONF) {
ret_val = E1000_SUCCESS;
@@ -952,14 +952,17 @@ STATIC s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
} else {
ret_val = -E1000_ERR_PHY;
}
- hw->phy.ops.reset(hw);
+ /* directly reset the internal PHY */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
+
ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
E1000_WRITE_REG(hw, E1000_WUC, 0);
reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
- E1000_WRITE_REG(hw, E1000_EEARBC, reg_val);
+ E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);
e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
pci_word |= E1000_PCI_PMCSR_D3;
@@ -968,7 +971,7 @@ STATIC s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
pci_word &= ~E1000_PCI_PMCSR_D3;
e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
- E1000_WRITE_REG(hw, E1000_EEARBC, reg_val);
+ E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);
/* restore WUC register */
E1000_WRITE_REG(hw, E1000_WUC, wuc);
@@ -979,6 +982,35 @@ STATIC s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
}
/**
+ * e1000_get_cfg_done_i210 - Read config done bit
+ * @hw: pointer to the HW structure
+ *
+ * Read the management control register for the config done bit for
+ * completion status. NOTE: EEPROM-less silicon will fail trying to read
+ * the config done bit, so the error is *ONLY* logged and E1000_SUCCESS is
+ * returned. If an error were returned, EEPROM-less silicon would never be
+ * able to reset or change link.
+ **/
+STATIC s32 e1000_get_cfg_done_i210(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+ DEBUGFUNC("e1000_get_cfg_done_i210");
+
+ while (timeout) {
+ if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask)
+ break;
+ msec_delay(1);
+ timeout--;
+ }
+ if (!timeout)
+ DEBUGOUT("MNG configuration cycle has not completed.\n");
+
+ return E1000_SUCCESS;
+}
+
+/**
* e1000_init_hw_i210 - Init hw for I210/I211
* @hw: pointer to the HW structure
*
@@ -995,6 +1027,7 @@ s32 e1000_init_hw_i210(struct e1000_hw *hw)
if (ret_val != E1000_SUCCESS)
return ret_val;
}
+ hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210;
ret_val = e1000_init_hw_82575(hw);
return ret_val;
}
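
The new get_cfg_done hook installed above follows the driver's usual bounded-poll idiom: spin on a register bit with a 1 ms delay per iteration and treat expiry as non-fatal, which is what keeps EEPROM-less I210 parts usable. A hedged sketch of that idiom as a reusable helper (hypothetical, not in the driver):

/* Hypothetical bounded poll in the style of e1000_get_cfg_done_i210(). */
static bool poll_reg_set(struct e1000_hw *hw, u32 reg, u32 mask,
			 s32 timeout_ms)
{
	while (timeout_ms--) {
		if (E1000_READ_REG(hw, reg) & mask)
			return true;
		msec_delay(1);
	}
	return false;	/* caller decides whether expiry is fatal */
}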
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_i210.h b/src/dpdk22/drivers/net/e1000/base/e1000_i210.h
index f2bd43bb..1a6f1dd4 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_i210.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_i210.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_ich8lan.c b/src/dpdk22/drivers/net/e1000/base/e1000_ich8lan.c
index 3b1627bf..89d07e90 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_ich8lan.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_ich8lan.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -62,6 +62,10 @@ POSSIBILITY OF SUCH DAMAGE.
* Ethernet Connection I217-V
* Ethernet Connection I218-V
* Ethernet Connection I218-LM
+ * Ethernet Connection (2) I218-LM
+ * Ethernet Connection (2) I218-V
+ * Ethernet Connection (3) I218-LM
+ * Ethernet Connection (3) I218-V
*/
#include "e1000_api.h"
@@ -73,8 +77,8 @@ STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
-STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
-STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
+STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
@@ -230,15 +234,19 @@ STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
return false;
out:
if (hw->mac.type == e1000_pch_lpt) {
- /* Unforce SMBus mode in PHY */
- hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
- phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
- hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
+ /* Only unforce SMBus if ME is not active */
+ if (!(E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_ICH_FWSM_FW_VALID)) {
+ /* Unforce SMBus mode in PHY */
+ hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
- /* Unforce SMBus mode in MAC */
- mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
- mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+ /* Unforce SMBus mode in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+ }
}
return true;
@@ -307,13 +315,13 @@ STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
e1000_gate_hw_phy_config_ich8lan(hw, true);
-#if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
+#ifdef ULP_SUPPORT
/* It is not possible to be certain of the current state of ULP
* so forcibly disable it.
*/
hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
-#endif /* NAHUM6LP_HW && ULP_SUPPORT */
+#endif /* ULP_SUPPORT */
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
DEBUGOUT("Failed to initialize PHY flow\n");
@@ -754,6 +762,7 @@ STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
/* multicast address update for pch2 */
mac->ops.update_mc_addr_list =
e1000_update_mc_addr_list_pch2lan;
+ /* fall-through */
#endif
case e1000_pchlan:
#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
@@ -931,6 +940,17 @@ s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
}
}
+ if (hw->phy.type == e1000_phy_82579) {
+ ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+ &data);
+ if (ret_val)
+ goto release;
+
+ data &= ~I82579_LPI_100_PLL_SHUT;
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+ data);
+ }
+
/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
if (ret_val)
@@ -1032,7 +1052,7 @@ update_fextnvm6:
return ret_val;
}
-#if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
+#ifdef ULP_SUPPORT
/**
* e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
* @hw: pointer to the HW structure
@@ -1052,19 +1072,19 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if ((hw->mac.type < e1000_pch_lpt) ||
(hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
(hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
(hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
return 0;
if (!to_sx) {
int i = 0;
-
/* Poll up to 5 seconds for Cable Disconnected indication */
while (!(E1000_READ_REG(hw, E1000_FEXT) &
E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
/* Bail if link is re-acquired */
if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
return -E1000_ERR_PHY;
-
if (i++ == 100)
break;
@@ -1074,6 +1094,9 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
(E1000_READ_REG(hw, E1000_FEXT) &
E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
i * 50);
+ if (!(E1000_READ_REG(hw, E1000_FEXT) &
+ E1000_FEXT_PHY_CABLE_DISCONNECTED))
+ return 0;
}
if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
@@ -1131,10 +1154,15 @@ skip_smbus:
if (to_sx) {
if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
+ else
+ phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
+ phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
} else {
phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
+ phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
+ phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
}
e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
@@ -1155,6 +1183,7 @@ skip_smbus:
mac_reg &= ~E1000_TCTL_EN;
E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
}
+
release:
hw->phy.ops.release(hw);
out:
@@ -1197,6 +1226,8 @@ s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
if ((hw->mac.type < e1000_pch_lpt) ||
(hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
(hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
(hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
return 0;
@@ -1234,10 +1265,15 @@ s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
/* Restore link speed advertisements and restart
* Auto-negotiation
*/
- ret_val = e1000_phy_setup_autoneg(hw);
- if (ret_val)
- goto out;
-
+ if (hw->mac.autoneg) {
+ ret_val = e1000_phy_setup_autoneg(hw);
+ if (ret_val)
+ goto out;
+ } else {
+ ret_val = e1000_setup_copper_link_generic(hw);
+ if (ret_val)
+ goto out;
+ }
ret_val = e1000_oem_bits_config_ich8lan(hw, true);
}
@@ -1360,7 +1396,7 @@ out:
return ret_val;
}
-#endif /* NAHUM6LP_HW && ULP_SUPPORT */
+#endif /* ULP_SUPPORT */
/**
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
* @hw: pointer to the HW structure
@@ -1372,7 +1408,8 @@ out:
STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- s32 ret_val;
+ s32 ret_val, tipg_reg = 0;
+ u16 emi_addr, emi_val = 0;
bool link = false;
u16 phy_reg;
@@ -1405,7 +1442,6 @@ STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
ret_val = e1000_disable_ulp_lpt_lp(hw, false);
else
ret_val = e1000_enable_ulp_lpt_lp(hw, false);
-
if (ret_val)
return ret_val;
}
@@ -1422,45 +1458,71 @@ STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
*/
if (((hw->mac.type == e1000_pch2lan) ||
(hw->mac.type == e1000_pch_lpt)) && link) {
- u32 reg;
- reg = E1000_READ_REG(hw, E1000_STATUS);
- if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
- u16 emi_addr;
+ u16 speed, duplex;
- reg = E1000_READ_REG(hw, E1000_TIPG);
- reg &= ~E1000_TIPG_IPGT_MASK;
- reg |= 0xFF;
- E1000_WRITE_REG(hw, E1000_TIPG, reg);
+ e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
+ tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
+ tipg_reg &= ~E1000_TIPG_IPGT_MASK;
+ if (duplex == HALF_DUPLEX && speed == SPEED_10) {
+ tipg_reg |= 0xFF;
/* Reduce Rx latency in analog PHY */
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
+ emi_val = 0;
+ } else {
+ /* Roll back the default values */
+ tipg_reg |= 0x08;
+ emi_val = 1;
+ }
- if (hw->mac.type == e1000_pch2lan)
- emi_addr = I82579_RX_CONFIG;
- else
- emi_addr = I217_RX_CONFIG;
- ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
+ E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
- hw->phy.ops.release(hw);
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
- if (ret_val)
- return ret_val;
- }
+ if (hw->mac.type == e1000_pch2lan)
+ emi_addr = I82579_RX_CONFIG;
+ else
+ emi_addr = I217_RX_CONFIG;
+ ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
+
+ hw->phy.ops.release(hw);
+
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* I217 Packet Loss issue:
+ * ensure that FEXTNVM4 Beacon Duration is set correctly
+ * on power up.
+ * Set the Beacon Duration for I217 to 8 usec
+ */
+ if (hw->mac.type == e1000_pch_lpt) {
+ u32 mac_reg;
+
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
+ mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
}
/* Work-around I218 hang issue */
if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
- (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
+ (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
ret_val = e1000_k1_workaround_lpt_lp(hw, link);
if (ret_val)
return ret_val;
}
-
/* Clear link partner's EEE ability */
hw->dev_spec.ich8lan.eee_lp_ability = 0;
+ /* Configure K0s minimum time */
+ if (hw->mac.type == e1000_pch_lpt) {
+ e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
+ }
+
if (!link)
return E1000_SUCCESS; /* No link detected */
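
The rework above keys both the inter-packet gap and the analog-PHY Rx-latency setting off the negotiated speed and duplex rather than the raw STATUS bits, and it rolls the values back once the link leaves 10 Mb/s half duplex. The decision reduces to a two-row table; a hedged sketch (constants taken from the hunk, the helper itself is hypothetical):

/* Hypothetical helper mirroring the selection logic above. */
static void pick_latency_cfg(u16 speed, u16 duplex,
			     u32 *ipgt, u16 *emi_val)
{
	if (duplex == HALF_DUPLEX && speed == SPEED_10) {
		*ipgt = 0xFF;	/* stretch the inter-packet gap */
		*emi_val = 0;	/* reduce Rx latency in the analog PHY */
	} else {
		*ipgt = 0x08;	/* roll back to the default values */
		*emi_val = 1;
	}
}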
@@ -1731,7 +1793,7 @@ STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
* contain the MAC address but RAR[1-6] are reserved for manageability (ME).
* Use SHRA[0-3] in place of those reserved for ME.
**/
-STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
@@ -1755,7 +1817,7 @@ STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
E1000_WRITE_FLUSH(hw);
- return;
+ return E1000_SUCCESS;
}
/* RAR[1-6] are owned by manageability. Skip those and program the
@@ -1778,7 +1840,7 @@ STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
/* verify the register updates */
if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
(E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
- return;
+ return E1000_SUCCESS;
DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
(index - 1), E1000_READ_REG(hw, E1000_FWSM));
@@ -1786,6 +1848,7 @@ STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
out:
DEBUGOUT1("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
}
/**
@@ -1799,7 +1862,7 @@ out:
* contain the MAC address. SHRA[0-10] are the shared receive address
* registers that are shared between the Host and manageability engine (ME).
**/
-STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
u32 wlock_mac;
@@ -1823,7 +1886,7 @@ STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
E1000_WRITE_FLUSH(hw);
- return;
+ return E1000_SUCCESS;
}
/* The manageability engine (ME) can lock certain SHRAR registers that
@@ -1858,12 +1921,13 @@ STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
/* verify the register updates */
if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
(E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
- return;
+ return E1000_SUCCESS;
}
}
out:
DEBUGOUT1("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
}
#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
@@ -1936,7 +2000,7 @@ STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
continue;
}
blocked = false;
- } while (blocked && (i++ < 10));
+ } while (blocked && (i++ < 30));
return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
}
@@ -2582,7 +2646,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
data &= ~(0x3FF << 2);
- data |= (0x1A << 2);
+ data |= (E1000_TX_PTR_GAP << 2);
ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
if (ret_val)
return ret_val;
@@ -2914,7 +2978,6 @@ STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
u16 oem_reg;
DEBUGFUNC("e1000_set_lplu_state_pchlan");
-
ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
if (ret_val)
return ret_val;
@@ -3134,6 +3197,7 @@ STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
struct e1000_nvm_info *nvm = &hw->nvm;
u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
+ u32 nvm_dword = 0;
u8 sig_byte = 0;
s32 ret_val;
@@ -3441,12 +3505,10 @@ STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
hsflctl.hsf_ctrl.fldbcount = size - 1;
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
-
E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
- ret_val =
- e1000_flash_cycle_ich8lan(hw,
- ICH_FLASH_READ_COMMAND_TIMEOUT);
+ ret_val = e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_READ_COMMAND_TIMEOUT);
/* Check if FCERR is set to 1, if set to 1, clear it
* and try the whole sequence a few more times, else
@@ -3481,6 +3543,7 @@ STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
return ret_val;
}
+
/**
* e1000_write_nvm_ich8lan - Write word(s) to the NVM
* @hw: pointer to the HW structure
@@ -3534,7 +3597,7 @@ STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
s32 ret_val;
- u16 data;
+ u16 data = 0;
DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
@@ -3570,12 +3633,7 @@ STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto release;
}
-
for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
- /* Determine whether to write the value stored
- * in the other NVM bank or a modified value stored
- * in the shadow RAM
- */
if (dev_spec->shadow_ram[i].modified) {
data = dev_spec->shadow_ram[i].value;
} else {
@@ -3585,7 +3643,6 @@ STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val)
break;
}
-
/* If the word is 0x13, then make sure the signature bits
* (15:14) are 11b until the commit has completed.
* This will allow us to write 10b which indicates the
@@ -3600,6 +3657,7 @@ STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
act_offset = (i + new_bank_offset) << 1;
usec_delay(100);
+
/* Write the bytes to the new bank. */
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
act_offset,
@@ -3634,8 +3692,7 @@ STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
goto release;
data &= 0xBFFF;
- ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
- act_offset * 2 + 1,
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
(u8)(data >> 8));
if (ret_val)
goto release;
@@ -3646,7 +3703,9 @@ STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
* to 1's. We can write 1's to 0's without an erase
*/
act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+
ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
+
if (ret_val)
goto release;
@@ -3800,6 +3859,7 @@ STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
return ret_val;
}
+
/**
* e1000_write_flash_byte_ich8lan - Write a single byte to NVM
* @hw: pointer to the HW structure
@@ -3818,6 +3878,8 @@ STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
}
+
+
/**
* e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
* @hw: pointer to the HW structure
@@ -4794,7 +4856,9 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
u16 phy_reg, device_id = hw->device_id;
if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
- (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+ (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
+ (device_id == E1000_DEV_ID_PCH_I218_V3)) {
u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
E1000_WRITE_REG(hw, E1000_FEXTNVM6,
@@ -4910,19 +4974,18 @@ out:
* the PHY.
* On i217, setup Intel Rapid Start Technology.
**/
-void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
+u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
{
s32 ret_val;
DEBUGFUNC("e1000_resume_workarounds_pchlan");
-
if (hw->mac.type < e1000_pch2lan)
- return;
+ return E1000_SUCCESS;
ret_val = e1000_init_phy_workarounds_pchlan(hw);
if (ret_val) {
DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
- return;
+ return ret_val;
}
/* For i217 Intel Rapid Start Technology support when the system
@@ -4936,7 +4999,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
DEBUGOUT("Failed to setup iRST\n");
- return;
+ return ret_val;
}
/* Clear Auto Enable LPI after link up */
@@ -4970,7 +5033,9 @@ release:
if (ret_val)
DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
hw->phy.ops.release(hw);
+ return ret_val;
}
+ return E1000_SUCCESS;
}
/**
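
Because e1000_resume_workarounds_pchlan() now reports a status instead of returning void, resume paths can surface failures; a minimal caller sketch:

/* hypothetical resume path: propagate the workaround status */
u32 ret = e1000_resume_workarounds_pchlan(hw);
if (ret != E1000_SUCCESS)
	DEBUGOUT1("PCH resume workarounds failed, status=%d\n", (int)ret);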
@@ -5258,3 +5323,44 @@ release:
}
}
+/**
+ * e1000_configure_k0s_lpt - Configure K0s power state
+ * @hw: pointer to the HW structure
+ * @entry_latency: Tx idle period for entering K0s - valid values are 0 to 3.
+ * 0 corresponds to 128 ns; each increment doubles the duration.
+ * @min_time: Minimum Tx idle period allowed - valid values are 0 to 4.
+ * 0 corresponds to 128 ns; each increment doubles the duration.
+ *
+ * Configure the K0s power state based on the provided parameters.
+ * Assumes semaphore already acquired.
+ *
+ * Success returns 0, Failure returns:
+ * -E1000_ERR_PHY (-2) in case of access error
+ * -E1000_ERR_PARAM (-4) in case of parameters error
+ **/
+s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time)
+{
+ s32 ret_val;
+ u16 kmrn_reg = 0;
+
+ DEBUGFUNC("e1000_configure_k0s_lpt");
+
+ if (entry_latency > 3 || min_time > 4)
+ return -E1000_ERR_PARAM;
+
+ ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
+ &kmrn_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* for now don't touch the latency */
+ kmrn_reg &= ~(E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK);
+ kmrn_reg |= ((min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT));
+
+ ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
+ kmrn_reg);
+ if (ret_val)
+ return ret_val;
+
+ return E1000_SUCCESS;
+}
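
The duration encoding documented above is a power-of-two scale: a value v corresponds to 128 ns << v, so the K1_ENTRY_LATENCY and K1_MIN_TIME defaults used earlier in this patch (0 and 1) request a 128 ns entry latency and a 256 ns minimum Tx idle time. A worked sketch of the decode (illustrative only):

/* Decode the K0s duration encoding: 0 -> 128 ns, 1 -> 256 ns, ...,
 * 4 -> 2048 ns. Illustrative, not part of the driver. */
static unsigned int k0s_duration_ns(u8 value)
{
	return 128u << value;
}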
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_ich8lan.h b/src/dpdk22/drivers/net/e1000/base/e1000_ich8lan.h
index 8c5e9c32..33e77fb8 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_ich8lan.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_ich8lan.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -69,22 +69,22 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
#define E1000_FWSM_WLOCK_MAC_SHIFT 7
-#if !defined(EXTERNAL_RELEASE) || (defined(NAHUM6LP_HW) && defined(ULP_SUPPORT))
+#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */
-#endif /* !EXTERNAL_RELEASE || (NAHUM6LP_HW && ULP_SUPPORT) */
+#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
/* Shared Receive Address Registers */
#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
-#if !defined(EXTERNAL_RELEASE) || (defined(NAHUM6LP_HW) && defined(ULP_SUPPORT))
+#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
#define E1000_H2ME 0x05B50 /* Host to ME */
-#endif /* !EXTERNAL_RELEASE || (NAHUM6LP_HW && ULP_SUPPORT) */
-#if !defined(EXTERNAL_RELEASE) || (defined(NAHUM6LP_HW) && defined(ULP_SUPPORT))
+#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
+#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */
#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */
-#endif /* !EXTERNAL_RELEASE || (NAHUM6LP_HW && ULP_SUPPORT) */
+#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_OFF1_OFF2 << 8) | \
(ID_LED_OFF1_ON2 << 4) | \
@@ -97,11 +97,11 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
-#if !defined(EXTERNAL_RELEASE) || (defined(NAHUM6LP_HW) && defined(ULP_SUPPORT))
+#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
/* FEXT register bit definition */
#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004
-#endif /* !EXTERNAL_RELEASE || (NAHUM6LP_HW && ULP_SUPPORT) */
+#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
#define E1000_FEXTNVM_SW_CONFIG 1
#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */
@@ -114,11 +114,13 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
-
-#if !defined(EXTERNAL_RELEASE) || (defined(NAHUM6LP_HW) && defined(ULP_SUPPORT))
+#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000
+/* bit for disabling packet buffer read */
+#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000
+#define E1000_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004
+#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
-
-#endif /* !EXTERNAL_RELEASE || (NAHUM6LP_HW && ULP_SUPPORT) */
+#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
@@ -180,12 +182,14 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
+#define K1_ENTRY_LATENCY 0
+#define K1_MIN_TIME 1
/* SMBus Control Phy Register */
#define CV_SMB_CTRL PHY_REG(769, 23)
#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
-#if !defined(EXTERNAL_RELEASE) || (defined(NAHUM6LP_HW) && defined(ULP_SUPPORT))
+#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
/* I218 Ultra Low Power Configuration 1 Register */
#define I218_ULP_CONFIG1 PHY_REG(779, 16)
#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */
@@ -196,7 +200,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */
#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */
-#endif /* !EXTERNAL_RELEASE || (NAHUM6LP_HW && ULP_SUPPORT) */
+#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
/* SMBus Address Phy Register */
#define HV_SMB_ADDR PHY_REG(768, 26)
#define HV_SMB_ADDR_MASK 0x007F
@@ -262,12 +266,14 @@ POSSIBILITY OF SUCH DAMAGE.
#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
#define I82579_RX_CONFIG 0x3412 /* Receive configuration */
+#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */
#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */
#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */
#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */
+#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */
#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */
#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */
#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
@@ -298,16 +304,17 @@ void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
-void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
+u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time);
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data);
s32 e1000_set_eee_pchlan(struct e1000_hw *hw);
-#if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
+#ifdef ULP_SUPPORT
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx);
s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
-#endif /* NAHUM6LP_HW && ULP_SUPPORT */
+#endif /* ULP_SUPPORT */
#endif /* _E1000_ICH8LAN_H_ */
void e1000_demote_ltr(struct e1000_hw *hw, bool demote, bool link);
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mac.c b/src/dpdk22/drivers/net/e1000/base/e1000_mac.c
index c8ec049b..a0f3a999 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mac.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_mac.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -36,7 +36,7 @@ POSSIBILITY OF SUCH DAMAGE.
STATIC s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
STATIC void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
STATIC void e1000_config_collision_dist_generic(struct e1000_hw *hw);
-STATIC void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+STATIC int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
/**
* e1000_init_mac_ops_generic - Initialize MAC function pointers
@@ -149,15 +149,15 @@ void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw,
}
/**
- * e1000_null_rar_set - No-op function, return void
+ * e1000_null_rar_set - No-op function, return 0
* @hw: pointer to the HW structure
**/
-void e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw,
+int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw,
u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
{
DEBUGFUNC("e1000_null_rar_set");
UNREFERENCED_3PARAMETER(hw, h, a);
- return;
+ return E1000_SUCCESS;
}
/**
@@ -469,7 +469,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
* Sets the receive address array register at index to the address passed
* in by addr.
**/
-STATIC void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+STATIC int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
@@ -495,6 +495,8 @@ STATIC void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
E1000_WRITE_FLUSH(hw);
+
+ return E1000_SUCCESS;
}
/**
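With rar_set switched from void to int across the MAC ops, callers can finally propagate register-write failures instead of silently dropping them. A hedged caller sketch (the function name is hypothetical; the ops table, E1000_SUCCESS, and DEBUGOUT1 are existing base-code facilities):

    static int example_apply_default_mac(struct e1000_hw *hw, u8 *addr)
    {
            int ret = hw->mac.ops.rar_set(hw, addr, 0);

            if (ret != E1000_SUCCESS)
                    DEBUGOUT1("rar_set failed: %d\n", ret);
            return ret;
    }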
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mac.h b/src/dpdk22/drivers/net/e1000/base/e1000_mac.h
index 5a7ce4a4..96a260c3 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mac.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_mac.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -44,7 +44,7 @@ s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
bool e1000_null_mng_mode(struct e1000_hw *hw);
void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
-void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
+int e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
s32 e1000_blink_led_generic(struct e1000_hw *hw);
s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw);
s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_manage.c b/src/dpdk22/drivers/net/e1000/base/e1000_manage.c
index 30db8920..8564a7f8 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_manage.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_manage.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -363,9 +363,12 @@ bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
} else if ((hw->mac.type == e1000_82574) ||
(hw->mac.type == e1000_82583)) {
u16 data;
+ s32 ret_val;
factps = E1000_READ_REG(hw, E1000_FACTPS);
- e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+ ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+ if (ret_val)
+ return false;
if (!(factps & E1000_FACTPS_MNGCG) &&
((data & E1000_NVM_INIT_CTRL2_MNGM) ==
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_manage.h b/src/dpdk22/drivers/net/e1000/base/e1000_manage.h
index e6f92c0c..25be1156 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_manage.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_manage.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mbx.c b/src/dpdk22/drivers/net/e1000/base/e1000_mbx.c
index 7ec4c564..6daf16b0 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mbx.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_mbx.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mbx.h b/src/dpdk22/drivers/net/e1000/base/e1000_mbx.h
index e9524fc8..563dcb9d 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mbx.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_mbx.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_nvm.c b/src/dpdk22/drivers/net/e1000/base/e1000_nvm.c
index 8be437a8..762acd16 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_nvm.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_nvm.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -585,6 +585,9 @@ s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
E1000_NVM_RW_REG_DATA);
}
+ if (ret_val)
+ DEBUGOUT1("NVM read error: %d\n", ret_val);
+
return ret_val;
}
@@ -1370,8 +1373,12 @@ etrack_id:
hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
| eeprom_verl;
+ } else if ((etrack_test & NVM_ETRACK_VALID) == 0) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) |
+ eeprom_verl;
}
- return;
}
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_nvm.h b/src/dpdk22/drivers/net/e1000/base/e1000_nvm.h
index dee1f62f..c400dc3a 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_nvm.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_nvm.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_osdep.c b/src/dpdk22/drivers/net/e1000/base/e1000_osdep.c
index 7270edfa..7270edfa 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_osdep.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_osdep.c
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_osdep.h b/src/dpdk22/drivers/net/e1000/base/e1000_osdep.h
index 438641e2..b2c76e34 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_osdep.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_osdep.h
@@ -43,6 +43,7 @@
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
+#include <rte_byteorder.h>
#include "../e1000_logs.h"
@@ -96,7 +97,7 @@ typedef int bool;
#define E1000_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
#define E1000_PCI_REG_WRITE(reg, value) do { \
- E1000_PCI_REG((reg)) = (value); \
+ E1000_PCI_REG((reg)) = (rte_cpu_to_le_32(value)); \
} while (0)
#define E1000_PCI_REG_ADDR(hw, reg) \
@@ -107,7 +108,7 @@ typedef int bool;
static inline uint32_t e1000_read_addr(volatile void* addr)
{
- return E1000_PCI_REG(addr);
+ return rte_le_to_cpu_32(E1000_PCI_REG(addr));
}
/* Necessary defines */
@@ -117,7 +118,6 @@ static inline uint32_t e1000_read_addr(volatile void* addr)
ADVERTISE_10_FULL | ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
#define M88E1543_E_PHY_ID 0x01410EA0
-#define NAHUM6LP_HW
#define ULP_SUPPORT
#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
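The rte_cpu_to_le_32/rte_le_to_cpu_32 wrappers added above make register access endian-safe: device registers are little-endian, so the conversions compile to no-ops on x86 but byte-swap on big-endian CPUs. An illustrative read-modify-write through the wrapped accessors (E1000_READ_REG, E1000_WRITE_REG, and E1000_CTRL are existing definitions; the function itself is a sketch):

    static inline void example_set_ctrl_bits(struct e1000_hw *hw, uint32_t bits)
    {
            /* both accessors now convert to/from little-endian internally */
            uint32_t ctrl = E1000_READ_REG(hw, E1000_CTRL);

            E1000_WRITE_REG(hw, E1000_CTRL, ctrl | bits);
    }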
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_phy.c b/src/dpdk22/drivers/net/e1000/base/e1000_phy.c
index e214f179..d43b7ce0 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_phy.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_phy.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -1833,9 +1833,9 @@ s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
phy_data);
if (ret_val)
return ret_val;
- }
- DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
+ DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
+ }
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
if (ret_val)
@@ -3498,16 +3498,10 @@ STATIC s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
void e1000_power_up_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
- u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
- if (hw->phy.type == e1000_phy_i210) {
- hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
- power_reg &= ~GS40G_CS_POWER_DOWN;
- hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
- }
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
}
@@ -3522,17 +3516,10 @@ void e1000_power_up_phy_copper(struct e1000_hw *hw)
void e1000_power_down_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
- u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
- /* i210 Phy requires an additional bit for power up/down */
- if (hw->phy.type == e1000_phy_i210) {
- hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
- power_reg |= GS40G_CS_POWER_DOWN;
- hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
- }
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
msec_delay(1);
}
@@ -3563,7 +3550,6 @@ STATIC s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
if (ret_val)
return ret_val;
}
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
@@ -3673,7 +3659,6 @@ STATIC s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
if (ret_val)
return ret_val;
}
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_phy.h b/src/dpdk22/drivers/net/e1000/base/e1000_phy.h
index 73a9b1fd..3e45a9ef 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_phy.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_phy.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -143,7 +143,6 @@ bool e1000_is_mphy_ready(struct e1000_hw *hw);
#define GS40G_MAC_LB 0x4140
#define GS40G_MAC_SPEED_1G 0X0006
#define GS40G_COPPER_SPEC 0x0010
-#define GS40G_CS_POWER_DOWN 0x0002
/* BM/HV Specific Registers */
#define BM_PORT_CTRL_PAGE 769
@@ -275,6 +274,13 @@ bool e1000_is_mphy_ready(struct e1000_hw *hw);
#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */
#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
+#define E1000_KMRNCTRLSTA_K0S_CTRL 0x1E /* Kumeran K0s Control */
+#define E1000_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_SHIFT 0
+#define E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT 4
+#define E1000_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_MASK \
+ (3 << E1000_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_SHIFT)
+#define E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK \
+ (7 << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT)
#define E1000_KMRNCTRLSTA_OP_MODES 0x1F /* Kumeran Modes of Operation */
#define E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC 0x0002 /* change LSC to CSC */
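The new K0s control word packs two fields: ENTRY_LTNCY in bits 1:0 (mask 3 << 0 = 0x0003) and MIN_TIME in bits 6:4 (mask 7 << 4 = 0x0070). A worked packing sketch using only the defines above; for example, min_time = 1 encodes as 1 << 4 = 0x0010:

    static inline u16 example_pack_k0s(u8 entry_latency, u8 min_time)
    {
            u16 reg = 0;

            reg |= (entry_latency << E1000_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_SHIFT) &
                    E1000_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_MASK;
            reg |= (min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT) &
                    E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK;
            return reg;
    }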
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_regs.h b/src/dpdk22/drivers/net/e1000/base/e1000_regs.h
index bde2a089..84531a99 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_regs.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_regs.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -58,14 +58,15 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_SCTL 0x00024 /* SerDes Control - RW */
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
-#if !defined(EXTERNAL_RELEASE) || (defined(NAHUM6LP_HW) && defined(ULP_SUPPORT))
+#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
#define E1000_FEXT 0x0002C /* Future Extended - RW */
-#endif /* !EXTERNAL_RELEASE || (NAHUM6LP_HW && ULP_SUPPORT) */
+#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
@@ -109,7 +110,9 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEMNGCTL_I210 0x01010 /* i210 MNG EEprom Mode Control */
#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
+#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
#define E1000_FLSWCTL 0x01030 /* FLASH control register */
@@ -202,7 +205,7 @@ POSSIBILITY OF SUCH DAMAGE.
/* Queues fetch arbitration priority control register */
#define E1000_I210_TQAVARBCTRL 0x3574
/* Queues priority masks where _n and _p can be 0-3. */
-#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * _n))
+#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n)))
/* QAV Tx mode control registers where _n can be 0 or 1. */
#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n))
@@ -215,7 +218,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n)))
/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */
-#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * _n))
+#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n)))
#define E1000_MMDAC 13 /* MMD Access Control */
#define E1000_MMDAAD 14 /* MMD Access Address/Data */
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_vf.c b/src/dpdk22/drivers/net/e1000/base/e1000_vf.c
index 778561e7..7845b48e 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_vf.c
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_vf.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,7 @@ STATIC s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
STATIC s32 e1000_init_hw_vf(struct e1000_hw *hw);
STATIC s32 e1000_reset_hw_vf(struct e1000_hw *hw);
STATIC void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, u32);
-STATIC void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
+STATIC int e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
STATIC s32 e1000_read_mac_addr_vf(struct e1000_hw *);
/**
@@ -322,7 +322,7 @@ STATIC s32 e1000_init_hw_vf(struct e1000_hw *hw)
* @addr: pointer to the receive address
* @index receive address array register
**/
-STATIC void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr,
+STATIC int e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr,
u32 E1000_UNUSEDARG index)
{
struct e1000_mbx_info *mbx = &hw->mbx;
@@ -345,6 +345,8 @@ STATIC void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr,
if (!ret_val &&
(msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK)))
e1000_read_mac_addr_vf(hw);
+
+ return E1000_SUCCESS;
}
/**
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_vf.h b/src/dpdk22/drivers/net/e1000/base/e1000_vf.h
index 6d5bd996..d6216dec 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_vf.h
+++ b/src/dpdk22/drivers/net/e1000/base/e1000_vf.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -207,7 +207,7 @@ struct e1000_mac_operations {
s32 (*init_hw)(struct e1000_hw *);
s32 (*setup_link)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
- void (*rar_set)(struct e1000_hw *, u8*, u32);
+ int (*rar_set)(struct e1000_hw *, u8*, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
};
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000_ethdev.h b/src/dpdk22/drivers/net/e1000/e1000_ethdev.h
index 71eb5fb7..e8bf8dad 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000_ethdev.h
+++ b/src/dpdk22/drivers/net/e1000/e1000_ethdev.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,6 +33,7 @@
#ifndef _E1000_ETHDEV_H_
#define _E1000_ETHDEV_H_
+#include <rte_time.h>
/* need update link, bit flag */
#define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
@@ -67,22 +68,19 @@
#define E1000_IMIR_DSTPORT 0x0000FFFF
#define E1000_IMIR_PRIORITY 0xE0000000
-#define E1000_IMIR_EXT_SIZE_BP 0x00001000
-#define E1000_IMIR_EXT_CTRL_UGR 0x00002000
-#define E1000_IMIR_EXT_CTRL_ACK 0x00004000
-#define E1000_IMIR_EXT_CTRL_PSH 0x00008000
-#define E1000_IMIR_EXT_CTRL_RST 0x00010000
-#define E1000_IMIR_EXT_CTRL_SYN 0x00020000
-#define E1000_IMIR_EXT_CTRL_FIN 0x00040000
-#define E1000_IMIR_EXT_CTRL_BP 0x00080000
#define E1000_MAX_TTQF_FILTERS 8
#define E1000_2TUPLE_MAX_PRI 7
-#define E1000_MAX_FLEXIBLE_FILTERS 8
+#define E1000_MAX_FLEX_FILTERS 8
#define E1000_MAX_FHFT 4
#define E1000_MAX_FHFT_EXT 4
+#define E1000_FHFT_SIZE_IN_DWD 64
#define E1000_MAX_FLEX_FILTER_PRI 7
#define E1000_MAX_FLEX_FILTER_LEN 128
+#define E1000_MAX_FLEX_FILTER_DWDS \
+ (E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t))
+#define E1000_FLEX_FILTERS_MASK_SIZE \
+ (E1000_MAX_FLEX_FILTER_DWDS / 4)
#define E1000_FHFT_QUEUEING_LEN 0x0000007F
#define E1000_FHFT_QUEUEING_QUEUE 0x00000700
#define E1000_FHFT_QUEUEING_PRIO 0x00070000
@@ -96,15 +94,48 @@
#define E1000_MAX_FTQF_FILTERS 8
#define E1000_FTQF_PROTOCOL_MASK 0x000000FF
#define E1000_FTQF_5TUPLE_MASK_SHIFT 28
-#define E1000_FTQF_PROTOCOL_COMP_MASK 0x10000000
-#define E1000_FTQF_SOURCE_ADDR_MASK 0x20000000
-#define E1000_FTQF_DEST_ADDR_MASK 0x40000000
-#define E1000_FTQF_SOURCE_PORT_MASK 0x80000000
-#define E1000_FTQF_VF_MASK_EN 0x00008000
#define E1000_FTQF_QUEUE_MASK 0x03ff0000
#define E1000_FTQF_QUEUE_SHIFT 16
#define E1000_FTQF_QUEUE_ENABLE 0x00000100
+#define IGB_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_IPV6_EX | \
+ ETH_RSS_IPV6_TCP_EX | \
+ ETH_RSS_IPV6_UDP_EX)
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
+ * descriptors must satisfy the following condition:
+ * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+ */
+#define E1000_MIN_RING_DESC 32
+#define E1000_MAX_RING_DESC 4096
+
+/*
+ * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
+ * a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary.
+ * This also improves cache behavior; the hardware supports cache line
+ * sizes of up to 128 bytes.
+ */
+#define E1000_ALIGN 128
+
+#define IGB_RXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
+#define IGB_TXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_tx_desc))
+
+#define EM_RXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_rx_desc))
+#define EM_TXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_data_desc))
+
+#define E1000_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define E1000_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
/* structure for interrupt relative data */
struct e1000_interrupt {
uint32_t flags;
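With E1000_ALIGN = 128 and the 16-byte struct e1000_rx_desc, EM_RXD_ALIGN works out to 128 / 16 = 8, so ring sizes must be multiples of 8 within [32, 4096]. A validation sketch mirroring the checks this patch installs in the queue-setup paths (the function name is hypothetical):

    static int example_check_rx_ring_size(uint16_t nb_desc)
    {
            if (nb_desc % EM_RXD_ALIGN != 0 ||
                nb_desc < E1000_MIN_RING_DESC ||
                nb_desc > E1000_MAX_RING_DESC)
                    return -EINVAL;  /* from <errno.h> */
            return 0;
    }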
@@ -131,6 +162,91 @@ struct e1000_vf_info {
uint16_t tx_rate;
};
+TAILQ_HEAD(e1000_flex_filter_list, e1000_flex_filter);
+
+struct e1000_flex_filter_info {
+ uint16_t len;
+ uint32_t dwords[E1000_MAX_FLEX_FILTER_DWDS]; /* flex bytes in dword. */
+ /* if mask bit is 1b, do not compare corresponding byte in dwords. */
+ uint8_t mask[E1000_FLEX_FILTERS_MASK_SIZE];
+ uint8_t priority;
+};
+
+/* Flex filter structure */
+struct e1000_flex_filter {
+ TAILQ_ENTRY(e1000_flex_filter) entries;
+ uint16_t index; /* index of flex filter */
+ struct e1000_flex_filter_info filter_info;
+ uint16_t queue; /* rx queue assigned to */
+};
+
+TAILQ_HEAD(e1000_5tuple_filter_list, e1000_5tuple_filter);
+TAILQ_HEAD(e1000_2tuple_filter_list, e1000_2tuple_filter);
+
+struct e1000_5tuple_filter_info {
+ uint32_t dst_ip;
+ uint32_t src_ip;
+ uint16_t dst_port;
+ uint16_t src_port;
+ uint8_t proto; /* l4 protocol. */
+ /* a packet matching the above 5tuple with any of these TCP flag bits set will hit this filter. */
+ uint8_t tcp_flags;
+ uint8_t priority; /* seven levels (001b-111b), 111b is highest,
+ used when more than one filter matches. */
+ uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */
+ src_ip_mask:1, /* if mask is 1b, do not compare src ip. */
+ dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
+ src_port_mask:1, /* if mask is 1b, do not compare src port. */
+ proto_mask:1; /* if mask is 1b, do not compare protocol. */
+};
+
+struct e1000_2tuple_filter_info {
+ uint16_t dst_port;
+ uint8_t proto; /* l4 protocol. */
+ /* a packet matching the above 2tuple with any of these TCP flag bits set will hit this filter. */
+ uint8_t tcp_flags;
+ uint8_t priority; /* seven levels (001b-111b), 111b is highest,
+ used when more than one filter matches. */
+ uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */
+ src_ip_mask:1, /* if mask is 1b, do not compare src ip. */
+ dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
+ src_port_mask:1, /* if mask is 1b, do not compare src port. */
+ proto_mask:1; /* if mask is 1b, do not compare protocol. */
+};
+
+/* 5tuple filter structure */
+struct e1000_5tuple_filter {
+ TAILQ_ENTRY(e1000_5tuple_filter) entries;
+ uint16_t index; /* the index of 5tuple filter */
+ struct e1000_5tuple_filter_info filter_info;
+ uint16_t queue; /* rx queue assigned to */
+};
+
+/* 2tuple filter structure */
+struct e1000_2tuple_filter {
+ TAILQ_ENTRY(e1000_2tuple_filter) entries;
+ uint16_t index; /* the index of 2tuple filter */
+ struct e1000_2tuple_filter_info filter_info;
+ uint16_t queue; /* rx queue assigned to */
+};
+
+/*
+ * Structure to store filters' info.
+ */
+struct e1000_filter_info {
+ uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
+ /* store used ethertype filters*/
+ uint16_t ethertype_filters[E1000_MAX_ETQF_FILTERS];
+ uint8_t flex_mask; /* Bit mask for every used flex filter */
+ struct e1000_flex_filter_list flex_list;
+ /* Bit mask for every used 5tuple filter */
+ uint8_t fivetuple_mask;
+ struct e1000_5tuple_filter_list fivetuple_list;
+ /* Bit mask for every used 2tuple filter */
+ uint8_t twotuple_mask;
+ struct e1000_2tuple_filter_list twotuple_list;
+};
+
/*
* Structure to store private data for each driver instance (for each port).
*/
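The per-port filter bookkeeping above keeps each filter family on a plain TAILQ, so lookups are linear scans keyed on the filter-info struct. A hypothetical lookup sketch (memcmp and TAILQ_FOREACH come from <string.h> and <sys/queue.h>, which the driver already includes):

    static struct e1000_flex_filter *
    example_flex_filter_lookup(struct e1000_filter_info *info,
                               const struct e1000_flex_filter_info *key)
    {
            struct e1000_flex_filter *it;

            TAILQ_FOREACH(it, &info->flex_list, entries) {
                    if (memcmp(key, &it->filter_info, sizeof(*key)) == 0)
                            return it;
            }
            return NULL;
    }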
@@ -140,8 +256,16 @@ struct e1000_adapter {
struct e1000_interrupt intr;
struct e1000_vfta shadow_vfta;
struct e1000_vf_info *vfdata;
+ struct e1000_filter_info filter;
+ bool stopped;
+ struct rte_timecounter systime_tc;
+ struct rte_timecounter rx_tstamp_tc;
+ struct rte_timecounter tx_tstamp_tc;
};
+#define E1000_DEV_PRIVATE(adapter) \
+ ((struct e1000_adapter *)adapter)
+
#define E1000_DEV_PRIVATE_TO_HW(adapter) \
(&((struct e1000_adapter *)adapter)->hw)
@@ -157,12 +281,16 @@ struct e1000_adapter {
#define E1000_DEV_PRIVATE_TO_P_VFDATA(adapter) \
(&((struct e1000_adapter *)adapter)->vfdata)
+#define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
+ (&((struct e1000_adapter *)adapter)->filter)
+
/*
* RX/TX IGB function prototypes
*/
void eth_igb_tx_queue_release(void *txq);
void eth_igb_rx_queue_release(void *rxq);
void igb_dev_clear_queues(struct rte_eth_dev *dev);
+void igb_dev_free_queues(struct rte_eth_dev *dev);
int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
@@ -210,6 +338,12 @@ void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);
int igb_pf_host_configure(struct rte_eth_dev *eth_dev);
+void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+
+void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+
/*
* RX/TX EM function prototypes
*/
@@ -217,6 +351,7 @@ void eth_em_tx_queue_release(void *txq);
void eth_em_rx_queue_release(void *rxq);
void em_dev_clear_queues(struct rte_eth_dev *dev);
+void em_dev_free_queues(struct rte_eth_dev *dev);
int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
@@ -245,4 +380,12 @@ uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+
+void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+
+void igb_pf_host_uninit(struct rte_eth_dev *dev);
+
#endif /* _E1000_ETHDEV_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000_logs.h b/src/dpdk22/drivers/net/e1000/e1000_logs.h
index 67f2c84c..81e7bf52 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/e1000_logs.h
+++ b/src/dpdk22/drivers/net/e1000/e1000_logs.h
@@ -34,8 +34,8 @@
#ifndef _E1000_LOGS_H_
#define _E1000_LOGS_H_
-#define PMD_INIT_LOG(level, fmt, args...) RTE_LOG(level, PMD," " fmt "\n", ##args)
-
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
#ifdef RTE_LIBRTE_E1000_DEBUG_INIT
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
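The reworked macro prefixes every message with the calling function's name, so existing call sites need no change. For example, inside eth_em_start():

    PMD_INIT_LOG(ERR, "Unable to setup interrupts");
    /* old output: " Unable to setup interrupts"
     * new output: "eth_em_start(): Unable to setup interrupts" */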
diff --git a/src/dpdk_lib18/librte_pmd_e1000/em_ethdev.c b/src/dpdk22/drivers/net/e1000/em_ethdev.c
index 3f2897ee..66e89933 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/em_ethdev.c
+++ b/src/dpdk22/drivers/net/e1000/em_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,14 +47,13 @@
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include "e1000_logs.h"
-#include "e1000/e1000_api.h"
+#include "base/e1000_api.h"
#include "e1000_ethdev.h"
#define EM_EIAC 0x000DC
@@ -82,6 +81,7 @@ static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev,
static int eth_em_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int eth_em_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_em_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_em_interrupt_action(struct rte_eth_dev *dev);
static void eth_em_interrupt_handler(struct rte_intr_handle *handle,
@@ -108,15 +108,25 @@ static void em_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void eth_em_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
*/
+
+static int eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+static int eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
+static void em_lsc_intr_disable(struct e1000_hw *hw);
+static void em_rxq_intr_enable(struct e1000_hw *hw);
+static void em_rxq_intr_disable(struct e1000_hw *hw);
+
static int eth_em_led_on(struct rte_eth_dev *dev);
static int eth_em_led_off(struct rte_eth_dev *dev);
-static void em_intr_disable(struct e1000_hw *hw);
static int em_get_rx_buffer_size(struct e1000_hw *hw);
static void eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+
#define EM_FC_PAUSE_TIME 0x0680
#define EM_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
#define EM_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
@@ -126,15 +136,15 @@ static enum e1000_fc_mode em_fc_setting = e1000_fc_full;
/*
* The set of PCI devices this driver supports
*/
-static struct rte_pci_id pci_id_em_map[] = {
+static const struct rte_pci_id pci_id_em_map[] = {
#define RTE_PCI_DEV_ID_DECL_EM(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
-{.device_id = 0},
+{0},
};
-static struct eth_dev_ops eth_em_ops = {
+static const struct eth_dev_ops eth_em_ops = {
.dev_configure = eth_em_configure,
.dev_start = eth_em_start,
.dev_stop = eth_em_stop,
@@ -156,12 +166,17 @@ static struct eth_dev_ops eth_em_ops = {
.rx_descriptor_done = eth_em_rx_descriptor_done,
.tx_queue_setup = eth_em_tx_queue_setup,
.tx_queue_release = eth_em_tx_queue_release,
+ .rx_queue_intr_enable = eth_em_rx_queue_intr_enable,
+ .rx_queue_intr_disable = eth_em_rx_queue_intr_disable,
.dev_led_on = eth_em_led_on,
.dev_led_off = eth_em_led_off,
.flow_ctrl_get = eth_em_flow_ctrl_get,
.flow_ctrl_set = eth_em_flow_ctrl_set,
.mac_addr_add = eth_em_rar_set,
.mac_addr_remove = eth_em_rar_clear,
+ .set_mc_addr_list = eth_em_set_mc_addr_list,
+ .rxq_info_get = em_rxq_info_get,
+ .txq_info_get = em_txq_info_get,
};
/**
@@ -217,16 +232,18 @@ rte_em_dev_atomic_write_link_status(struct rte_eth_dev *dev,
}
static int
-eth_em_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
- struct rte_eth_dev *eth_dev)
+eth_em_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct e1000_vfta * shadow_vfta =
E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
pci_dev = eth_dev->pci_dev;
+
eth_dev->dev_ops = &eth_em_ops;
eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;
@@ -241,8 +258,11 @@ eth_em_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
return 0;
}
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->device_id = pci_dev->id.device_id;
+ adapter->stopped = 0;
/* For ICH8 support we'll need to map the flash memory BAR */
@@ -272,7 +292,7 @@ eth_em_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
/* initialize the vfta */
memset(shadow_vfta, 0, sizeof(*shadow_vfta));
- PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
+ PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
@@ -282,13 +302,47 @@ eth_em_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
return (0);
}
+static int
+eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ pci_dev = eth_dev->pci_dev;
+
+ if (adapter->stopped == 0)
+ eth_em_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(&(pci_dev->intr_handle));
+ rte_intr_callback_unregister(&(pci_dev->intr_handle),
+ eth_em_interrupt_handler, (void *)eth_dev);
+
+ return 0;
+}
+
static struct eth_driver rte_em_pmd = {
- {
+ .pci_drv = {
.name = "rte_em_pmd",
.id_table = pci_id_em_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_DETACHABLE,
},
.eth_dev_init = eth_em_dev_init,
+ .eth_dev_uninit = eth_em_dev_uninit,
.dev_private_size = sizeof(struct e1000_adapter),
};
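RTE_PCI_DRV_DETACHABLE plus the new eth_dev_uninit hook allow the port to be hot-unplugged at runtime. A hedged application-side sketch, assuming the DPDK 2.x hotplug API (rte_eth_dev_detach, available since release 2.1); the wrapper name is hypothetical:

    static int example_hot_unplug(uint8_t port_id)
    {
            char devname[RTE_ETH_NAME_MAX_LEN];

            rte_eth_dev_stop(port_id);
            rte_eth_dev_close(port_id);  /* runs eth_em_close() */
            return rte_eth_dev_detach(port_id, devname);
    }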
@@ -448,9 +502,13 @@ em_set_pba(struct e1000_hw *hw)
static int
eth_em_start(struct rte_eth_dev *dev)
{
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
int ret, mask;
+ uint32_t intr_vector = 0;
PMD_INIT_FUNC_TRACE();
@@ -486,6 +544,26 @@ eth_em_start(struct rte_eth_dev *dev)
/* Configure for OS presence */
em_init_manageability(hw);
+ if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle)) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec\n", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+
+ /* enable rx interrupt */
+ em_rxq_intr_enable(hw);
+ }
+
eth_em_tx_init(dev);
ret = eth_em_rx_init(dev);
@@ -557,15 +635,30 @@ eth_em_start(struct rte_eth_dev *dev)
}
e1000_setup_link(hw);
- /* check if lsc interrupt feature is enabled */
- if (dev->data->dev_conf.intr_conf.lsc != 0) {
- ret = eth_em_interrupt_setup(dev);
- if (ret) {
- PMD_INIT_LOG(ERR, "Unable to setup interrupts");
- em_dev_clear_queues(dev);
- return ret;
- }
+ if (rte_intr_allow_others(intr_handle)) {
+ /* check if lsc interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ ret = eth_em_interrupt_setup(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to setup interrupts");
+ em_dev_clear_queues(dev);
+ return ret;
+ }
+ } else {
+ rte_intr_callback_unregister(intr_handle,
+ eth_em_interrupt_handler,
+ (void *)dev);
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ PMD_INIT_LOG(INFO, "lsc won't enable because of"
+ " no intr multiplex\n");
}
+ /* check if rxq interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ eth_em_rxq_interrupt_setup(dev);
+
+ rte_intr_enable(intr_handle);
+
+ adapter->stopped = 0;
PMD_INIT_LOG(DEBUG, "<<");
@@ -590,8 +683,11 @@ eth_em_stop(struct rte_eth_dev *dev)
{
struct rte_eth_link link;
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
+ em_rxq_intr_disable(hw);
+ em_lsc_intr_disable(hw);
- em_intr_disable(hw);
e1000_reset_hw(hw);
if (hw->mac.type >= e1000_82544)
E1000_WRITE_REG(hw, E1000_WUC, 0);
@@ -604,14 +700,31 @@ eth_em_stop(struct rte_eth_dev *dev)
/* clear the recorded link status */
memset(&link, 0, sizeof(link));
rte_em_dev_atomic_write_link_status(dev, &link);
+
+ if (!rte_intr_allow_others(intr_handle))
+ /* resume to the default handler */
+ rte_intr_callback_register(intr_handle,
+ eth_em_interrupt_handler,
+ (void *)dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
}
static void
eth_em_close(struct rte_eth_dev *dev)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
eth_em_stop(dev);
+ adapter->stopped = 1;
+ em_dev_free_queues(dev);
e1000_phy_hw_reset(hw);
em_release_manageability(hw);
em_hw_control_release(hw);
@@ -798,11 +911,9 @@ eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
return;
/* Rx Errors */
- rte_stats->ibadcrc = stats->crcerrs;
- rte_stats->ibadlen = stats->rlec + stats->ruc + stats->roc;
rte_stats->imissed = stats->mpc;
- rte_stats->ierrors = rte_stats->ibadcrc +
- rte_stats->ibadlen +
+ rte_stats->ierrors = stats->crcerrs +
+ stats->rlec + stats->ruc + stats->roc +
rte_stats->imissed +
stats->rxerrc + stats->algnerrc + stats->cexterr;
@@ -813,12 +924,6 @@ eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
rte_stats->opackets = stats->gptc;
rte_stats->ibytes = stats->gorc;
rte_stats->obytes = stats->gotc;
-
- /* XON/XOFF pause frames stats registers */
- rte_stats->tx_pause_xon = stats->xontxc;
- rte_stats->rx_pause_xon = stats->xonrxc;
- rte_stats->tx_pause_xoff = stats->xofftxc;
- rte_stats->rx_pause_xoff = stats->xoffrxc;
}
static void
@@ -834,6 +939,27 @@ eth_em_stats_reset(struct rte_eth_dev *dev)
memset(hw_stats, 0, sizeof(*hw_stats));
}
+static int
+eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ em_rxq_intr_enable(hw);
+ rte_intr_enable(&dev->pci_dev->intr_handle);
+
+ return 0;
+}
+
+static int
+eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ em_rxq_intr_disable(hw);
+
+ return 0;
+}
+
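On the application side, these new rx_queue_intr_enable/disable hooks back DPDK's Rx-interrupt mode: arm the queue interrupt, block on epoll until traffic arrives, then fall back to polling. A hedged sketch, assuming the queue's event fd was registered earlier with rte_eth_dev_rx_intr_ctl():

    static void example_wait_for_rx(uint8_t port_id, uint16_t queue_id)
    {
            struct rte_epoll_event ev;

            rte_eth_dev_rx_intr_enable(port_id, queue_id);
            rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1 /* no timeout */);
            rte_eth_dev_rx_intr_disable(port_id, queue_id);
            /* drain the queue with rte_eth_rx_burst() before re-arming */
    }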
static uint32_t
em_get_max_pktlen(const struct e1000_hw *hw)
{
@@ -845,11 +971,11 @@ em_get_max_pktlen(const struct e1000_hw *hw)
case e1000_pch2lan:
case e1000_82574:
case e1000_80003es2lan: /* 9K Jumbo Frame size */
+ case e1000_82583:
return (0x2412);
case e1000_pchlan:
return (0x1000);
/* Adapters that do not support jumbo frames */
- case e1000_82583:
case e1000_ich8lan:
return (ETHER_MAX_LEN);
default:
@@ -885,6 +1011,18 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = 1;
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = E1000_MAX_RING_DESC,
+ .nb_min = E1000_MIN_RING_DESC,
+ .nb_align = EM_RXD_ALIGN,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = E1000_MAX_RING_DESC,
+ .nb_min = E1000_MIN_RING_DESC,
+ .nb_align = EM_TXD_ALIGN,
+ };
}
/* return 0 means link status changed, -1 means not changed */
@@ -1201,13 +1339,7 @@ eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
}
-static void
-em_intr_disable(struct e1000_hw *hw)
-{
- E1000_WRITE_REG(hw, E1000_IMC, ~0);
-}
-
-/**
+/*
* It enables the interrupt mask and then enable the interrupt.
*
* @param dev
@@ -1220,15 +1352,83 @@ em_intr_disable(struct e1000_hw *hw)
static int
eth_em_interrupt_setup(struct rte_eth_dev *dev)
{
+ uint32_t regval;
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- E1000_WRITE_REG(hw, E1000_IMS, E1000_ICR_LSC);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ /* clear interrupt */
+ E1000_READ_REG(hw, E1000_ICR);
+ regval = E1000_READ_REG(hw, E1000_IMS);
+ E1000_WRITE_REG(hw, E1000_IMS, regval | E1000_ICR_LSC);
return (0);
}
/*
+ * It clears the interrupt causes and enables the interrupt.
+ * It is called only once, during NIC initialization.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ E1000_READ_REG(hw, E1000_ICR);
+ em_rxq_intr_enable(hw);
+ return 0;
+}
+
+/*
+ * Enables the receive packet interrupt.
+ * @param hw
+ * Pointer to struct e1000_hw
+ *
+ * @return
+ */
+static void
+em_rxq_intr_enable(struct e1000_hw *hw)
+{
+ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_RXT0);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/*
+ * Disables the LSC (link status change) interrupt.
+ * @param hw
+ * Pointer to struct e1000_hw
+ *
+ * @return
+ */
+static void
+em_lsc_intr_disable(struct e1000_hw *hw)
+{
+ E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/*
+ * Disables the receive packet interrupt.
+ * @param hw
+ * Pointer to struct e1000_hw
+ *
+ * @return
+ */
+static void
+em_rxq_intr_disable(struct e1000_hw *hw)
+{
+ E1000_READ_REG(hw, E1000_ICR);
+ E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/*
* It reads ICR and gets interrupt causes, check it and set a bit flag
* to update link status.
*
@@ -1302,9 +1502,10 @@ eth_em_interrupt_action(struct rte_eth_dev *dev)
} else {
PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
}
- PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+ PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
dev->pci_dev->addr.devid, dev->pci_dev->addr.function);
+
tctl = E1000_READ_REG(hw, E1000_TCTL);
rctl = E1000_READ_REG(hw, E1000_RCTL);
if (link.link_status) {
@@ -1524,6 +1725,18 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return 0;
}
+static int
+eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
+ return 0;
+}
+
struct rte_driver em_pmd_drv = {
.type = PMD_PDEV,
.init = rte_em_pmd_init,
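The new set_mc_addr_list hook is reached through rte_eth_dev_set_mc_addr_list(), which replaces the port's entire multicast allow-list in one call. A hedged usage sketch (01:00:5e:00:00:01 is the all-hosts IPv4 multicast group, chosen purely as an example):

    static int example_join_all_hosts(uint8_t port_id)
    {
            struct ether_addr mc = {
                    .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }
            };

            return rte_eth_dev_set_mc_addr_list(port_id, &mc, 1);
    }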
diff --git a/src/dpdk_lib18/librte_pmd_e1000/em_rxtx.c b/src/dpdk22/drivers/net/e1000/em_rxtx.c
index aa0b88c1..d8fb252f 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/em_rxtx.c
+++ b/src/dpdk22/drivers/net/e1000/em_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,7 +51,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
@@ -71,9 +70,9 @@
#include <rte_string_fns.h>
#include "e1000_logs.h"
-#include "e1000/e1000_api.h"
+#include "base/e1000_api.h"
#include "e1000_ethdev.h"
-#include "e1000/e1000_osdep.h"
+#include "base/e1000_osdep.h"
#define E1000_TXD_VLAN_SHIFT 16
@@ -183,7 +182,9 @@ struct em_tx_queue {
volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
uint16_t nb_tx_desc; /**< number of TX descriptors. */
uint16_t tx_tail; /**< Current value of TDT register. */
- uint16_t tx_free_thresh;/**< minimum TX before freeing. */
+	/**< Start freeing TX buffers if there are fewer free descriptors than
+ this value. */
+ uint16_t tx_free_thresh;
/**< Number of TX descriptors to use before RS bit is set. */
uint16_t tx_rs_thresh;
/** Number of TX descriptors used since RS bit was set. */
@@ -419,9 +420,8 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
txe = &sw_ring[tx_id];
/* Determine if the descriptor ring needs to be cleaned. */
- if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) {
+ if (txq->nb_tx_free < txq->tx_free_thresh)
em_xmit_cleanup(txq);
- }
/* TX loop */
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
@@ -1081,51 +1081,9 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return (nb_rx);
}
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define EM_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * desscriptors should meet the following condition:
- * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
- */
-#define EM_MIN_RING_DESC 32
-#define EM_MAX_RING_DESC 4096
-
#define EM_MAX_BUF_SIZE 16384
#define EM_RCTL_FLXBUF_STEP 1024
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- const struct rte_memzone *mz;
- char z_name[RTE_MEMZONE_NAMESIZE];
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name, dev->data->port_id,
- queue_id);
-
- if ((mz = rte_memzone_lookup(z_name)) != 0)
- return (mz);
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(z_name, ring_size,
- socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve(z_name, ring_size, socket_id, 0);
-#endif
-}
-
static void
em_tx_queue_release_mbufs(struct em_tx_queue *txq)
{
@@ -1210,11 +1168,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
/*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
- * of EM_ALIGN.
+ * of E1000_ALIGN.
*/
- if (((nb_desc * sizeof(*txq->tx_ring)) % EM_ALIGN) != 0 ||
- (nb_desc > EM_MAX_RING_DESC) ||
- (nb_desc < EM_MIN_RING_DESC)) {
+ if (nb_desc % EM_TXD_ALIGN != 0 ||
+ (nb_desc > E1000_MAX_RING_DESC) ||
+ (nb_desc < E1000_MIN_RING_DESC)) {
return -(EINVAL);
}
@@ -1272,9 +1230,10 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- tsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;
- if ((tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
- socket_id)) == NULL)
+ tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC;
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (tz == NULL)
return (-ENOMEM);
/* Allocate the tx queue data structure. */
@@ -1300,11 +1259,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-#else
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#endif
txq->tx_ring = (struct e1000_data_desc *) tz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
@@ -1375,11 +1330,11 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
- * of EM_ALIGN.
+ * of E1000_ALIGN.
*/
- if (((nb_desc * sizeof(rxq->rx_ring[0])) % EM_ALIGN) != 0 ||
- (nb_desc > EM_MAX_RING_DESC) ||
- (nb_desc < EM_MIN_RING_DESC)) {
+ if (nb_desc % EM_RXD_ALIGN != 0 ||
+ (nb_desc > E1000_MAX_RING_DESC) ||
+ (nb_desc < E1000_MIN_RING_DESC)) {
return (-EINVAL);
}
@@ -1399,9 +1354,10 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
}
/* Allocate RX ring for max possible mumber of hardware descriptors. */
- rsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC;
- if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
- socket_id)) == NULL)
+ rsize = sizeof(rxq->rx_ring[0]) * E1000_MAX_RING_DESC;
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rz == NULL)
return (-ENOMEM);
/* Allocate the RX queue data structure. */
@@ -1430,11 +1386,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
-#else
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#endif
rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
@@ -1515,6 +1467,24 @@ em_dev_clear_queues(struct rte_eth_dev *dev)
}
}
+void
+em_dev_free_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ eth_em_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ eth_em_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
/*
* Takes as input/output parameter RX buffer size.
* Returns (BSIZE | BSEX | FLXBUF) fields of RCTL register.
@@ -1669,12 +1639,11 @@ eth_em_rx_init(struct rte_eth_dev *dev)
/* Determine RX bufsize. */
rctl_bsize = EM_MAX_BUF_SIZE;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- struct rte_pktmbuf_pool_private *mbp_priv;
uint32_t buf_size;
rxq = dev->data->rx_queues[i];
- mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
- buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+ buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
rctl_bsize = RTE_MIN(rctl_bsize, buf_size);
}
@@ -1865,3 +1834,33 @@ eth_em_tx_init(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
+void
+em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct em_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+}
+
+void
+em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct em_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+}
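These two getters feed the generic rte_eth_rx_queue_info_get()/rte_eth_tx_queue_info_get() wrappers added in DPDK 2.2. A hedged query sketch (the printf-based dump is illustrative only):

    static void example_dump_rxq(uint8_t port_id, uint16_t queue_id)
    {
            struct rte_eth_rxq_info qinfo;

            if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) == 0)
                    printf("rxq %u: %u descriptors, scattered_rx=%d\n",
                           queue_id, qinfo.nb_desc, qinfo.scattered_rx);
    }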
diff --git a/src/dpdk22/drivers/net/e1000/igb_ethdev.c b/src/dpdk22/drivers/net/e1000/igb_ethdev.c
new file mode 100644
index 00000000..d1bbcda0
--- /dev/null
+++ b/src/dpdk22/drivers/net/e1000/igb_ethdev.c
@@ -0,0 +1,4867 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+
+#include "e1000_logs.h"
+#include "base/e1000_api.h"
+#include "e1000_ethdev.h"
+#include "igb_regs.h"
+
+/*
+ * Default values for port configuration
+ */
+#define IGB_DEFAULT_RX_FREE_THRESH 32
+#define IGB_DEFAULT_RX_PTHRESH 8
+#define IGB_DEFAULT_RX_HTHRESH 8
+#define IGB_DEFAULT_RX_WTHRESH 0
+
+#define IGB_DEFAULT_TX_PTHRESH 32
+#define IGB_DEFAULT_TX_HTHRESH 0
+#define IGB_DEFAULT_TX_WTHRESH 0
+
+#define IGB_HKEY_MAX_INDEX 10
+
+/* Bit shift and mask */
+#define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
+#define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
+#define IGB_8_BIT_WIDTH CHAR_BIT
+#define IGB_8_BIT_MASK UINT8_MAX
+
+/* Additional timesync values. */
+#define E1000_CYCLECOUNTER_MASK 0xffffffffffffffffULL
+#define E1000_ETQF_FILTER_1588 3
+#define IGB_82576_TSYNC_SHIFT 16
+#define E1000_INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
+#define E1000_INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
+#define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000
+
+static int eth_igb_configure(struct rte_eth_dev *dev);
+static int eth_igb_start(struct rte_eth_dev *dev);
+static void eth_igb_stop(struct rte_eth_dev *dev);
+static void eth_igb_close(struct rte_eth_dev *dev);
+static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
+static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
+static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
+static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
+static int eth_igb_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static void eth_igb_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *rte_stats);
+static int eth_igb_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstats *xstats, unsigned n);
+static void eth_igb_stats_reset(struct rte_eth_dev *dev);
+static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
+static void eth_igb_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
+static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
+static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
+ void *param);
+static int igb_hardware_init(struct e1000_hw *hw);
+static void igb_hw_control_acquire(struct e1000_hw *hw);
+static void igb_hw_control_release(struct e1000_hw *hw);
+static void igb_init_manageability(struct e1000_hw *hw);
+static void igb_release_manageability(struct e1000_hw *hw);
+
+static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
+static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
+static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+
+static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);
+
+static int eth_igb_led_on(struct rte_eth_dev *dev);
+static int eth_igb_led_off(struct rte_eth_dev *dev);
+
+static void igb_intr_disable(struct e1000_hw *hw);
+static int igb_get_rx_buffer_size(struct e1000_hw *hw);
+static void eth_igb_rar_set(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
+static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr);
+
+static void igbvf_intr_disable(struct e1000_hw *hw);
+static int igbvf_dev_configure(struct rte_eth_dev *dev);
+static int igbvf_dev_start(struct rte_eth_dev *dev);
+static void igbvf_dev_stop(struct rte_eth_dev *dev);
+static void igbvf_dev_close(struct rte_eth_dev *dev);
+static int eth_igbvf_link_update(struct e1000_hw *hw);
+static void eth_igbvf_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *rte_stats);
+static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstats *xstats, unsigned n);
+static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
+static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
+static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr);
+static int igbvf_get_reg_length(struct rte_eth_dev *dev);
+static int igbvf_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs);
+
+static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+
+static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add);
+static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter);
+static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter);
+static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter);
+static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
+ struct rte_eth_flex_filter *filter,
+ bool add);
+static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
+ struct rte_eth_flex_filter *filter);
+static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter);
+static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter);
+static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *filter,
+ bool add);
+static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *filter);
+static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter);
+static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
+static int eth_igb_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs);
+static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
+static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom);
+static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom);
+static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+static int igb_timesync_enable(struct rte_eth_dev *dev);
+static int igb_timesync_disable(struct rte_eth_dev *dev);
+static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags);
+static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int igb_timesync_read_time(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int igb_timesync_write_time(struct rte_eth_dev *dev,
+ const struct timespec *timestamp);
+static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector);
+static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
+ uint8_t index, uint8_t offset);
+static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
+
+/*
+ * Macro for updating VF statistics from registers that are not "clear on read"
+ */
+#define UPDATE_VF_STAT(reg, last, cur) \
+{ \
+ u32 latest = E1000_READ_REG(hw, reg); \
+ cur += (latest - last) & UINT_MAX; \
+ last = latest; \
+}
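+/*
+ * Note: the unsigned subtraction (latest - last) above wraps modulo 2^32,
+ * so the accumulated 64-bit total remains correct across register rollover.
+ */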
+
+#define IGB_FC_PAUSE_TIME 0x0680
+#define IGB_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
+#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
+
+#define IGBVF_PMD_NAME "rte_igbvf_pmd" /* PMD name */
+
+static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_igb_map[] = {
+
+#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+
+{0},
+};
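+/*
+ * rte_pci_dev_ids.h expands each IGB entry through the macro defined
+ * above, so this table automatically tracks DPDK's canonical device list.
+ */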
+
+/*
+ * The set of PCI devices this driver supports (for 82576&I350 VF)
+ */
+static const struct rte_pci_id pci_id_igbvf_map[] = {
+
+#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+
+{0},
+};
+
+static const struct rte_eth_desc_lim rx_desc_lim = {
+ .nb_max = E1000_MAX_RING_DESC,
+ .nb_min = E1000_MIN_RING_DESC,
+ .nb_align = IGB_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+ .nb_max = E1000_MAX_RING_DESC,
+ .nb_min = E1000_MIN_RING_DESC,
+ .nb_align = IGB_RXD_ALIGN,
+};
+
+static const struct eth_dev_ops eth_igb_ops = {
+ .dev_configure = eth_igb_configure,
+ .dev_start = eth_igb_start,
+ .dev_stop = eth_igb_stop,
+ .dev_close = eth_igb_close,
+ .promiscuous_enable = eth_igb_promiscuous_enable,
+ .promiscuous_disable = eth_igb_promiscuous_disable,
+ .allmulticast_enable = eth_igb_allmulticast_enable,
+ .allmulticast_disable = eth_igb_allmulticast_disable,
+ .link_update = eth_igb_link_update,
+ .stats_get = eth_igb_stats_get,
+ .xstats_get = eth_igb_xstats_get,
+ .stats_reset = eth_igb_stats_reset,
+ .xstats_reset = eth_igb_xstats_reset,
+ .dev_infos_get = eth_igb_infos_get,
+ .mtu_set = eth_igb_mtu_set,
+ .vlan_filter_set = eth_igb_vlan_filter_set,
+ .vlan_tpid_set = eth_igb_vlan_tpid_set,
+ .vlan_offload_set = eth_igb_vlan_offload_set,
+ .rx_queue_setup = eth_igb_rx_queue_setup,
+ .rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
+ .rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
+ .rx_queue_release = eth_igb_rx_queue_release,
+ .rx_queue_count = eth_igb_rx_queue_count,
+ .rx_descriptor_done = eth_igb_rx_descriptor_done,
+ .tx_queue_setup = eth_igb_tx_queue_setup,
+ .tx_queue_release = eth_igb_tx_queue_release,
+ .dev_led_on = eth_igb_led_on,
+ .dev_led_off = eth_igb_led_off,
+ .flow_ctrl_get = eth_igb_flow_ctrl_get,
+ .flow_ctrl_set = eth_igb_flow_ctrl_set,
+ .mac_addr_add = eth_igb_rar_set,
+ .mac_addr_remove = eth_igb_rar_clear,
+ .mac_addr_set = eth_igb_default_mac_addr_set,
+ .reta_update = eth_igb_rss_reta_update,
+ .reta_query = eth_igb_rss_reta_query,
+ .rss_hash_update = eth_igb_rss_hash_update,
+ .rss_hash_conf_get = eth_igb_rss_hash_conf_get,
+ .filter_ctrl = eth_igb_filter_ctrl,
+ .set_mc_addr_list = eth_igb_set_mc_addr_list,
+ .rxq_info_get = igb_rxq_info_get,
+ .txq_info_get = igb_txq_info_get,
+ .timesync_enable = igb_timesync_enable,
+ .timesync_disable = igb_timesync_disable,
+ .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
+ .get_reg_length = eth_igb_get_reg_length,
+ .get_reg = eth_igb_get_regs,
+ .get_eeprom_length = eth_igb_get_eeprom_length,
+ .get_eeprom = eth_igb_get_eeprom,
+ .set_eeprom = eth_igb_set_eeprom,
+ .timesync_adjust_time = igb_timesync_adjust_time,
+ .timesync_read_time = igb_timesync_read_time,
+ .timesync_write_time = igb_timesync_write_time,
+};
+
+/*
+ * dev_ops for the virtual function; only the bare necessities for
+ * basic VF operation are implemented
+ */
+static const struct eth_dev_ops igbvf_eth_dev_ops = {
+ .dev_configure = igbvf_dev_configure,
+ .dev_start = igbvf_dev_start,
+ .dev_stop = igbvf_dev_stop,
+ .dev_close = igbvf_dev_close,
+ .link_update = eth_igb_link_update,
+ .stats_get = eth_igbvf_stats_get,
+ .xstats_get = eth_igbvf_xstats_get,
+ .stats_reset = eth_igbvf_stats_reset,
+ .xstats_reset = eth_igbvf_stats_reset,
+ .vlan_filter_set = igbvf_vlan_filter_set,
+ .dev_infos_get = eth_igbvf_infos_get,
+ .rx_queue_setup = eth_igb_rx_queue_setup,
+ .rx_queue_release = eth_igb_rx_queue_release,
+ .tx_queue_setup = eth_igb_tx_queue_setup,
+ .tx_queue_release = eth_igb_tx_queue_release,
+ .set_mc_addr_list = eth_igb_set_mc_addr_list,
+ .rxq_info_get = igb_rxq_info_get,
+ .txq_info_get = igb_txq_info_get,
+ .mac_addr_set = igbvf_default_mac_addr_set,
+ .get_reg_length = igbvf_get_reg_length,
+ .get_reg = igbvf_get_regs,
+};
+
+/* store statistics names and their offsets in the stats structure */
+struct rte_igb_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = {
+ {"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)},
+ {"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)},
+ {"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)},
+ {"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)},
+ {"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)},
+ {"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)},
+ {"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats,
+ ecol)},
+ {"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)},
+ {"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)},
+ {"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)},
+ {"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)},
+ {"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)},
+ {"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)},
+ {"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)},
+ {"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)},
+ {"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)},
+ {"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)},
+ {"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats,
+ fcruc)},
+ {"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)},
+ {"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)},
+ {"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)},
+ {"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)},
+ {"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
+ prc1023)},
+ {"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
+ prc1522)},
+ {"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)},
+ {"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)},
+ {"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)},
+ {"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)},
+ {"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)},
+ {"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)},
+ {"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)},
+ {"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)},
+ {"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)},
+ {"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)},
+ {"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)},
+ {"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)},
+ {"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)},
+ {"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)},
+ {"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)},
+ {"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)},
+ {"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)},
+ {"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
+ ptc1023)},
+ {"tx_size_1023_to_max_packets", offsetof(struct e1000_hw_stats,
+ ptc1522)},
+ {"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)},
+ {"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)},
+ {"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)},
+ {"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)},
+ {"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)},
+ {"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)},
+ {"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)},
+
+ {"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)},
+};
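+/*
+ * eth_igb_xstats_get() walks this table, using each offset to pull the
+ * matching counter out of struct e1000_hw_stats with pointer arithmetic.
+ */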
+
+#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \
+ sizeof(rte_igb_stats_strings[0]))
+
+static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
+ {"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)},
+ {"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)},
+ {"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)},
+ {"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)},
+ {"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)},
+};
+
+#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
+ sizeof(rte_igbvf_stats_strings[0]))
+
+/**
+ * Atomically reads the link status information from global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   Pointer to the structure rte_eth_dev to read from.
+ * @param link
+ *   Pointer to the buffer in which to save the link status.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &(dev->data->dev_link);
+
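+ /* A single 64-bit compare-and-set copies the whole 8-byte
+ * rte_eth_link struct atomically; it fails only if a concurrent
+ * writer changed *dst between reading the expected value and the
+ * swap. */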
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   Pointer to the structure rte_eth_dev to write to.
+ * @param link
+ *   Pointer to the buffer holding the link status to be written.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &(dev->data->dev_link);
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static inline void
+igb_intr_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
+ E1000_WRITE_FLUSH(hw);
+}
+
+static void
+igb_intr_disable(struct e1000_hw *hw)
+{
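+ /* Writing all ones to the Interrupt Mask Clear register disables
+ * every interrupt cause; the flush forces the posted PCI write to
+ * complete before returning. */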
+ E1000_WRITE_REG(hw, E1000_IMC, ~0);
+ E1000_WRITE_FLUSH(hw);
+}
+
+static inline int32_t
+igb_pf_reset_hw(struct e1000_hw *hw)
+{
+ uint32_t ctrl_ext;
+ int32_t status;
+
+ status = e1000_reset_hw(hw);
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ return status;
+}
+
+static void
+igb_identify_hardware(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ hw->vendor_id = dev->pci_dev->id.vendor_id;
+ hw->device_id = dev->pci_dev->id.device_id;
+ hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
+
+ e1000_set_mac_type(hw);
+
+ /* need to check if it is a vf device below */
+}
+
+static int
+igb_reset_swfw_lock(struct e1000_hw *hw)
+{
+ int ret_val;
+
+ /*
+ * Do mac ops initialization manually here, since we will need
+ * some function pointers set by this call.
+ */
+ ret_val = e1000_init_mac_params(hw);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Acquiring the SMBI lock should not fail at this early stage. If it
+ * does, a previous run of the application exited without releasing
+ * it, so force the release of the stale lock.
+ */
+ if (e1000_get_hw_semaphore_generic(hw) < 0) {
+ PMD_DRV_LOG(DEBUG, "SMBI lock released");
+ }
+ e1000_put_hw_semaphore_generic(hw);
+
+ if (hw->mac.ops.acquire_swfw_sync != NULL) {
+ uint16_t mask;
+
+ /*
+ * Acquiring the PHY lock should not fail at this early stage. If it
+ * does, a previous run of the application exited without releasing
+ * it, so force the release of the stale lock.
+ */
+ mask = E1000_SWFW_PHY0_SM << hw->bus.func;
+ if (hw->bus.func > E1000_FUNC_1)
+ mask <<= 2;
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
+ PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
+ hw->bus.func);
+ }
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ /*
+ * This one is trickier since it is common to all ports; but
+ * swfw_sync retries for long enough (1s) to be almost sure that, if
+ * the lock cannot be taken, the semaphore was left improperly held.
+ */
+ mask = E1000_SWFW_EEP_SM;
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
+ PMD_DRV_LOG(DEBUG, "SWFW common locks released");
+ }
+ hw->mac.ops.release_swfw_sync(hw, mask);
+ }
+
+ return E1000_SUCCESS;
+}
+
+static int
+eth_igb_dev_init(struct rte_eth_dev *eth_dev)
+{
+ int error = 0;
+ struct rte_pci_device *pci_dev;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+
+ uint32_t ctrl_ext;
+
+ pci_dev = eth_dev->pci_dev;
+
+ eth_dev->dev_ops = &eth_igb_ops;
+ eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
+ eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
+ return 0;
+ }
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;
+
+ igb_identify_hardware(eth_dev);
+ if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
+ error = -EIO;
+ goto err_late;
+ }
+
+ e1000_get_bus_info(hw);
+
+ /* Reset any pending lock */
+ if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
+ error = -EIO;
+ goto err_late;
+ }
+
+ /* Finish initialization */
+ if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
+ error = -EIO;
+ goto err_late;
+ }
+
+ hw->mac.autoneg = 1;
+ hw->phy.autoneg_wait_to_complete = 0;
+ hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+
+ /* Copper options */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ hw->phy.mdix = 0; /* AUTO_ALL_MODES */
+ hw->phy.disable_polarity_correction = 0;
+ hw->phy.ms_type = e1000_ms_hw_default;
+ }
+
+ /*
+ * Start from a known state, this is important in reading the nvm
+ * and mac from that.
+ */
+ igb_pf_reset_hw(hw);
+
+ /* Make sure we have a good EEPROM before we read from it */
+ if (e1000_validate_nvm_checksum(hw) < 0) {
+ /*
+ * Some PCI-E parts fail the first check due to
+ * the link being in a sleep state; call it again, and
+ * if it fails a second time it is a real issue.
+ */
+ if (e1000_validate_nvm_checksum(hw) < 0) {
+ PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
+ error = -EIO;
+ goto err_late;
+ }
+ }
+
+ /* Read the permanent MAC address out of the EEPROM */
+ if (e1000_read_mac_addr(hw) != 0) {
+ PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
+ error = -EIO;
+ goto err_late;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("e1000",
+ ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
+ "store MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+ error = -ENOMEM;
+ goto err_late;
+ }
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);
+
+ /* initialize the vfta */
+ memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+ /* Now initialize the hardware */
+ if (igb_hardware_init(hw) != 0) {
+ PMD_INIT_LOG(ERR, "Hardware initialization failed");
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ error = -ENODEV;
+ goto err_late;
+ }
+ hw->mac.get_link_status = 1;
+ adapter->stopped = 0;
+
+ /* Indicate SOL/IDER usage */
+ if (e1000_check_reset_block(hw) < 0) {
+ PMD_INIT_LOG(ERR, "PHY reset is blocked due to"
+ "SOL/IDER session");
+ }
+
+ /* initialize PF if max_vfs not zero */
+ igb_pf_host_init(eth_dev);
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ eth_igb_interrupt_handler,
+ (void *)eth_dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(&pci_dev->intr_handle);
+
+ /* enable support intr */
+ igb_intr_enable(eth_dev);
+
+ TAILQ_INIT(&filter_info->flex_list);
+ filter_info->flex_mask = 0;
+ TAILQ_INIT(&filter_info->twotuple_list);
+ filter_info->twotuple_mask = 0;
+ TAILQ_INIT(&filter_info->fivetuple_list);
+ filter_info->fivetuple_mask = 0;
+
+ return 0;
+
+err_late:
+ igb_hw_control_release(hw);
+
+ return (error);
+}
+
+static int
+eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct e1000_hw *hw;
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ pci_dev = eth_dev->pci_dev;
+
+ if (adapter->stopped == 0)
+ eth_igb_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ /* Reset any pending lock */
+ igb_reset_swfw_lock(hw);
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ /* uninitialize PF if max_vfs not zero */
+ igb_pf_host_uninit(eth_dev);
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(&(pci_dev->intr_handle));
+ rte_intr_callback_unregister(&(pci_dev->intr_handle),
+ eth_igb_interrupt_handler, (void *)eth_dev);
+
+ return 0;
+}
+
+/*
+ * Virtual Function device init
+ */
+static int
+eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int diag;
+ struct ether_addr *perm_addr = (struct ether_addr *)hw->mac.perm_addr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->dev_ops = &igbvf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
+ eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
+ return 0;
+ }
+
+ pci_dev = eth_dev->pci_dev;
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ hw->device_id = pci_dev->id.device_id;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ adapter->stopped = 0;
+
+ /* Initialize the shared code (base driver) */
+ diag = e1000_setup_init_funcs(hw, TRUE);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
+ diag);
+ return -EIO;
+ }
+
+ /* init_mailbox_params */
+ hw->mbx.ops.init_params(hw);
+
+ /* Disable the interrupts for VF */
+ igbvf_intr_disable(hw);
+
+ diag = hw->mac.ops.reset_hw(hw);
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
+ hw->mac.rar_entry_count, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC "
+ "addresses",
+ ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+ return -ENOMEM;
+ }
+
+ /* Generate a random MAC address, if none was assigned by PF. */
+ if (is_zero_ether_addr(perm_addr)) {
+ eth_random_addr(perm_addr->addr_bytes);
+ diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
+ if (diag) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ return diag;
+ }
+ PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
+ PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x",
+ perm_addr->addr_bytes[0],
+ perm_addr->addr_bytes[1],
+ perm_addr->addr_bytes[2],
+ perm_addr->addr_bytes[3],
+ perm_addr->addr_bytes[4],
+ perm_addr->addr_bytes[5]);
+ }
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
+ "mac.type=%s",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id, "igb_mac_82576_vf");
+
+ return 0;
+}
+
+static int
+eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ if (adapter->stopped == 0)
+ igbvf_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
+static struct eth_driver rte_igb_pmd = {
+ .pci_drv = {
+ .name = "rte_igb_pmd",
+ .id_table = pci_id_igb_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_DETACHABLE,
+ },
+ .eth_dev_init = eth_igb_dev_init,
+ .eth_dev_uninit = eth_igb_dev_uninit,
+ .dev_private_size = sizeof(struct e1000_adapter),
+};
+
+/*
+ * virtual function driver struct
+ */
+static struct eth_driver rte_igbvf_pmd = {
+ .pci_drv = {
+ .name = "rte_igbvf_pmd",
+ .id_table = pci_id_igbvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+ },
+ .eth_dev_init = eth_igbvf_dev_init,
+ .eth_dev_uninit = eth_igbvf_dev_uninit,
+ .dev_private_size = sizeof(struct e1000_adapter),
+};
+
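+/*
+ * PF Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register itself as the [Poll Mode] Driver of PCI IGB devices.
+ */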
+static int
+rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
+{
+ rte_eth_driver_register(&rte_igb_pmd);
+ return 0;
+}
+
+static void
+igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ /* RCTL: enable VLAN filter since VMDq always use VLAN filter */
+ uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+/*
+ * VF Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
+ */
+static int
+rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rte_eth_driver_register(&rte_igbvf_pmd);
+ return (0);
+}
+
+static int
+igb_check_mq_mode(struct rte_eth_dev *dev)
+{
+ enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+ enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
+
+ if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
+ tx_mq_mode == ETH_MQ_TX_DCB ||
+ tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ PMD_INIT_LOG(ERR, "DCB mode is not supported.");
+ return -EINVAL;
+ }
+ if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+ /* Check multi-queue mode.
+ * To not break existing software we accept ETH_MQ_RX_NONE, as it
+ * might be used to turn off VLAN filtering.
+ */
+
+ if (rx_mq_mode == ETH_MQ_RX_NONE ||
+ rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+ } else {
+ /* Only support one queue on VFs.
+ * RSS together with SRIOV is not supported.
+ */
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " wrong mq_mode rx %d.",
+ rx_mq_mode);
+ return -EINVAL;
+ }
+ /* The TX mq mode is not used here, so it can be ignored. */
+ if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+ /* SRIOV only works in VMDq enable mode */
+ PMD_INIT_LOG(WARNING, "SRIOV is active,"
+ " TX mode %d is not supported. "
+ " Driver will behave as %d mode.",
+ tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
+ }
+
+ /* check valid queue number */
+ if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " only support one queue on VFs.");
+ return -EINVAL;
+ }
+ } else {
+ /* To not break software that sets an invalid mode, only display
+ * a warning when an invalid mode is used.
+ */
+ if (rx_mq_mode != ETH_MQ_RX_NONE &&
+ rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
+ rx_mq_mode != ETH_MQ_RX_RSS) {
+ /* RSS together with VMDq not supported*/
+ PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
+ rx_mq_mode);
+ return -EINVAL;
+ }
+
+ if (tx_mq_mode != ETH_MQ_TX_NONE &&
+ tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+ PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
+ " Due to txmode is meaningless in this"
+ " driver, just ignore.",
+ tx_mq_mode);
+ }
+ }
+ return 0;
+}
+
+static int
+eth_igb_configure(struct rte_eth_dev *dev)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* multiple queue mode checking */
+ ret = igb_check_mq_mode(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
+ ret);
+ return ret;
+ }
+
+ intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+static int
+eth_igb_start(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ int ret, mask;
+ uint32_t intr_vector = 0;
+ uint32_t ctrl_ext;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* disable uio/vfio intr/eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ /* Power up the phy. Needed to make the link go Up */
+ e1000_power_up_phy(hw);
+
+ /*
+ * Packet Buffer Allocation (PBA)
+ * Writing PBA sets the receive portion of the buffer
+ * the remainder is used for the transmit buffer.
+ */
+ if (hw->mac.type == e1000_82575) {
+ uint32_t pba;
+
+ pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
+ E1000_WRITE_REG(hw, E1000_PBA, pba);
+ }
+
+ /* Put the address into the Receive Address Array */
+ e1000_rar_set(hw, hw->mac.addr, 0);
+
+ /* Initialize the hardware */
+ if (igb_hardware_init(hw)) {
+ PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
+ return (-EIO);
+ }
+ adapter->stopped = 0;
+
+ E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ /* configure PF module if SRIOV enabled */
+ igb_pf_host_configure(dev);
+
+ /* check and configure queue intr-vector mapping */
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec\n", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
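+ /* intr_vec maps each Rx queue to its MSI-X vector; the entries are
+ * filled in by eth_igb_configure_msix_intr() below. */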
+
+ /* configure MSI-X for Rx interrupts */
+ eth_igb_configure_msix_intr(dev);
+
+ /* Configure for OS presence */
+ igb_init_manageability(hw);
+
+ eth_igb_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
+ ret = eth_igb_rx_init(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+ igb_dev_clear_queues(dev);
+ return ret;
+ }
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ /*
+ * VLAN Offload Settings
+ */
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ ETH_VLAN_EXTEND_MASK;
+ eth_igb_vlan_offload_set(dev, mask);
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ /* Enable VLAN filter since VMDq always use VLAN filter */
+ igb_vmdq_vlan_hw_filter_enable(dev);
+ }
+
+ if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
+ (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) {
+ /* Configure EITR with the maximum possible value (0xFFFF) */
+ E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
+ }
+
+ /* Setup link speed and duplex */
+ switch (dev->data->dev_conf.link_speed) {
+ case ETH_LINK_SPEED_AUTONEG:
+ if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+ hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+ else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
+ hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
+ else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
+ hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
+ else
+ goto error_invalid_config;
+ break;
+ case ETH_LINK_SPEED_10:
+ if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+ hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
+ else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
+ hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
+ else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
+ hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
+ else
+ goto error_invalid_config;
+ break;
+ case ETH_LINK_SPEED_100:
+ if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+ hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
+ else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
+ hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
+ else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
+ hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
+ else
+ goto error_invalid_config;
+ break;
+ case ETH_LINK_SPEED_1000:
+ if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
+ (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
+ hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ else
+ goto error_invalid_config;
+ break;
+ case ETH_LINK_SPEED_10000:
+ default:
+ goto error_invalid_config;
+ }
+ e1000_setup_link(hw);
+
+ if (rte_intr_allow_others(intr_handle)) {
+ /* check if lsc interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ eth_igb_lsc_interrupt_setup(dev);
+ } else {
+ rte_intr_callback_unregister(intr_handle,
+ eth_igb_interrupt_handler,
+ (void *)dev);
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ PMD_INIT_LOG(INFO, "lsc won't enable because of"
+ " no intr multiplex\n");
+ }
+
+ /* check if rxq interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.rxq != 0 &&
+ rte_intr_dp_is_en(intr_handle))
+ eth_igb_rxq_interrupt_setup(dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
+
+ /* resume enabled intr since hw reset */
+ igb_intr_enable(dev);
+
+ PMD_INIT_LOG(DEBUG, "<<");
+
+ return (0);
+
+error_invalid_config:
+ PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
+ dev->data->dev_conf.link_speed,
+ dev->data->dev_conf.link_duplex, dev->data->port_id);
+ igb_dev_clear_queues(dev);
+ return (-EINVAL);
+}
+
+/*********************************************************************
+ *
+ * This routine disables all traffic on the adapter by issuing a
+ * global reset on the MAC.
+ *
+ **********************************************************************/
+static void
+eth_igb_stop(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct rte_eth_link link;
+ struct e1000_flex_filter *p_flex;
+ struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
+ struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
+ igb_intr_disable(hw);
+
+ /* disable intr eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ igb_pf_reset_hw(hw);
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+ /* Set bit for Go Link disconnect */
+ if (hw->mac.type >= e1000_82580) {
+ uint32_t phpm_reg;
+
+ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+ phpm_reg |= E1000_82580_PM_GO_LINKD;
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ }
+
+ /* Power down the phy. Needed to make the link go Down */
+ if (hw->phy.media_type == e1000_media_type_copper)
+ e1000_power_down_phy(hw);
+ else
+ e1000_shutdown_fiber_serdes_link(hw);
+
+ igb_dev_clear_queues(dev);
+
+ /* clear the recorded link status */
+ memset(&link, 0, sizeof(link));
+ rte_igb_dev_atomic_write_link_status(dev, &link);
+
+ /* Remove all flex filters of the device */
+ while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
+ TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
+ rte_free(p_flex);
+ }
+ filter_info->flex_mask = 0;
+
+ /* Remove all ntuple filters of the device */
+ for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
+ p_5tuple != NULL; p_5tuple = p_5tuple_next) {
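+ /* Fetch the successor before TAILQ_REMOVE unlinks the node and
+ * rte_free releases it. */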
+ p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
+ TAILQ_REMOVE(&filter_info->fivetuple_list,
+ p_5tuple, entries);
+ rte_free(p_5tuple);
+ }
+ filter_info->fivetuple_mask = 0;
+ for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
+ p_2tuple != NULL; p_2tuple = p_2tuple_next) {
+ p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
+ TAILQ_REMOVE(&filter_info->twotuple_list,
+ p_2tuple, entries);
+ rte_free(p_2tuple);
+ }
+ filter_info->twotuple_mask = 0;
+
+ if (!rte_intr_allow_others(intr_handle))
+ /* resume to the default handler */
+ rte_intr_callback_register(intr_handle,
+ eth_igb_interrupt_handler,
+ (void *)dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+}
+
+static void
+eth_igb_close(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
+ struct rte_eth_link link;
+ struct rte_pci_device *pci_dev;
+
+ eth_igb_stop(dev);
+ adapter->stopped = 1;
+
+ e1000_phy_hw_reset(hw);
+ igb_release_manageability(hw);
+ igb_hw_control_release(hw);
+
+ /* Clear bit for Go Link disconnect */
+ if (hw->mac.type >= e1000_82580) {
+ uint32_t phpm_reg;
+
+ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+ phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ }
+
+ igb_dev_free_queues(dev);
+
+ pci_dev = dev->pci_dev;
+ if (pci_dev->intr_handle.intr_vec) {
+ rte_free(pci_dev->intr_handle.intr_vec);
+ pci_dev->intr_handle.intr_vec = NULL;
+ }
+
+ memset(&link, 0, sizeof(link));
+ rte_igb_dev_atomic_write_link_status(dev, &link);
+}
+
+static int
+igb_get_rx_buffer_size(struct e1000_hw *hw)
+{
+ uint32_t rx_buf_size;
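+ /* RXPBS/PBA report the Rx packet buffer size in KB; the shift by
+ * 10 converts it to bytes. */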
+ if (hw->mac.type == e1000_82576) {
+ rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
+ } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
+ /* PBS needs to be translated according to a lookup table */
+ rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
+ rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
+ rx_buf_size = (rx_buf_size << 10);
+ } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
+ rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
+ } else {
+ rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
+ }
+
+ return rx_buf_size;
+}
+
+/*********************************************************************
+ *
+ * Initialize the hardware
+ *
+ **********************************************************************/
+static int
+igb_hardware_init(struct e1000_hw *hw)
+{
+ uint32_t rx_buf_size;
+ int diag;
+
+ /* Let the firmware know the OS is in control */
+ igb_hw_control_acquire(hw);
+
+ /*
+ * These parameters control the automatic generation (Tx) and
+ * response (Rx) to Ethernet PAUSE frames.
+ * - High water mark should allow for at least two standard size (1518)
+ * frames to be received after sending an XOFF.
+ * - Low water mark works best when it is very near the high water mark.
+ * This allows the receiver to restart by sending XON when it has
+ * drained a bit. Here we use an arbitrary value of 1500 which will
+ * restart after one full frame is pulled from the buffer. There
+ * could be several smaller frames in the buffer and if so they will
+ * not trigger the XON until their total number reduces the buffer
+ * by 1500.
+ * - The pause time is fairly large at 1000 x 512ns = 512 usec.
+ */
+ rx_buf_size = igb_get_rx_buffer_size(hw);
+
+ hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
+ hw->fc.low_water = hw->fc.high_water - 1500;
+ hw->fc.pause_time = IGB_FC_PAUSE_TIME;
+ hw->fc.send_xon = 1;
+
+ /* Set Flow control, use the tunable location if sane */
+ if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
+ hw->fc.requested_mode = igb_fc_setting;
+ else
+ hw->fc.requested_mode = e1000_fc_none;
+
+ /* Issue a global reset */
+ igb_pf_reset_hw(hw);
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+ diag = e1000_init_hw(hw);
+ if (diag < 0)
+ return (diag);
+
+ E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
+ e1000_get_phy_info(hw);
+ e1000_check_for_link(hw);
+
+ return (0);
+}
+
+/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
+static void
+igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
+{
+ int pause_frames;
+
+ uint64_t old_gprc = stats->gprc;
+ uint64_t old_gptc = stats->gptc;
+ uint64_t old_tpr = stats->tpr;
+ uint64_t old_tpt = stats->tpt;
+ uint64_t old_rpthc = stats->rpthc;
+ uint64_t old_hgptc = stats->hgptc;
+
+ if(hw->phy.media_type == e1000_media_type_copper ||
+ (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ stats->symerrs +=
+ E1000_READ_REG(hw,E1000_SYMERRS);
+ stats->sec += E1000_READ_REG(hw, E1000_SEC);
+ }
+
+ stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
+ stats->mpc += E1000_READ_REG(hw, E1000_MPC);
+ stats->scc += E1000_READ_REG(hw, E1000_SCC);
+ stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
+
+ stats->mcc += E1000_READ_REG(hw, E1000_MCC);
+ stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
+ stats->colc += E1000_READ_REG(hw, E1000_COLC);
+ stats->dc += E1000_READ_REG(hw, E1000_DC);
+ stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
+ stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
+ stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
+ /*
+ ** For watchdog management we need to know if we have been
+ ** paused during the last interval, so capture that here.
+ */
+ pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
+ stats->xoffrxc += pause_frames;
+ stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
+ stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
+ stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
+ stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
+ stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
+ stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
+ stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
+ stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
+ stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
+ stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
+ stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
+ stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
+
+ /* For the 64-bit byte counters the low dword must be read first. */
+ /* Both registers clear on the read of the high dword */
+
+ /* Workaround CRC bytes included in size, take away 4 bytes/packet */
+ stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
+ stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
+ stats->gorc -= (stats->gprc - old_gprc) * ETHER_CRC_LEN;
+ stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
+ stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
+ stats->gotc -= (stats->gptc - old_gptc) * ETHER_CRC_LEN;
+
+ stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
+ stats->ruc += E1000_READ_REG(hw, E1000_RUC);
+ stats->rfc += E1000_READ_REG(hw, E1000_RFC);
+ stats->roc += E1000_READ_REG(hw, E1000_ROC);
+ stats->rjc += E1000_READ_REG(hw, E1000_RJC);
+
+ stats->tpr += E1000_READ_REG(hw, E1000_TPR);
+ stats->tpt += E1000_READ_REG(hw, E1000_TPT);
+
+ stats->tor += E1000_READ_REG(hw, E1000_TORL);
+ stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
+ stats->tor -= (stats->tpr - old_tpr) * ETHER_CRC_LEN;
+ stats->tot += E1000_READ_REG(hw, E1000_TOTL);
+ stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);
+ stats->tot -= (stats->tpt - old_tpt) * ETHER_CRC_LEN;
+
+ stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
+ stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
+ stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
+ stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
+ stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
+ stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
+ stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
+ stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
+
+ /* Interrupt Counts */
+
+ stats->iac += E1000_READ_REG(hw, E1000_IAC);
+ stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
+ stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
+ stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
+ stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
+ stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
+ stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
+ stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
+ stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
+
+ /* Host to Card Statistics */
+
+ stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
+ stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
+ stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
+ stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
+ stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
+ stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
+ stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
+ stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
+ stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
+ stats->hgorc -= (stats->rpthc - old_rpthc) * ETHER_CRC_LEN;
+ stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
+ stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
+ stats->hgotc -= (stats->hgptc - old_hgptc) * ETHER_CRC_LEN;
+ stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
+ stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
+ stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
+
+ stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
+ stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
+ stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
+ stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
+ stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
+ stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
+}
+
+static void
+eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_hw_stats *stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ igb_read_stats_registers(hw, stats);
+
+ if (rte_stats == NULL)
+ return;
+
+ /* Rx Errors */
+ rte_stats->imissed = stats->mpc;
+ rte_stats->ierrors = stats->crcerrs +
+ stats->rlec + stats->ruc + stats->roc +
+ rte_stats->imissed +
+ stats->rxerrc + stats->algnerrc + stats->cexterr;
+
+ /* Tx Errors */
+ rte_stats->oerrors = stats->ecol + stats->latecol;
+
+ rte_stats->ipackets = stats->gprc;
+ rte_stats->opackets = stats->gptc;
+ rte_stats->ibytes = stats->gorc;
+ rte_stats->obytes = stats->gotc;
+}
+
+static void
+eth_igb_stats_reset(struct rte_eth_dev *dev)
+{
+ struct e1000_hw_stats *hw_stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* HW registers are cleared on read */
+ eth_igb_stats_get(dev, NULL);
+
+ /* Reset software totals */
+ memset(hw_stats, 0, sizeof(*hw_stats));
+}
+
+static void
+eth_igb_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct e1000_hw_stats *stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* HW registers are cleared on read */
+ eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS);
+
+ /* Reset software totals */
+ memset(stats, 0, sizeof(*stats));
+}
+
+static int
+eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ unsigned n)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_hw_stats *hw_stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ unsigned i;
+
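+ /* Sizing handshake: a caller passing a table smaller than
+ * IGB_NB_XSTATS just gets the required count back, without any
+ * register access. */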
+ if (n < IGB_NB_XSTATS)
+ return IGB_NB_XSTATS;
+
+ igb_read_stats_registers(hw, hw_stats);
+
+ /* If this is a reset xstats is NULL, and we have cleared the
+ * registers by reading them.
+ */
+ if (!xstats)
+ return 0;
+
+ /* Extended stats */
+ for (i = 0; i < IGB_NB_XSTATS; i++) {
+ snprintf(xstats[i].name, sizeof(xstats[i].name),
+ "%s", rte_igb_stats_strings[i].name);
+ xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_igb_stats_strings[i].offset);
+ }
+
+ return IGB_NB_XSTATS;
+}
+
+static void
+igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
+{
+ /* Good Rx packets, include VF loopback */
+ UPDATE_VF_STAT(E1000_VFGPRC,
+ hw_stats->last_gprc, hw_stats->gprc);
+
+ /* Good Rx octets, include VF loopback */
+ UPDATE_VF_STAT(E1000_VFGORC,
+ hw_stats->last_gorc, hw_stats->gorc);
+
+ /* Good Tx packets, include VF loopback */
+ UPDATE_VF_STAT(E1000_VFGPTC,
+ hw_stats->last_gptc, hw_stats->gptc);
+
+ /* Good Tx octets, include VF loopback */
+ UPDATE_VF_STAT(E1000_VFGOTC,
+ hw_stats->last_gotc, hw_stats->gotc);
+
+ /* Rx Multicast packets */
+ UPDATE_VF_STAT(E1000_VFMPRC,
+ hw_stats->last_mprc, hw_stats->mprc);
+
+ /* Good Rx loopback packets */
+ UPDATE_VF_STAT(E1000_VFGPRLBC,
+ hw_stats->last_gprlbc, hw_stats->gprlbc);
+
+ /* Good Rx loopback octets */
+ UPDATE_VF_STAT(E1000_VFGORLBC,
+ hw_stats->last_gorlbc, hw_stats->gorlbc);
+
+ /* Good Tx loopback packets */
+ UPDATE_VF_STAT(E1000_VFGPTLBC,
+ hw_stats->last_gptlbc, hw_stats->gptlbc);
+
+ /* Good Tx loopback octets */
+ UPDATE_VF_STAT(E1000_VFGOTLBC,
+ hw_stats->last_gotlbc, hw_stats->gotlbc);
+}
+
+static int
+eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ unsigned n)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ unsigned i;
+
+ if (n < IGBVF_NB_XSTATS)
+ return IGBVF_NB_XSTATS;
+
+ igbvf_read_stats_registers(hw, hw_stats);
+
+ if (!xstats)
+ return 0;
+
+ for (i = 0; i < IGBVF_NB_XSTATS; i++) {
+ snprintf(xstats[i].name, sizeof(xstats[i].name), "%s",
+ rte_igbvf_stats_strings[i].name);
+ xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_igbvf_stats_strings[i].offset);
+ }
+
+ return IGBVF_NB_XSTATS;
+}
+
+static void
+eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ igbvf_read_stats_registers(hw, hw_stats);
+
+ if (rte_stats == NULL)
+ return;
+
+ rte_stats->ipackets = hw_stats->gprc;
+ rte_stats->ibytes = hw_stats->gorc;
+ rte_stats->opackets = hw_stats->gptc;
+ rte_stats->obytes = hw_stats->gotc;
+ rte_stats->imcasts = hw_stats->mprc;
+ rte_stats->ilbpackets = hw_stats->gprlbc;
+ rte_stats->ilbbytes = hw_stats->gorlbc;
+ rte_stats->olbpackets = hw_stats->gptlbc;
+ rte_stats->olbbytes = hw_stats->gotlbc;
+}
+
+static void
+eth_igbvf_stats_reset(struct rte_eth_dev *dev)
+{
+ struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* Sync HW register to the last stats */
+ eth_igbvf_stats_get(dev, NULL);
+
+ /* reset HW current stats */
+ memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
+ offsetof(struct e1000_vf_stats, gprc));
+}
+
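+/*
+ * Report device capabilities. Queue counts and VMDq pool limits depend on
+ * the MAC type (82575/82576/82580/i350/i354/i210/i211), as enumerated in
+ * the switch below.
+ */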
+static void
+eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
+ dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
+ dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ dev_info->max_rx_queues = 4;
+ dev_info->max_tx_queues = 4;
+ dev_info->max_vmdq_pools = 0;
+ break;
+
+ case e1000_82576:
+ dev_info->max_rx_queues = 16;
+ dev_info->max_tx_queues = 16;
+ dev_info->max_vmdq_pools = ETH_8_POOLS;
+ dev_info->vmdq_queue_num = 16;
+ break;
+
+ case e1000_82580:
+ dev_info->max_rx_queues = 8;
+ dev_info->max_tx_queues = 8;
+ dev_info->max_vmdq_pools = ETH_8_POOLS;
+ dev_info->vmdq_queue_num = 8;
+ break;
+
+ case e1000_i350:
+ dev_info->max_rx_queues = 8;
+ dev_info->max_tx_queues = 8;
+ dev_info->max_vmdq_pools = ETH_8_POOLS;
+ dev_info->vmdq_queue_num = 8;
+ break;
+
+ case e1000_i354:
+ dev_info->max_rx_queues = 8;
+ dev_info->max_tx_queues = 8;
+ break;
+
+ case e1000_i210:
+ dev_info->max_rx_queues = 4;
+ dev_info->max_tx_queues = 4;
+ dev_info->max_vmdq_pools = 0;
+ break;
+
+ case e1000_i211:
+ dev_info->max_rx_queues = 2;
+ dev_info->max_tx_queues = 2;
+ dev_info->max_vmdq_pools = 0;
+ break;
+
+ default:
+ /* Should not happen */
+ break;
+ }
+ dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = IGB_DEFAULT_RX_PTHRESH,
+ .hthresh = IGB_DEFAULT_RX_HTHRESH,
+ .wthresh = IGB_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = IGB_DEFAULT_TX_PTHRESH,
+ .hthresh = IGB_DEFAULT_TX_HTHRESH,
+ .wthresh = IGB_DEFAULT_TX_WTHRESH,
+ },
+ .txq_flags = 0,
+ };
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+}
+
+static void
+eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
+ dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
+ dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+ switch (hw->mac.type) {
+ case e1000_vfadapt:
+ dev_info->max_rx_queues = 2;
+ dev_info->max_tx_queues = 2;
+ break;
+ case e1000_vfadapt_i350:
+ dev_info->max_rx_queues = 1;
+ dev_info->max_tx_queues = 1;
+ break;
+ default:
+ /* Should not happen */
+ break;
+ }
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = IGB_DEFAULT_RX_PTHRESH,
+ .hthresh = IGB_DEFAULT_RX_HTHRESH,
+ .wthresh = IGB_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = IGB_DEFAULT_TX_PTHRESH,
+ .hthresh = IGB_DEFAULT_TX_HTHRESH,
+ .wthresh = IGB_DEFAULT_TX_WTHRESH,
+ },
+ .txq_flags = 0,
+ };
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link, old;
+ int link_check, count;
+
+ link_check = 0;
+ hw->mac.get_link_status = 1;
+
+ /* possible wait-to-complete in up to 9 seconds */
+ for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
+ /* Read the real link status */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ /* Do the work to read phy */
+ e1000_check_for_link(hw);
+ link_check = !hw->mac.get_link_status;
+ break;
+
+ case e1000_media_type_fiber:
+ e1000_check_for_link(hw);
+ link_check = (E1000_READ_REG(hw, E1000_STATUS) &
+ E1000_STATUS_LU);
+ break;
+
+ case e1000_media_type_internal_serdes:
+ e1000_check_for_link(hw);
+ link_check = hw->mac.serdes_has_link;
+ break;
+
+ /* VF device is type_unknown */
+ case e1000_media_type_unknown:
+ eth_igbvf_link_update(hw);
+ link_check = !hw->mac.get_link_status;
+ break;
+
+ default:
+ break;
+ }
+ if (link_check || wait_to_complete == 0)
+ break;
+ rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
+ }
+ memset(&link, 0, sizeof(link));
+ rte_igb_dev_atomic_read_link_status(dev, &link);
+ old = link;
+
+ /* Now we check if a transition has happened */
+ if (link_check) {
+ hw->mac.ops.get_link_up_info(hw, &link.link_speed,
+ &link.link_duplex);
+ link.link_status = 1;
+ } else if (!link_check) {
+ link.link_speed = 0;
+ link.link_duplex = 0;
+ link.link_status = 0;
+ }
+ rte_igb_dev_atomic_write_link_status(dev, &link);
+
+ /* not changed */
+ if (old.link_status == link.link_status)
+ return -1;
+
+ /* changed */
+ return 0;
+}
+
+/*
+ * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means
+ * that the driver is loaded.
+ */
+static void
+igb_hw_control_acquire(struct e1000_hw *hw)
+{
+ uint32_t ctrl_ext;
+
+ /* Let firmware know the driver has taken over */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/*
+ * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded.
+ */
+static void
+igb_hw_control_release(struct e1000_hw *hw)
+{
+ uint32_t ctrl_ext;
+
+ /* Let firmware take over control of h/w */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/*
+ * Bit of a misnomer: what this really means is to enable OS management
+ * of the system, i.e. to disable special hardware management features.
+ */
+static void
+igb_init_manageability(struct e1000_hw *hw)
+{
+ if (e1000_enable_mng_pass_thru(hw)) {
+ uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
+ uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
+
+ /* disable hardware interception of ARP */
+ manc &= ~(E1000_MANC_ARP_EN);
+
+ /* enable receiving management packets to the host */
+ manc |= E1000_MANC_EN_MNG2HOST;
+ manc2h |= 1 << 5; /* Mng Port 623 */
+ manc2h |= 1 << 6; /* Mng Port 664 */
+ E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
+ E1000_WRITE_REG(hw, E1000_MANC, manc);
+ }
+}
+
+static void
+igb_release_manageability(struct e1000_hw *hw)
+{
+ if (e1000_enable_mng_pass_thru(hw)) {
+ uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
+
+ manc |= E1000_MANC_ARP_EN;
+ manc &= ~E1000_MANC_EN_MNG2HOST;
+
+ E1000_WRITE_REG(hw, E1000_MANC, manc);
+ }
+}
+
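+/*
+ * Promiscuous/multicast control below toggles the UPE (unicast promiscuous)
+ * and MPE (multicast promiscuous) bits of the receive control register RCTL.
+ */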
+static void
+eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= (~E1000_RCTL_UPE);
+ if (dev->data->all_multicast == 1)
+ rctl |= E1000_RCTL_MPE;
+ else
+ rctl &= (~E1000_RCTL_MPE);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_MPE;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ if (dev->data->promiscuous == 1)
+ return; /* must remain in all_multicast mode */
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= (~E1000_RCTL_MPE);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
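+/*
+ * The VLAN filter table (VFTA) is a bit array indexed by VLAN id: the upper
+ * bits of the id select a 32-bit table entry and the low 5 bits select the
+ * bit within it. A shadow copy is kept so the table can be restored later.
+ */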
+static int
+eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t vfta;
+ uint32_t vid_idx;
+ uint32_t vid_bit;
+
+ vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
+ E1000_VFTA_ENTRY_MASK);
+ vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
+ vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
+ if (on)
+ vfta |= vid_bit;
+ else
+ vfta &= ~vid_bit;
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
+
+ /* update local VFTA copy */
+ shadow_vfta->vfta[vid_idx] = vfta;
+
+ return 0;
+}
+
+static void
+eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg = ETHER_TYPE_VLAN ;
+
+ reg |= (tpid << 16);
+ E1000_WRITE_REG(hw, E1000_VET, reg);
+}
+
+static void
+igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* Filter Table Disable */
+ reg = E1000_READ_REG(hw, E1000_RCTL);
+ reg &= ~E1000_RCTL_CFIEN;
+ reg &= ~E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
+}
+
+static void
+igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t reg;
+ int i;
+
+ /* Filter Table Enable, CFI not used for packet acceptance */
+ reg = E1000_READ_REG(hw, E1000_RCTL);
+ reg &= ~E1000_RCTL_CFIEN;
+ reg |= E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
+
+ /* restore VFTA table */
+ for (i = 0; i < IGB_VFTA_SIZE; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
+}
+
+static void
+igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* VLAN Mode Disable */
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg &= ~E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+}
+
+static void
+igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* VLAN Mode Enable */
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg |= E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+}
+
+static void
+igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* CTRL_EXT: Extended VLAN */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* Update maximum packet length */
+ if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ VLAN_TAG_SIZE);
+}
+
+static void
+igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* CTRL_EXT: Extended VLAN */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg |= E1000_CTRL_EXT_EXTEND_VLAN;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* Update maximum packet length */
+ if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * VLAN_TAG_SIZE);
+}
+
+static void
+eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ if(mask & ETH_VLAN_STRIP_MASK){
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ igb_vlan_hw_strip_enable(dev);
+ else
+ igb_vlan_hw_strip_disable(dev);
+ }
+
+ if(mask & ETH_VLAN_FILTER_MASK){
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ igb_vlan_hw_filter_enable(dev);
+ else
+ igb_vlan_hw_filter_disable(dev);
+ }
+
+ if(mask & ETH_VLAN_EXTEND_MASK){
+ if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ igb_vlan_hw_extend_enable(dev);
+ else
+ igb_vlan_hw_extend_disable(dev);
+ }
+}
+
+
+/**
+ * It adds the link status change (LSC) cause to the interrupt mask;
+ * the interrupt itself is enabled later.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ intr->mask |= E1000_ICR_LSC;
+
+ return 0;
+}
+
+/* It clears the interrupt causes and enables the Rx queue interrupts.
+ * It is called only once, during NIC initialization.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+ uint32_t mask, regval;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_dev_info dev_info;
+
+ memset(&dev_info, 0, sizeof(dev_info));
+ eth_igb_infos_get(dev, &dev_info);
+
+ mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues);
+ regval = E1000_READ_REG(hw, E1000_EIMS);
+ E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
+
+ return 0;
+}
+
+/*
+ * It reads ICR to get the interrupt causes, checks them, and sets bit
+ * flags (link status update, mailbox) for later processing.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t icr;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ igb_intr_disable(hw);
+
+ /* read-on-clear nic registers here */
+ icr = E1000_READ_REG(hw, E1000_ICR);
+
+ intr->flags = 0;
+ if (icr & E1000_ICR_LSC) {
+ intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+ }
+
+ if (icr & E1000_ICR_VMMB)
+ intr->flags |= E1000_FLAG_MAILBOX;
+
+ return 0;
+}
+
+/*
+ * It executes link_update after knowing an interrupt is present.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_interrupt_action(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ uint32_t tctl, rctl;
+ struct rte_eth_link link;
+ int ret;
+
+ if (intr->flags & E1000_FLAG_MAILBOX) {
+ igb_pf_mbx_process(dev);
+ intr->flags &= ~E1000_FLAG_MAILBOX;
+ }
+
+ igb_intr_enable(dev);
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
+
+ if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
+ intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
+
+ /* set get_link_status to check register later */
+ hw->mac.get_link_status = 1;
+ ret = eth_igb_link_update(dev, 0);
+
+ /* check if link has changed */
+ if (ret < 0)
+ return 0;
+
+ memset(&link, 0, sizeof(link));
+ rte_igb_dev_atomic_read_link_status(dev, &link);
+ if (link.link_status) {
+ PMD_INIT_LOG(INFO,
+ " Port %d: Link Up - speed %u Mbps - %s",
+ dev->data->port_id,
+ (unsigned)link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ } else {
+ PMD_INIT_LOG(INFO, " Port %d: Link Down",
+ dev->data->port_id);
+ }
+
+ PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
+ dev->pci_dev->addr.domain,
+ dev->pci_dev->addr.bus,
+ dev->pci_dev->addr.devid,
+ dev->pci_dev->addr.function);
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ if (link.link_status) {
+ /* enable Tx/Rx */
+ tctl |= E1000_TCTL_EN;
+ rctl |= E1000_RCTL_EN;
+ } else {
+ /* disable Tx/Rx */
+ tctl &= ~E1000_TCTL_EN;
+ rctl &= ~E1000_RCTL_EN;
+ }
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+ }
+
+ return 0;
+}
+
+/**
+ * Interrupt handler which shall be registered at first.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ eth_igb_interrupt_get_status(dev);
+ eth_igb_interrupt_action(dev);
+}
+
+static int
+eth_igb_led_on(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+}
+
+static int
+eth_igb_led_off(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+}
+
+static int
+eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct e1000_hw *hw;
+ uint32_t ctrl;
+ int tx_pause;
+ int rx_pause;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ fc_conf->pause_time = hw->fc.pause_time;
+ fc_conf->high_water = hw->fc.high_water;
+ fc_conf->low_water = hw->fc.low_water;
+ fc_conf->send_xon = hw->fc.send_xon;
+ fc_conf->autoneg = hw->mac.autoneg;
+
+ /*
+ * Return rx_pause and tx_pause status according to actual setting of
+ * the TFCE and RFCE bits in the CTRL register.
+ */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ if (ctrl & E1000_CTRL_TFCE)
+ tx_pause = 1;
+ else
+ tx_pause = 0;
+
+ if (ctrl & E1000_CTRL_RFCE)
+ rx_pause = 1;
+ else
+ rx_pause = 0;
+
+ if (rx_pause && tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+ return 0;
+}
+
+static int
+eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct e1000_hw *hw;
+ int err;
+ enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
+ e1000_fc_none,
+ e1000_fc_rx_pause,
+ e1000_fc_tx_pause,
+ e1000_fc_full
+ };
+ uint32_t rx_buf_size;
+ uint32_t max_high_water;
+ uint32_t rctl;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (fc_conf->autoneg != hw->mac.autoneg)
+ return -ENOTSUP;
+ rx_buf_size = igb_get_rx_buffer_size(hw);
+ PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+ /* At least reserve one Ethernet frame for watermark */
+ max_high_water = rx_buf_size - ETHER_MAX_LEN;
+ if ((fc_conf->high_water > max_high_water) ||
+ (fc_conf->high_water < fc_conf->low_water)) {
+ PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
+ PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
+ return (-EINVAL);
+ }
+
+ hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
+ hw->fc.pause_time = fc_conf->pause_time;
+ hw->fc.high_water = fc_conf->high_water;
+ hw->fc.low_water = fc_conf->low_water;
+ hw->fc.send_xon = fc_conf->send_xon;
+
+ err = e1000_setup_link_generic(hw);
+ if (err == E1000_SUCCESS) {
+
+ /* check if we want to forward MAC frames - driver doesn't have native
+ * capability to do that, so we'll write the registers ourselves */
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* set or clear MFLCN.PMCF bit depending on configuration */
+ if (fc_conf->mac_ctrl_frame_fwd != 0)
+ rctl |= E1000_RCTL_PMCF;
+ else
+ rctl &= ~E1000_RCTL_PMCF;
+
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+ }
+
+ PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
+ return (-EIO);
+}
+
+#define E1000_RAH_POOLSEL_SHIFT (18)
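+/*
+ * Program a receive address register (RAR) entry and tag it with the VMDq
+ * pool by setting the corresponding pool-select bit in the RAH register.
+ */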
+static void
+eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, __rte_unused uint32_t pool)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rah;
+
+ e1000_rar_set(hw, mac_addr->addr_bytes, index);
+ rah = E1000_READ_REG(hw, E1000_RAH(index));
+ rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
+ E1000_WRITE_REG(hw, E1000_RAH(index), rah);
+}
+
+static void
+eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
+{
+ uint8_t addr[ETHER_ADDR_LEN];
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ memset(addr, 0, sizeof(addr));
+
+ e1000_rar_set(hw, addr, index);
+}
+
+static void
+eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
+{
+ eth_igb_rar_clear(dev, 0);
+
+ eth_igb_rar_set(dev, (void *)addr, 0, 0);
+}
+/*
+ * Virtual Function operations
+ */
+static void
+igbvf_intr_disable(struct e1000_hw *hw)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ /* Clear interrupt mask to stop interrupts from being generated */
+ E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
+
+ E1000_WRITE_FLUSH(hw);
+}
+
+static void
+igbvf_stop_adapter(struct rte_eth_dev *dev)
+{
+ u32 reg_val;
+ u16 i;
+ struct rte_eth_dev_info dev_info;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ memset(&dev_info, 0, sizeof(dev_info));
+ eth_igbvf_infos_get(dev, &dev_info);
+
+ /* Clear interrupt mask to stop interrupts from being generated */
+ igbvf_intr_disable(hw);
+
+ /* Clear any pending interrupts, flush previous writes */
+ E1000_READ_REG(hw, E1000_EICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < dev_info.max_tx_queues; i++)
+ E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < dev_info.max_rx_queues; i++) {
+ reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
+ reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
+ while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
+ ;
+ }
+
+ /* flush all queue disables */
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(2);
+}
+
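+/*
+ * VF link check: the VF cannot read the PHY directly, so it relies on the
+ * PF mailbox (reset detection) plus the STATUS.LU bit to decide whether
+ * get_link_status can be cleared.
+ */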
+static int eth_igbvf_link_update(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ struct e1000_mac_info *mac = &hw->mac;
+ int ret_val = E1000_SUCCESS;
+
+ PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
+
+ /*
+ * We only want to run this if a reset has been asserted.
+ * In this case that could mean a link change, a device reset,
+ * or a virtual function reset.
+ */
+
+ /* If we were hit with a reset or timeout, drop the link */
+ if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = TRUE;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
+ goto out;
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link */
+ mac->get_link_status = FALSE;
+
+out:
+ return ret_val;
+}
+
+
+static int
+igbvf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf* conf = &dev->data->dev_conf;
+
+ PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
+ dev->data->port_id);
+
+ /*
+ * VF has no ability to enable/disable HW CRC
+ * Keep the persistent behavior the same as Host PF
+ */
+#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
+ if (!conf->rxmode.hw_strip_crc) {
+ PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
+ conf->rxmode.hw_strip_crc = 1;
+ }
+#else
+ if (conf->rxmode.hw_strip_crc) {
+ PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
+ conf->rxmode.hw_strip_crc = 0;
+ }
+#endif
+
+ return 0;
+}
+
+static int
+igbvf_dev_start(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->mac.ops.reset_hw(hw);
+ adapter->stopped = 0;
+
+ /* Set all vfta */
+ igbvf_set_vfta_all(dev,1);
+
+ eth_igbvf_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
+ ret = eth_igbvf_rx_init(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+ igb_dev_clear_queues(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+igbvf_dev_stop(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ igbvf_stop_adapter(dev);
+
+ /*
+ * Clear what we set, but keep shadow_vfta so it can be
+ * restored after the device starts
+ */
+ igbvf_set_vfta_all(dev,0);
+
+ igb_dev_clear_queues(dev);
+}
+
+static void
+igbvf_dev_close(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ e1000_reset_hw(hw);
+
+ igbvf_dev_stop(dev);
+ adapter->stopped = 1;
+ igb_dev_free_queues(dev);
+}
+
+static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ uint32_t msgbuf[2];
+ s32 err;
+
+ /* After setting a VLAN, VLAN stripping is also enabled in the igb driver */
+ msgbuf[0] = E1000_VF_SET_VLAN;
+ msgbuf[1] = vid;
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ if (on)
+ msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
+
+ err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
+ if (err)
+ goto mbx_err;
+
+ err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
+ if (err)
+ goto mbx_err;
+
+ msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
+ if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))
+ err = -EINVAL;
+
+mbx_err:
+ return err;
+}
+
+static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ int i = 0, j = 0, vfta = 0, mask = 1;
+
+ for (i = 0; i < IGB_VFTA_SIZE; i++){
+ vfta = shadow_vfta->vfta[i];
+ if(vfta){
+ mask = 1;
+ for (j = 0; j < 32; j++){
+ if(vfta & mask)
+ igbvf_set_vfta(hw,
+ (uint16_t)((i<<5)+j), on);
+ mask<<=1;
+ }
+ }
+ }
+
+}
+
+static int
+igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t vid_idx = 0;
+ uint32_t vid_bit = 0;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* vind is not used in the VF driver; set to 0 (see ixgbe_set_vfta_vf) */
+ ret = igbvf_set_vfta(hw, vlan_id, !!on);
+ if(ret){
+ PMD_INIT_LOG(ERR, "Unable to set VF vlan");
+ return ret;
+ }
+ vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
+ vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
+
+ /* Save what we set so it can be restored after a device reset */
+ if (on)
+ shadow_vfta->vfta[vid_idx] |= vid_bit;
+ else
+ shadow_vfta->vfta[vid_idx] &= ~vid_bit;
+
+ return 0;
+}
+
+static void
+igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* index is not used by rar_set() */
+ hw->mac.ops.rar_set(hw, (void *)addr, 0);
+}
+
+
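+/*
+ * The RSS redirection table (RETA) holds 128 one-byte queue entries packed
+ * four to a 32-bit register, so entries are updated in groups of four; the
+ * per-entry bits of reta_conf[].mask decide which bytes of a group change.
+ */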
+static int
+eth_igb_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint8_t i, j, mask;
+ uint32_t reta, r;
+ uint16_t idx, shift;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ IGB_4_BIT_MASK);
+ if (!mask)
+ continue;
+ if (mask == IGB_4_BIT_MASK)
+ r = 0;
+ else
+ r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
+ for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
+ if (mask & (0x1 << j))
+ reta |= reta_conf[idx].reta[shift + j] <<
+ (CHAR_BIT * j);
+ else
+ reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
+ }
+ E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
+ }
+
+ return 0;
+}
+
+static int
+eth_igb_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint8_t i, j, mask;
+ uint32_t reta;
+ uint16_t idx, shift;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ IGB_4_BIT_MASK);
+ if (!mask)
+ continue;
+ reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
+ for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
+ if (mask & (0x1 << j))
+ reta_conf[idx].reta[shift + j] =
+ ((reta >> (CHAR_BIT * j)) &
+ IGB_8_BIT_MASK);
+ }
+ }
+
+ return 0;
+}
+
+#define MAC_TYPE_FILTER_SUP(type) do {\
+ if ((type) != e1000_82580 && (type) != e1000_i350 &&\
+ (type) != e1000_82576)\
+ return -ENOTSUP;\
+} while (0)
+
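+/*
+ * The SYN filter steers TCP SYN packets to a dedicated Rx queue via the
+ * SYNQF register; filter->hig_pri maps to the RFCTL.SYNQFP priority bit.
+ */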
+static int
+eth_igb_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf, rfctl;
+
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
+
+ if (add) {
+ if (synqf & E1000_SYN_FILTER_ENABLE)
+ return -EINVAL;
+
+ synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
+ E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
+
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+ if (filter->hig_pri)
+ rfctl |= E1000_RFCTL_SYNQFP;
+ else
+ rfctl &= ~E1000_RFCTL_SYNQFP;
+
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+ } else {
+ if (!(synqf & E1000_SYN_FILTER_ENABLE))
+ return -ENOENT;
+ synqf = 0;
+ }
+
+ E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
+ E1000_WRITE_FLUSH(hw);
+ return 0;
+}
+
+static int
+eth_igb_syn_filter_get(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf, rfctl;
+
+ synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
+ if (synqf & E1000_SYN_FILTER_ENABLE) {
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+ filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
+ filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
+ E1000_SYN_FILTER_QUEUE_SHIFT);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int
+eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = eth_igb_syn_filter_set(dev,
+ (struct rte_eth_syn_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = eth_igb_syn_filter_set(dev,
+ (struct rte_eth_syn_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = eth_igb_syn_filter_get(dev,
+ (struct rte_eth_syn_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
+ if ((type) != e1000_82580 && (type) != e1000_i350)\
+ return -ENOSYS; \
+} while (0)
+
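+/*
+ * Note the mask convention inversion below: in rte_eth_ntuple_filter a mask
+ * of all-ones means "compare this field", while in the e1000 filter info a
+ * mask of 1 means "do not compare" (bypass), matching the hardware BP bits.
+ */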
+/* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info */
+static inline int
+ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
+ struct e1000_2tuple_filter_info *filter_info)
+{
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+ if (filter->priority > E1000_2TUPLE_MAX_PRI)
+ return -EINVAL; /* priority is out of range. */
+ if (filter->tcp_flags > TCP_FLAG_ALL)
+ return -EINVAL; /* flags are invalid. */
+
+ switch (filter->dst_port_mask) {
+ case UINT16_MAX:
+ filter_info->dst_port_mask = 0;
+ filter_info->dst_port = filter->dst_port;
+ break;
+ case 0:
+ filter_info->dst_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->proto_mask) {
+ case UINT8_MAX:
+ filter_info->proto_mask = 0;
+ filter_info->proto = filter->proto;
+ break;
+ case 0:
+ filter_info->proto_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
+ return -EINVAL;
+ }
+
+ filter_info->priority = (uint8_t)filter->priority;
+ if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
+ filter_info->tcp_flags = filter->tcp_flags;
+ else
+ filter_info->tcp_flags = 0;
+
+ return 0;
+}
+
+static inline struct e1000_2tuple_filter *
+igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
+ struct e1000_2tuple_filter_info *key)
+{
+ struct e1000_2tuple_filter *it;
+
+ TAILQ_FOREACH(it, filter_list, entries) {
+ if (memcmp(key, &it->filter_info,
+ sizeof(struct e1000_2tuple_filter_info)) == 0) {
+ return it;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * igb_add_2tuple_filter - add a 2tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: pointer to the filter that will be added.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+igb_add_2tuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_2tuple_filter *filter;
+ uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
+ uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
+ int i, ret;
+
+ filter = rte_zmalloc("e1000_2tuple_filter",
+ sizeof(struct e1000_2tuple_filter), 0);
+ if (filter == NULL)
+ return -ENOMEM;
+
+ ret = ntuple_filter_to_2tuple(ntuple_filter,
+ &filter->filter_info);
+ if (ret < 0) {
+ rte_free(filter);
+ return ret;
+ }
+ if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
+ &filter->filter_info) != NULL) {
+ PMD_DRV_LOG(ERR, "filter exists.");
+ rte_free(filter);
+ return -EEXIST;
+ }
+ filter->queue = ntuple_filter->queue;
+
+ /*
+ * look for an unused 2tuple filter index,
+ * and insert the filter to list.
+ */
+ for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
+ if (!(filter_info->twotuple_mask & (1 << i))) {
+ filter_info->twotuple_mask |= 1 << i;
+ filter->index = i;
+ TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
+ filter,
+ entries);
+ break;
+ }
+ }
+ if (i >= E1000_MAX_TTQF_FILTERS) {
+ PMD_DRV_LOG(ERR, "2tuple filters are full.");
+ rte_free(filter);
+ return -ENOSYS;
+ }
+
+ imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
+ if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
+ imir |= E1000_IMIR_PORT_BP;
+ else
+ imir &= ~E1000_IMIR_PORT_BP;
+
+ imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+
+ ttqf |= E1000_TTQF_QUEUE_ENABLE;
+ ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
+ ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
+ if (filter->filter_info.proto_mask == 0)
+ ttqf &= ~E1000_TTQF_MASK_ENABLE;
+
+ /* tcp flags bits setting. */
+ if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
+ if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_URG;
+ if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_ACK;
+ if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_PSH;
+ if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_RST;
+ if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_SYN;
+ if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_FIN;
+ } else
+ imir_ext |= E1000_IMIREXT_CTRL_BP;
+ E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
+ E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+ return 0;
+}
+
+/*
+ * igb_remove_2tuple_filter - remove a 2tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: pointer to the filter that will be removed.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+igb_remove_2tuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_2tuple_filter_info filter_2tuple;
+ struct e1000_2tuple_filter *filter;
+ int ret;
+
+ memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
+ ret = ntuple_filter_to_2tuple(ntuple_filter,
+ &filter_2tuple);
+ if (ret < 0)
+ return ret;
+
+ filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
+ &filter_2tuple);
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+
+ filter_info->twotuple_mask &= ~(1 << filter->index);
+ TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
+
+ /* disable the hardware slot before freeing the software filter,
+ * since filter->index is still needed here */
+ E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
+ E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
+ rte_free(filter);
+ return 0;
+}
+
+static inline struct e1000_flex_filter *
+eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
+ struct e1000_flex_filter_info *key)
+{
+ struct e1000_flex_filter *it;
+
+ TAILQ_FOREACH(it, filter_list, entries) {
+ if (memcmp(key, &it->filter_info,
+ sizeof(struct e1000_flex_filter_info)) == 0)
+ return it;
+ }
+
+ return NULL;
+}
+
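+/*
+ * Flex filters match a byte pattern of up to E1000_MAX_FLEX_FILTER_LEN
+ * bytes at the start of a packet. Each filter occupies a flexible host
+ * filter table (FHFT) region and is enabled by a per-filter bit in WUFC.
+ */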
+static int
+eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
+ struct rte_eth_flex_filter *filter,
+ bool add)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_flex_filter *flex_filter, *it;
+ uint32_t wufc, queueing, mask;
+ uint32_t reg_off;
+ uint8_t shift, i, j = 0;
+
+ flex_filter = rte_zmalloc("e1000_flex_filter",
+ sizeof(struct e1000_flex_filter), 0);
+ if (flex_filter == NULL)
+ return -ENOMEM;
+
+ flex_filter->filter_info.len = filter->len;
+ flex_filter->filter_info.priority = filter->priority;
+ memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
+ for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
+ mask = 0;
+ /* reverse bits in flex filter's mask */
+ for (shift = 0; shift < CHAR_BIT; shift++) {
+ if (filter->mask[i] & (0x01 << shift))
+ mask |= (0x80 >> shift);
+ }
+ flex_filter->filter_info.mask[i] = mask;
+ }
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+
+ if (add) {
+ if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
+ &flex_filter->filter_info) != NULL) {
+ PMD_DRV_LOG(ERR, "filter exists.");
+ rte_free(flex_filter);
+ return -EEXIST;
+ }
+ flex_filter->queue = filter->queue;
+ /*
+ * look for an unused flex filter index
+ * and insert the filter into the list.
+ */
+ for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
+ if (!(filter_info->flex_mask & (1 << i))) {
+ filter_info->flex_mask |= 1 << i;
+ flex_filter->index = i;
+ TAILQ_INSERT_TAIL(&filter_info->flex_list,
+ flex_filter,
+ entries);
+ break;
+ }
+ }
+ if (i >= E1000_MAX_FLEX_FILTERS) {
+ PMD_DRV_LOG(ERR, "flex filters are full.");
+ rte_free(flex_filter);
+ return -ENOSYS;
+ }
+
+ /* the filter index is only known now; compute the FHFT
+ * offset here rather than before it is assigned */
+ if (flex_filter->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(flex_filter->index);
+ else
+ reg_off = E1000_FHFT_EXT(flex_filter->index -
+ E1000_MAX_FHFT);
+
+ E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
+ (E1000_WUFC_FLX0 << flex_filter->index));
+ queueing = filter->len |
+ (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
+ (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
+ E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
+ queueing);
+ for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
+ E1000_WRITE_REG(hw, reg_off,
+ flex_filter->filter_info.dwords[j]);
+ reg_off += sizeof(uint32_t);
+ E1000_WRITE_REG(hw, reg_off,
+ flex_filter->filter_info.dwords[++j]);
+ reg_off += sizeof(uint32_t);
+ E1000_WRITE_REG(hw, reg_off,
+ (uint32_t)flex_filter->filter_info.mask[i]);
+ reg_off += sizeof(uint32_t) * 2;
+ ++j;
+ }
+ } else {
+ it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
+ &flex_filter->filter_info);
+ if (it == NULL) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ rte_free(flex_filter);
+ return -ENOENT;
+ }
+
+ /* compute the FHFT offset from the stored filter's index */
+ if (it->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(it->index);
+ else
+ reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
+
+ for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
+ E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
+ E1000_WRITE_REG(hw, E1000_WUFC, wufc &
+ (~(E1000_WUFC_FLX0 << it->index)));
+
+ filter_info->flex_mask &= ~(1 << it->index);
+ TAILQ_REMOVE(&filter_info->flex_list, it, entries);
+ rte_free(it);
+ rte_free(flex_filter);
+ }
+
+ return 0;
+}
+
+static int
+eth_igb_get_flex_filter(struct rte_eth_dev *dev,
+ struct rte_eth_flex_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_flex_filter flex_filter, *it;
+ uint32_t wufc, queueing, wufc_en = 0;
+
+ memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
+ flex_filter.filter_info.len = filter->len;
+ flex_filter.filter_info.priority = filter->priority;
+ memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
+ memcpy(flex_filter.filter_info.mask, filter->mask,
+ RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char));
+
+ it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
+ &flex_filter.filter_info);
+ if (it == NULL) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);
+
+ if ((wufc & wufc_en) == wufc_en) {
+ uint32_t reg_off = 0;
+ if (it->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(it->index);
+ else
+ reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
+
+ queueing = E1000_READ_REG(hw,
+ reg_off + E1000_FHFT_QUEUEING_OFFSET);
+ filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
+ filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
+ E1000_FHFT_QUEUEING_PRIO_SHIFT;
+ filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
+ E1000_FHFT_QUEUEING_QUEUE_SHIFT;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static int
+eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_flex_filter *filter;
+ int ret = 0;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return ret;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
+ filter_op);
+ return -EINVAL;
+ }
+
+ filter = (struct rte_eth_flex_filter *)arg;
+ if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
+ || filter->len % sizeof(uint64_t) != 0) {
+ PMD_DRV_LOG(ERR, "filter's length is out of range");
+ return -EINVAL;
+ }
+ if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
+ PMD_DRV_LOG(ERR, "filter's priority is out of range");
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = eth_igb_get_flex_filter(dev, filter);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info */
+static inline int
+ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
+ struct e1000_5tuple_filter_info *filter_info)
+{
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
+ return -EINVAL;
+ if (filter->priority > E1000_2TUPLE_MAX_PRI)
+ return -EINVAL; /* priority is out of range. */
+ if (filter->tcp_flags > TCP_FLAG_ALL)
+ return -EINVAL; /* flags are invalid. */
+
+ switch (filter->dst_ip_mask) {
+ case UINT32_MAX:
+ filter_info->dst_ip_mask = 0;
+ filter_info->dst_ip = filter->dst_ip;
+ break;
+ case 0:
+ filter_info->dst_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_ip_mask) {
+ case UINT32_MAX:
+ filter_info->src_ip_mask = 0;
+ filter_info->src_ip = filter->src_ip;
+ break;
+ case 0:
+ filter_info->src_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->dst_port_mask) {
+ case UINT16_MAX:
+ filter_info->dst_port_mask = 0;
+ filter_info->dst_port = filter->dst_port;
+ break;
+ case 0:
+ filter_info->dst_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_port_mask) {
+ case UINT16_MAX:
+ filter_info->src_port_mask = 0;
+ filter_info->src_port = filter->src_port;
+ break;
+ case 0:
+ filter_info->src_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->proto_mask) {
+ case UINT8_MAX:
+ filter_info->proto_mask = 0;
+ filter_info->proto = filter->proto;
+ break;
+ case 0:
+ filter_info->proto_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
+ return -EINVAL;
+ }
+
+ filter_info->priority = (uint8_t)filter->priority;
+ if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
+ filter_info->tcp_flags = filter->tcp_flags;
+ else
+ filter_info->tcp_flags = 0;
+
+ return 0;
+}
+
+static inline struct e1000_5tuple_filter *
+igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
+ struct e1000_5tuple_filter_info *key)
+{
+ struct e1000_5tuple_filter *it;
+
+ TAILQ_FOREACH(it, filter_list, entries) {
+ if (memcmp(key, &it->filter_info,
+ sizeof(struct e1000_5tuple_filter_info)) == 0) {
+ return it;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * igb_add_5tuple_filter_82576 - add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: pointer to the filter that will be added.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_5tuple_filter *filter;
+ uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
+ uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
+ uint8_t i;
+ int ret;
+
+ filter = rte_zmalloc("e1000_5tuple_filter",
+ sizeof(struct e1000_5tuple_filter), 0);
+ if (filter == NULL)
+ return -ENOMEM;
+
+ ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
+ &filter->filter_info);
+ if (ret < 0) {
+ rte_free(filter);
+ return ret;
+ }
+
+ if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
+ &filter->filter_info) != NULL) {
+ PMD_DRV_LOG(ERR, "filter exists.");
+ rte_free(filter);
+ return -EEXIST;
+ }
+ filter->queue = ntuple_filter->queue;
+
+ /*
+ * look for an unused 5tuple filter index,
+ * and insert the filter to list.
+ */
+ for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
+ if (!(filter_info->fivetuple_mask & (1 << i))) {
+ filter_info->fivetuple_mask |= 1 << i;
+ filter->index = i;
+ TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
+ filter,
+ entries);
+ break;
+ }
+ }
+ if (i >= E1000_MAX_FTQF_FILTERS) {
+ PMD_DRV_LOG(ERR, "5tuple filters are full.");
+ rte_free(filter);
+ return -ENOSYS;
+ }
+
+ ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
+ if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
+ ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
+ if (filter->filter_info.dst_ip_mask == 0)
+ ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
+ if (filter->filter_info.src_port_mask == 0)
+ ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+ if (filter->filter_info.proto_mask == 0)
+ ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
+ ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
+ E1000_FTQF_QUEUE_MASK;
+ ftqf |= E1000_FTQF_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
+ E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
+ E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
+
+ spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
+ E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
+
+ imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
+ if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
+ imir |= E1000_IMIR_PORT_BP;
+ else
+ imir &= ~E1000_IMIR_PORT_BP;
+ imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+
+ /* tcp flags bits setting. */
+ if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
+ if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_URG;
+ if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_ACK;
+ if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_PSH;
+ if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_RST;
+ if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_SYN;
+ if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_FIN;
+ } else
+ imir_ext |= E1000_IMIREXT_CTRL_BP;
+ E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+ return 0;
+}
+
+/*
+ * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: pointer to the filter that will be removed.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_5tuple_filter_info filter_5tuple;
+ struct e1000_5tuple_filter *filter;
+ int ret;
+
+ memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
+ ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
+ &filter_5tuple);
+ if (ret < 0)
+ return ret;
+
+ filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
+ &filter_5tuple);
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+
+ filter_info->fivetuple_mask &= ~(1 << filter->index);
+ TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+
+ /* disable the hardware slot before freeing the software filter,
+ * since filter->index is still needed here */
+ E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
+ E1000_FTQF_VF_BP | E1000_FTQF_MASK);
+ E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
+ rte_free(filter);
+ return 0;
+}
+
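+/*
+ * Changing the MTU reprograms the long packet enable bit (RCTL.LPE) and
+ * the Rx long packet maximum length register (RLPML); frame_size accounts
+ * for the Ethernet header, CRC and one VLAN tag on top of the MTU.
+ */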
+static int
+eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ uint32_t rctl;
+ struct e1000_hw *hw;
+ struct rte_eth_dev_info dev_info;
+ uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
+ VLAN_TAG_SIZE);
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+#ifdef RTE_LIBRTE_82571_SUPPORT
+ /* XXX: not bigger than max_rx_pktlen */
+ if (hw->mac.type == e1000_82571)
+ return -ENOTSUP;
+#endif
+ eth_igb_infos_get(dev, &dev_info);
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) ||
+ (frame_size > dev_info.max_rx_pktlen))
+ return -EINVAL;
+
+ /* refuse an MTU that requires scattered-packet support when that
+ * feature has not been enabled before. */
+ if (!dev->data->scattered_rx &&
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+ return -EINVAL;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* switch to jumbo mode if needed */
+ if (frame_size > ETHER_MAX_LEN) {
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ rctl |= E1000_RCTL_LPE;
+ } else {
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ rctl &= ~E1000_RCTL_LPE;
+ }
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+ return 0;
+}
+
+/*
+ * igb_add_del_ntuple_filter - add or delete a ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * add: if true, add filter, if false, remove filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter,
+ bool add)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ switch (ntuple_filter->flags) {
+ case RTE_5TUPLE_FLAGS:
+ case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+ if (hw->mac.type != e1000_82576)
+ return -ENOTSUP;
+ if (add)
+ ret = igb_add_5tuple_filter_82576(dev,
+ ntuple_filter);
+ else
+ ret = igb_remove_5tuple_filter_82576(dev,
+ ntuple_filter);
+ break;
+ case RTE_2TUPLE_FLAGS:
+ case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+ if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
+ return -ENOTSUP;
+ if (add)
+ ret = igb_add_2tuple_filter(dev, ntuple_filter);
+ else
+ ret = igb_remove_2tuple_filter(dev, ntuple_filter);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * igb_get_ntuple_filter - get a ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+igb_get_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_5tuple_filter_info filter_5tuple;
+ struct e1000_2tuple_filter_info filter_2tuple;
+ struct e1000_5tuple_filter *p_5tuple_filter;
+ struct e1000_2tuple_filter *p_2tuple_filter;
+ int ret;
+
+ switch (ntuple_filter->flags) {
+ case RTE_5TUPLE_FLAGS:
+ case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+ if (hw->mac.type != e1000_82576)
+ return -ENOTSUP;
+ memset(&filter_5tuple,
+ 0,
+ sizeof(struct e1000_5tuple_filter_info));
+ ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
+ &filter_5tuple);
+ if (ret < 0)
+ return ret;
+ p_5tuple_filter = igb_5tuple_filter_lookup_82576(
+ &filter_info->fivetuple_list,
+ &filter_5tuple);
+ if (p_5tuple_filter == NULL) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+ ntuple_filter->queue = p_5tuple_filter->queue;
+ break;
+ case RTE_2TUPLE_FLAGS:
+ case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+ if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
+ return -ENOTSUP;
+ memset(&filter_2tuple,
+ 0,
+ sizeof(struct e1000_2tuple_filter_info));
+ ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
+ if (ret < 0)
+ return ret;
+ p_2tuple_filter = igb_2tuple_filter_lookup(
+ &filter_info->twotuple_list,
+ &filter_2tuple);
+ if (p_2tuple_filter == NULL) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+ ntuple_filter->queue = p_2tuple_filter->queue;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * igb_ntuple_filter_handle - Handle operations for ntuple filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+static int
+igb_ntuple_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = igb_add_del_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = igb_add_del_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = igb_get_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static inline int
+igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
+ uint16_t ethertype)
+{
+ int i;
+
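+ /* ethertype_mask is a bitmap of in-use ETQF slots; a match needs
+ * both an occupied slot and an equal ethertype. */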
+ for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_filters[i] == ethertype &&
+ (filter_info->ethertype_mask & (1 << i)))
+ return i;
+ }
+ return -1;
+}
+
+static inline int
+igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
+ uint16_t ethertype)
+{
+ int i;
+
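+ /* Claim the first free ETQF slot and record the ethertype there. */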
+ for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
+ if (!(filter_info->ethertype_mask & (1 << i))) {
+ filter_info->ethertype_mask |= 1 << i;
+ filter_info->ethertype_filters[i] = ethertype;
+ return i;
+ }
+ }
+ return -1;
+}
+
+static inline int
+igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
+ uint8_t idx)
+{
+ if (idx >= E1000_MAX_ETQF_FILTERS)
+ return -1;
+ filter_info->ethertype_mask &= ~(1 << idx);
+ filter_info->ethertype_filters[idx] = 0;
+ return idx;
+}
+
+static int
+igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t etqf = 0;
+ int ret;
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+ " ethertype filter.", filter->ether_type);
+ return -EINVAL;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+ return -EINVAL;
+ }
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ PMD_DRV_LOG(ERR, "drop option is unsupported.");
+ return -EINVAL;
+ }
+
+ ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
+ if (ret >= 0 && add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+ filter->ether_type);
+ return -EEXIST;
+ }
+ if (ret < 0 && !add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+ filter->ether_type);
+ return -ENOENT;
+ }
+
+ if (add) {
+ ret = igb_ethertype_filter_insert(filter_info,
+ filter->ether_type);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype filters are full.");
+ return -ENOSYS;
+ }
+
+ etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
+ etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
+ etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
+ } else {
+ ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
+ if (ret < 0)
+ return -ENOSYS;
+ }
+ E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+igb_get_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t etqf;
+ int ret;
+
+ ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+ filter->ether_type);
+ return -ENOENT;
+ }
+
+ etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
+ if (etqf & E1000_ETQF_FILTER_ENABLE) {
+ filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
+ filter->flags = 0;
+ filter->queue = (etqf & E1000_ETQF_QUEUE) >>
+ E1000_ETQF_QUEUE_SHIFT;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+/*
+ * igb_ethertype_filter_handle - Handle operations for ethertype filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+static int
+igb_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = igb_add_del_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = igb_add_del_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = igb_get_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+eth_igb_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = -EINVAL;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ret = igb_ntuple_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = igb_ethertype_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_SYN:
+ ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_FLEXIBLE:
+ ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ break;
+ }
+
+ return ret;
+}
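+
+/* Usage sketch (illustrative, not part of this driver): an application
+ * reaches this dispatcher through the generic ethdev filter API, e.g.:
+ *
+ * struct rte_eth_ntuple_filter f = {
+ * .flags = RTE_5TUPLE_FLAGS,
+ * .queue = 1,
+ * };
+ * rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
+ * RTE_ETH_FILTER_ADD, &f);
+ */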
+
+static int
+eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
+ return 0;
+}
+
+static uint64_t
+igb_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t systime_cycles;
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ /*
+ * Need to read System Time Residue Register to be able
+ * to read the other two registers.
+ */
+ E1000_READ_REG(hw, E1000_SYSTIMR);
+ /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
+ systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
+ systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
+ * NSEC_PER_SEC;
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ /*
+ * Need to read System Time Residue Register to be able
+ * to read the other two registers.
+ */
+ E1000_READ_REG(hw, E1000_SYSTIMR);
+ systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
+ /* Only the 8 LSB are valid. */
+ systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH)
+ & 0xff) << 32;
+ break;
+ default:
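+ /* Other MAC types: SYSTIML/SYSTIMH form a plain 64-bit value. */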
+ systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
+ systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
+ << 32;
+ break;
+ }
+
+ return systime_cycles;
+}
+
+static uint64_t
+igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rx_tstamp_cycles;
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ /* RXSTMPL stores ns and RXSTMPH stores seconds. */
+ rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
+ rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
+ * NSEC_PER_SEC;
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
+ /* Only the 8 LSB are valid. */
+ rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH)
+ & 0xff) << 32;
+ break;
+ default:
+ rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
+ rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
+ << 32;
+ break;
+ }
+
+ return rx_tstamp_cycles;
+}
+
+static uint64_t
+igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t tx_tstamp_cycles;
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ /* TXSTMPL stores ns and TXSTMPH stores seconds. */
+ tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
+ tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
+ * NSEC_PER_SEC;
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
+ /* Only the 8 LSB are valid. */
+ tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH)
+ & 0xff) << 32;
+ break;
+ default:
+ tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
+ tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
+ << 32;
+ break;
+ }
+
+ return tx_tstamp_cycles;
+}
+
+static void
+igb_start_timecounters(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ (struct e1000_adapter *)dev->data->dev_private;
+ uint32_t incval = 1;
+ uint32_t shift = 0;
+ uint64_t mask = E1000_CYCLECOUNTER_MASK;
+
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ /* 32 LSB bits + 8 MSB bits = 40 bits */
+ mask = (1ULL << 40) - 1;
+ /* fall-through */
+ case e1000_i210:
+ case e1000_i211:
+ /*
+ * Start incrementing the register
+ * used to timestamp PTP packets.
+ */
+ E1000_WRITE_REG(hw, E1000_TIMINCA, incval);
+ break;
+ case e1000_82576:
+ incval = E1000_INCVALUE_82576;
+ shift = IGB_82576_TSYNC_SHIFT;
+ E1000_WRITE_REG(hw, E1000_TIMINCA,
+ E1000_INCPERIOD_82576 | incval);
+ break;
+ default:
+ /* Not supported */
+ return;
+ }
+
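+ /* The three timecounters (system time, Rx and Tx timestamps) share
+ * the cycle mask and shift derived above. */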
+ memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ adapter->systime_tc.cc_mask = mask;
+ adapter->systime_tc.cc_shift = shift;
+ adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
+
+ adapter->rx_tstamp_tc.cc_mask = mask;
+ adapter->rx_tstamp_tc.cc_shift = shift;
+ adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+ adapter->tx_tstamp_tc.cc_mask = mask;
+ adapter->tx_tstamp_tc.cc_shift = shift;
+ adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+}
+
+static int
+igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+ struct e1000_adapter *adapter =
+ (struct e1000_adapter *)dev->data->dev_private;
+
+ adapter->systime_tc.nsec += delta;
+ adapter->rx_tstamp_tc.nsec += delta;
+ adapter->tx_tstamp_tc.nsec += delta;
+
+ return 0;
+}
+
+static int
+igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ uint64_t ns;
+ struct e1000_adapter *adapter =
+ (struct e1000_adapter *)dev->data->dev_private;
+
+ ns = rte_timespec_to_ns(ts);
+
+ /* Set the timecounters to a new value. */
+ adapter->systime_tc.nsec = ns;
+ adapter->rx_tstamp_tc.nsec = ns;
+ adapter->tx_tstamp_tc.nsec = ns;
+
+ return 0;
+}
+
+static int
+igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ uint64_t ns, systime_cycles;
+ struct e1000_adapter *adapter =
+ (struct e1000_adapter *)dev->data->dev_private;
+
+ systime_cycles = igb_read_systime_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+igb_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl;
+ uint32_t tsauxc;
+
+ /* Stop the timesync system time. */
+ E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0);
+ /* Reset the timesync system time value. */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0);
+ /* fall-through */
+ case e1000_82576:
+ E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0);
+ E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0);
+ break;
+ default:
+ /* Not supported. */
+ return -ENOTSUP;
+ }
+
+ /* Enable system time; it is not enabled by default. */
+ tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
+ tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
+ E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);
+
+ igb_start_timecounters(dev);
+
+ /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
+ (ETHER_TYPE_1588 |
+ E1000_ETQF_FILTER_ENABLE |
+ E1000_ETQF_1588));
+
+ /* Enable timestamping of received PTP packets. */
+ tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
+ E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
+
+ /* Enable Timestamping of transmitted PTP packets. */
+ tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
+ E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
+
+ return 0;
+}
+
+static int
+igb_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl;
+
+ /* Disable timestamping of transmitted PTP packets. */
+ tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
+ E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
+
+ /* Disable timestamping of received PTP packets. */
+ tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
+ E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
+
+ /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
+
+ /* Stop incrementing the System Time registers. */
+ E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
+
+ return 0;
+}
+
+static int
+igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags __rte_unused)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ (struct e1000_adapter *)dev->data->dev_private;
+ uint32_t tsync_rxctl;
+ uint64_t rx_tstamp_cycles;
+ uint64_t ns;
+
+ tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
+ return -EINVAL;
+
+ rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ (struct e1000_adapter *)dev->data->dev_private;
+ uint32_t tsync_txctl;
+ uint64_t tx_tstamp_cycles;
+ uint64_t ns;
+
+ tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
+ return -EINVAL;
+
+ tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
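+
+/* Illustrative application-side flow for the timesync callbacks above:
+ * rte_eth_timesync_enable(port_id);
+ * ... transmit a PTP packet with PKT_TX_IEEE1588_TMST set ...
+ * rte_eth_timesync_read_tx_timestamp(port_id, &ts);
+ */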
+
+static int
+eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+ int count = 0;
+ int g_ind = 0;
+ const struct reg_info *reg_group;
+
+ while ((reg_group = igb_regs[g_ind++]))
+ count += igb_reg_group_count(reg_group);
+
+ return count;
+}
+
+static int
+igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+ int count = 0;
+ int g_ind = 0;
+ const struct reg_info *reg_group;
+
+ while ((reg_group = igbvf_regs[g_ind++]))
+ count += igb_reg_group_count(reg_group);
+
+ return count;
+}
+
+static int
+eth_igb_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *data = regs->data;
+ int g_ind = 0;
+ int count = 0;
+ const struct reg_info *reg_group;
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
+ regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+ hw->device_id;
+ while ((reg_group = igb_regs[g_ind++]))
+ count += igb_read_regs_group(dev, &data[count],
+ reg_group);
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static int
+igbvf_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *data = regs->data;
+ int g_ind = 0;
+ int count = 0;
+ const struct reg_info *reg_group;
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
+ regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+ hw->device_id;
+ while ((reg_group = igbvf_regs[g_ind++]))
+ count += igb_read_regs_group(dev, &data[count],
+ reg_group);
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static int
+eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Return unit is byte count */
+ return hw->nvm.word_size * 2;
+}
+
+static int
+eth_igb_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ uint16_t *data = in_eeprom->data;
+ int first, length;
+
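+ /* The NVM is word (16-bit) addressed, so byte offset and length
+ * are halved before the range check. */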
+ first = in_eeprom->offset >> 1;
+ length = in_eeprom->length >> 1;
+ if ((first >= hw->nvm.word_size) ||
+ ((first + length) > hw->nvm.word_size))
+ return -EINVAL;
+
+ in_eeprom->magic = hw->vendor_id |
+ ((uint32_t)hw->device_id << 16);
+
+ if ((nvm->ops.read) == NULL)
+ return -ENOTSUP;
+
+ return nvm->ops.read(hw, first, length, data);
+}
+
+static int
+eth_igb_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ uint16_t *data = in_eeprom->data;
+ int first, length;
+
+ first = in_eeprom->offset >> 1;
+ length = in_eeprom->length >> 1;
+ if ((first >= hw->nvm.word_size) ||
+ ((first + length) > hw->nvm.word_size))
+ return -EINVAL;
+
+ in_eeprom->magic = (uint32_t)hw->vendor_id |
+ ((uint32_t)hw->device_id << 16);
+
+ if ((nvm->ops.write) == NULL)
+ return -ENOTSUP;
+ return nvm->ops.write(hw, first, length, data);
+}
+
+static struct rte_driver pmd_igb_drv = {
+ .type = PMD_PDEV,
+ .init = rte_igb_pmd_init,
+};
+
+static struct rte_driver pmd_igbvf_drv = {
+ .type = PMD_PDEV,
+ .init = rte_igbvf_pmd_init,
+};
+
+static int
+eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t mask = 1 << queue_id;
+
+ E1000_WRITE_REG(hw, E1000_EIMC, mask);
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t mask = 1 << queue_id;
+ uint32_t regval;
+
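+ /* Unmask this queue's vector in EIMS, then re-enable the device
+ * interrupt so further events are delivered. */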
+ regval = E1000_READ_REG(hw, E1000_EIMS);
+ E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
+ E1000_WRITE_FLUSH(hw);
+
+ rte_intr_enable(&dev->pci_dev->intr_handle);
+
+ return 0;
+}
+
+static void
+eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
+ uint8_t index, uint8_t offset)
+{
+ uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
+
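+ /* Each 32-bit IVAR entry packs four 8-bit vector fields;
+ * 'offset' selects the byte lane to update. */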
+ /* clear bits */
+ val &= ~((uint32_t)0xFF << offset);
+
+ /* write vector and valid bit */
+ val |= (msix_vector | E1000_IVAR_VALID) << offset;
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
+}
+
+static void
+eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector)
+{
+ uint32_t tmp = 0;
+
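+ /* direction 0 selects an RX cause, 1 a TX cause; the IVAR byte
+ * lane layout differs per MAC generation. */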
+ if (hw->mac.type == e1000_82575) {
+ if (direction == 0)
+ tmp = E1000_EICR_RX_QUEUE0 << queue;
+ else if (direction == 1)
+ tmp = E1000_EICR_TX_QUEUE0 << queue;
+ E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
+ } else if (hw->mac.type == e1000_82576) {
+ if ((direction == 0) || (direction == 1))
+ eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
+ ((queue & 0x8) << 1) +
+ 8 * direction);
+ } else if ((hw->mac.type == e1000_82580) ||
+ (hw->mac.type == e1000_i350) ||
+ (hw->mac.type == e1000_i354) ||
+ (hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) {
+ if ((direction == 0) || (direction == 1))
+ eth_igb_write_ivar(hw, msix_vector,
+ queue >> 1,
+ ((queue & 0x1) << 4) +
+ 8 * direction);
+ }
+}
+
+/* Sets up the hardware to generate MSI-X interrupts properly.
+ * @dev: pointer to the rte_eth_dev structure
+ */
+static void
+eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
+{
+ int queue_id;
+ uint32_t tmpval, regval, intr_mask;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t vec = E1000_MISC_VEC_ID;
+ uint32_t base = E1000_MISC_VEC_ID;
+ uint32_t misc_shift = 0;
+
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
+ /* Don't configure the MSI-X registers if no mapping exists
+ * between interrupt vectors and event fds.
+ */
+ if (!rte_intr_dp_is_en(intr_handle))
+ return;
+
+ if (rte_intr_allow_others(intr_handle)) {
+ vec = base = E1000_RX_VEC_START;
+ misc_shift = 1;
+ }
+
+ /* set interrupt vector for other causes */
+ if (hw->mac.type == e1000_82575) {
+ tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /* enable MSI-X PBA support */
+ tmpval |= E1000_CTRL_EXT_PBA_CLR;
+
+ /* Auto-Mask interrupts upon ICR read */
+ tmpval |= E1000_CTRL_EXT_EIAME;
+ tmpval |= E1000_CTRL_EXT_IRCA;
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);
+
+ /* enable msix_other interrupt */
+ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
+ regval = E1000_READ_REG(hw, E1000_EIAC);
+ E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
+ regval = E1000_READ_REG(hw, E1000_EIAM);
+ E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
+ } else if ((hw->mac.type == e1000_82576) ||
+ (hw->mac.type == e1000_82580) ||
+ (hw->mac.type == e1000_i350) ||
+ (hw->mac.type == e1000_i354) ||
+ (hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) {
+ /* turn on MSI-X capability first */
+ E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
+ E1000_GPIE_PBA | E1000_GPIE_EIAME |
+ E1000_GPIE_NSICR);
+ intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
+ misc_shift;
+ regval = E1000_READ_REG(hw, E1000_EIAC);
+ E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);
+
+ /* enable msix_other interrupt */
+ regval = E1000_READ_REG(hw, E1000_EIMS);
+ E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
+ tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8;
+ E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
+ }
+
+ /* use EIAM to auto-mask when MSI-X interrupt
+ * is asserted, this saves a register write for every interrupt
+ */
+ intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
+ misc_shift;
+ regval = E1000_READ_REG(hw, E1000_EIAM);
+ E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);
+
+ for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
+ eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+
+ E1000_WRITE_FLUSH(hw);
+}
+
+PMD_REGISTER_DRIVER(pmd_igb_drv);
+PMD_REGISTER_DRIVER(pmd_igbvf_drv);
diff --git a/src/dpdk_lib18/librte_pmd_e1000/igb_pf.c b/src/dpdk22/drivers/net/e1000/igb_pf.c
index bc3816a7..1d00ddae 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/igb_pf.c
+++ b/src/dpdk22/drivers/net/e1000/igb_pf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,9 +49,9 @@
#include <rte_malloc.h>
#include <rte_random.h>
-#include "e1000/e1000_defines.h"
-#include "e1000/e1000_regs.h"
-#include "e1000/e1000_hw.h"
+#include "base/e1000_defines.h"
+#include "base/e1000_regs.h"
+#include "base/e1000_hw.h"
#include "e1000_ethdev.h"
static inline uint16_t
@@ -127,6 +127,28 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
return;
}
+void igb_pf_host_uninit(struct rte_eth_dev *dev)
+{
+ struct e1000_vf_info **vfinfo;
+ uint16_t vf_num;
+
+ PMD_INIT_FUNC_TRACE();
+
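+ /* Reset SR-IOV bookkeeping, then free the per-VF state array
+ * if any VFs were configured. */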
+ vfinfo = E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+
+ RTE_ETH_DEV_SRIOV(dev).active = 0;
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 0;
+ RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx = 0;
+ RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = 0;
+
+ vf_num = dev_num_vf(dev);
+ if (vf_num == 0)
+ return;
+
+ rte_free(*vfinfo);
+ *vfinfo = NULL;
+}
+
#define E1000_RAH_POOLSEL_SHIFT (18)
int igb_pf_host_configure(struct rte_eth_dev *eth_dev)
{
@@ -207,7 +229,7 @@ set_rx_mode(struct rte_eth_dev *dev)
/* set all bits that we expect to always be set */
fctrl &= ~E1000_RCTL_SBP; /* disable store-bad-packets */
- fctrl |= E1000_RCTL_BAM;;
+ fctrl |= E1000_RCTL_BAM;
/* clear the bits we are changing the status of */
fctrl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
@@ -395,6 +417,31 @@ igb_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
}
static int
+igb_vf_set_rlpml(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t rlpml = msgbuf[1] & E1000_VMOLR_RLPML_MASK;
+ uint32_t max_frame = rlpml + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t vmolr;
+
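+ /* Validate the frame size requested by the VF before programming
+ * its long packet maximum (VMOLR.RLPML). */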
+ if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+ return -1;
+
+ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));
+
+ vmolr &= ~E1000_VMOLR_RLPML_MASK;
+ vmolr |= rlpml;
+
+ /* Enable Long Packet support */
+ vmolr |= E1000_VMOLR_LPE;
+
+ E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr);
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
igb_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
uint16_t mbx_size = E1000_VFMAILBOX_SIZE;
@@ -428,6 +475,9 @@ igb_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
case E1000_VF_SET_MULTICAST:
retval = igb_vf_set_multicast(dev, vf, msgbuf);
break;
+ case E1000_VF_SET_LPE:
+ retval = igb_vf_set_rlpml(dev, vf, msgbuf);
+ break;
case E1000_VF_SET_VLAN:
retval = igb_vf_set_vlan(dev, vf, msgbuf);
break;
diff --git a/src/dpdk22/drivers/net/e1000/igb_regs.h b/src/dpdk22/drivers/net/e1000/igb_regs.h
new file mode 100644
index 00000000..0b5e5e58
--- /dev/null
+++ b/src/dpdk22/drivers/net/e1000/igb_regs.h
@@ -0,0 +1,223 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _IGB_REGS_H_
+#define _IGB_REGS_H_
+
+#include "e1000_ethdev.h"
+
+struct reg_info {
+ uint32_t base_addr;
+ uint32_t count;
+ uint32_t stride;
+ const char *name;
+};
+
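+/* Each group entry describes 'count' registers spaced 'stride' bytes
+ * apart, starting at 'base_addr'; a group ends with an all-zero entry. */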
+static const struct reg_info igb_regs_general[] = {
+ {E1000_CTRL, 1, 1, "E1000_CTRL"},
+ {E1000_STATUS, 1, 1, "E1000_STATUS"},
+ {E1000_CTRL_EXT, 1, 1, "E1000_CTRL_EXT"},
+ {E1000_MDIC, 1, 1, "E1000_MDIC"},
+ {E1000_SCTL, 1, 1, "E1000_SCTL"},
+ {E1000_CONNSW, 1, 1, "E1000_CONNSW"},
+ {E1000_VET, 1, 1, "E1000_VET"},
+ {E1000_LEDCTL, 1, 1, "E1000_LEDCTL"},
+ {E1000_PBA, 1, 1, "E1000_PBA"},
+ {E1000_PBS, 1, 1, "E1000_PBS"},
+ {E1000_FRTIMER, 1, 1, "E1000_FRTIMER"},
+ {E1000_TCPTIMER, 1, 1, "E1000_TCPTIMER"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_nvm[] = {
+ {E1000_EECD, 1, 1, "E1000_EECD"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_interrupt[] = {
+ {E1000_EICS, 1, 1, "E1000_EICS"},
+ {E1000_EIMS, 1, 1, "E1000_EIMS"},
+ {E1000_EIMC, 1, 1, "E1000_EIMC"},
+ {E1000_EIAC, 1, 1, "E1000_EIAC"},
+ {E1000_EIAM, 1, 1, "E1000_EIAM"},
+ {E1000_ICS, 1, 1, "E1000_ICS"},
+ {E1000_IMS, 1, 1, "E1000_IMS"},
+ {E1000_IMC, 1, 1, "E1000_IMC"},
+ {E1000_IAC, 1, 1, "E1000_IAC"},
+ {E1000_IAM, 1, 1, "E1000_IAM"},
+ {E1000_IMIRVP, 1, 1, "E1000_IMIRVP"},
+ {E1000_EITR(0), 10, 4, "E1000_EITR"},
+ {E1000_IMIR(0), 8, 4, "E1000_IMIR"},
+ {E1000_IMIREXT(0), 8, 4, "E1000_IMIREXT"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_fctl[] = {
+ {E1000_FCAL, 1, 1, "E1000_FCAL"},
+ {E1000_FCAH, 1, 1, "E1000_FCAH"},
+ {E1000_FCTTV, 1, 1, "E1000_FCTTV"},
+ {E1000_FCRTL, 1, 1, "E1000_FCRTL"},
+ {E1000_FCRTH, 1, 1, "E1000_FCRTH"},
+ {E1000_FCRTV, 1, 1, "E1000_FCRTV"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_rxdma[] = {
+ {E1000_RDBAL(0), 4, 0x100, "E1000_RDBAL"},
+ {E1000_RDBAH(0), 4, 0x100, "E1000_RDBAH"},
+ {E1000_RDLEN(0), 4, 0x100, "E1000_RDLEN"},
+ {E1000_RDH(0), 4, 0x100, "E1000_RDH"},
+ {E1000_RDT(0), 4, 0x100, "E1000_RDT"},
+ {E1000_RXCTL(0), 4, 0x100, "E1000_RXCTL"},
+ {E1000_SRRCTL(0), 4, 0x100, "E1000_SRRCTL"},
+ {E1000_DCA_RXCTRL(0), 4, 0x100, "E1000_DCA_RXCTRL"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_rx[] = {
+ {E1000_RCTL, 1, 1, "E1000_RCTL"},
+ {E1000_RXCSUM, 1, 1, "E1000_RXCSUM"},
+ {E1000_RLPML, 1, 1, "E1000_RLPML"},
+ {E1000_RFCTL, 1, 1, "E1000_RFCTL"},
+ {E1000_MRQC, 1, 1, "E1000_MRQC"},
+ {E1000_VT_CTL, 1, 1, "E1000_VT_CTL"},
+ {E1000_RAL(0), 16, 8, "E1000_RAL"},
+ {E1000_RAH(0), 16, 8, "E1000_RAH"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_tx[] = {
+ {E1000_TCTL, 1, 1, "E1000_TCTL"},
+ {E1000_TCTL_EXT, 1, 1, "E1000_TCTL_EXT"},
+ {E1000_TIPG, 1, 1, "E1000_TIPG"},
+ {E1000_DTXCTL, 1, 1, "E1000_DTXCTL"},
+ {E1000_TDBAL(0), 4, 0x100, "E1000_TDBAL"},
+ {E1000_TDBAH(0), 4, 0x100, "E1000_TDBAH"},
+ {E1000_TDLEN(0), 4, 0x100, "E1000_TDLEN"},
+ {E1000_TDH(0), 4, 0x100, "E1000_TDH"},
+ {E1000_TDT(0), 4, 0x100, "E1000_TDT"},
+ {E1000_TXDCTL(0), 4, 0x100, "E1000_TXDCTL"},
+ {E1000_TDWBAL(0), 4, 0x100, "E1000_TDWBAL"},
+ {E1000_TDWBAH(0), 4, 0x100, "E1000_TDWBAH"},
+ {E1000_DCA_TXCTRL(0), 4, 0x100, "E1000_DCA_TXCTRL"},
+ {E1000_TDFH, 1, 1, "E1000_TDFH"},
+ {E1000_TDFT, 1, 1, "E1000_TDFT"},
+ {E1000_TDFHS, 1, 1, "E1000_TDFHS"},
+ {E1000_TDFPC, 1, 1, "E1000_TDFPC"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_wakeup[] = {
+ {E1000_WUC, 1, 1, "E1000_WUC"},
+ {E1000_WUFC, 1, 1, "E1000_WUFC"},
+ {E1000_WUS, 1, 1, "E1000_WUS"},
+ {E1000_IPAV, 1, 1, "E1000_IPAV"},
+ {E1000_WUPL, 1, 1, "E1000_WUPL"},
+ {E1000_IP4AT_REG(0), 4, 8, "E1000_IP4AT_REG"},
+ {E1000_IP6AT_REG(0), 4, 4, "E1000_IP6AT_REG"},
+ {E1000_WUPM_REG(0), 4, 4, "E1000_WUPM_REG"},
+ {E1000_FFMT_REG(0), 4, 8, "E1000_FFMT_REG"},
+ {E1000_FFVT_REG(0), 4, 8, "E1000_FFVT_REG"},
+ {E1000_FFLT_REG(0), 4, 8, "E1000_FFLT_REG"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info igb_regs_mac[] = {
+ {E1000_PCS_CFG0, 1, 1, "E1000_PCS_CFG0"},
+ {E1000_PCS_LCTL, 1, 1, "E1000_PCS_LCTL"},
+ {E1000_PCS_LSTAT, 1, 1, "E1000_PCS_LSTAT"},
+ {E1000_PCS_ANADV, 1, 1, "E1000_PCS_ANADV"},
+ {E1000_PCS_LPAB, 1, 1, "E1000_PCS_LPAB"},
+ {E1000_PCS_NPTX, 1, 1, "E1000_PCS_NPTX"},
+ {E1000_PCS_LPABNP, 1, 1, "E1000_PCS_LPABNP"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info *igb_regs[] = {
+ igb_regs_general,
+ igb_regs_nvm,
+ igb_regs_interrupt,
+ igb_regs_fctl,
+ igb_regs_rxdma,
+ igb_regs_rx,
+ igb_regs_tx,
+ igb_regs_wakeup,
+ igb_regs_mac,
+ NULL};
+
+/* FIXME: reading igb_regs_interrupt has side effects which don't
+ * work with VFIO; re-install igb_regs_interrupt once the issue is resolved.
+ */
+static const struct reg_info *igbvf_regs[] = {
+ igb_regs_general,
+ igb_regs_rxdma,
+ igb_regs_tx,
+ NULL};
+
+static inline int
+igb_read_regs(struct e1000_hw *hw, const struct reg_info *reg,
+ uint32_t *reg_buf)
+{
+ unsigned int i;
+
+ for (i = 0; i < reg->count; i++) {
+ reg_buf[i] = E1000_READ_REG(hw,
+ reg->base_addr + i * reg->stride);
+ }
+ return reg->count;
+}
+
+static inline int
+igb_reg_group_count(const struct reg_info *regs)
+{
+ int count = 0;
+ int i = 0;
+
+ while (regs[i].count)
+ count += regs[i++].count;
+ return count;
+}
+
+static inline int
+igb_read_regs_group(struct rte_eth_dev *dev, uint32_t *reg_buf,
+ const struct reg_info *regs)
+{
+ int count = 0;
+ int i = 0;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ while (regs[i].count)
+ count += igb_read_regs(hw, &regs[i++], &reg_buf[count]);
+ return count;
+}
+
+#endif /* _IGB_REGS_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_e1000/igb_rxtx.c b/src/dpdk22/drivers/net/e1000/igb_rxtx.c
index 5c394a98..996e7da4 100755..100644
--- a/src/dpdk_lib18/librte_pmd_e1000/igb_rxtx.c
+++ b/src/dpdk22/drivers/net/e1000/igb_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,7 +51,6 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
@@ -70,25 +69,15 @@
#include <rte_string_fns.h>
#include "e1000_logs.h"
-#include "e1000/e1000_api.h"
+#include "base/e1000_api.h"
#include "e1000_ethdev.h"
-#define IGB_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_IPV4_TCP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV4_UDP | \
- ETH_RSS_IPV6_UDP | \
- ETH_RSS_IPV6_UDP_EX)
-
/* Bit Mask to indicate what bits required for building TX context */
#define IGB_TX_OFFLOAD_MASK ( \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK)
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
@@ -158,32 +147,40 @@ enum igb_advctx_num {
};
/** Offload features */
-union igb_vlan_macip {
- uint32_t data;
+union igb_tx_offload {
+ uint64_t data;
struct {
- uint16_t l2_l3_len; /**< 7bit L2 and 9b L3 lengths combined */
- uint16_t vlan_tci;
- /**< VLAN Tag Control Identifier (CPU order). */
- } f;
+ uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint64_t vlan_tci:16; /**< VLAN Tag Control Identifier(CPU order). */
+ uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
+ uint64_t tso_segsz:16; /**< TCP TSO segment size. */
+
+ /* uint64_t unused:8; */
+ };
};
/*
- * Compare mask for vlan_macip_len.data,
- * should be in sync with igb_vlan_macip.f layout.
+ * Compare mask for igb_tx_offload.data,
+ * should be in sync with igb_tx_offload layout.
* */
-#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
-#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
-#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
-/** MAC+IP length. */
-#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+#define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
+#define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
+#define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
+#define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
+/** Mac + IP + TCP + Mss mask. */
+#define TX_TSO_CMP_MASK \
+ (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
/**
* Structure to check if a new context needs to be built
*/
struct igb_advctx_info {
uint64_t flags; /**< ol_flags related to context build. */
- uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
- union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
+ /** tx offload: vlan, tso, l2-l3-l4 lengths. */
+ union igb_tx_offload tx_offload;
+ /** compare mask for tx offload. */
+ union igb_tx_offload tx_offload_mask;
};
/**
@@ -233,6 +230,8 @@ struct igb_tx_queue {
* Macro for VMDq feature for 1 GbE NIC.
*/
#define E1000_VMOLR_SIZE (8)
+#define IGB_TSO_MAX_HDRLEN (512)
+#define IGB_TSO_MAX_MSS (9216)
/*********************************************************************
*
@@ -241,6 +240,23 @@ struct igb_tx_queue {
**********************************************************************/
/*
+ * There are some hardware limitations for TCP segmentation offload, so
+ * check whether the parameters are valid.
+ */
+static inline uint64_t
+check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
+{
+ if (!(ol_req & PKT_TX_TCP_SEG))
+ return ol_req;
+ if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
+ ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
+ ol_req &= ~PKT_TX_TCP_SEG;
+ ol_req |= PKT_TX_TCP_CKSUM;
+ }
+ return ol_req;
+}
+
+/*
* Advanced context descriptors are almost the same between igb/ixgbe
* This is a separate function, looking for optimization opportunity here
* Rework required to go with the pre-defined values.
@@ -249,64 +265,81 @@ struct igb_tx_queue {
static inline void
igbe_set_xmit_ctx(struct igb_tx_queue* txq,
volatile struct e1000_adv_tx_context_desc *ctx_txd,
- uint64_t ol_flags, uint32_t vlan_macip_lens)
+ uint64_t ol_flags, union igb_tx_offload tx_offload)
{
uint32_t type_tucmd_mlhl;
uint32_t mss_l4len_idx;
uint32_t ctx_idx, ctx_curr;
- uint32_t cmp_mask;
+ uint32_t vlan_macip_lens;
+ union igb_tx_offload tx_offload_mask;
ctx_curr = txq->ctx_curr;
ctx_idx = ctx_curr + txq->ctx_start;
- cmp_mask = 0;
+ tx_offload_mask.data = 0;
type_tucmd_mlhl = 0;
- if (ol_flags & PKT_TX_VLAN_PKT) {
- cmp_mask |= TX_VLAN_CMP_MASK;
- }
-
- if (ol_flags & PKT_TX_IP_CKSUM) {
- type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
- cmp_mask |= TX_MACIP_LEN_CMP_MASK;
- }
-
/* Specify which HW CTX to upload. */
mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_UDP_CKSUM:
- type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
+
+ if (ol_flags & PKT_TX_VLAN_PKT)
+ tx_offload_mask.data |= TX_VLAN_CMP_MASK;
+
+ /* check if TCP segmentation required for this packet */
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ /* implies IP cksum in IPv4 */
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
+ E1000_ADVTXD_TUCMD_L4T_TCP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
- cmp_mask |= TX_MACIP_LEN_CMP_MASK;
- break;
- case PKT_TX_TCP_CKSUM:
- type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
+ else
+ type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
+ E1000_ADVTXD_TUCMD_L4T_TCP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
- cmp_mask |= TX_MACIP_LEN_CMP_MASK;
- break;
- case PKT_TX_SCTP_CKSUM:
- type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
+
+ tx_offload_mask.data |= TX_TSO_CMP_MASK;
+ mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
+ } else { /* no TSO, check if hardware checksum is needed */
+ if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
+ tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
+
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
+
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
- cmp_mask |= TX_MACIP_LEN_CMP_MASK;
- break;
- default:
- type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
+ mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+ break;
+ case PKT_TX_TCP_CKSUM:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
+ E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+ mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
+ E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+ mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+ break;
+ default:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
- break;
+ break;
+ }
}
- txq->ctx_cache[ctx_curr].flags = ol_flags;
- txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask;
- txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
- vlan_macip_lens & cmp_mask;
+ txq->ctx_cache[ctx_curr].flags = ol_flags;
+ txq->ctx_cache[ctx_idx].tx_offload.data =
+ tx_offload_mask.data & tx_offload.data;
+ txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
+ vlan_macip_lens = (uint32_t)tx_offload.data;
ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
- ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
- ctx_txd->seqnum_seed = 0;
+ ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
+ ctx_txd->seqnum_seed = 0;
}
/*
@@ -315,20 +348,20 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq,
*/
static inline uint32_t
what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
- uint32_t vlan_macip_lens)
+ union igb_tx_offload tx_offload)
{
/* If match with the current context */
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
- (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
return txq->ctx_curr;
}
/* If match with the second context */
txq->ctx_curr ^= 1;
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
- (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
return txq->ctx_curr;
}
@@ -345,14 +378,19 @@ tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
+ tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
return tmp;
}
static inline uint32_t
tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
{
+ uint32_t cmdtype;
static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
- return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
+ static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
+ cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
+ cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
+ return cmdtype;
}
uint16_t
@@ -366,14 +404,6 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
volatile union e1000_adv_tx_desc *txd;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
- union igb_vlan_macip vlan_macip_lens;
- union {
- uint16_t u16;
- struct {
- uint16_t l3_len:9;
- uint16_t l2_len:7;
- };
- } l2_l3_len;
uint64_t buf_dma_addr;
uint32_t olinfo_status;
uint32_t cmd_type_len;
@@ -387,6 +417,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint64_t tx_ol_req;
uint32_t new_ctx = 0;
uint32_t ctx = 0;
+ union igb_tx_offload tx_offload = {0};
txq = tx_queue;
sw_ring = txq->sw_ring;
@@ -411,16 +442,18 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
ol_flags = tx_pkt->ol_flags;
- l2_l3_len.l2_len = tx_pkt->l2_len;
- l2_l3_len.l3_len = tx_pkt->l3_len;
- vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
- vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
/* If a Context Descriptor need be built . */
if (tx_ol_req) {
- ctx = what_advctx_update(txq, tx_ol_req,
- vlan_macip_lens.data);
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.vlan_tci = tx_pkt->vlan_tci;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
+ tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
+
+ ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
/* Only allocate context descriptor if required*/
new_ctx = (ctx == IGB_CTX_NUM);
ctx = txq->ctx_curr;
@@ -512,6 +545,8 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
*/
cmd_type_len = txq->txd_type |
E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
+ if (tx_ol_req & PKT_TX_TCP_SEG)
+ pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
#if defined(RTE_LIBRTE_IEEE1588)
if (ol_flags & PKT_TX_IEEE1588_TMST)
@@ -535,8 +570,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
txe->mbuf = NULL;
}
- igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- vlan_macip_lens.data);
+ igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
txe->last_id = tx_last;
tx_id = txe->next_id;
@@ -544,8 +578,8 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
/* Setup the TX Advanced Data Descriptor */
- cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
- olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
+ cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
+ olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
}
@@ -602,17 +636,87 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* RX functions
*
**********************************************************************/
-static inline uint64_t
-rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
+#define IGB_PACKET_TYPE_IPV4 0X01
+#define IGB_PACKET_TYPE_IPV4_TCP 0X11
+#define IGB_PACKET_TYPE_IPV4_UDP 0X21
+#define IGB_PACKET_TYPE_IPV4_SCTP 0X41
+#define IGB_PACKET_TYPE_IPV4_EXT 0X03
+#define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43
+#define IGB_PACKET_TYPE_IPV6 0X04
+#define IGB_PACKET_TYPE_IPV6_TCP 0X14
+#define IGB_PACKET_TYPE_IPV6_UDP 0X24
+#define IGB_PACKET_TYPE_IPV6_EXT 0X0C
+#define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C
+#define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C
+#define IGB_PACKET_TYPE_IPV4_IPV6 0X05
+#define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15
+#define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25
+#define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
+#define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
+#define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
+#define IGB_PACKET_TYPE_MAX 0X80
+#define IGB_PACKET_TYPE_MASK 0X7F
+#define IGB_PACKET_TYPE_SHIFT 0X04
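+/* Translate the 7-bit packet type field of the RX descriptor
+ * (pkt_info >> IGB_PACKET_TYPE_SHIFT) into RTE_PTYPE_* flags. */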
+static inline uint32_t
+igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
{
- uint64_t pkt_flags;
-
- static uint64_t ip_pkt_types_map[16] = {
- 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
- PKT_RX_IPV6_HDR, 0, 0, 0,
- PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
- PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+ static const uint32_t
+ ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
+ [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4,
+ [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT,
+ [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6,
+ [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT,
+ [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
+ [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
};
+ if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
+ return RTE_PTYPE_UNKNOWN;
+
+ pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
+
+ return ptype_table[pkt_info];
+}
+
+static inline uint64_t
+rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
+{
+ uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
#if defined(RTE_LIBRTE_IEEE1588)
static uint32_t ip_pkt_etqf_map[8] = {
@@ -620,14 +724,19 @@ rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
0, 0, 0, 0,
};
- pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
- ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
- ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+ struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
+
+ /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
+ if (hw->mac.type == e1000_i210)
+ pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
+ else
+ pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
#else
- pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
- ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+ RTE_SET_USED(rxq);
#endif
- return pkt_flags | (((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH);
+
+ return pkt_flags;
}
static inline uint64_t
@@ -767,7 +876,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxe->mbuf = nmb;
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
- rxdp->read.hdr_addr = dma_addr;
+ rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
/*
@@ -798,10 +907,12 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
rxm->ol_flags = pkt_flags;
+ rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
+ lo_dword.hs_rss.pkt_info);
/*
* Store the mbuf address into the next entry of the array
@@ -952,7 +1063,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxe->mbuf = nmb;
dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
rxdp->read.pkt_addr = dma;
- rxdp->read.hdr_addr = dma;
+ rxdp->read.hdr_addr = 0;
/*
* Set data length & data buffer address of mbuf.
@@ -1032,10 +1143,12 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
first_seg->ol_flags = pkt_flags;
+ first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
+ lower.lo_dword.hs_rss.pkt_info);
/* Prefetch data of first segment, if configured to do so. */
rte_packet_prefetch((char *)first_seg->buf_addr +
@@ -1090,47 +1203,12 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
}
/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define IGB_ALIGN 128
-
-/*
* Maximum number of Ring Descriptors.
*
* Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
* descriptors should meet the following condition:
* (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
*/
-#define IGB_MIN_RING_DESC 32
-#define IGB_MAX_RING_DESC 4096
-
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name,
- dev->data->port_id, queue_id);
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(z_name, ring_size,
- socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, IGB_ALIGN);
-#endif
-}
static void
igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
@@ -1176,8 +1254,7 @@ igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
static void
igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
{
- static const union e1000_adv_tx_desc zeroed_desc = { .read = {
- .buffer_addr = 0}};
+ static const union e1000_adv_tx_desc zeroed_desc = {{0}};
struct igb_tx_entry *txe = txq->sw_ring;
uint16_t i, prev;
struct e1000_hw *hw;
@@ -1225,10 +1302,11 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
/*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
- * of IGB_ALIGN.
+ * of E1000_ALIGN.
*/
- if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
- (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+ if (nb_desc % IGB_TXD_ALIGN != 0 ||
+ (nb_desc > E1000_MAX_RING_DESC) ||
+ (nb_desc < E1000_MIN_RING_DESC)) {
return -EINVAL;
}
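
The reworked check validates the count directly against a per-128-byte-line
descriptor alignment step instead of multiplying by the descriptor size first.
A minimal sketch of the equivalent predicate, assuming the IGB_TXD_ALIGN and
E1000_MIN/MAX_RING_DESC constants introduced by this patch:

    /* Sketch: descriptor-count validity as checked above (constants
     * assumed from this patch; RX uses IGB_RXD_ALIGN the same way). */
    static int igb_nb_desc_valid(uint16_t nb_desc, uint16_t align)
    {
            return (nb_desc % align == 0) &&
                   (nb_desc >= E1000_MIN_RING_DESC) &&
                   (nb_desc <= E1000_MAX_RING_DESC);
    }
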
@@ -1264,9 +1342,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
- tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
- size, socket_id);
+ size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
+ E1000_ALIGN, socket_id);
if (tz == NULL) {
igb_tx_queue_release(txq);
return (-ENOMEM);
@@ -1284,12 +1362,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-#else
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#endif
- txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
+
+ txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
/* Allocate software ring */
txq->sw_ring = rte_zmalloc("txq->sw_ring",
sizeof(struct igb_tx_entry) * nb_desc,
@@ -1342,8 +1417,7 @@ eth_igb_rx_queue_release(void *rxq)
static void
igb_reset_rx_queue(struct igb_rx_queue *rxq)
{
- static const union e1000_adv_rx_desc zeroed_desc = { .read = {
- .pkt_addr = 0}};
+ static const union e1000_adv_rx_desc zeroed_desc = {{0}};
unsigned i;
/* Zero out HW ring memory */
@@ -1374,10 +1448,11 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
- * of IGB_ALIGN.
+ * of E1000_ALIGN.
*/
- if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
- (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+ if (nb_desc % IGB_RXD_ALIGN != 0 ||
+ (nb_desc > E1000_MAX_RING_DESC) ||
+ (nb_desc < E1000_MIN_RING_DESC)) {
return (-EINVAL);
}
@@ -1413,19 +1488,16 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
- rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
+ size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
+ E1000_ALIGN, socket_id);
if (rz == NULL) {
igb_rx_queue_release(rxq);
return (-ENOMEM);
}
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
-#else
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#endif
rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
/* Allocate software ring. */
@@ -1514,6 +1586,24 @@ igb_dev_clear_queues(struct rte_eth_dev *dev)
}
}
+void
+igb_dev_free_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ eth_igb_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ eth_igb_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
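
The new igb_dev_free_queues() helper releases every queue and zeroes the
counts in one place. A hedged usage sketch (the close-path function name here
is illustrative, not part of this patch):

    /* Hypothetical close path using the new helper (illustrative only) */
    static void eth_igb_close_sketch(struct rte_eth_dev *dev)
    {
            igb_dev_clear_queues(dev);  /* stop and drain the rings first */
            igb_dev_free_queues(dev);   /* then release the queue objects */
    }
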
/**
* Receive Side Scaling (RSS).
* See section 7.1.1.7 in the following document:
@@ -1582,19 +1672,19 @@ igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
if (rss_hf & ETH_RSS_IPV4)
mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
- if (rss_hf & ETH_RSS_IPV4_TCP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
if (rss_hf & ETH_RSS_IPV6)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
if (rss_hf & ETH_RSS_IPV6_EX)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
- if (rss_hf & ETH_RSS_IPV6_TCP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
if (rss_hf & ETH_RSS_IPV6_TCP_EX)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
- if (rss_hf & ETH_RSS_IPV4_UDP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
- if (rss_hf & ETH_RSS_IPV6_UDP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
if (rss_hf & ETH_RSS_IPV6_UDP_EX)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
@@ -1664,19 +1754,19 @@ int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
rss_hf |= ETH_RSS_IPV4;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
- rss_hf |= ETH_RSS_IPV4_TCP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
rss_hf |= ETH_RSS_IPV6;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
rss_hf |= ETH_RSS_IPV6_EX;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
- rss_hf |= ETH_RSS_IPV6_TCP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
rss_hf |= ETH_RSS_IPV6_TCP_EX;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
- rss_hf |= ETH_RSS_IPV4_UDP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
- rss_hf |= ETH_RSS_IPV6_UDP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
rss_hf |= ETH_RSS_IPV6_UDP_EX;
rss_conf->rss_hf = rss_hf;
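
These hunks track the DPDK 2.2 rename of the TCP/UDP hash types to their
ETH_RSS_NONFRAG_* forms. From the application side the request looks like
this (a sketch; port_id is assumed to be an already-configured port):

    /* Sketch: requesting the renamed non-fragment IPv4 hash types via
     * the generic ethdev RSS call (DPDK 2.2 flag names). */
    uint8_t port_id = 0;                  /* assumed port */
    struct rte_eth_rss_conf rss_conf = {
            .rss_key = NULL,              /* keep the driver default key */
            .rss_hf  = ETH_RSS_IPV4 |
                       ETH_RSS_NONFRAG_IPV4_TCP |
                       ETH_RSS_NONFRAG_IPV4_UDP,
    };
    int ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
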
@@ -1882,7 +1972,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
rxd = &rxq->rx_ring[i];
- rxd->read.hdr_addr = dma_addr;
+ rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
rxe[i].mbuf = mbuf;
}
@@ -1935,7 +2025,6 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
{
struct e1000_hw *hw;
struct igb_rx_queue *rxq;
- struct rte_pktmbuf_pool_private *mbp_priv;
uint32_t rctl;
uint32_t rxcsum;
uint32_t srrctl;
@@ -2005,9 +2094,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
/*
* Configure RX buffer size.
*/
- mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
- buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM);
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM);
if (buf_size >= 1024) {
/*
* Configure the BSIZEPACKET field of the SRRCTL
@@ -2235,7 +2323,6 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
{
struct e1000_hw *hw;
struct igb_rx_queue *rxq;
- struct rte_pktmbuf_pool_private *mbp_priv;
uint32_t srrctl;
uint16_t buf_size;
uint16_t rctl_bsize;
@@ -2276,9 +2363,8 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
/*
* Configure RX buffer size.
*/
- mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
- buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM);
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM);
if (buf_size >= 1024) {
/*
* Configure the BSIZEPACKET field of the SRRCTL
@@ -2413,3 +2499,33 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
}
+void
+igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct igb_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+}
+
+void
+igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct igb_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+}
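
igb_rxq_info_get()/igb_txq_info_get() back the queue-introspection API that
appeared in DPDK 2.2. A sketch of the application-side view (port_id and
queue 0 are assumptions):

    /* Sketch: reading the new per-queue info through the ethdev layer */
    uint8_t port_id = 0;
    struct rte_eth_rxq_info rx_qinfo;
    struct rte_eth_txq_info tx_qinfo;

    if (rte_eth_rx_queue_info_get(port_id, 0, &rx_qinfo) == 0)
            printf("rxq0: %u descriptors\n", rx_qinfo.nb_desc);
    if (rte_eth_tx_queue_info_get(port_id, 0, &tx_qinfo) == 0)
            printf("txq0: pthresh %u\n", tx_qinfo.conf.tx_thresh.pthresh);
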
diff --git a/src/dpdk_lib18/librte_pmd_enic/LICENSE b/src/dpdk22/drivers/net/enic/LICENSE
index 46a27a4e..46a27a4e 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/LICENSE
+++ b/src/dpdk22/drivers/net/enic/LICENSE
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/cq_desc.h b/src/dpdk22/drivers/net/enic/base/cq_desc.h
index c4189679..f3ef6bb5 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/cq_desc.h
+++ b/src/dpdk22/drivers/net/enic/base/cq_desc.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: cq_desc.h 129574 2013-04-26 22:11:14Z rfaucett $"
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/cq_enet_desc.h b/src/dpdk22/drivers/net/enic/base/cq_enet_desc.h
index 669a2b50..f9822a45 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/cq_enet_desc.h
+++ b/src/dpdk22/drivers/net/enic/base/cq_enet_desc.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: cq_enet_desc.h 160468 2014-02-18 09:50:15Z gvaradar $"
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
diff --git a/src/dpdk22/drivers/net/enic/base/enic_vnic_wq.h b/src/dpdk22/drivers/net/enic/base/enic_vnic_wq.h
new file mode 100644
index 00000000..e3ea5742
--- /dev/null
+++ b/src/dpdk22/drivers/net/enic/base/enic_vnic_wq.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2008-2015 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2015, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _ENIC_VNIC_WQ_H_
+#define _ENIC_VNIC_WQ_H_
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+static inline void enic_vnic_post_wq_index(struct vnic_wq *wq)
+{
+ struct vnic_wq_buf *buf = wq->to_use;
+
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+ iowrite32(buf->index, &wq->ctrl->posted_index);
+}
+
+static inline void enic_vnic_post_wq(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr,
+ unsigned int len, int sop,
+ uint8_t desc_skip_cnt, uint8_t cq_entry,
+ uint8_t compressed_send, uint64_t wrid)
+{
+ struct vnic_wq_buf *buf = wq->to_use;
+
+ buf->sop = sop;
+ buf->cq_entry = cq_entry;
+ buf->compressed_send = compressed_send;
+ buf->desc_skip_cnt = desc_skip_cnt;
+ buf->os_buf = os_buf;
+ buf->dma_addr = dma_addr;
+ buf->len = len;
+ buf->wr_id = wrid;
+
+ buf = buf->next;
+ if (cq_entry)
+ enic_vnic_post_wq_index(wq);
+ wq->to_use = buf;
+
+ wq->ring.desc_avail -= desc_skip_cnt;
+}
+
+#endif /* _ENIC_VNIC_WQ_H_ */
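
enic_vnic_post_wq() differs from the generic vnic_wq_post() in that the
posted-index doorbell (and its wmb()) fires only when cq_entry is set, so a
multi-segment packet costs one MMIO write instead of one per descriptor. A
sketch of posting a two-segment packet (buffer/length variables hypothetical):

    /* First segment: sop=1, no doorbell yet (cq_entry=0). */
    enic_vnic_post_wq(wq, seg0, dma0, len0, 1 /*sop*/,
                      1 /*desc_skip_cnt*/, 0 /*cq_entry*/,
                      0 /*compressed_send*/, 0 /*wrid*/);
    /* Last segment: cq_entry=1 posts the index once for both. */
    enic_vnic_post_wq(wq, seg1, dma1, len1, 0 /*sop*/,
                      1 /*desc_skip_cnt*/, 1 /*cq_entry*/,
                      0 /*compressed_send*/, 0 /*wrid*/);
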
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/rq_enet_desc.h b/src/dpdk22/drivers/net/enic/base/rq_enet_desc.h
index f38ff2a1..7292d9dc 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/rq_enet_desc.h
+++ b/src/dpdk22/drivers/net/enic/base/rq_enet_desc.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: rq_enet_desc.h 59839 2010-09-27 20:36:31Z roprabhu $"
#ifndef _RQ_ENET_DESC_H_
#define _RQ_ENET_DESC_H_
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_cq.c b/src/dpdk22/drivers/net/enic/base/vnic_cq.c
index cda97e4e..2f65f357 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_cq.c
+++ b/src/dpdk22/drivers/net/enic/base/vnic_cq.c
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: vnic_cq.c 171146 2014-05-02 07:08:20Z ssujith $"
#include "vnic_dev.h"
#include "vnic_cq.h"
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_cq.h b/src/dpdk22/drivers/net/enic/base/vnic_cq.h
index 0928d720..922391b3 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_cq.h
+++ b/src/dpdk22/drivers/net/enic/base/vnic_cq.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: vnic_cq.h 173398 2014-05-19 09:17:02Z gvaradar $"
#ifndef _VNIC_CQ_H_
#define _VNIC_CQ_H_
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_dev.c b/src/dpdk22/drivers/net/enic/base/vnic_dev.c
index 6407994d..61538646 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_dev.c
+++ b/src/dpdk22/drivers/net/enic/base/vnic_dev.c
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id$"
#include <rte_memzone.h>
#include <rte_memcpy.h>
@@ -242,9 +241,9 @@ unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
if (desc_count == 0)
desc_count = 4096;
- ring->desc_count = ALIGN(desc_count, count_align);
+ ring->desc_count = VNIC_ALIGN(desc_count, count_align);
- ring->desc_size = ALIGN(desc_size, desc_align);
+ ring->desc_size = VNIC_ALIGN(desc_size, desc_align);
ring->size = ring->desc_count * ring->desc_size;
ring->size_unaligned = ring->size + ring->base_align;
@@ -294,7 +293,7 @@ int vnic_dev_alloc_desc_ring(__attribute__((unused)) struct vnic_dev *vdev,
ring->base_addr_unaligned = (dma_addr_t)rz->phys_addr;
- ring->base_addr = ALIGN(ring->base_addr_unaligned,
+ ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned,
ring->base_align);
ring->descs = (u8 *)ring->descs_unaligned +
(ring->base_addr - ring->base_addr_unaligned);
@@ -474,7 +473,7 @@ static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
return !(err || a0);
}
-int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
void *value)
{
u64 a0, a1;
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_dev.h b/src/dpdk22/drivers/net/enic/base/vnic_dev.h
index d1373a5b..113d6acc 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_dev.h
+++ b/src/dpdk22/drivers/net/enic/base/vnic_dev.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: vnic_dev.h 196958 2014-11-04 18:23:37Z xuywang $"
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_
@@ -55,7 +54,7 @@ static inline u64 readq(void __iomem *reg)
static inline void writeq(u64 val, void __iomem *reg)
{
writel(val & 0xffffffff, reg);
- writel(val >> 32, (char *)reg + 0x4UL);
+ writel((u32)(val >> 32), (char *)reg + 0x4UL);
}
#endif
@@ -136,7 +135,7 @@ void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
-int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
void *value);
int vnic_dev_stats_clear(struct vnic_dev *vdev);
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_devcmd.h b/src/dpdk22/drivers/net/enic/base/vnic_devcmd.h
index e7ecf31a..b3d5a6cc 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_devcmd.h
+++ b/src/dpdk22/drivers/net/enic/base/vnic_devcmd.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: vnic_devcmd.h 173135 2014-05-16 03:14:07Z sanpilla $"
#ifndef _VNIC_DEVCMD_H_
#define _VNIC_DEVCMD_H_
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_enet.h b/src/dpdk22/drivers/net/enic/base/vnic_enet.h
index 9d3cc07e..cc34998f 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_enet.h
+++ b/src/dpdk22/drivers/net/enic/base/vnic_enet.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: vnic_enet.h 175806 2014-06-04 19:31:17Z rfaucett $"
#ifndef _VNIC_ENIC_H_
#define _VNIC_ENIC_H_
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_intr.c b/src/dpdk22/drivers/net/enic/base/vnic_intr.c
index 84368afc..04bb4261 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_intr.c
+++ b/src/dpdk22/drivers/net/enic/base/vnic_intr.c
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: vnic_intr.c 171146 2014-05-02 07:08:20Z ssujith $"
#include "vnic_dev.h"
#include "vnic_intr.h"
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_intr.h b/src/dpdk22/drivers/net/enic/base/vnic_intr.h
index ecb82bf4..da089bcf 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_intr.h
+++ b/src/dpdk22/drivers/net/enic/base/vnic_intr.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: vnic_intr.h 171146 2014-05-02 07:08:20Z ssujith $"
#ifndef _VNIC_INTR_H_
#define _VNIC_INTR_H_
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_nic.h b/src/dpdk22/drivers/net/enic/base/vnic_nic.h
index 332cfb4f..88907c00 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_nic.h
+++ b/src/dpdk22/drivers/net/enic/base/vnic_nic.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: vnic_nic.h 59839 2010-09-27 20:36:31Z roprabhu $"
#ifndef _VNIC_NIC_H_
#define _VNIC_NIC_H_
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_resource.h b/src/dpdk22/drivers/net/enic/base/vnic_resource.h
index 2512712e..b7a9b612 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_resource.h
+++ b/src/dpdk22/drivers/net/enic/base/vnic_resource.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: vnic_resource.h 196958 2014-11-04 18:23:37Z xuywang $"
#ifndef _VNIC_RESOURCE_H_
#define _VNIC_RESOURCE_H_
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rq.c b/src/dpdk22/drivers/net/enic/base/vnic_rq.c
index 3a4b65ab..14416047 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rq.c
+++ b/src/dpdk22/drivers/net/enic/base/vnic_rq.c
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: vnic_rq.c 171146 2014-05-02 07:08:20Z ssujith $"
#include "vnic_dev.h"
#include "vnic_rq.h"
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rq.h b/src/dpdk22/drivers/net/enic/base/vnic_rq.h
index 54b66123..0f5c3c18 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rq.h
+++ b/src/dpdk22/drivers/net/enic/base/vnic_rq.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: vnic_rq.h 180262 2014-07-02 07:57:43Z gvaradar $"
#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rss.c b/src/dpdk22/drivers/net/enic/base/vnic_rss.c
index 5ff76b14..1cf055b0 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rss.c
+++ b/src/dpdk22/drivers/net/enic/base/vnic_rss.c
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id$"
#include "enic_compat.h"
#include "vnic_rss.h"
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rss.h b/src/dpdk22/drivers/net/enic/base/vnic_rss.h
index 45ed3d2a..ebb18b59 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rss.h
+++ b/src/dpdk22/drivers/net/enic/base/vnic_rss.h
@@ -30,7 +30,6 @@
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
-#ident "$Id: vnic_rss.h 64224 2010-11-09 19:43:13Z vkolluri $"
#ifndef _VNIC_RSS_H_
#define _VNIC_RSS_H_
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_stats.h b/src/dpdk22/drivers/net/enic/base/vnic_stats.h
index ac5aa722..0c779d8a 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_stats.h
+++ b/src/dpdk22/drivers/net/enic/base/vnic_stats.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: vnic_stats.h 84040 2011-08-09 23:38:43Z dwang2 $"
#ifndef _VNIC_STATS_H_
#define _VNIC_STATS_H_
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_wq.c b/src/dpdk22/drivers/net/enic/base/vnic_wq.c
index e52cef0b..a3ef4170 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_wq.c
+++ b/src/dpdk22/drivers/net/enic/base/vnic_wq.c
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: vnic_wq.c 183023 2014-07-22 23:47:25Z xuywang $"
#include "vnic_dev.h"
#include "vnic_wq.h"
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_wq.h b/src/dpdk22/drivers/net/enic/base/vnic_wq.h
index f8219ad5..c23de625 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_wq.h
+++ b/src/dpdk22/drivers/net/enic/base/vnic_wq.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: vnic_wq.h 183023 2014-07-22 23:47:25Z xuywang $"
#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/wq_enet_desc.h b/src/dpdk22/drivers/net/enic/base/wq_enet_desc.h
index ff2b7680..db41d00e 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/vnic/wq_enet_desc.h
+++ b/src/dpdk22/drivers/net/enic/base/wq_enet_desc.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: wq_enet_desc.h 59839 2010-09-27 20:36:31Z roprabhu $"
#ifndef _WQ_ENET_DESC_H_
#define _WQ_ENET_DESC_H_
diff --git a/src/dpdk_lib18/librte_pmd_enic/enic.h b/src/dpdk22/drivers/net/enic/enic.h
index c43417c2..9e783051 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/enic.h
+++ b/src/dpdk22/drivers/net/enic/enic.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id$"
#ifndef _ENIC_H_
#define _ENIC_H_
@@ -49,8 +48,8 @@
#define DRV_NAME "enic_pmd"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver"
-#define DRV_VERSION "1.0.0.4"
-#define DRV_COPYRIGHT "Copyright 2008-2014 Cisco Systems, Inc"
+#define DRV_VERSION "1.0.0.6"
+#define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc"
#define ENIC_WQ_MAX 8
#define ENIC_RQ_MAX 8
@@ -66,9 +65,9 @@
#define ENIC_CALC_IP_CKSUM 1
#define ENIC_CALC_TCP_UDP_CKSUM 2
#define ENIC_MAX_MTU 9000
-#define PAGE_SIZE 4096
+#define ENIC_PAGE_SIZE 4096
#define PAGE_ROUND_UP(x) \
- ((((unsigned long)(x)) + PAGE_SIZE-1) & (~(PAGE_SIZE-1)))
+ ((((unsigned long)(x)) + ENIC_PAGE_SIZE-1) & (~(ENIC_PAGE_SIZE-1)))
#define ENICPMD_VFIO_PATH "/dev/vfio/vfio"
/*#define ENIC_DESC_COUNT_MAKE_ODD (x) do{if ((~(x)) & 1) { (x)--; } }while(0)*/
@@ -80,13 +79,13 @@
#define ENICPMD_FDIR_MAX 64
struct enic_fdir_node {
- struct rte_fdir_filter filter;
+ struct rte_eth_fdir_filter filter;
u16 fltr_id;
u16 rq_index;
};
struct enic_fdir {
- struct rte_eth_fdir stats;
+ struct rte_eth_fdir_stats stats;
struct rte_hash *hash;
struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
};
@@ -99,6 +98,7 @@ struct enic {
struct vnic_dev_bar bar0;
struct vnic_dev *vdev;
+ unsigned int port_id;
struct rte_eth_dev *rte_dev;
struct enic_fdir fdir;
char bdf_name[ENICPMD_BDF_LENGTH];
@@ -110,7 +110,7 @@ struct enic {
pthread_t err_intr_thread;
int promisc;
int allmulti;
- int ig_vlan_strip_en;
+ u8 ig_vlan_strip_en;
int link_status;
u8 hw_ip_checksum;
@@ -154,10 +154,12 @@ static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
return (struct enic *)eth_dev->data->dev_private;
}
+extern void enic_fdir_stats_get(struct enic *enic,
+ struct rte_eth_fdir_stats *stats);
extern int enic_fdir_add_fltr(struct enic *enic,
- struct rte_fdir_filter *params, u16 queue, u8 drop);
+ struct rte_eth_fdir_filter *params);
extern int enic_fdir_del_fltr(struct enic *enic,
- struct rte_fdir_filter *params);
+ struct rte_eth_fdir_filter *params);
extern void enic_free_wq(void *txq);
extern int enic_alloc_intr_resources(struct enic *enic);
extern int enic_setup_finish(struct enic *enic);
@@ -185,10 +187,12 @@ extern void enic_add_packet_filter(struct enic *enic);
extern void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
extern void enic_del_mac_address(struct enic *enic);
extern unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
-extern int enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
- struct rte_mbuf *tx_pkt, unsigned short len,
- uint8_t sop, uint8_t eop,
- uint16_t ol_flags, uint16_t vlan_tag);
+extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
+ struct rte_mbuf *tx_pkt, unsigned short len,
+ uint8_t sop, uint8_t eop, uint8_t cq_entry,
+ uint16_t ol_flags, uint16_t vlan_tag);
+
+extern void enic_post_wq_index(struct vnic_wq *wq);
extern int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts,
unsigned int budget, unsigned int *work_done);
extern int enic_probe(struct enic *enic);
diff --git a/src/dpdk_lib18/librte_pmd_enic/enic_clsf.c b/src/dpdk22/drivers/net/enic/enic_clsf.c
index 577a382c..9c2bc210 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/enic_clsf.c
+++ b/src/dpdk22/drivers/net/enic/enic_clsf.c
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id$"
#include <libgen.h>
@@ -61,11 +60,14 @@
#define DEFAULT_HASH_FUNC rte_jhash
#endif
-#define SOCKET_0 0
#define ENICPMD_CLSF_HASH_ENTRIES ENICPMD_FDIR_MAX
-#define ENICPMD_CLSF_BUCKET_ENTRIES 4
-int enic_fdir_del_fltr(struct enic *enic, struct rte_fdir_filter *params)
+void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
+{
+ *stats = enic->fdir.stats;
+}
+
+int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
int32_t pos;
struct enic_fdir_node *key;
@@ -92,23 +94,33 @@ int enic_fdir_del_fltr(struct enic *enic, struct rte_fdir_filter *params)
return 0;
}
-int enic_fdir_add_fltr(struct enic *enic, struct rte_fdir_filter *params,
- u16 queue, u8 drop)
+int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
struct enic_fdir_node *key;
- struct filter fltr = {.type = 0};
+ struct filter fltr = {0};
int32_t pos;
u8 do_free = 0;
u16 old_fltr_id = 0;
+ u32 flowtype_supported;
+ u16 flex_bytes;
+ u16 queue;
- if (!enic->fdir.hash || params->vlan_id || !params->l4type ||
- (RTE_FDIR_IPTYPE_IPV6 == params->iptype) ||
- (RTE_FDIR_L4TYPE_SCTP == params->l4type) ||
- params->flex_bytes || drop) {
+ flowtype_supported = (
+ (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type) ||
+ (RTE_ETH_FLOW_NONFRAG_IPV4_UDP == params->input.flow_type));
+
+ flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
+ (params->input.flow_ext.flexbytes[0] & 0xFF));
+
+ if (!enic->fdir.hash ||
+ (params->input.flow_ext.vlan_tci & 0xFFF) ||
+ !flowtype_supported || flex_bytes ||
+ params->action.behavior /* drop */) {
enic->fdir.stats.f_add++;
return -ENOTSUP;
}
+ queue = params->action.rx_queue;
/* See if the key is already there in the table */
pos = rte_hash_del_key(enic->fdir.hash, params);
switch (pos) {
@@ -121,9 +133,8 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_fdir_filter *params,
enic->fdir.stats.f_add++;
return -ENOSPC;
}
- key = (struct enic_fdir_node *)rte_zmalloc(
- "enic_fdir_node",
- sizeof(struct enic_fdir_node), 0);
+ key = rte_zmalloc("enic_fdir_node",
+ sizeof(struct enic_fdir_node), 0);
if (!key) {
enic->fdir.stats.f_add++;
return -ENOMEM;
@@ -169,12 +180,16 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_fdir_filter *params,
key->rq_index = queue;
fltr.type = FILTER_IPV4_5TUPLE;
- fltr.u.ipv4.src_addr = rte_be_to_cpu_32(params->ip_src.ipv4_addr);
- fltr.u.ipv4.dst_addr = rte_be_to_cpu_32(params->ip_dst.ipv4_addr);
- fltr.u.ipv4.src_port = rte_be_to_cpu_16(params->port_src);
- fltr.u.ipv4.dst_port = rte_be_to_cpu_16(params->port_dst);
-
- if (RTE_FDIR_L4TYPE_TCP == params->l4type)
+ fltr.u.ipv4.src_addr = rte_be_to_cpu_32(
+ params->input.flow.ip4_flow.src_ip);
+ fltr.u.ipv4.dst_addr = rte_be_to_cpu_32(
+ params->input.flow.ip4_flow.dst_ip);
+ fltr.u.ipv4.src_port = rte_be_to_cpu_16(
+ params->input.flow.udp4_flow.src_port);
+ fltr.u.ipv4.dst_port = rte_be_to_cpu_16(
+ params->input.flow.udp4_flow.dst_port);
+
+ if (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type)
fltr.u.ipv4.protocol = PROTO_TCP;
else
fltr.u.ipv4.protocol = PROTO_UDP;
@@ -197,7 +212,7 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_fdir_filter *params,
enic->fdir.stats.add++;
}
- pos = rte_hash_add_key(enic->fdir.hash, (void *)key);
+ pos = rte_hash_add_key(enic->fdir.hash, params);
enic->fdir.nodes[pos] = key;
return 0;
}
@@ -227,11 +242,10 @@ int enic_clsf_init(struct enic *enic)
struct rte_hash_parameters hash_params = {
.name = "enicpmd_clsf_hash",
.entries = ENICPMD_CLSF_HASH_ENTRIES,
- .bucket_entries = ENICPMD_CLSF_BUCKET_ENTRIES,
- .key_len = sizeof(struct rte_fdir_filter),
+ .key_len = sizeof(struct rte_eth_fdir_filter),
.hash_func = DEFAULT_HASH_FUNC,
.hash_func_init_val = 0,
- .socket_id = SOCKET_0,
+ .socket_id = SOCKET_ID_ANY,
};
enic->fdir.hash = rte_hash_create(&hash_params);
diff --git a/src/dpdk_lib18/librte_pmd_enic/enic_compat.h b/src/dpdk22/drivers/net/enic/enic_compat.h
index b1af838d..5dbd983b 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/enic_compat.h
+++ b/src/dpdk22/drivers/net/enic/enic_compat.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id$"
#ifndef _ENIC_COMPAT_H_
#define _ENIC_COMPAT_H_
@@ -41,8 +40,9 @@
#include <rte_atomic.h>
#include <rte_malloc.h>
+#include <rte_log.h>
-#define ENIC_PAGE_ALIGN 4096ULL
+#define ENIC_PAGE_ALIGN 4096UL
#define ENIC_ALIGN ENIC_PAGE_ALIGN
#define NAME_MAX 255
#define ETH_ALEN 6
@@ -67,7 +67,7 @@
#define pr_warn(y, args...) dev_warning(0, y, ##args)
#define BUG() pr_err("BUG at %s:%d", __func__, __LINE__)
-#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1)
+#define VNIC_ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1)
#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
#define udelay usleep
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
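
ALIGN() is renamed VNIC_ALIGN() to avoid clashing with identically named
macros elsewhere; the behavior is unchanged: round x up to the next multiple
of a power-of-two alignment. For example:

    unsigned int sz = VNIC_ALIGN(1000, 64);   /* (1000+63) & ~63 == 1024 */
    unsigned int n  = DIV_ROUND_UP(1000, 64); /* (1000+63) / 64  == 16   */
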
@@ -75,10 +75,13 @@
#define kzalloc(size, flags) calloc(1, size)
#define kfree(x) free(x)
-#define dev_err(x, args...) printf("rte_enic_pmd : Error - " args)
-#define dev_info(x, args...) printf("rte_enic_pmd: Info - " args)
-#define dev_warning(x, args...) printf("rte_enic_pmd: Warning - " args)
-#define dev_trace(x, args...) printf("rte_enic_pmd: Trace - " args)
+#define dev_printk(level, fmt, args...) \
+ RTE_LOG(level, PMD, "rte_enic_pmd: " fmt, ## args)
+
+#define dev_err(x, args...) dev_printk(ERR, args)
+#define dev_info(x, args...) dev_printk(INFO, args)
+#define dev_warning(x, args...) dev_printk(WARNING, args)
+#define dev_debug(x, args...) dev_printk(DEBUG, args)
#define __le16 u16
#define __le32 u32
diff --git a/src/dpdk_lib18/librte_pmd_enic/enic_ethdev.c b/src/dpdk22/drivers/net/enic/enic_ethdev.c
index 9cb6666b..2a880431 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/enic_ethdev.c
+++ b/src/dpdk22/drivers/net/enic/enic_ethdev.c
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id$"
#include <stdio.h>
#include <stdint.h>
@@ -48,13 +47,17 @@
#include "vnic_enet.h"
#include "enic.h"
+#ifdef RTE_LIBRTE_ENIC_DEBUG
#define ENICPMD_FUNC_TRACE() \
RTE_LOG(DEBUG, PMD, "ENICPMD trace: %s\n", __func__)
+#else
+#define ENICPMD_FUNC_TRACE() (void)0
+#endif
/*
* The set of PCI devices this driver supports
*/
-static struct rte_pci_id pci_id_enic_map[] = {
+static const struct rte_pci_id pci_id_enic_map[] = {
#define RTE_PCI_DEV_ID_DECL_ENIC(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#ifndef PCI_VENDOR_ID_CISCO
#define PCI_VENDOR_ID_CISCO 0x1137
@@ -65,33 +68,64 @@ RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF)
{.vendor_id = 0, /* Sentinel */},
};
-static int enicpmd_fdir_remove_perfect_filter(struct rte_eth_dev *eth_dev,
- struct rte_fdir_filter *fdir_filter,
- __rte_unused uint16_t soft_id)
+static int
+enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op, void *arg)
{
struct enic *enic = pmd_priv(eth_dev);
+ int ret = 0;
ENICPMD_FUNC_TRACE();
- return enic_fdir_del_fltr(enic, fdir_filter);
-}
-
-static int enicpmd_fdir_add_perfect_filter(struct rte_eth_dev *eth_dev,
- struct rte_fdir_filter *fdir_filter, __rte_unused uint16_t soft_id,
- uint8_t queue, uint8_t drop)
-{
- struct enic *enic = pmd_priv(eth_dev);
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
- ENICPMD_FUNC_TRACE();
- return enic_fdir_add_fltr(enic, fdir_filter, (uint16_t)queue, drop);
+ if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
+ return -EINVAL;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ case RTE_ETH_FILTER_UPDATE:
+ ret = enic_fdir_add_fltr(enic,
+ (struct rte_eth_fdir_filter *)arg);
+ break;
+
+ case RTE_ETH_FILTER_DELETE:
+ ret = enic_fdir_del_fltr(enic,
+ (struct rte_eth_fdir_filter *)arg);
+ break;
+
+ case RTE_ETH_FILTER_STATS:
+ enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
+ break;
+
+ case RTE_ETH_FILTER_FLUSH:
+ case RTE_ETH_FILTER_INFO:
+ dev_warning(enic, "unsupported operation %u", filter_op);
+ ret = -ENOTSUP;
+ break;
+ default:
+ dev_err(enic, "unknown operation %u", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
}
-static void enicpmd_fdir_info_get(struct rte_eth_dev *eth_dev,
- struct rte_eth_fdir *fdir)
+static int
+enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
{
- struct enic *enic = pmd_priv(eth_dev);
+ int ret = -EINVAL;
- ENICPMD_FUNC_TRACE();
- *fdir = enic->fdir.stats;
+ if (RTE_ETH_FILTER_FDIR == filter_type)
+ ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
+ else
+ dev_warning(enic, "Filter type (%d) not supported",
+ filter_type);
+
+ return ret;
}
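
The per-operation fdir callbacks are folded into the single filter_ctrl entry
point used by DPDK 2.2. A sketch of an application adding a perfect filter
through it (port, queue, and address values are placeholders; note that drop
actions return -ENOTSUP in the enic code above):

    struct rte_eth_fdir_filter f;
    uint8_t port_id = 0;
    int ret;

    memset(&f, 0, sizeof(f));
    f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
    f.input.flow.ip4_flow.src_ip = rte_cpu_to_be_32(0x0a000001); /* 10.0.0.1 */
    f.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(5000);
    f.action.rx_queue = 1;            /* action.behavior stays "accept" */
    ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                                  RTE_ETH_FILTER_ADD, &f);
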
static void enicpmd_dev_tx_queue_release(void *txq)
@@ -159,6 +193,7 @@ static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
ENICPMD_FUNC_TRACE();
enic_start_wq(enic, queue_idx);
+ eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
@@ -174,6 +209,8 @@ static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
ret = enic_stop_wq(enic, queue_idx);
if (ret)
dev_err(enic, "error in stopping wq %d\n", queue_idx);
+ else
+ eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
return ret;
}
@@ -186,6 +223,7 @@ static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
ENICPMD_FUNC_TRACE();
enic_start_rq(enic, queue_idx);
+ eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
@@ -201,6 +239,8 @@ static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
ret = enic_stop_rq(enic, queue_idx);
if (ret)
dev_err(enic, "error in stopping rq %d\n", queue_idx);
+ else
+ eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
return ret;
}
@@ -237,13 +277,14 @@ static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
uint16_t vlan_id, int on)
{
struct enic *enic = pmd_priv(eth_dev);
+ int err;
ENICPMD_FUNC_TRACE();
if (on)
- enic_add_vlan(enic, vlan_id);
+ err = enic_add_vlan(enic, vlan_id);
else
- enic_del_vlan(enic, vlan_id);
- return 0;
+ err = enic_del_vlan(enic, vlan_id);
+ return err;
}
static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
@@ -454,21 +495,26 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
unsigned int seg_len;
unsigned int inc_len;
unsigned int nb_segs;
- struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *tx_pkt, *next_tx_pkt;
struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
struct enic *enic = vnic_dev_priv(wq->vdev);
unsigned short vlan_id;
unsigned short ol_flags;
+ uint8_t last_seg, eop;
for (index = 0; index < nb_pkts; index++) {
tx_pkt = *tx_pkts++;
inc_len = 0;
nb_segs = tx_pkt->nb_segs;
if (nb_segs > vnic_wq_desc_avail(wq)) {
+ if (index > 0)
+ enic_post_wq_index(wq);
+
/* wq cleanup and try again */
if (!enic_cleanup_wq(enic, wq) ||
- (nb_segs > vnic_wq_desc_avail(wq)))
+ (nb_segs > vnic_wq_desc_avail(wq))) {
return index;
+ }
}
pkt_len = tx_pkt->pkt_len;
vlan_id = tx_pkt->vlan_tci;
@@ -476,14 +522,15 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
for (frags = 0; inc_len < pkt_len; frags++) {
if (!tx_pkt)
break;
+ next_tx_pkt = tx_pkt->next;
seg_len = tx_pkt->data_len;
inc_len += seg_len;
- if (enic_send_pkt(enic, wq, tx_pkt,
- (unsigned short)seg_len, !frags,
- (pkt_len == inc_len), ol_flags, vlan_id)) {
- break;
- }
- tx_pkt = tx_pkt->next;
+ eop = (pkt_len == inc_len) || (!next_tx_pkt);
+ last_seg = eop &&
+ (index == ((unsigned int)nb_pkts - 1));
+ enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
+ !frags, eop, last_seg, ol_flags, vlan_id);
+ tx_pkt = next_tx_pkt;
}
}
@@ -503,7 +550,7 @@ static uint16_t enicpmd_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return work_done;
}
-static struct eth_dev_ops enicpmd_eth_dev_ops = {
+static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.dev_configure = enicpmd_dev_configure,
.dev_start = enicpmd_dev_start,
.dev_stop = enicpmd_dev_stop,
@@ -541,23 +588,14 @@ static struct eth_dev_ops enicpmd_eth_dev_ops = {
.priority_flow_ctrl_set = NULL,
.mac_addr_add = enicpmd_add_mac_addr,
.mac_addr_remove = enicpmd_remove_mac_addr,
- .fdir_add_signature_filter = NULL,
- .fdir_update_signature_filter = NULL,
- .fdir_remove_signature_filter = NULL,
- .fdir_infos_get = enicpmd_fdir_info_get,
- .fdir_add_perfect_filter = enicpmd_fdir_add_perfect_filter,
- .fdir_update_perfect_filter = enicpmd_fdir_add_perfect_filter,
- .fdir_remove_perfect_filter = enicpmd_fdir_remove_perfect_filter,
- .fdir_set_masks = NULL,
+ .filter_ctrl = enicpmd_dev_filter_ctrl,
};
struct enic *enicpmd_list_head = NULL;
/* Initialize the driver
* It returns 0 on success.
*/
-static int eth_enicpmd_dev_init(
- __attribute__((unused))struct eth_driver *eth_drv,
- struct rte_eth_dev *eth_dev)
+static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pdev;
struct rte_pci_addr *addr;
@@ -565,12 +603,14 @@ static int eth_enicpmd_dev_init(
ENICPMD_FUNC_TRACE();
+ enic->port_id = eth_dev->data->port_id;
enic->rte_dev = eth_dev;
eth_dev->dev_ops = &enicpmd_eth_dev_ops;
eth_dev->rx_pkt_burst = &enicpmd_recv_pkts;
eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;
pdev = eth_dev->pci_dev;
+ rte_eth_copy_pci_info(eth_dev, pdev);
enic->pdev = pdev;
addr = &pdev->addr;
@@ -581,7 +621,7 @@ static int eth_enicpmd_dev_init(
}
static struct eth_driver rte_enic_pmd = {
- {
+ .pci_drv = {
.name = "rte_enic_pmd",
.id_table = pci_id_enic_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
diff --git a/src/dpdk_lib18/librte_pmd_enic/enic_main.c b/src/dpdk22/drivers/net/enic/enic_main.c
index 8ab8e44b..07a98101 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/enic_main.c
+++ b/src/dpdk22/drivers/net/enic/enic_main.c
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id$"
#include <stdio.h>
@@ -59,6 +58,7 @@
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"
+#include "enic_vnic_wq.h"
static inline int enic_is_sriov_vf(struct enic *enic)
{
@@ -152,15 +152,18 @@ unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq)
-1 /*wq_work_to_do*/, enic_wq_service, NULL);
}
+void enic_post_wq_index(struct vnic_wq *wq)
+{
+ enic_vnic_post_wq_index(wq);
+}
-int enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
- struct rte_mbuf *tx_pkt, unsigned short len,
- uint8_t sop, uint8_t eop,
- uint16_t ol_flags, uint16_t vlan_tag)
+void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
+ struct rte_mbuf *tx_pkt, unsigned short len,
+ uint8_t sop, uint8_t eop, uint8_t cq_entry,
+ uint16_t ol_flags, uint16_t vlan_tag)
{
struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
uint16_t mss = 0;
- uint8_t cq_entry = eop;
uint8_t vlan_tag_insert = 0;
uint64_t bus_addr = (dma_addr_t)
(tx_pkt->buf_physaddr + RTE_PKTMBUF_HEADROOM);
@@ -191,14 +194,12 @@ int enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
vlan_tag,
0 /* loopback */);
- vnic_wq_post(wq, (void *)tx_pkt, bus_addr, len,
- sop, eop,
- 1 /*desc_skip_cnt*/,
- cq_entry,
- 0 /*compressed send*/,
- 0 /*wrid*/);
-
- return 0;
+ enic_vnic_post_wq(wq, (void *)tx_pkt, bus_addr, len,
+ sop,
+ 1 /*desc_skip_cnt*/,
+ cq_entry,
+ 0 /*compressed send*/,
+ 0 /*wrid*/);
}
void enic_dev_stats_clear(struct enic *enic)
@@ -211,7 +212,6 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
struct vnic_stats *stats;
- memset(r_stats, 0, sizeof(*r_stats));
if (vnic_dev_stats_dump(enic->vdev, &stats)) {
dev_err(enic, "Error in getting stats\n");
return;
@@ -345,7 +345,7 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
hdr_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
hdr_mbuf->nb_segs = 2;
- hdr_mbuf->port = rq->index;
+ hdr_mbuf->port = enic->port_id;
hdr_mbuf->next = mbuf;
dma_addr = (dma_addr_t)
@@ -360,7 +360,7 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
type = RQ_ENET_TYPE_NOT_SOP;
} else {
mbuf->nb_segs = 1;
- mbuf->port = rq->index;
+ mbuf->port = enic->port_id;
}
mbuf->data_off = RTE_PKTMBUF_HEADROOM;
@@ -424,7 +424,7 @@ static int enic_rq_indicate_buf(struct vnic_rq *rq,
rx_pkt->pkt_len = bytes_written;
if (ipv4) {
- rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
+ rx_pkt->packet_type = RTE_PTYPE_L3_IPV4;
if (!csum_not_calc) {
if (unlikely(!ipv4_csum_ok))
rx_pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;
@@ -433,7 +433,7 @@ static int enic_rq_indicate_buf(struct vnic_rq *rq,
rx_pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
} else if (ipv6)
- rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
+ rx_pkt->packet_type = RTE_PTYPE_L3_IPV6;
} else {
/* Header split */
if (sop && !eop) {
@@ -446,7 +446,7 @@ static int enic_rq_indicate_buf(struct vnic_rq *rq,
*rx_pkt_bucket = rx_pkt;
rx_pkt->pkt_len = bytes_written;
if (ipv4) {
- rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
+ rx_pkt->packet_type = RTE_PTYPE_L3_IPV4;
if (!csum_not_calc) {
if (unlikely(!ipv4_csum_ok))
rx_pkt->ol_flags |=
@@ -458,13 +458,14 @@ static int enic_rq_indicate_buf(struct vnic_rq *rq,
PKT_RX_L4_CKSUM_BAD;
}
} else if (ipv6)
- rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
+ rx_pkt->packet_type = RTE_PTYPE_L3_IPV6;
} else {
/* Payload */
hdr_rx_pkt = *rx_pkt_bucket;
hdr_rx_pkt->pkt_len += bytes_written;
if (ipv4) {
- hdr_rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
+ hdr_rx_pkt->packet_type =
+ RTE_PTYPE_L3_IPV4;
if (!csum_not_calc) {
if (unlikely(!ipv4_csum_ok))
hdr_rx_pkt->ol_flags |=
@@ -476,8 +477,8 @@ static int enic_rq_indicate_buf(struct vnic_rq *rq,
PKT_RX_L4_CKSUM_BAD;
}
} else if (ipv6)
- hdr_rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
-
+ hdr_rx_pkt->packet_type =
+ RTE_PTYPE_L3_IPV6;
}
}
}
@@ -539,7 +540,7 @@ enic_alloc_consistent(__rte_unused void *priv, size_t size,
*dma_handle = 0;
rz = rte_memzone_reserve_aligned((const char *)name,
- size, 0, 0, ENIC_ALIGN);
+ size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
if (!rz) {
pr_err("%s : Failed to allocate memory requested for %s",
__func__, name);
@@ -841,10 +842,12 @@ static int enic_set_rsskey(struct enic *enic)
dma_addr_t rss_key_buf_pa;
union vnic_rss_key *rss_key_buf_va = NULL;
static union vnic_rss_key rss_key = {
- .key[0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}},
- .key[1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}},
- .key[2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}},
- .key[3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}},
+ .key = {
+ [0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}},
+ [1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}},
+ [2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}},
+ [3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}},
+ }
};
int err;
u8 name[NAME_MAX];
@@ -982,8 +985,7 @@ static void enic_dev_deinit(struct enic *enic)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
- if (eth_dev->data->mac_addrs)
- rte_free(eth_dev->data->mac_addrs);
+ rte_free(eth_dev->data->mac_addrs);
}
@@ -1047,7 +1049,7 @@ int enic_probe(struct enic *enic)
struct rte_pci_device *pdev = enic->pdev;
int err = -1;
- dev_info(enic, " Initializing ENIC PMD version %s\n", DRV_VERSION);
+ dev_debug(enic, " Initializing ENIC PMD version %s\n", DRV_VERSION);
enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
enic->bar0.len = pdev->mem_resource[0].len;
diff --git a/src/dpdk_lib18/librte_pmd_enic/enic_res.c b/src/dpdk22/drivers/net/enic/enic_res.c
index 12a337c0..ebe379dd 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/enic_res.c
+++ b/src/dpdk22/drivers/net/enic/enic_res.c
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: enic_res.c 171146 2014-05-02 07:08:20Z ssujith $"
#include "enic_compat.h"
#include "rte_ethdev.h"
diff --git a/src/dpdk_lib18/librte_pmd_enic/enic_res.h b/src/dpdk22/drivers/net/enic/enic_res.h
index ea60f6a3..49f7e22d 100755..100644
--- a/src/dpdk_lib18/librte_pmd_enic/enic_res.h
+++ b/src/dpdk22/drivers/net/enic/enic_res.h
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ident "$Id: enic_res.h 173137 2014-05-16 03:27:22Z sanpilla $"
#ifndef _ENIC_RES_H_
#define _ENIC_RES_H_
diff --git a/src/dpdk22/drivers/net/fm10k/base/fm10k_api.c b/src/dpdk22/drivers/net/fm10k/base/fm10k_api.c
new file mode 100644
index 00000000..eb5bdaa2
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/base/fm10k_api.c
@@ -0,0 +1,361 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "fm10k_api.h"
+#include "fm10k_common.h"
+
+/**
+ * fm10k_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+s32 fm10k_set_mac_type(struct fm10k_hw *hw)
+{
+ s32 ret_val = FM10K_SUCCESS;
+
+ DEBUGFUNC("fm10k_set_mac_type");
+
+ if (hw->vendor_id != FM10K_INTEL_VENDOR_ID) {
+ ERROR_REPORT2(FM10K_ERROR_UNSUPPORTED,
+ "Unsupported vendor id: %x\n", hw->vendor_id);
+ return FM10K_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ switch (hw->device_id) {
+ case FM10K_DEV_ID_PF:
+#ifdef BOULDER_RAPIDS_HW
+ case FM10K_DEV_ID_SDI_FM10420_QDA2:
+#endif /* BOULDER_RAPIDS_HW */
+#ifdef ATWOOD_CHANNEL_HW
+ case FM10K_DEV_ID_SDI_FM10420_DA2:
+#endif /* ATWOOD_CHANNEL_HW */
+ hw->mac.type = fm10k_mac_pf;
+ break;
+ case FM10K_DEV_ID_VF:
+ hw->mac.type = fm10k_mac_vf;
+ break;
+ default:
+ ret_val = FM10K_ERR_DEVICE_NOT_SUPPORTED;
+ ERROR_REPORT2(FM10K_ERROR_UNSUPPORTED,
+ "Unsupported device id: %x\n",
+ hw->device_id);
+ break;
+ }
+
+ DEBUGOUT2("fm10k_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, ret_val);
+
+ return ret_val;
+}
+
+/**
+ * fm10k_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers and assign the MAC type and PHY code.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The fm10k_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+s32 fm10k_init_shared_code(struct fm10k_hw *hw)
+{
+ s32 status;
+
+ DEBUGFUNC("fm10k_init_shared_code");
+
+ /* Set the mac type */
+ fm10k_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case fm10k_mac_pf:
+ status = fm10k_init_ops_pf(hw);
+ break;
+ case fm10k_mac_vf:
+ status = fm10k_init_ops_vf(hw);
+ break;
+ default:
+ status = FM10K_ERR_DEVICE_NOT_SUPPORTED;
+ break;
+ }
+
+ return status;
+}
+
+#define fm10k_call_func(hw, func, params, error) \
+ ((func) ? (func params) : (error))
+
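
fm10k_call_func() is the NULL-safe dispatch used by every wrapper below; for
example, fm10k_reset_hw() effectively expands to:

    /* Illustrative expansion of fm10k_call_func() for reset_hw */
    s32 ret = hw->mac.ops.reset_hw ? hw->mac.ops.reset_hw(hw)
                                   : FM10K_NOT_IMPLEMENTED;
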
+/**
+ * fm10k_reset_hw - Reset the hardware to known good state
+ * @hw: pointer to hardware structure
+ *
+ * This function should return the hardware to a state similar to the
+ * one it is in after being powered on.
+ **/
+s32 fm10k_reset_hw(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.reset_hw, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_init_hw - Initialize the hardware
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting and then starting the hardware
+ **/
+s32 fm10k_init_hw(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.init_hw, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_stop_hw - Prepares hardware to shutdown Rx/Tx
+ * @hw: pointer to hardware structure
+ *
+ * Disables Rx/Tx queues and disables the DMA engine.
+ **/
+s32 fm10k_stop_hw(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.stop_hw, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_start_hw - Prepares hardware for Rx/Tx
+ * @hw: pointer to hardware structure
+ *
+ * This function sets the flags indicating that the hardware is ready to
+ * begin operation.
+ **/
+s32 fm10k_start_hw(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.start_hw, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_get_bus_info - Set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets the PCI bus info (speed, width, type) within the fm10k_hw structure
+ **/
+s32 fm10k_get_bus_info(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.get_bus_info, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_is_slot_appropriate - Indicate appropriate slot for this SKU
+ * @hw: pointer to hardware structure
+ *
+ * Looks at the PCIe bus info to confirm whether or not this slot can support
+ * the necessary bandwidth for this device.
+ **/
+bool fm10k_is_slot_appropriate(struct fm10k_hw *hw)
+{
+ if (hw->mac.ops.is_slot_appropriate)
+ return hw->mac.ops.is_slot_appropriate(hw);
+ return true;
+}
+
+/**
+ * fm10k_update_vlan - Add or clear VLAN ID in VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vid: VLAN ID to add to table
+ * @idx: Index indicating VF ID or PF ID in table
+ * @set: Indicates if this is a set or clear operation
+ *
+ * This function adds or removes the corresponding VLAN ID from the VLAN
+ * filter table for the corresponding function.
+ **/
+s32 fm10k_update_vlan(struct fm10k_hw *hw, u32 vid, u8 idx, bool set)
+{
+ return fm10k_call_func(hw, hw->mac.ops.update_vlan, (hw, vid, idx, set),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_read_mac_addr - Reads MAC address
+ * @hw: pointer to hardware structure
+ *
+ * Reads the MAC address out of the interface and stores it in the HW
+ * structures.
+ **/
+s32 fm10k_read_mac_addr(struct fm10k_hw *hw)
+{
+ return fm10k_call_func(hw, hw->mac.ops.read_mac_addr, (hw),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_update_hw_stats - Update hw statistics
+ * @hw: pointer to hardware structure
+ * @stats: pointer to the statistics structure being updated
+ *
+ * This function updates statistics that are related to hardware.
+ **/
+void fm10k_update_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats)
+{
+ if (hw->mac.ops.update_hw_stats)
+ hw->mac.ops.update_hw_stats(hw, stats);
+}
+
+/**
+ * fm10k_rebind_hw_stats - Reset base for hw statistics
+ * @hw: pointer to hardware structure
+ * @stats: pointer to the statistics structure holding the base values
+ *
+ * This function resets the base for statistics that are related to hardware.
+ **/
+void fm10k_rebind_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats)
+{
+ if (hw->mac.ops.rebind_hw_stats)
+ hw->mac.ops.rebind_hw_stats(hw, stats);
+}
+
+/**
+ * fm10k_configure_dglort_map - Configures GLORT entry and queues
+ * @hw: pointer to hardware structure
+ * @dglort: pointer to dglort configuration structure
+ *
+ * Reads the configuration structure contained in dglort_cfg and uses
+ * that information to then populate a DGLORTMAP/DEC entry and the queues
+ * to which it has been assigned.
+ **/
+s32 fm10k_configure_dglort_map(struct fm10k_hw *hw,
+ struct fm10k_dglort_cfg *dglort)
+{
+ return fm10k_call_func(hw, hw->mac.ops.configure_dglort_map,
+ (hw, dglort), FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_set_dma_mask - Configures PhyAddrSpace to limit DMA to system
+ * @hw: pointer to hardware structure
+ * @dma_mask: 64 bit DMA mask required for platform
+ *
+ * This function configures the endpoint to limit the access to memory
+ * beyond what is physically in the system.
+ **/
+void fm10k_set_dma_mask(struct fm10k_hw *hw, u64 dma_mask)
+{
+ if (hw->mac.ops.set_dma_mask)
+ hw->mac.ops.set_dma_mask(hw, dma_mask);
+}
+
+/**
+ * fm10k_get_fault - Record a fault in one of the interface units
+ * @hw: pointer to hardware structure
+ * @type: fault type register offset
+ * @fault: pointer to memory location to record the fault
+ *
+ * Record the fault register contents to the fault data structure and
+ * clear the entry from the register.
+ *
+ * Returns ERR_PARAM if invalid register is specified or no error is present.
+ **/
+s32 fm10k_get_fault(struct fm10k_hw *hw, int type, struct fm10k_fault *fault)
+{
+ return fm10k_call_func(hw, hw->mac.ops.get_fault, (hw, type, fault),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_update_uc_addr - Update device unicast address
+ * @hw: pointer to the HW structure
+ * @lport: logical port ID to update - unused
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ * @flags: flags field to indicate add and secure - unused
+ *
+ * This function is used to add or remove unicast MAC addresses
+ **/
+s32 fm10k_update_uc_addr(struct fm10k_hw *hw, u16 lport,
+ const u8 *mac, u16 vid, bool add, u8 flags)
+{
+ return fm10k_call_func(hw, hw->mac.ops.update_uc_addr,
+ (hw, lport, mac, vid, add, flags),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_update_mc_addr - Update device multicast address
+ * @hw: pointer to the HW structure
+ * @lport: logical port ID to update - unused
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ *
+ * This function is used to add or remove multicast MAC addresses
+ **/
+s32 fm10k_update_mc_addr(struct fm10k_hw *hw, u16 lport,
+ const u8 *mac, u16 vid, bool add)
+{
+ return fm10k_call_func(hw, hw->mac.ops.update_mc_addr,
+ (hw, lport, mac, vid, add),
+ FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_adjust_systime - Adjust systime frequency
+ * @hw: pointer to hardware structure
+ * @ppb: adjustment rate in parts per billion
+ *
+ * This function is meant to update the frequency of the clock represented
+ * by the SYSTIME register.
+ **/
+s32 fm10k_adjust_systime(struct fm10k_hw *hw, s32 ppb)
+{
+ return fm10k_call_func(hw, hw->mac.ops.adjust_systime,
+ (hw, ppb), FM10K_NOT_IMPLEMENTED);
+}
+
+/**
+ * fm10k_notify_offset - Notify switch of change in PTP offset
+ * @hw: pointer to hardware structure
+ * @offset: 64-bit unsigned offset from hardware SYSTIME value
+ *
+ * This function is meant to notify switch of change in the PTP offset for
+ * the hardware SYSTIME registers.
+ **/
+s32 fm10k_notify_offset(struct fm10k_hw *hw, u64 offset)
+{
+ return fm10k_call_func(hw, hw->mac.ops.notify_offset,
+ (hw, offset), FM10K_NOT_IMPLEMENTED);
+}
diff --git a/src/dpdk22/drivers/net/fm10k/base/fm10k_api.h b/src/dpdk22/drivers/net/fm10k/base/fm10k_api.h
new file mode 100644
index 00000000..113aef5f
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/base/fm10k_api.h
@@ -0,0 +1,62 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_API_H_
+#define _FM10K_API_H_
+
+#include "fm10k_pf.h"
+#include "fm10k_vf.h"
+
+s32 fm10k_set_mac_type(struct fm10k_hw *hw);
+s32 fm10k_reset_hw(struct fm10k_hw *hw);
+s32 fm10k_init_hw(struct fm10k_hw *hw);
+s32 fm10k_stop_hw(struct fm10k_hw *hw);
+s32 fm10k_start_hw(struct fm10k_hw *hw);
+s32 fm10k_init_shared_code(struct fm10k_hw *hw);
+s32 fm10k_get_bus_info(struct fm10k_hw *hw);
+bool fm10k_is_slot_appropriate(struct fm10k_hw *hw);
+s32 fm10k_update_vlan(struct fm10k_hw *hw, u32 vid, u8 idx, bool set);
+s32 fm10k_read_mac_addr(struct fm10k_hw *hw);
+void fm10k_update_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats);
+void fm10k_rebind_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats);
+s32 fm10k_configure_dglort_map(struct fm10k_hw *hw,
+ struct fm10k_dglort_cfg *dglort);
+void fm10k_set_dma_mask(struct fm10k_hw *hw, u64 dma_mask);
+s32 fm10k_get_fault(struct fm10k_hw *hw, int type, struct fm10k_fault *fault);
+s32 fm10k_update_uc_addr(struct fm10k_hw *hw, u16 lport,
+ const u8 *mac, u16 vid, bool add, u8 flags);
+s32 fm10k_update_mc_addr(struct fm10k_hw *hw, u16 lport,
+ const u8 *mac, u16 vid, bool add);
+s32 fm10k_adjust_systime(struct fm10k_hw *hw, s32 ppb);
+s32 fm10k_notify_offset(struct fm10k_hw *hw, u64 offset);
+#endif /* _FM10K_API_H_ */
diff --git a/src/dpdk22/drivers/net/fm10k/base/fm10k_common.c b/src/dpdk22/drivers/net/fm10k/base/fm10k_common.c
new file mode 100644
index 00000000..a90d2f0b
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/base/fm10k_common.c
@@ -0,0 +1,572 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "fm10k_common.h"
+
+/**
+ * fm10k_get_bus_info_generic - Generic routine to set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Gets the PCI bus info (speed, width, type) then calls helper function to
+ * store this data within the fm10k_hw structure.
+ **/
+STATIC s32 fm10k_get_bus_info_generic(struct fm10k_hw *hw)
+{
+ u16 link_cap, link_status, device_cap, device_control;
+
+ DEBUGFUNC("fm10k_get_bus_info_generic");
+
+ /* Get the maximum link width and speed from PCIe config space */
+ link_cap = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_LINK_CAP);
+
+ switch (link_cap & FM10K_PCIE_LINK_WIDTH) {
+ case FM10K_PCIE_LINK_WIDTH_1:
+ hw->bus_caps.width = fm10k_bus_width_pcie_x1;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_2:
+ hw->bus_caps.width = fm10k_bus_width_pcie_x2;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_4:
+ hw->bus_caps.width = fm10k_bus_width_pcie_x4;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_8:
+ hw->bus_caps.width = fm10k_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus_caps.width = fm10k_bus_width_unknown;
+ break;
+ }
+
+ switch (link_cap & FM10K_PCIE_LINK_SPEED) {
+ case FM10K_PCIE_LINK_SPEED_2500:
+ hw->bus_caps.speed = fm10k_bus_speed_2500;
+ break;
+ case FM10K_PCIE_LINK_SPEED_5000:
+ hw->bus_caps.speed = fm10k_bus_speed_5000;
+ break;
+ case FM10K_PCIE_LINK_SPEED_8000:
+ hw->bus_caps.speed = fm10k_bus_speed_8000;
+ break;
+ default:
+ hw->bus_caps.speed = fm10k_bus_speed_unknown;
+ break;
+ }
+
+ /* Get the PCIe maximum payload size for the PCIe function */
+ device_cap = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_DEV_CAP);
+
+ switch (device_cap & FM10K_PCIE_DEV_CAP_PAYLOAD) {
+ case FM10K_PCIE_DEV_CAP_PAYLOAD_128:
+ hw->bus_caps.payload = fm10k_bus_payload_128;
+ break;
+ case FM10K_PCIE_DEV_CAP_PAYLOAD_256:
+ hw->bus_caps.payload = fm10k_bus_payload_256;
+ break;
+ case FM10K_PCIE_DEV_CAP_PAYLOAD_512:
+ hw->bus_caps.payload = fm10k_bus_payload_512;
+ break;
+ default:
+ hw->bus_caps.payload = fm10k_bus_payload_unknown;
+ break;
+ }
+
+ /* Get the negotiated link width and speed from PCIe config space */
+ link_status = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_LINK_STATUS);
+
+ switch (link_status & FM10K_PCIE_LINK_WIDTH) {
+ case FM10K_PCIE_LINK_WIDTH_1:
+ hw->bus.width = fm10k_bus_width_pcie_x1;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_2:
+ hw->bus.width = fm10k_bus_width_pcie_x2;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_4:
+ hw->bus.width = fm10k_bus_width_pcie_x4;
+ break;
+ case FM10K_PCIE_LINK_WIDTH_8:
+ hw->bus.width = fm10k_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = fm10k_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & FM10K_PCIE_LINK_SPEED) {
+ case FM10K_PCIE_LINK_SPEED_2500:
+ hw->bus.speed = fm10k_bus_speed_2500;
+ break;
+ case FM10K_PCIE_LINK_SPEED_5000:
+ hw->bus.speed = fm10k_bus_speed_5000;
+ break;
+ case FM10K_PCIE_LINK_SPEED_8000:
+ hw->bus.speed = fm10k_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = fm10k_bus_speed_unknown;
+ break;
+ }
+
+ /* Get the negotiated PCIe maximum payload size for the PCIe function */
+ device_control = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_DEV_CTRL);
+
+ switch (device_control & FM10K_PCIE_DEV_CTRL_PAYLOAD) {
+ case FM10K_PCIE_DEV_CTRL_PAYLOAD_128:
+ hw->bus.payload = fm10k_bus_payload_128;
+ break;
+ case FM10K_PCIE_DEV_CTRL_PAYLOAD_256:
+ hw->bus.payload = fm10k_bus_payload_256;
+ break;
+ case FM10K_PCIE_DEV_CTRL_PAYLOAD_512:
+ hw->bus.payload = fm10k_bus_payload_512;
+ break;
+ default:
+ hw->bus.payload = fm10k_bus_payload_unknown;
+ break;
+ }
+
+ return FM10K_SUCCESS;
+}
+
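+/**
+ * fm10k_get_pcie_msix_count_generic - Gets MSI-X vector count
+ * @hw: pointer to the HW structure
+ *
+ * Read PCIe configuration space, and get the MSI-X vector count from
+ * the capabilities table.
+ **/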
+u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw)
+{
+ u16 msix_count;
+
+ DEBUGFUNC("fm10k_get_pcie_msix_count_generic");
+
+ /* read in value from MSI-X capability register */
+ msix_count = FM10K_READ_PCI_WORD(hw, FM10K_PCI_MSIX_MSG_CTRL);
+ msix_count &= FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK;
+
+ /* MSI-X count is zero-based in HW */
+ msix_count++;
+
+ if (msix_count > FM10K_MAX_MSIX_VECTORS)
+ msix_count = FM10K_MAX_MSIX_VECTORS;
+
+ return msix_count;
+}
+
+/**
+ * fm10k_init_ops_generic - Inits function ptrs
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the function pointers.
+ **/
+s32 fm10k_init_ops_generic(struct fm10k_hw *hw)
+{
+ struct fm10k_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("fm10k_init_ops_generic");
+
+ /* MAC */
+ mac->ops.get_bus_info = &fm10k_get_bus_info_generic;
+
+ /* initialize GLORT state to avoid any false hits */
+ mac->dglort_map = FM10K_DGLORTMAP_NONE;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_start_hw_generic - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * This function sets the Tx ready flag to indicate that the Tx path has
+ * been initialized.
+ **/
+s32 fm10k_start_hw_generic(struct fm10k_hw *hw)
+{
+ DEBUGFUNC("fm10k_start_hw_generic");
+
+ /* set flag indicating we are beginning Tx */
+ hw->mac.tx_ready = true;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_disable_queues_generic - Stop Tx/Rx queues
+ * @hw: pointer to hardware structure
+ * @q_cnt: number of queues to be disabled
+ *
+ * This function clears the Tx and Rx enable bits for up to q_cnt queue
+ * pairs and waits for every queue to report itself disabled.
+ **/
+s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt)
+{
+ u32 reg;
+ u16 i, time;
+
+ DEBUGFUNC("fm10k_disable_queues_generic");
+
+ /* clear tx_ready to prevent any false hits for reset */
+ hw->mac.tx_ready = false;
+
+ /* clear the enable bit for all rings */
+ for (i = 0; i < q_cnt; i++) {
+ reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i));
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(i),
+ reg & ~FM10K_TXDCTL_ENABLE);
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i));
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(i),
+ reg & ~FM10K_RXQCTL_ENABLE);
+ }
+
+ FM10K_WRITE_FLUSH(hw);
+ usec_delay(1);
+
+ /* loop through all queues to verify that they are all disabled */
+ for (i = 0, time = FM10K_QUEUE_DISABLE_TIMEOUT; time;) {
+ /* if we are at end of rings all rings are disabled */
+ if (i == q_cnt)
+ return FM10K_SUCCESS;
+
+ /* if queue enables cleared, then move to next ring pair */
+ reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i));
+ if (!~reg || !(reg & FM10K_TXDCTL_ENABLE)) {
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i));
+ if (!~reg || !(reg & FM10K_RXQCTL_ENABLE)) {
+ i++;
+ continue;
+ }
+ }
+
+ /* decrement time and wait 1 usec */
+ time--;
+ if (time)
+ usec_delay(1);
+ }
+
+ return FM10K_ERR_REQUESTS_PENDING;
+}
+
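+/* Note on the wait loop above: a register that reads back as all 1s
+ * (the !~reg checks) is treated as already disabled; that is the
+ * read-back value of a surprise-removed device, so the timeout loop
+ * does not spin on hardware that is gone.
+ */
+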
+/**
+ * fm10k_stop_hw_generic - Stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * This function stops the hardware by disabling all of its Tx/Rx queues.
+ **/
+s32 fm10k_stop_hw_generic(struct fm10k_hw *hw)
+{
+ DEBUGFUNC("fm10k_stop_hw_generic");
+
+ return fm10k_disable_queues_generic(hw, hw->mac.max_queues);
+}
+
+/**
+ * fm10k_read_hw_stats_32b - Reads value of 32-bit registers
+ * @hw: pointer to the hardware structure
+ * @addr: address of register containing a 32-bit value
+ * @stat: pointer to the hardware statistic structure holding the base value
+ *
+ * Function reads the content of the register and returns the delta
+ * between the base and the current value.
+ **/
+u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
+ struct fm10k_hw_stat *stat)
+{
+ u32 delta = FM10K_READ_REG(hw, addr) - stat->base_l;
+
+ DEBUGFUNC("fm10k_read_hw_stats_32b");
+
+ if (FM10K_REMOVED(hw->hw_addr))
+ stat->base_h = 0;
+
+ return delta;
+}
+
+/**
+ * fm10k_read_hw_stats_48b - Reads value of 48-bit registers
+ * @hw: pointer to the hardware structure
+ * @addr: address of register containing the lower 32-bit value
+ * @stat: pointer to the hardware statistic structure holding the base value
+ *
+ * Function reads the content of 2 registers, combined to represent a 48-bit
+ * statistical value. Extra processing is required to handle overflow.
+ * Finally, a delta value is returned representing the difference between
+ * the values stored in the registers and the values stored in the statistic
+ * counters.
+ **/
+STATIC u64 fm10k_read_hw_stats_48b(struct fm10k_hw *hw, u32 addr,
+ struct fm10k_hw_stat *stat)
+{
+ u32 count_l;
+ u32 count_h;
+ u32 count_tmp;
+ u64 delta;
+
+ DEBUGFUNC("fm10k_read_hw_stats_48b");
+
+ count_h = FM10K_READ_REG(hw, addr + 1);
+
+ /* Check for overflow */
+ do {
+ count_tmp = count_h;
+ count_l = FM10K_READ_REG(hw, addr);
+ count_h = FM10K_READ_REG(hw, addr + 1);
+ } while (count_h != count_tmp);
+
+ delta = ((u64)(count_h - stat->base_h) << 32) + count_l;
+ delta -= stat->base_l;
+
+ return delta & FM10K_48_BIT_MASK;
+}
+
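+/* Worked example for the overflow handling above (values are assumed):
+ * re-reading the high DWORD around the low DWORD read guarantees a
+ * consistent 48-bit snapshot. With base_h:base_l = 0x1:0xFFFFFF00 and a
+ * current count of 0x2_00000010, delta = ((0x2 - 0x1) << 32) + 0x10 -
+ * 0xFFFFFF00 = 0x110, exactly the number of events since the last rebase.
+ */
+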
+/**
+ * fm10k_update_hw_base_48b - Updates 48-bit statistic base value
+ * @stat: pointer to the hardware statistic structure
+ * @delta: value to be updated into the hardware statistic structure
+ *
+ * Function receives a value and determines if an update is required based on
+ * a delta calculation. Only the base value will be updated.
+ **/
+STATIC void fm10k_update_hw_base_48b(struct fm10k_hw_stat *stat, u64 delta)
+{
+ DEBUGFUNC("fm10k_update_hw_base_48b");
+
+ if (!delta)
+ return;
+
+ /* update lower 32 bits */
+ delta += stat->base_l;
+ stat->base_l = (u32)delta;
+
+ /* update upper 32 bits */
+ stat->base_h += (u32)(delta >> 32);
+}
+
+/**
+ * fm10k_update_hw_stats_tx_q - Updates TX queue statistics counters
+ * @hw: pointer to the hardware structure
+ * @q: pointer to the ring of hardware statistics queue
+ * @idx: index pointing to the start of the ring iteration
+ *
+ * Function updates the TX queue statistics counters that are related to the
+ * hardware.
+ **/
+STATIC void fm10k_update_hw_stats_tx_q(struct fm10k_hw *hw,
+ struct fm10k_hw_stats_q *q,
+ u32 idx)
+{
+ u32 id_tx, id_tx_prev, tx_packets;
+ u64 tx_bytes = 0;
+
+ DEBUGFUNC("fm10k_update_hw_stats_tx_q");
+
+ /* Retrieve TX Owner Data */
+ id_tx = FM10K_READ_REG(hw, FM10K_TXQCTL(idx));
+
+ /* Process TX Ring */
+ do {
+ tx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPTC(idx),
+ &q->tx_packets);
+
+ if (tx_packets)
+ tx_bytes = fm10k_read_hw_stats_48b(hw,
+ FM10K_QBTC_L(idx),
+ &q->tx_bytes);
+
+ /* Re-Check Owner Data */
+ id_tx_prev = id_tx;
+ id_tx = FM10K_READ_REG(hw, FM10K_TXQCTL(idx));
+ } while ((id_tx ^ id_tx_prev) & FM10K_TXQCTL_ID_MASK);
+
+ /* drop non-ID bits and set VALID ID bit */
+ id_tx &= FM10K_TXQCTL_ID_MASK;
+ id_tx |= FM10K_STAT_VALID;
+
+ /* update packet counts */
+ if (q->tx_stats_idx == id_tx) {
+ q->tx_packets.count += tx_packets;
+ q->tx_bytes.count += tx_bytes;
+ }
+
+ /* update bases and record ID */
+ fm10k_update_hw_base_32b(&q->tx_packets, tx_packets);
+ fm10k_update_hw_base_48b(&q->tx_bytes, tx_bytes);
+
+ q->tx_stats_idx = id_tx;
+}
+
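+/* Note on the owner-ID loop above: the queue's TXQCTL ID field is sampled
+ * before and after the counter reads, and the reads are retried until the
+ * ID is stable, so a queue changing owners mid-update cannot corrupt the
+ * totals. Counts are only credited when the stable ID still matches the
+ * previously recorded tx_stats_idx.
+ */
+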
+/**
+ * fm10k_update_hw_stats_rx_q - Updates RX queue statistics counters
+ * @hw: pointer to the hardware structure
+ * @q: pointer to the ring of hardware statistics queue
+ * @idx: index pointing to the start of the ring iteration
+ *
+ * Function updates the RX queue statistics counters that are related to the
+ * hardware.
+ **/
+STATIC void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw,
+ struct fm10k_hw_stats_q *q,
+ u32 idx)
+{
+ u32 id_rx, id_rx_prev, rx_packets, rx_drops;
+ u64 rx_bytes = 0;
+
+ DEBUGFUNC("fm10k_update_hw_stats_rx_q");
+
+ /* Retrieve RX Owner Data */
+ id_rx = FM10K_READ_REG(hw, FM10K_RXQCTL(idx));
+
+ /* Process RX Ring */
+ do {
+ rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx),
+ &q->rx_drops);
+
+ rx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPRC(idx),
+ &q->rx_packets);
+
+ if (rx_packets)
+ rx_bytes = fm10k_read_hw_stats_48b(hw,
+ FM10K_QBRC_L(idx),
+ &q->rx_bytes);
+
+ /* Re-Check Owner Data */
+ id_rx_prev = id_rx;
+ id_rx = FM10K_READ_REG(hw, FM10K_RXQCTL(idx));
+ } while ((id_rx ^ id_rx_prev) & FM10K_RXQCTL_ID_MASK);
+
+ /* drop non-ID bits and set VALID ID bit */
+ id_rx &= FM10K_RXQCTL_ID_MASK;
+ id_rx |= FM10K_STAT_VALID;
+
+ /* update packet counts */
+ if (q->rx_stats_idx == id_rx) {
+ q->rx_drops.count += rx_drops;
+ q->rx_packets.count += rx_packets;
+ q->rx_bytes.count += rx_bytes;
+ }
+
+ /* update bases and record ID */
+ fm10k_update_hw_base_32b(&q->rx_drops, rx_drops);
+ fm10k_update_hw_base_32b(&q->rx_packets, rx_packets);
+ fm10k_update_hw_base_48b(&q->rx_bytes, rx_bytes);
+
+ q->rx_stats_idx = id_rx;
+}
+
+/**
+ * fm10k_update_hw_stats_q - Updates queue statistics counters
+ * @hw: pointer to the hardware structure
+ * @q: pointer to the ring of hardware statistics queue
+ * @idx: index pointing to the start of the ring iteration
+ * @count: number of queues to iterate over
+ *
+ * Function updates the queue statistics counters that are related to the
+ * hardware.
+ **/
+void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
+ u32 idx, u32 count)
+{
+ u32 i;
+
+ DEBUGFUNC("fm10k_update_hw_stats_q");
+
+ for (i = 0; i < count; i++, idx++, q++) {
+ fm10k_update_hw_stats_tx_q(hw, q, idx);
+ fm10k_update_hw_stats_rx_q(hw, q, idx);
+ }
+}
+
+/**
+ * fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues
+ * @q: pointer to the ring of hardware statistics queue
+ * @idx: index pointing to the start of the ring iteration
+ * @count: number of queues to iterate over
+ *
+ * Function invalidates the index values for the queues so any updates that
+ * may have happened are ignored and the base for the queue stats is reset.
+ **/
+void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
+{
+ u32 i;
+
+ for (i = 0; i < count; i++, idx++, q++) {
+ q->rx_stats_idx = 0;
+ q->tx_stats_idx = 0;
+ }
+}
+
+/**
+ * fm10k_get_host_state_generic - Returns the state of the host
+ * @hw: pointer to hardware structure
+ * @host_ready: pointer to boolean value that will record host state
+ *
+ * This function will check the health of the mailbox and Tx queue 0
+ * in order to determine if we should report that the link is up or not.
+ **/
+s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ struct fm10k_mac_info *mac = &hw->mac;
+ s32 ret_val = FM10K_SUCCESS;
+ u32 txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(0));
+
+ DEBUGFUNC("fm10k_get_host_state_generic");
+
+ /* process upstream mailbox in case interrupts were disabled */
+ mbx->ops.process(hw, mbx);
+
+ /* If Tx is no longer enabled link should come down */
+ if (!(~txdctl) || !(txdctl & FM10K_TXDCTL_ENABLE))
+ mac->get_host_state = true;
+
+ /* exit if not checking for link, or link cannot be changed */
+ if (!mac->get_host_state || !(~txdctl))
+ goto out;
+
+ /* if we somehow dropped the Tx enable we should reset */
+ if (hw->mac.tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
+ ret_val = FM10K_ERR_RESET_REQUESTED;
+ goto out;
+ }
+
+ /* if Mailbox timed out we should request reset */
+ if (!mbx->timeout) {
+ ret_val = FM10K_ERR_RESET_REQUESTED;
+ goto out;
+ }
+
+ /* verify Mailbox is still valid */
+ if (!mbx->ops.tx_ready(mbx, FM10K_VFMBX_MSG_MTU))
+ goto out;
+
+ /* interface cannot receive traffic without logical ports */
+ if (mac->dglort_map == FM10K_DGLORTMAP_NONE)
+ goto out;
+
+ /* if we passed all the tests above then the switch is ready and we no
+ * longer need to check for link
+ */
+ mac->get_host_state = false;
+
+out:
+ *host_ready = !mac->get_host_state;
+ return ret_val;
+}
diff --git a/src/dpdk22/drivers/net/fm10k/base/fm10k_common.h b/src/dpdk22/drivers/net/fm10k/base/fm10k_common.h
new file mode 100644
index 00000000..45fbbc0b
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/base/fm10k_common.h
@@ -0,0 +1,52 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_COMMON_H_
+#define _FM10K_COMMON_H_
+
+#include "fm10k_type.h"
+
+u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw);
+s32 fm10k_init_ops_generic(struct fm10k_hw *hw);
+s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt);
+s32 fm10k_start_hw_generic(struct fm10k_hw *hw);
+s32 fm10k_stop_hw_generic(struct fm10k_hw *hw);
+u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
+ struct fm10k_hw_stat *stat);
+#define fm10k_update_hw_base_32b(stat, delta) ((stat)->base_l += (delta))
+void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
+ u32 idx, u32 count);
+#define fm10k_unbind_hw_stats_32b(s) ((s)->base_h = 0)
+void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count);
+s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready);
+#endif /* _FM10K_COMMON_H_ */
diff --git a/src/dpdk22/drivers/net/fm10k/base/fm10k_mbx.c b/src/dpdk22/drivers/net/fm10k/base/fm10k_mbx.c
new file mode 100644
index 00000000..3c9ab3a5
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/base/fm10k_mbx.c
@@ -0,0 +1,2239 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "fm10k_common.h"
+
+/**
+ * fm10k_fifo_init - Initialize a message FIFO
+ * @fifo: pointer to FIFO
+ * @buffer: pointer to memory to be used to store FIFO
+ * @size: maximum message size to store in FIFO, must be a power of 2
+ **/
+STATIC void fm10k_fifo_init(struct fm10k_mbx_fifo *fifo, u32 *buffer, u16 size)
+{
+ fifo->buffer = buffer;
+ fifo->size = size;
+ fifo->head = 0;
+ fifo->tail = 0;
+}
+
+/**
+ * fm10k_fifo_used - Retrieve used space in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function returns the number of DWORDs used in the FIFO
+ **/
+STATIC u16 fm10k_fifo_used(struct fm10k_mbx_fifo *fifo)
+{
+ return fifo->tail - fifo->head;
+}
+
+/**
+ * fm10k_fifo_unused - Retrieve unused space in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function returns the number of unused DWORDs in the FIFO
+ **/
+STATIC u16 fm10k_fifo_unused(struct fm10k_mbx_fifo *fifo)
+{
+ return fifo->size + fifo->head - fifo->tail;
+}
+
+/**
+ * fm10k_fifo_empty - Test to verify if fifo is empty
+ * @fifo: pointer to FIFO
+ *
+ * This function returns true if the FIFO is empty, else false
+ **/
+STATIC bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo)
+{
+ return fifo->head == fifo->tail;
+}
+
+/**
+ * fm10k_fifo_head_offset - returns index of head with given offset
+ * @fifo: pointer to FIFO
+ * @offset: offset to add to head
+ *
+ * This function returns the index into the FIFO based on head + offset
+ **/
+STATIC u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
+{
+ return (fifo->head + offset) & (fifo->size - 1);
+}
+
+/**
+ * fm10k_fifo_tail_offset - returns index of tail with given offset
+ * @fifo: pointer to FIFO
+ * @offset: offset to add to tail
+ *
+ * This function returns the index into the FIFO based on tail + offset
+ **/
+STATIC u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset)
+{
+ return (fifo->tail + offset) & (fifo->size - 1);
+}
+
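+/* Note: fifo->head and fifo->tail are free-running counters; only the two
+ * offset helpers above mask them into the buffer, which is why the FIFO
+ * size must be a power of 2. E.g. with size = 8, a head counter of 14
+ * maps to buffer slot 14 & 7 = 6.
+ */
+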
+/**
+ * fm10k_fifo_head_len - Retrieve length of first message in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function returns the size of the first message in the FIFO
+ **/
+STATIC u16 fm10k_fifo_head_len(struct fm10k_mbx_fifo *fifo)
+{
+ u32 *head = fifo->buffer + fm10k_fifo_head_offset(fifo, 0);
+
+ /* verify there is at least 1 DWORD in the fifo so *head is valid */
+ if (fm10k_fifo_empty(fifo))
+ return 0;
+
+ /* retrieve the message length */
+ return FM10K_TLV_DWORD_LEN(*head);
+}
+
+/**
+ * fm10k_fifo_head_drop - Drop the first message in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function returns the size of the message dropped from the FIFO
+ **/
+STATIC u16 fm10k_fifo_head_drop(struct fm10k_mbx_fifo *fifo)
+{
+ u16 len = fm10k_fifo_head_len(fifo);
+
+ /* update head so it is at the start of next frame */
+ fifo->head += len;
+
+ return len;
+}
+
+/**
+ * fm10k_fifo_drop_all - Drop all messages in FIFO
+ * @fifo: pointer to FIFO
+ *
+ * This function resets the head pointer to drop all messages in the FIFO and
+ * ensure the FIFO is empty.
+ **/
+STATIC void fm10k_fifo_drop_all(struct fm10k_mbx_fifo *fifo)
+{
+ fifo->head = fifo->tail;
+}
+
+/**
+ * fm10k_mbx_index_len - Convert a head/tail index into a length value
+ * @mbx: pointer to mailbox
+ * @head: head index
+ * @tail: tail index
+ *
+ * This function takes the head and tail index and determines the length
+ * of the data indicated by this pair.
+ **/
+STATIC u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail)
+{
+ u16 len = tail - head;
+
+ /* we wrapped so subtract 2, one for index 0, one for all 1s index */
+ if (len > tail)
+ len -= 2;
+
+ return len & ((mbx->mbmem_len << 1) - 1);
+}
+
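+/* Worked example for fm10k_mbx_index_len, assuming mbmem_len = 16:
+ * indices live in a sequence space of (16 << 1) = 32 values with 0 and
+ * the all-1s value reserved. For head = 30 and tail = 3 the valid steps
+ * are 30 -> 1 -> 2 -> 3, so the length is ((3 - 30) - 2) & 31 = 3 DWORDs.
+ */
+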
+/**
+ * fm10k_mbx_tail_add - Determine new tail value with added offset
+ * @mbx: pointer to mailbox
+ * @offset: length to add to tail offset
+ *
+ * This function takes the local tail index and recomputes it for
+ * a given length added as an offset.
+ **/
+STATIC u16 fm10k_mbx_tail_add(struct fm10k_mbx_info *mbx, u16 offset)
+{
+ u16 tail = (mbx->tail + offset + 1) & ((mbx->mbmem_len << 1) - 1);
+
+ /* add/sub 1 because we cannot have offset 0 or all 1s */
+ return (tail > mbx->tail) ? --tail : ++tail;
+}
+
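+/* Example for fm10k_mbx_tail_add above, again assuming mbmem_len = 16:
+ * advancing tail = 30 by offset = 2 computes (30 + 2 + 1) & 31 = 1; since
+ * the result wrapped, it is bumped to 2, matching the two valid steps
+ * 30 -> 1 -> 2 in a sequence space that skips 0 and all 1s.
+ */
+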
+/**
+ * fm10k_mbx_tail_sub - Determine new tail value with subtracted offset
+ * @mbx: pointer to mailbox
+ * @offset: length to subtract from tail offset
+ *
+ * This function takes the local tail index and recomputes it for
+ * a given length subtracted as an offset.
+ **/
+STATIC u16 fm10k_mbx_tail_sub(struct fm10k_mbx_info *mbx, u16 offset)
+{
+ u16 tail = (mbx->tail - offset - 1) & ((mbx->mbmem_len << 1) - 1);
+
+ /* sub/add 1 because we cannot have offset 0 or all 1s */
+ return (tail < mbx->tail) ? ++tail : --tail;
+}
+
+/**
+ * fm10k_mbx_head_add - Determine new head value with added offset
+ * @mbx: pointer to mailbox
+ * @offset: length to add to head offset
+ *
+ * This function takes the local head index and recomputes it for
+ * a given length added as an offset.
+ **/
+STATIC u16 fm10k_mbx_head_add(struct fm10k_mbx_info *mbx, u16 offset)
+{
+ u16 head = (mbx->head + offset + 1) & ((mbx->mbmem_len << 1) - 1);
+
+ /* add/sub 1 because we cannot have offset 0 or all 1s */
+ return (head > mbx->head) ? --head : ++head;
+}
+
+/**
+ * fm10k_mbx_head_sub - Determine new head value with subtracted offset
+ * @mbx: pointer to mailbox
+ * @offset: length to subtract from head offset
+ *
+ * This function takes the local head index and recomputes it for
+ * a given length subtracted as an offset.
+ **/
+STATIC u16 fm10k_mbx_head_sub(struct fm10k_mbx_info *mbx, u16 offset)
+{
+ u16 head = (mbx->head - offset - 1) & ((mbx->mbmem_len << 1) - 1);
+
+ /* sub/add 1 because we cannot have offset 0 or all 1s */
+ return (head < mbx->head) ? ++head : --head;
+}
+
+/**
+ * fm10k_mbx_pushed_tail_len - Retrieve the length of message being pushed
+ * @mbx: pointer to mailbox
+ *
+ * This function will return the length of the message currently being
+ * pushed onto the tail of the Rx queue.
+ **/
+STATIC u16 fm10k_mbx_pushed_tail_len(struct fm10k_mbx_info *mbx)
+{
+ u32 *tail = mbx->rx.buffer + fm10k_fifo_tail_offset(&mbx->rx, 0);
+
+ /* pushed tail is only valid if pushed is set */
+ if (!mbx->pushed)
+ return 0;
+
+ return FM10K_TLV_DWORD_LEN(*tail);
+}
+
+/**
+ * fm10k_fifo_write_copy - pulls data off of msg and places it in FIFO
+ * @fifo: pointer to FIFO
+ * @msg: message array to read from
+ * @tail_offset: additional offset to add to tail pointer
+ * @len: number of DWORDs to copy into the FIFO
+ *
+ * This function will take a message and copy it into a section of the
+ * FIFO. In order to get something into a location other than just
+ * the tail you can use tail_offset to adjust the pointer.
+ **/
+STATIC void fm10k_fifo_write_copy(struct fm10k_mbx_fifo *fifo,
+ const u32 *msg, u16 tail_offset, u16 len)
+{
+ u16 end = fm10k_fifo_tail_offset(fifo, tail_offset);
+ u32 *tail = fifo->buffer + end;
+
+ /* track when we should cross the end of the FIFO */
+ end = fifo->size - end;
+
+ /* copy end of message before start of message */
+ if (end < len)
+ memcpy(fifo->buffer, msg + end, (len - end) << 2);
+ else
+ end = len;
+
+ /* Copy remaining message into Tx FIFO */
+ memcpy(tail, msg, end << 2);
+}
+
+/**
+ * fm10k_fifo_enqueue - Enqueues the message to the tail of the FIFO
+ * @fifo: pointer to FIFO
+ * @msg: message array to read
+ *
+ * This function enqueues a message up to the size specified by the length
+ * contained in the first DWORD of the message and will place at the tail
+ * of the FIFO. It will return 0 on success, or a negative value on error.
+ **/
+STATIC s32 fm10k_fifo_enqueue(struct fm10k_mbx_fifo *fifo, const u32 *msg)
+{
+ u16 len = FM10K_TLV_DWORD_LEN(*msg);
+
+ DEBUGFUNC("fm10k_fifo_enqueue");
+
+ /* verify parameters */
+ if (len > fifo->size)
+ return FM10K_MBX_ERR_SIZE;
+
+ /* verify there is room for the message */
+ if (len > fm10k_fifo_unused(fifo))
+ return FM10K_MBX_ERR_NO_SPACE;
+
+ /* Copy message into FIFO */
+ fm10k_fifo_write_copy(fifo, msg, 0, len);
+
+ /* memory barrier to guarantee FIFO is written before tail update */
+ FM10K_WMB();
+
+ /* Update Tx FIFO tail */
+ fifo->tail += len;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_validate_msg_size - Validate incoming message based on size
+ * @mbx: pointer to mailbox
+ * @len: length of data pushed onto buffer
+ *
+ * This function analyzes the frame and will return a non-zero value when
+ * the start of a message larger than the mailbox is detected.
+ **/
+STATIC u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->rx;
+ u16 total_len = 0, msg_len;
+ u32 *msg;
+
+ DEBUGFUNC("fm10k_mbx_validate_msg");
+
+ /* length should include previous amounts pushed */
+ len += mbx->pushed;
+
+ /* offset in message is based off of current message size */
+ do {
+ msg = fifo->buffer + fm10k_fifo_tail_offset(fifo, total_len);
+ msg_len = FM10K_TLV_DWORD_LEN(*msg);
+ total_len += msg_len;
+ } while (total_len < len);
+
+ /* message extends out of pushed section, but fits in FIFO */
+ if ((len < total_len) && (msg_len <= mbx->max_size))
+ return 0;
+
+ /* return length of invalid section */
+ return (len < total_len) ? len : (len - total_len);
+}
+
+/**
+ * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will take a section of the Tx FIFO and copy it into the
+ * mailbox memory. The offset in mbmem is based on the lower bits of the
+ * tail and len determines the length to copy.
+ **/
+STATIC void fm10k_mbx_write_copy(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->tx;
+ u32 mbmem = mbx->mbmem_reg;
+ u32 *head = fifo->buffer;
+ u16 end, len, tail, mask;
+
+ DEBUGFUNC("fm10k_mbx_write_copy");
+
+ if (!mbx->tail_len)
+ return;
+
+ /* determine data length and mbmem tail index */
+ mask = mbx->mbmem_len - 1;
+ len = mbx->tail_len;
+ tail = fm10k_mbx_tail_sub(mbx, len);
+ if (tail > mask)
+ tail++;
+
+ /* determine offset in the ring */
+ end = fm10k_fifo_head_offset(fifo, mbx->pulled);
+ head += end;
+
+ /* memory barrier to guarantee data is ready to be read */
+ FM10K_RMB();
+
+ /* Copy message from Tx FIFO */
+ for (end = fifo->size - end; len; head = fifo->buffer) {
+ do {
+ /* adjust tail to match offset for FIFO */
+ tail &= mask;
+ if (!tail)
+ tail++;
+
+ mbx->tx_mbmem_pulled++;
+
+ /* write message to hardware FIFO */
+ FM10K_WRITE_MBX(hw, mbmem + tail++, *(head++));
+ } while (--len && --end);
+ }
+}
+
+/**
+ * fm10k_mbx_pull_head - Pulls data off of head of Tx FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @head: acknowledgement number last received
+ *
+ * This function will push the tail index forward based on the remote
+ * head index. It will then pull up to mbmem_len DWORDs off of the
+ * head of the FIFO and will place them in the MBMEM registers
+ * associated with the mailbox.
+ **/
+STATIC void fm10k_mbx_pull_head(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx, u16 head)
+{
+ u16 mbmem_len, len, ack = fm10k_mbx_index_len(mbx, head, mbx->tail);
+ struct fm10k_mbx_fifo *fifo = &mbx->tx;
+
+ /* update number of bytes pulled and update bytes in transit */
+ mbx->pulled += mbx->tail_len - ack;
+
+ /* determine length of data to pull, reserve space for mbmem header */
+ mbmem_len = mbx->mbmem_len - 1;
+ len = fm10k_fifo_used(fifo) - mbx->pulled;
+ if (len > mbmem_len)
+ len = mbmem_len;
+
+ /* update tail and record number of bytes in transit */
+ mbx->tail = fm10k_mbx_tail_add(mbx, len - ack);
+ mbx->tail_len = len;
+
+ /* drop pulled messages from the FIFO */
+ for (len = fm10k_fifo_head_len(fifo);
+ len && (mbx->pulled >= len);
+ len = fm10k_fifo_head_len(fifo)) {
+ mbx->pulled -= fm10k_fifo_head_drop(fifo);
+ mbx->tx_messages++;
+ mbx->tx_dwords += len;
+ }
+
+ /* Copy message out from the Tx FIFO */
+ fm10k_mbx_write_copy(hw, mbx);
+}
+
+/**
+ * fm10k_mbx_read_copy - pulls data off of mbmem and places it in Rx FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will take a section of the mailbox memory and copy it
+ * into the Rx FIFO. The offset is based on the lower bits of the
+ * head and len determines the length to copy.
+ **/
+STATIC void fm10k_mbx_read_copy(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->rx;
+ u32 mbmem = mbx->mbmem_reg ^ mbx->mbmem_len;
+ u32 *tail = fifo->buffer;
+ u16 end, len, head;
+
+ DEBUGFUNC("fm10k_mbx_read_copy");
+
+ /* determine data length and mbmem head index */
+ len = mbx->head_len;
+ head = fm10k_mbx_head_sub(mbx, len);
+ if (head >= mbx->mbmem_len)
+ head++;
+
+ /* determine offset in the ring */
+ end = fm10k_fifo_tail_offset(fifo, mbx->pushed);
+ tail += end;
+
+ /* Copy message into Rx FIFO */
+ for (end = fifo->size - end; len; tail = fifo->buffer) {
+ do {
+ /* adjust head to match offset for FIFO */
+ head &= mbx->mbmem_len - 1;
+ if (!head)
+ head++;
+
+ mbx->rx_mbmem_pushed++;
+
+ /* read message from hardware FIFO */
+ *(tail++) = FM10K_READ_MBX(hw, mbmem + head++);
+ } while (--len && --end);
+ }
+
+ /* memory barrier to guarantee FIFO is written before tail update */
+ FM10K_WMB();
+}
+
+/**
+ * fm10k_mbx_push_tail - Pushes up to 15 DWORDs onto the tail of the FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @tail: tail index of message
+ *
+ * This function will first validate the tail index and size for the
+ * incoming message. It then updates the acknowledgment number and
+ * copies the data into the FIFO. It will return 0 on success, or a
+ * negative value on error.
+ **/
+STATIC s32 fm10k_mbx_push_tail(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx,
+ u16 tail)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->rx;
+ u16 len, seq = fm10k_mbx_index_len(mbx, mbx->head, tail);
+
+ DEBUGFUNC("fm10k_mbx_push_tail");
+
+ /* determine length of data to push */
+ len = fm10k_fifo_unused(fifo) - mbx->pushed;
+ if (len > seq)
+ len = seq;
+
+ /* update head and record bytes received */
+ mbx->head = fm10k_mbx_head_add(mbx, len);
+ mbx->head_len = len;
+
+ /* nothing to do if there is no data */
+ if (!len)
+ return FM10K_SUCCESS;
+
+ /* Copy msg into Rx FIFO */
+ fm10k_mbx_read_copy(hw, mbx);
+
+ /* determine if there are any invalid lengths in message */
+ if (fm10k_mbx_validate_msg_size(mbx, len))
+ return FM10K_MBX_ERR_SIZE;
+
+ /* Update pushed */
+ mbx->pushed += len;
+
+ /* flush any completed messages */
+ for (len = fm10k_mbx_pushed_tail_len(mbx);
+ len && (mbx->pushed >= len);
+ len = fm10k_mbx_pushed_tail_len(mbx)) {
+ fifo->tail += len;
+ mbx->pushed -= len;
+ mbx->rx_messages++;
+ mbx->rx_dwords += len;
+ }
+
+ return FM10K_SUCCESS;
+}
+
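+/* Note on fm10k_mbx_push_tail above: seq is how many DWORDs the remote
+ * tail is ahead of our local head, i.e. the data made available by the
+ * other side; the push is bounded by both seq and the unused Rx FIFO
+ * space, and any fully received messages are then peeled off and
+ * credited to the Rx counters.
+ */
+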
+/* pre-generated data for generating the CRC based on the poly 0xAC9A. */
+static const u16 fm10k_crc_16b_table[256] = {
+ 0x0000, 0x7956, 0xF2AC, 0x8BFA, 0xBC6D, 0xC53B, 0x4EC1, 0x3797,
+ 0x21EF, 0x58B9, 0xD343, 0xAA15, 0x9D82, 0xE4D4, 0x6F2E, 0x1678,
+ 0x43DE, 0x3A88, 0xB172, 0xC824, 0xFFB3, 0x86E5, 0x0D1F, 0x7449,
+ 0x6231, 0x1B67, 0x909D, 0xE9CB, 0xDE5C, 0xA70A, 0x2CF0, 0x55A6,
+ 0x87BC, 0xFEEA, 0x7510, 0x0C46, 0x3BD1, 0x4287, 0xC97D, 0xB02B,
+ 0xA653, 0xDF05, 0x54FF, 0x2DA9, 0x1A3E, 0x6368, 0xE892, 0x91C4,
+ 0xC462, 0xBD34, 0x36CE, 0x4F98, 0x780F, 0x0159, 0x8AA3, 0xF3F5,
+ 0xE58D, 0x9CDB, 0x1721, 0x6E77, 0x59E0, 0x20B6, 0xAB4C, 0xD21A,
+ 0x564D, 0x2F1B, 0xA4E1, 0xDDB7, 0xEA20, 0x9376, 0x188C, 0x61DA,
+ 0x77A2, 0x0EF4, 0x850E, 0xFC58, 0xCBCF, 0xB299, 0x3963, 0x4035,
+ 0x1593, 0x6CC5, 0xE73F, 0x9E69, 0xA9FE, 0xD0A8, 0x5B52, 0x2204,
+ 0x347C, 0x4D2A, 0xC6D0, 0xBF86, 0x8811, 0xF147, 0x7ABD, 0x03EB,
+ 0xD1F1, 0xA8A7, 0x235D, 0x5A0B, 0x6D9C, 0x14CA, 0x9F30, 0xE666,
+ 0xF01E, 0x8948, 0x02B2, 0x7BE4, 0x4C73, 0x3525, 0xBEDF, 0xC789,
+ 0x922F, 0xEB79, 0x6083, 0x19D5, 0x2E42, 0x5714, 0xDCEE, 0xA5B8,
+ 0xB3C0, 0xCA96, 0x416C, 0x383A, 0x0FAD, 0x76FB, 0xFD01, 0x8457,
+ 0xAC9A, 0xD5CC, 0x5E36, 0x2760, 0x10F7, 0x69A1, 0xE25B, 0x9B0D,
+ 0x8D75, 0xF423, 0x7FD9, 0x068F, 0x3118, 0x484E, 0xC3B4, 0xBAE2,
+ 0xEF44, 0x9612, 0x1DE8, 0x64BE, 0x5329, 0x2A7F, 0xA185, 0xD8D3,
+ 0xCEAB, 0xB7FD, 0x3C07, 0x4551, 0x72C6, 0x0B90, 0x806A, 0xF93C,
+ 0x2B26, 0x5270, 0xD98A, 0xA0DC, 0x974B, 0xEE1D, 0x65E7, 0x1CB1,
+ 0x0AC9, 0x739F, 0xF865, 0x8133, 0xB6A4, 0xCFF2, 0x4408, 0x3D5E,
+ 0x68F8, 0x11AE, 0x9A54, 0xE302, 0xD495, 0xADC3, 0x2639, 0x5F6F,
+ 0x4917, 0x3041, 0xBBBB, 0xC2ED, 0xF57A, 0x8C2C, 0x07D6, 0x7E80,
+ 0xFAD7, 0x8381, 0x087B, 0x712D, 0x46BA, 0x3FEC, 0xB416, 0xCD40,
+ 0xDB38, 0xA26E, 0x2994, 0x50C2, 0x6755, 0x1E03, 0x95F9, 0xECAF,
+ 0xB909, 0xC05F, 0x4BA5, 0x32F3, 0x0564, 0x7C32, 0xF7C8, 0x8E9E,
+ 0x98E6, 0xE1B0, 0x6A4A, 0x131C, 0x248B, 0x5DDD, 0xD627, 0xAF71,
+ 0x7D6B, 0x043D, 0x8FC7, 0xF691, 0xC106, 0xB850, 0x33AA, 0x4AFC,
+ 0x5C84, 0x25D2, 0xAE28, 0xD77E, 0xE0E9, 0x99BF, 0x1245, 0x6B13,
+ 0x3EB5, 0x47E3, 0xCC19, 0xB54F, 0x82D8, 0xFB8E, 0x7074, 0x0922,
+ 0x1F5A, 0x660C, 0xEDF6, 0x94A0, 0xA337, 0xDA61, 0x519B, 0x28CD };
+
+/**
+ * fm10k_crc_16b - Generate a 16 bit CRC for a region of 16 bit data
+ * @data: pointer to data to process
+ * @seed: seed value for CRC
+ * @len: length measured in 16-bit words
+ *
+ * This function will generate a CRC based on the polynomial 0xAC9A and
+ * whatever value is stored in the seed variable. Note that this
+ * function inverts the local seed and the result in order to capture all
+ * leading and trailing zeros.
+ */
+STATIC u16 fm10k_crc_16b(const u32 *data, u16 seed, u16 len)
+{
+ u32 result = seed;
+
+ while (len--) {
+ result ^= *(data++);
+ result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF];
+ result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF];
+
+ if (!(len--))
+ break;
+
+ result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF];
+ result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF];
+ }
+
+ return (u16)result;
+}
+
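+/* Note: len above counts 16-bit words, so each 32-bit DWORD contributes
+ * two units and each unit costs two table lookups (one per byte). The
+ * mid-loop len check allows an odd word count, which is how a single
+ * header DWORD is covered with len = 1.
+ */
+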
+/**
+ * fm10k_fifo_crc - generate a CRC based off of FIFO data
+ * @fifo: pointer to FIFO
+ * @offset: offset point for start of FIFO
+ * @len: number of DWORDs to process
+ * @seed: seed value for CRC
+ *
+ * This function generates a CRC for some region of the FIFO
+ **/
+STATIC u16 fm10k_fifo_crc(struct fm10k_mbx_fifo *fifo, u16 offset,
+ u16 len, u16 seed)
+{
+ u32 *data = fifo->buffer + offset;
+
+ /* track when we should cross the end of the FIFO */
+ offset = fifo->size - offset;
+
+ /* if we are in 2 blocks process the end of the FIFO first */
+ if (offset < len) {
+ seed = fm10k_crc_16b(data, seed, offset * 2);
+ data = fifo->buffer;
+ len -= offset;
+ }
+
+ /* process any remaining bits */
+ return fm10k_crc_16b(data, seed, len * 2);
+}
+
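+/* Note: fm10k_fifo_crc takes len in DWORDs, hence the * 2 when calling
+ * fm10k_crc_16b, which counts 16-bit words. A region that wraps past the
+ * end of the buffer is processed as two runs with the seed chained
+ * between them.
+ */
+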
+/**
+ * fm10k_mbx_update_local_crc - Update the local CRC for outgoing data
+ * @mbx: pointer to mailbox
+ * @head: head index provided by remote mailbox
+ *
+ * This function will generate the CRC for all data from the end of the
+ * last head update to the current one. It uses the result of the
+ * previous CRC as the seed for this update. The result is stored in
+ * mbx->local.
+ **/
+STATIC void fm10k_mbx_update_local_crc(struct fm10k_mbx_info *mbx, u16 head)
+{
+ u16 len = mbx->tail_len - fm10k_mbx_index_len(mbx, head, mbx->tail);
+
+ /* determine the offset for the start of the region to be pulled */
+ head = fm10k_fifo_head_offset(&mbx->tx, mbx->pulled);
+
+ /* update local CRC to include all of the pulled data */
+ mbx->local = fm10k_fifo_crc(&mbx->tx, head, len, mbx->local);
+}
+
+/**
+ * fm10k_mbx_verify_remote_crc - Verify the CRC is correct for current data
+ * @mbx: pointer to mailbox
+ *
+ * This function will take all data that has been provided from the remote
+ * end and generate a CRC for it. This is stored in mbx->remote. The
+ * CRC for the header is then computed; if the result is non-zero, this
+ * is an error and we signal it, dropping all data and resetting the
+ * connection.
+ */
+STATIC s32 fm10k_mbx_verify_remote_crc(struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->rx;
+ u16 len = mbx->head_len;
+ u16 offset = fm10k_fifo_tail_offset(fifo, mbx->pushed) - len;
+ u16 crc;
+
+ /* update the remote CRC if new data has been received */
+ if (len)
+ mbx->remote = fm10k_fifo_crc(fifo, offset, len, mbx->remote);
+
+ /* process the full header as we have to validate the CRC */
+ crc = fm10k_crc_16b(&mbx->mbx_hdr, mbx->remote, 1);
+
+ /* notify other end if we have a problem */
+ return crc ? FM10K_MBX_ERR_CRC : FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_rx_ready - Indicates that a message is ready in the Rx FIFO
+ * @mbx: pointer to mailbox
+ *
+ * This function returns true if there is a message in the Rx FIFO to dequeue.
+ **/
+STATIC bool fm10k_mbx_rx_ready(struct fm10k_mbx_info *mbx)
+{
+ u16 msg_size = fm10k_fifo_head_len(&mbx->rx);
+
+ return msg_size && (fm10k_fifo_used(&mbx->rx) >= msg_size);
+}
+
+/**
+ * fm10k_mbx_tx_ready - Indicates that the mailbox is in state ready for Tx
+ * @mbx: pointer to mailbox
+ * @len: verify free space is >= this value
+ *
+ * This function returns true if the mailbox is in a state ready to transmit.
+ **/
+STATIC bool fm10k_mbx_tx_ready(struct fm10k_mbx_info *mbx, u16 len)
+{
+ u16 fifo_unused = fm10k_fifo_unused(&mbx->tx);
+
+ return (mbx->state == FM10K_STATE_OPEN) && (fifo_unused >= len);
+}
+
+/**
+ * fm10k_mbx_tx_complete - Indicates that the Tx FIFO has been emptied
+ * @mbx: pointer to mailbox
+ *
+ * This function returns true if the Tx FIFO is empty.
+ **/
+STATIC bool fm10k_mbx_tx_complete(struct fm10k_mbx_info *mbx)
+{
+ return fm10k_fifo_empty(&mbx->tx);
+}
+
+/**
+ * fm10k_mbx_dequeue_rx - Dequeues messages from the head of the Rx FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function dequeues messages and hands them off to the TLV parser.
+ * It will return the number of messages processed when called.
+ **/
+STATIC u16 fm10k_mbx_dequeue_rx(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->rx;
+ s32 err;
+ u16 cnt;
+
+ /* parse Rx messages out of the Rx FIFO to empty it */
+ for (cnt = 0; !fm10k_fifo_empty(fifo); cnt++) {
+ err = fm10k_tlv_msg_parse(hw, fifo->buffer + fifo->head,
+ mbx, mbx->msg_data);
+ if (err < 0)
+ mbx->rx_parse_err++;
+
+ fm10k_fifo_head_drop(fifo);
+ }
+
+ /* shift remaining bytes back to start of FIFO */
+ memmove(fifo->buffer, fifo->buffer + fifo->tail, mbx->pushed << 2);
+
+ /* shift head and tail based on the memory we moved */
+ fifo->tail -= fifo->head;
+ fifo->head = 0;
+
+ return cnt;
+}
+
+/**
+ * fm10k_mbx_enqueue_tx - Enqueues the message to the tail of the Tx FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @msg: message array to read
+ *
+ * This function enqueues a message up to the size specified by the length
+ * contained in the first DWORD of the message and will place at the tail
+ * of the FIFO. It will return 0 on success, or a negative value on error.
+ **/
+STATIC s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx, const u32 *msg)
+{
+ u32 countdown = mbx->timeout;
+ s32 err;
+
+ switch (mbx->state) {
+ case FM10K_STATE_CLOSED:
+ case FM10K_STATE_DISCONNECT:
+ return FM10K_MBX_ERR_NO_MBX;
+ default:
+ break;
+ }
+
+ /* enqueue the message on the Tx FIFO */
+ err = fm10k_fifo_enqueue(&mbx->tx, msg);
+
+ /* if it failed give the FIFO a chance to drain */
+ while (err && countdown) {
+ countdown--;
+ usec_delay(mbx->usec_delay);
+ mbx->ops.process(hw, mbx);
+ err = fm10k_fifo_enqueue(&mbx->tx, msg);
+ }
+
+ /* if we failed treat the error */
+ if (err) {
+ mbx->timeout = 0;
+ mbx->tx_busy++;
+ }
+
+ /* begin processing message, ignore errors as this is just meant
+ * to start the mailbox flow; we are not concerned if there
+ * is an error, or if the mailbox is already busy with a request
+ */
+ if (!mbx->tail_len)
+ mbx->ops.process(hw, mbx);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_read - Copies the mbmem to local message buffer
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function copies the message from the mbmem to the message array
+ **/
+STATIC s32 fm10k_mbx_read(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
+{
+ DEBUGFUNC("fm10k_mbx_read");
+
+ /* only allow one reader in here at a time */
+ if (mbx->mbx_hdr)
+ return FM10K_MBX_ERR_BUSY;
+
+ /* read to capture initial interrupt bits */
+ if (FM10K_READ_MBX(hw, mbx->mbx_reg) & FM10K_MBX_REQ_INTERRUPT)
+ mbx->mbx_lock = FM10K_MBX_ACK;
+
+ /* write back interrupt bits to clear */
+ FM10K_WRITE_MBX(hw, mbx->mbx_reg,
+ FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT);
+
+ /* read remote header */
+ mbx->mbx_hdr = FM10K_READ_MBX(hw, mbx->mbmem_reg ^ mbx->mbmem_len);
+
+ return FM10K_SUCCESS;
+}
+
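+/* Note: mbx->mbx_hdr doubles as the single-reader lock here; it is set
+ * by fm10k_mbx_read and cleared again by fm10k_mbx_write, so a second
+ * read attempt before the reply is written returns FM10K_MBX_ERR_BUSY.
+ */
+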
+/**
+ * fm10k_mbx_write - Copies the local message buffer to mbmem
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function copies the message from the message array to mbmem
+ **/
+STATIC void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
+{
+ u32 mbmem = mbx->mbmem_reg;
+
+ DEBUGFUNC("fm10k_mbx_write");
+
+ /* write new msg header to notify recipient of change */
+ FM10K_WRITE_MBX(hw, mbmem, mbx->mbx_hdr);
+
+ /* write mailbox to send interrupt */
+ if (mbx->mbx_lock)
+ FM10K_WRITE_MBX(hw, mbx->mbx_reg, mbx->mbx_lock);
+
+ /* we no longer are using the header so free it */
+ mbx->mbx_hdr = 0;
+ mbx->mbx_lock = 0;
+}
+
+/**
+ * fm10k_mbx_create_connect_hdr - Generate a connect mailbox header
+ * @mbx: pointer to mailbox
+ *
+ * This function returns a connection mailbox header
+ **/
+STATIC void fm10k_mbx_create_connect_hdr(struct fm10k_mbx_info *mbx)
+{
+ mbx->mbx_lock |= FM10K_MBX_REQ;
+
+ mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_CONNECT, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->rx.size - 1, CONNECT_SIZE);
+}
+
+/**
+ * fm10k_mbx_create_data_hdr - Generate a data mailbox header
+ * @mbx: pointer to mailbox
+ *
+ * This function returns a data mailbox header
+ **/
+STATIC void fm10k_mbx_create_data_hdr(struct fm10k_mbx_info *mbx)
+{
+ u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DATA, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD);
+ struct fm10k_mbx_fifo *fifo = &mbx->tx;
+ u16 crc;
+
+ if (mbx->tail_len)
+ mbx->mbx_lock |= FM10K_MBX_REQ;
+
+ /* generate CRC for data in flight and header */
+ crc = fm10k_fifo_crc(fifo, fm10k_fifo_head_offset(fifo, mbx->pulled),
+ mbx->tail_len, mbx->local);
+ crc = fm10k_crc_16b(&hdr, crc, 1);
+
+ /* load header to memory to be written */
+ mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC);
+}
+
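+/* Note: the data header CRC above is seeded with the running local CRC
+ * and covers both the tail_len DWORDs currently in flight and the header
+ * itself, so corruption of either the payload or the header fails the
+ * remote end's check.
+ */
+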
+/**
+ * fm10k_mbx_create_disconnect_hdr - Generate a disconnect mailbox header
+ * @mbx: pointer to mailbox
+ *
+ * This function returns a disconnect mailbox header
+ **/
+STATIC void fm10k_mbx_create_disconnect_hdr(struct fm10k_mbx_info *mbx)
+{
+ u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD);
+ u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1);
+
+ mbx->mbx_lock |= FM10K_MBX_ACK;
+
+ /* load header to memory to be written */
+ mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC);
+}
+
+/**
+ * fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mailbox header
+ * @mbx: pointer to mailbox
+ *
+ * This function creates a fake disconnect header for loading into remote
+ * mailbox header. The primary purpose is to prevent errors on immediate
+ * start up after mbx->connect.
+ **/
+STATIC void fm10k_mbx_create_fake_disconnect_hdr(struct fm10k_mbx_info *mbx)
+{
+ u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->tail, HEAD);
+ u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1);
+
+ mbx->mbx_lock |= FM10K_MBX_ACK;
+
+ /* load header to memory to be written */
+ mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC);
+}
+
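+/* Note: the fake disconnect header above deliberately swaps head and
+ * tail relative to fm10k_mbx_create_disconnect_hdr because it is loaded
+ * into the remote mailbox header slot, i.e. as if it had been sent by
+ * the other end.
+ */
+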
+/**
+ * fm10k_mbx_create_error_msg - Generate an error message
+ * @mbx: pointer to mailbox
+ * @err: local error encountered
+ *
+ * This function will interpret the error provided by err, and based on
+ * that it may shift the message by 1 DWORD and then place an error header
+ * at the start of the message.
+ **/
+STATIC void fm10k_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err)
+{
+ /* only generate an error message for these types */
+ switch (err) {
+ case FM10K_MBX_ERR_TAIL:
+ case FM10K_MBX_ERR_HEAD:
+ case FM10K_MBX_ERR_TYPE:
+ case FM10K_MBX_ERR_SIZE:
+ case FM10K_MBX_ERR_RSVD0:
+ case FM10K_MBX_ERR_CRC:
+ break;
+ default:
+ return;
+ }
+
+ mbx->mbx_lock |= FM10K_MBX_REQ;
+
+ mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_ERROR, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(err, ERR_NO) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD);
+}
+
+/**
+ * fm10k_mbx_validate_msg_hdr - Validate common fields in the message header
+ * @mbx: pointer to mailbox
+ *
+ * This function will parse the fields in the mailbox header and return
+ * an error if the header contains any of a number of invalid configurations
+ * including unrecognized type, invalid route, or a malformed message.
+ **/
+STATIC s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx)
+{
+ u16 type, rsvd0, head, tail, size;
+ const u32 *hdr = &mbx->mbx_hdr;
+
+ DEBUGFUNC("fm10k_mbx_validate_msg_hdr");
+
+ type = FM10K_MSG_HDR_FIELD_GET(*hdr, TYPE);
+ rsvd0 = FM10K_MSG_HDR_FIELD_GET(*hdr, RSVD0);
+ tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL);
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
+ size = FM10K_MSG_HDR_FIELD_GET(*hdr, CONNECT_SIZE);
+
+ if (rsvd0)
+ return FM10K_MBX_ERR_RSVD0;
+
+ switch (type) {
+ case FM10K_MSG_DISCONNECT:
+ /* validate that all data has been received */
+ if (tail != mbx->head)
+ return FM10K_MBX_ERR_TAIL;
+
+ /* fall through */
+ case FM10K_MSG_DATA:
+ /* validate that head is moving correctly */
+ if (!head || (head == FM10K_MSG_HDR_MASK(HEAD)))
+ return FM10K_MBX_ERR_HEAD;
+ if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len)
+ return FM10K_MBX_ERR_HEAD;
+
+ /* validate that tail is moving correctly */
+ if (!tail || (tail == FM10K_MSG_HDR_MASK(TAIL)))
+ return FM10K_MBX_ERR_TAIL;
+ if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len)
+ break;
+
+ return FM10K_MBX_ERR_TAIL;
+ case FM10K_MSG_CONNECT:
+ /* validate size is in range and is power of 2 mask */
+ if ((size < FM10K_VFMBX_MSG_MTU) || (size & (size + 1)))
+ return FM10K_MBX_ERR_SIZE;
+
+ /* fall through */
+ case FM10K_MSG_ERROR:
+ if (!head || (head == FM10K_MSG_HDR_MASK(HEAD)))
+ return FM10K_MBX_ERR_HEAD;
+ /* neither connect nor error include a tail offset */
+ if (tail)
+ return FM10K_MBX_ERR_TAIL;
+
+ break;
+ default:
+ return FM10K_MBX_ERR_TYPE;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_create_reply - Generate reply based on state and remote head
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @head: acknowledgement number
+ *
+ * This function will generate an outgoing message based on the current
+ * mailbox state and the remote FIFO head. It will return 0 on success,
+ * and a negative value on error.
+ **/
+STATIC s32 fm10k_mbx_create_reply(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx, u16 head)
+{
+ switch (mbx->state) {
+ case FM10K_STATE_OPEN:
+ case FM10K_STATE_DISCONNECT:
+ /* update our checksum for the outgoing data */
+ fm10k_mbx_update_local_crc(mbx, head);
+
+ /* as long as other end recognizes us keep sending data */
+ fm10k_mbx_pull_head(hw, mbx, head);
+
+ /* generate new header based on data */
+ if (mbx->tail_len || (mbx->state == FM10K_STATE_OPEN))
+ fm10k_mbx_create_data_hdr(mbx);
+ else
+ fm10k_mbx_create_disconnect_hdr(mbx);
+ break;
+ case FM10K_STATE_CONNECT:
+ /* send disconnect even if we aren't connected */
+ fm10k_mbx_create_connect_hdr(mbx);
+ break;
+ case FM10K_STATE_CLOSED:
+ /* generate new header based on data */
+ fm10k_mbx_create_disconnect_hdr(mbx);
+ default:
+ break;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_mbx_reset_work - Reset internal pointers for any pending work
+ * @mbx: pointer to mailbox
+ *
+ * This function will reset all internal pointers so any work in progress
+ * is dropped. This call should occur every time we transition from the
+ * open state to the connect state.
+ **/
+STATIC void fm10k_mbx_reset_work(struct fm10k_mbx_info *mbx)
+{
+ u16 len, head, ack;
+
+ /* reset our outgoing max size back to Rx limits */
+ mbx->max_size = mbx->rx.size - 1;
+
+ /* update mbx->pulled to account for tail_len and ack */
+ head = FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, HEAD);
+ ack = fm10k_mbx_index_len(mbx, head, mbx->tail);
+ mbx->pulled += mbx->tail_len - ack;
+
+ /* now drop any messages which have started or finished transmitting */
+ while (fm10k_fifo_head_len(&mbx->tx) && mbx->pulled) {
+ len = fm10k_fifo_head_drop(&mbx->tx);
+ mbx->tx_dropped++;
+ if (mbx->pulled >= len)
+ mbx->pulled -= len;
+ else
+ mbx->pulled = 0;
+ }
+
+ /* just do a quick resync to start of message */
+ mbx->pushed = 0;
+ mbx->pulled = 0;
+ mbx->tail_len = 0;
+ mbx->head_len = 0;
+ mbx->rx.tail = 0;
+ mbx->rx.head = 0;
+}
+
+/**
+ * fm10k_mbx_update_max_size - Update the max_size and drop large messages
+ * @mbx: pointer to mailbox
+ * @size: new value for max_size
+ *
+ * This function updates the max_size value and drops any outgoing messages
+ * at the head of the Tx FIFO if they are larger than max_size. It does not
+ * drop all messages, as this is too difficult to parse and remove them from
+ * the FIFO. Instead, rely on the checking to ensure that messages larger
+ * than max_size aren't pushed into the memory buffer.
+ **/
+STATIC void fm10k_mbx_update_max_size(struct fm10k_mbx_info *mbx, u16 size)
+{
+ u16 len;
+
+ DEBUGFUNC("fm10k_mbx_update_max_size_hdr");
+
+ mbx->max_size = size;
+
+ /* flush any oversized messages from the queue */
+ for (len = fm10k_fifo_head_len(&mbx->tx);
+ len > size;
+ len = fm10k_fifo_head_len(&mbx->tx)) {
+ fm10k_fifo_head_drop(&mbx->tx);
+ mbx->tx_dropped++;
+ }
+}
+
+/**
+ * fm10k_mbx_connect_reset - Reset following request for reset
+ * @mbx: pointer to mailbox
+ *
+ * This function resets the mailbox to either a disconnected state
+ * or a connect state depending on the current mailbox state
+ **/
+STATIC void fm10k_mbx_connect_reset(struct fm10k_mbx_info *mbx)
+{
+ /* just do a quick resync to start of frame */
+ fm10k_mbx_reset_work(mbx);
+
+ /* reset CRC seeds */
+ mbx->local = FM10K_MBX_CRC_SEED;
+ mbx->remote = FM10K_MBX_CRC_SEED;
+
+ /* we cannot exit connect until the size is good */
+ if (mbx->state == FM10K_STATE_OPEN)
+ mbx->state = FM10K_STATE_CONNECT;
+ else
+ mbx->state = FM10K_STATE_CLOSED;
+}
+
+/**
+ * fm10k_mbx_process_connect - Process connect header
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will read an incoming connect header and reply with the
+ * appropriate message. It will return a value indicating the number of
+ * data DWORDs on success, or will return a negative value on failure.
+ **/
+STATIC s32 fm10k_mbx_process_connect(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const enum fm10k_mbx_state state = mbx->state;
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 size, head;
+
+ /* we will need to pull all of the fields for verification */
+ size = FM10K_MSG_HDR_FIELD_GET(*hdr, CONNECT_SIZE);
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
+
+ switch (state) {
+ case FM10K_STATE_DISCONNECT:
+ case FM10K_STATE_OPEN:
+ /* reset any in-progress work */
+ fm10k_mbx_connect_reset(mbx);
+ break;
+ case FM10K_STATE_CONNECT:
+ /* we cannot exit connect until the size is good */
+ if (size > mbx->rx.size) {
+ mbx->max_size = mbx->rx.size - 1;
+ } else {
+ /* record the remote system requesting connection */
+ mbx->state = FM10K_STATE_OPEN;
+
+ fm10k_mbx_update_max_size(mbx, size);
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* align our tail index to remote head index */
+ mbx->tail = head;
+
+ return fm10k_mbx_create_reply(hw, mbx, head);
+}
+
+/**
+ * fm10k_mbx_process_data - Process data header
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will read an incoming data header and reply with the
+ * appropriate message. It will return a value indicating the number of
+ * data DWORDs on success, or will return a negative value on failure.
+ **/
+STATIC s32 fm10k_mbx_process_data(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 head, tail;
+ s32 err;
+
+ DEBUGFUNC("fm10k_mbx_process_data");
+
+ /* we will need to pull all of the fields for verification */
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
+ tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL);
+
+ /* if we are in connect just update our data and go */
+ if (mbx->state == FM10K_STATE_CONNECT) {
+ mbx->tail = head;
+ mbx->state = FM10K_STATE_OPEN;
+ }
+
+ /* abort on message size errors */
+ err = fm10k_mbx_push_tail(hw, mbx, tail);
+ if (err < 0)
+ return err;
+
+ /* verify the checksum on the incoming data */
+ err = fm10k_mbx_verify_remote_crc(mbx);
+ if (err)
+ return err;
+
+ /* process messages if we have received any */
+ fm10k_mbx_dequeue_rx(hw, mbx);
+
+ return fm10k_mbx_create_reply(hw, mbx, head);
+}
+
+/**
+ * fm10k_mbx_process_disconnect - Process disconnect header
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will read an incoming disconnect header and reply with the
+ * appropriate message. It will return a value indicating the number of
+ * data DWORDs on success, or will return a negative value on failure.
+ **/
+STATIC s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const enum fm10k_mbx_state state = mbx->state;
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 head;
+ s32 err;
+
+ /* we will need to pull the header field for verification */
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
+
+ /* We should not be receiving disconnect if Rx is incomplete */
+ if (mbx->pushed)
+ return FM10K_MBX_ERR_TAIL;
+
+ /* we have already verified mbx->head == tail so we know this is 0 */
+ mbx->head_len = 0;
+
+ /* verify the checksum on the incoming header is correct */
+ err = fm10k_mbx_verify_remote_crc(mbx);
+ if (err)
+ return err;
+
+ switch (state) {
+ case FM10K_STATE_DISCONNECT:
+ case FM10K_STATE_OPEN:
+ /* state doesn't change if we still have work to do */
+ if (!fm10k_mbx_tx_complete(mbx))
+ break;
+
+ /* verify the head indicates we completed all transmits */
+ if (head != mbx->tail)
+ return FM10K_MBX_ERR_HEAD;
+
+ /* reset any in-progress work */
+ fm10k_mbx_connect_reset(mbx);
+ break;
+ default:
+ break;
+ }
+
+ return fm10k_mbx_create_reply(hw, mbx, head);
+}
+
+/**
+ * fm10k_mbx_process_error - Process error header
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will read an incoming error header and reply with the
+ * appropriate message. It will return a value indicating the number of
+ * data DWORDs on success, or will return a negative value on failure.
+ **/
+STATIC s32 fm10k_mbx_process_error(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 head;
+
+ /* we will need to pull all of the fields for verification */
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
+
+ switch (mbx->state) {
+ case FM10K_STATE_OPEN:
+ case FM10K_STATE_DISCONNECT:
+ /* flush any uncompleted work */
+ fm10k_mbx_reset_work(mbx);
+
+ /* reset CRC seeds */
+ mbx->local = FM10K_MBX_CRC_SEED;
+ mbx->remote = FM10K_MBX_CRC_SEED;
+
+ /* reset tail index and size to prepare for reconnect */
+ mbx->tail = head;
+
+ /* if open then reset max_size and go back to connect */
+ if (mbx->state == FM10K_STATE_OPEN) {
+ mbx->state = FM10K_STATE_CONNECT;
+ break;
+ }
+
+ /* send a connect message to get data flowing again */
+ fm10k_mbx_create_connect_hdr(mbx);
+ return FM10K_SUCCESS;
+ default:
+ break;
+ }
+
+ return fm10k_mbx_create_reply(hw, mbx, mbx->tail);
+}
+
+/**
+ * fm10k_mbx_process - Process mailbox interrupt
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will process incoming mailbox events and generate mailbox
+ * replies. It will return a value indicating the number of DWORDs
+ * transmitted excluding header on success or a negative value on error.
+ **/
+STATIC s32 fm10k_mbx_process(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ s32 err;
+
+ DEBUGFUNC("fm10k_mbx_process");
+
+ /* we do not read mailbox if closed */
+ if (mbx->state == FM10K_STATE_CLOSED)
+ return FM10K_SUCCESS;
+
+ /* copy data from mailbox */
+ err = fm10k_mbx_read(hw, mbx);
+ if (err)
+ return err;
+
+ /* validate type, source, and destination */
+ err = fm10k_mbx_validate_msg_hdr(mbx);
+ if (err < 0)
+ goto msg_err;
+
+ switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, TYPE)) {
+ case FM10K_MSG_CONNECT:
+ err = fm10k_mbx_process_connect(hw, mbx);
+ break;
+ case FM10K_MSG_DATA:
+ err = fm10k_mbx_process_data(hw, mbx);
+ break;
+ case FM10K_MSG_DISCONNECT:
+ err = fm10k_mbx_process_disconnect(hw, mbx);
+ break;
+ case FM10K_MSG_ERROR:
+ err = fm10k_mbx_process_error(hw, mbx);
+ break;
+ default:
+ err = FM10K_MBX_ERR_TYPE;
+ break;
+ }
+
+msg_err:
+ /* notify partner of errors on our end */
+ if (err < 0)
+ fm10k_mbx_create_error_msg(mbx, err);
+
+ /* copy data from mailbox */
+ fm10k_mbx_write(hw, mbx);
+
+ return err;
+}
+
+/**
+ * fm10k_mbx_disconnect - Shutdown mailbox connection
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will shut down the mailbox. It places the mailbox first
+ * in the disconnect state, it then allows up to a predefined timeout for
+ * the mailbox to transition to close on its own. If this does not occur
+ * then the mailbox will be forced into the closed state.
+ *
+ * Any mailbox transactions not completed before calling this function
+ * are not guaranteed to complete and may be dropped.
+ **/
+STATIC void fm10k_mbx_disconnect(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ int timeout = mbx->timeout ? FM10K_MBX_DISCONNECT_TIMEOUT : 0;
+
+ DEBUGFUNC("fm10k_mbx_disconnect");
+
+ /* Place mbx in ready to disconnect state */
+ mbx->state = FM10K_STATE_DISCONNECT;
+
+ /* trigger interrupt to start shutdown process */
+ FM10K_WRITE_MBX(hw, mbx->mbx_reg, FM10K_MBX_REQ |
+ FM10K_MBX_INTERRUPT_DISABLE);
+ do {
+ usec_delay(FM10K_MBX_POLL_DELAY);
+ mbx->ops.process(hw, mbx);
+ timeout -= FM10K_MBX_POLL_DELAY;
+ } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED));
+
+ /* in case we didn't close, just force the mailbox into shutdown and
+ * drop all left over messages in the FIFO.
+ */
+ fm10k_mbx_connect_reset(mbx);
+ fm10k_fifo_drop_all(&mbx->tx);
+
+ FM10K_WRITE_MBX(hw, mbx->mbmem_reg, 0);
+}
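+
+/* Timing sketch for the shutdown loop above: with the fm10k_mbx.h
+ * values FM10K_MBX_DISCONNECT_TIMEOUT = 500 and FM10K_MBX_POLL_DELAY
+ * = 19 (both in microseconds), the loop invokes mbx->ops.process() at
+ * most ceil(500 / 19) = 27 times, roughly 500us, before forcing the
+ * mailbox closed.
+ */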
+
+/**
+ * fm10k_mbx_connect - Start mailbox connection
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will initiate a mailbox connection. It will populate the
+ * mailbox with a broadcast connect message and then initialize the lock.
+ * This is safe since the connect message is a single DWORD so the mailbox
+ * transaction is guaranteed to be atomic.
+ *
+ * This function will return an error if the mailbox has not been initiated
+ * or is currently in use.
+ **/
+STATIC s32 fm10k_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
+{
+ DEBUGFUNC("fm10k_mbx_connect");
+
+ /* we cannot connect an uninitialized mailbox */
+ if (!mbx->rx.buffer)
+ return FM10K_MBX_ERR_NO_SPACE;
+
+ /* we cannot connect an already connected mailbox */
+ if (mbx->state != FM10K_STATE_CLOSED)
+ return FM10K_MBX_ERR_BUSY;
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = FM10K_MBX_INIT_TIMEOUT;
+
+ /* Place mbx in ready to connect state */
+ mbx->state = FM10K_STATE_CONNECT;
+
+ fm10k_mbx_reset_work(mbx);
+
+ fm10k_mbx_create_fake_disconnect_hdr(mbx);
+ FM10K_WRITE_MBX(hw, mbx->mbmem_reg ^ mbx->mbmem_len, mbx->mbx_hdr);
+
+ /* enable interrupt and notify other party of new message */
+ mbx->mbx_lock = FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT |
+ FM10K_MBX_INTERRUPT_ENABLE;
+
+ /* generate and load connect header into mailbox */
+ fm10k_mbx_create_connect_hdr(mbx);
+ fm10k_mbx_write(hw, mbx);
+
+ return FM10K_SUCCESS;
+}
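+
+/* Note on the mbmem_reg ^ mbmem_len write above: the two halves of the
+ * mailbox memory are sized as a power of 2, so XORing the base offset
+ * with the half length flips between them (see FM10K_MBMEM_PF_XOR and
+ * FM10K_VFMBMEM_VF_XOR in fm10k_mbx.h). The fake disconnect header is
+ * therefore written to the opposite half from the one this end
+ * normally transmits on.
+ */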
+
+/**
+ * fm10k_mbx_validate_handlers - Validate layout of message parsing data
+ * @msg_data: handlers for mailbox events
+ *
+ * This function validates the layout of the message parsing data. This
+ * should be mostly static, but it is important to catch any errors that
+ * are made when constructing the parsers.
+ **/
+STATIC s32 fm10k_mbx_validate_handlers(const struct fm10k_msg_data *msg_data)
+{
+ const struct fm10k_tlv_attr *attr;
+ unsigned int id;
+
+ DEBUGFUNC("fm10k_mbx_validate_handlers");
+
+ /* Allow NULL mailboxes that transmit but don't receive */
+ if (!msg_data)
+ return FM10K_SUCCESS;
+
+ while (msg_data->id != FM10K_TLV_ERROR) {
+ /* all messages should have a function handler */
+ if (!msg_data->func)
+ return FM10K_ERR_PARAM;
+
+ /* parser is optional */
+ attr = msg_data->attr;
+ if (attr) {
+ while (attr->id != FM10K_TLV_ERROR) {
+ id = attr->id;
+ attr++;
+ /* ID should always be increasing */
+ if (id >= attr->id)
+ return FM10K_ERR_PARAM;
+ /* ID should fit in results array */
+ if (id >= FM10K_TLV_RESULTS_MAX)
+ return FM10K_ERR_PARAM;
+ }
+
+ /* verify terminator is in the list */
+ if (attr->id != FM10K_TLV_ERROR)
+ return FM10K_ERR_PARAM;
+ }
+
+ id = msg_data->id;
+ msg_data++;
+ /* ID should always be increasing */
+ if (id >= msg_data->id)
+ return FM10K_ERR_PARAM;
+ }
+
+ /* verify terminator is in the list */
+ if ((msg_data->id != FM10K_TLV_ERROR) || !msg_data->func)
+ return FM10K_ERR_PARAM;
+
+ return FM10K_SUCCESS;
+}
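+
+/* A minimal handler table that passes the checks above (a sketch; all
+ * example_* names are hypothetical and the field order assumes the
+ * struct fm10k_msg_data layout of id/attr/func from fm10k_tlv.h):
+ *
+ *	static const struct fm10k_msg_data example_msg_data[] = {
+ *		{ 1, example_attr, example_msg_handler },
+ *		{ 2, NULL, example_other_handler },
+ *		{ FM10K_TLV_ERROR, NULL, example_error_handler },
+ *	};
+ *
+ * IDs must strictly increase and the terminating FM10K_TLV_ERROR entry
+ * must still carry a handler, or validation returns FM10K_ERR_PARAM.
+ */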
+
+/**
+ * fm10k_mbx_register_handlers - Register a set of handler ops for mailbox
+ * @mbx: pointer to mailbox
+ * @msg_data: handlers for mailbox events
+ *
+ * This function associates a set of message handling ops with a mailbox.
+ **/
+STATIC s32 fm10k_mbx_register_handlers(struct fm10k_mbx_info *mbx,
+ const struct fm10k_msg_data *msg_data)
+{
+ DEBUGFUNC("fm10k_mbx_register_handlers");
+
+ /* validate layout of handlers before assigning them */
+ if (fm10k_mbx_validate_handlers(msg_data))
+ return FM10K_ERR_PARAM;
+
+ /* initialize the message handlers */
+ mbx->msg_data = msg_data;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_pfvf_mbx_init - Initialize mailbox memory for PF/VF mailbox
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @msg_data: handlers for mailbox events
+ * @id: ID reference for PF as it supports up to 64 PF/VF mailboxes
+ *
+ * This function initializes the mailbox for use. It will split the
+ * buffer provided and use it to populate both the Tx and Rx FIFOs.
+ * In order to allow for easy masking of head/tail the value reported
+ * in size must be a power of 2 and is reported in DWORDs, not bytes.
+ * Any invalid value will cause the mailbox to return an error.
+ **/
+s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
+ const struct fm10k_msg_data *msg_data, u8 id)
+{
+ DEBUGFUNC("fm10k_pfvf_mbx_init");
+
+ /* initialize registers */
+ switch (hw->mac.type) {
+ case fm10k_mac_vf:
+ mbx->mbx_reg = FM10K_VFMBX;
+ mbx->mbmem_reg = FM10K_VFMBMEM(FM10K_VFMBMEM_VF_XOR);
+ break;
+ case fm10k_mac_pf:
+ /* there are only 64 VF <-> PF mailboxes */
+ if (id < 64) {
+ mbx->mbx_reg = FM10K_MBX(id);
+ mbx->mbmem_reg = FM10K_MBMEM_VF(id, 0);
+ break;
+ }
+ /* fall through */
+ default:
+ return FM10K_MBX_ERR_NO_MBX;
+ }
+
+ /* start out in closed state */
+ mbx->state = FM10K_STATE_CLOSED;
+
+ /* validate layout of handlers before assigning them */
+ if (fm10k_mbx_validate_handlers(msg_data))
+ return FM10K_ERR_PARAM;
+
+ /* initialize the message handlers */
+ mbx->msg_data = msg_data;
+
+ /* start mailbox as timed out and let the reset_hw call
+ * set the timeout value to begin communications
+ */
+ mbx->timeout = 0;
+ mbx->usec_delay = FM10K_MBX_INIT_DELAY;
+
+ /* initialize tail and head */
+ mbx->tail = 1;
+ mbx->head = 1;
+
+ /* initialize CRC seeds */
+ mbx->local = FM10K_MBX_CRC_SEED;
+ mbx->remote = FM10K_MBX_CRC_SEED;
+
+ /* Split buffer for use by Tx/Rx FIFOs */
+ mbx->max_size = FM10K_MBX_MSG_MAX_SIZE;
+ mbx->mbmem_len = FM10K_VFMBMEM_VF_XOR;
+
+ /* initialize the FIFOs, sizes are in 4 byte increments */
+ fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE);
+ fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE],
+ FM10K_MBX_RX_BUFFER_SIZE);
+
+ /* initialize function pointers */
+ mbx->ops.connect = fm10k_mbx_connect;
+ mbx->ops.disconnect = fm10k_mbx_disconnect;
+ mbx->ops.rx_ready = fm10k_mbx_rx_ready;
+ mbx->ops.tx_ready = fm10k_mbx_tx_ready;
+ mbx->ops.tx_complete = fm10k_mbx_tx_complete;
+ mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx;
+ mbx->ops.process = fm10k_mbx_process;
+ mbx->ops.register_handlers = fm10k_mbx_register_handlers;
+
+ return FM10K_SUCCESS;
+}
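+
+/* Typical bring-up sketch (error handling elided; this assumes the hw
+ * structure embeds its mailbox info as hw->mbx and that msg_data is a
+ * table accepted by fm10k_mbx_validate_handlers()):
+ *
+ *	err = fm10k_pfvf_mbx_init(hw, &hw->mbx, msg_data, 0);
+ *	if (!err)
+ *		err = hw->mbx.ops.connect(hw, &hw->mbx);
+ *
+ * After connect() the mailbox sits in FM10K_STATE_CONNECT until the
+ * remote endpoint replies and ops.process() moves it to open.
+ */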
+
+/**
+ * fm10k_sm_mbx_create_data_hdr - Generate a mailbox header for local FIFO
+ * @mbx: pointer to mailbox
+ *
+ * This function generates a data mailbox header for the local FIFO
+ **/
+STATIC void fm10k_sm_mbx_create_data_hdr(struct fm10k_mbx_info *mbx)
+{
+ if (mbx->tail_len)
+ mbx->mbx_lock |= FM10K_MBX_REQ;
+
+ mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->remote, SM_VER) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD);
+}
+
+/**
+ * fm10k_sm_mbx_create_connect_hdr - Generate a mailbox header for local FIFO
+ * @mbx: pointer to mailbox
+ * @err: error flags to report if any
+ *
+ * This function generates a connect mailbox header for the local FIFO
+ **/
+STATIC void fm10k_sm_mbx_create_connect_hdr(struct fm10k_mbx_info *mbx, u8 err)
+{
+ if (mbx->local)
+ mbx->mbx_lock |= FM10K_MBX_REQ;
+
+ mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->remote, SM_VER) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD) |
+ FM10K_MSG_HDR_FIELD_SET(err, SM_ERR);
+}
+
+/**
+ * fm10k_sm_mbx_connect_reset - Reset following request for reset
+ * @mbx: pointer to mailbox
+ *
+ * This function resets the mailbox to a just connected state
+ **/
+STATIC void fm10k_sm_mbx_connect_reset(struct fm10k_mbx_info *mbx)
+{
+ /* flush any uncompleted work */
+ fm10k_mbx_reset_work(mbx);
+
+ /* set local version to max and remote version to 0 */
+ mbx->local = FM10K_SM_MBX_VERSION;
+ mbx->remote = 0;
+
+ /* initialize tail and head */
+ mbx->tail = 1;
+ mbx->head = 1;
+
+ /* reset state back to connect */
+ mbx->state = FM10K_STATE_CONNECT;
+}
+
+/**
+ * fm10k_sm_mbx_connect - Start switch manager mailbox connection
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will initiate a mailbox connection with the switch
+ * manager. To do this it will first disconnect the mailbox, and then
+ * reconnect it in order to complete a reset of the mailbox.
+ *
+ * This function will return an error if the mailbox has not been initiated
+ * or is currently in use.
+ **/
+STATIC s32 fm10k_sm_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
+{
+ DEBUGFUNC("fm10k_mbx_connect");
+
+ /* we cannot connect an uninitialized mailbox */
+ if (!mbx->rx.buffer)
+ return FM10K_MBX_ERR_NO_SPACE;
+
+ /* we cannot connect an already connected mailbox */
+ if (mbx->state != FM10K_STATE_CLOSED)
+ return FM10K_MBX_ERR_BUSY;
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = FM10K_MBX_INIT_TIMEOUT;
+
+ /* Place mbx in ready to connect state */
+ mbx->state = FM10K_STATE_CONNECT;
+ mbx->max_size = FM10K_MBX_MSG_MAX_SIZE;
+
+ /* reset interface back to connect */
+ fm10k_sm_mbx_connect_reset(mbx);
+
+ /* enable interrupt and notify other party of new message */
+ mbx->mbx_lock = FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT |
+ FM10K_MBX_INTERRUPT_ENABLE;
+
+ /* generate and load connect header into mailbox */
+ fm10k_sm_mbx_create_connect_hdr(mbx, 0);
+ fm10k_mbx_write(hw, mbx);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_sm_mbx_disconnect - Shutdown mailbox connection
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will shut down the mailbox. It places the mailbox first
+ * in the disconnect state, it then allows up to a predefined timeout for
+ * the mailbox to transition to close on its own. If this does not occur
+ * then the mailbox will be forced into the closed state.
+ *
+ * Any mailbox transactions not completed before calling this function
+ * are not guaranteed to complete and may be dropped.
+ **/
+STATIC void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ int timeout = mbx->timeout ? FM10K_MBX_DISCONNECT_TIMEOUT : 0;
+
+ DEBUGFUNC("fm10k_sm_mbx_disconnect");
+
+ /* Place mbx in ready to disconnect state */
+ mbx->state = FM10K_STATE_DISCONNECT;
+
+ /* trigger interrupt to start shutdown process */
+ FM10K_WRITE_REG(hw, mbx->mbx_reg, FM10K_MBX_REQ |
+ FM10K_MBX_INTERRUPT_DISABLE);
+ do {
+ usec_delay(FM10K_MBX_POLL_DELAY);
+ mbx->ops.process(hw, mbx);
+ timeout -= FM10K_MBX_POLL_DELAY;
+ } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED));
+
+ /* in case we didn't close, just force the mailbox into shutdown */
+ mbx->state = FM10K_STATE_CLOSED;
+ mbx->remote = 0;
+ fm10k_mbx_reset_work(mbx);
+ fm10k_fifo_drop_all(&mbx->tx);
+
+ FM10K_WRITE_REG(hw, mbx->mbmem_reg, 0);
+}
+
+/**
+ * fm10k_sm_mbx_validate_fifo_hdr - Validate fields in the remote FIFO header
+ * @mbx: pointer to mailbox
+ *
+ * This function parses the fields in the mailbox header and returns
+ * an error if the header contains any of a number of invalid configurations
+ * including unrecognized offsets or version numbers.
+ **/
+STATIC s32 fm10k_sm_mbx_validate_fifo_hdr(struct fm10k_mbx_info *mbx)
+{
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 tail, head, ver;
+
+ DEBUGFUNC("fm10k_mbx_validate_msg_hdr");
+
+ tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL);
+ ver = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_VER);
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD);
+
+ switch (ver) {
+ case 0:
+ break;
+ case FM10K_SM_MBX_VERSION:
+ if (!head || head > FM10K_SM_MBX_FIFO_LEN)
+ return FM10K_MBX_ERR_HEAD;
+ if (!tail || tail > FM10K_SM_MBX_FIFO_LEN)
+ return FM10K_MBX_ERR_TAIL;
+ if (mbx->tail < head)
+ head += mbx->mbmem_len - 1;
+ if (tail < mbx->head)
+ tail += mbx->mbmem_len - 1;
+ if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len)
+ return FM10K_MBX_ERR_HEAD;
+ if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len)
+ break;
+ return FM10K_MBX_ERR_TAIL;
+ default:
+ return FM10K_MBX_ERR_SRC;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_sm_mbx_process_error - Process header with error flag set
+ * @mbx: pointer to mailbox
+ *
+ * This function is meant to respond to a request where the error flag
+ * is set. As a result we will terminate a connection if one is present
+ * and fall back into the reset state with a connection header of version
+ * 0 (RESET).
+ **/
+STATIC void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx)
+{
+ const enum fm10k_mbx_state state = mbx->state;
+
+ switch (state) {
+ case FM10K_STATE_DISCONNECT:
+ /* if there is an error just disconnect */
+ mbx->remote = 0;
+ break;
+ case FM10K_STATE_OPEN:
+ /* flush any uncompleted work */
+ fm10k_sm_mbx_connect_reset(mbx);
+ break;
+ case FM10K_STATE_CONNECT:
+ /* try connecting at a lower version */
+ if (mbx->remote) {
+ while (mbx->local > 1)
+ mbx->local--;
+ mbx->remote = 0;
+ }
+ break;
+ default:
+ break;
+ }
+
+ fm10k_sm_mbx_create_connect_hdr(mbx, 0);
+}
+
+/**
+ * fm10k_sm_mbx_create_error_msg - Process an error in FIFO hdr
+ * @mbx: pointer to mailbox
+ * @err: local error encountered
+ *
+ * This function will interpret the error provided by err, and based on
+ * that it may set the error bit in the local message header
+ **/
+STATIC void fm10k_sm_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err)
+{
+ /* only generate an error message for these types */
+ switch (err) {
+ case FM10K_MBX_ERR_TAIL:
+ case FM10K_MBX_ERR_HEAD:
+ case FM10K_MBX_ERR_SRC:
+ case FM10K_MBX_ERR_SIZE:
+ case FM10K_MBX_ERR_RSVD0:
+ break;
+ default:
+ return;
+ }
+
+ /* process it as though we received an error, and send error reply */
+ fm10k_sm_mbx_process_error(mbx);
+ fm10k_sm_mbx_create_connect_hdr(mbx, 1);
+}
+
+/**
+ * fm10k_sm_mbx_receive - Take message from switch manager FIFO into Rx FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @tail: tail index reported by the remote switch manager FIFO
+ *
+ * This function will dequeue one message from the Rx switch manager mailbox
+ * FIFO and place it in the Rx mailbox FIFO for processing by software.
+ **/
+STATIC s32 fm10k_sm_mbx_receive(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx,
+ u16 tail)
+{
+ /* reduce length by 1 to convert to a mask */
+ u16 mbmem_len = mbx->mbmem_len - 1;
+ s32 err;
+
+ DEBUGFUNC("fm10k_sm_mbx_receive");
+
+ /* push tail in front of head */
+ if (tail < mbx->head)
+ tail += mbmem_len;
+
+ /* copy data to the Rx FIFO */
+ err = fm10k_mbx_push_tail(hw, mbx, tail);
+ if (err < 0)
+ return err;
+
+ /* process messages if we have received any */
+ fm10k_mbx_dequeue_rx(hw, mbx);
+
+ /* guarantee head aligns with the end of the last message */
+ mbx->head = fm10k_mbx_head_sub(mbx, mbx->pushed);
+ mbx->pushed = 0;
+
+ /* clear any extra bits left over since index adds 1 extra bit */
+ if (mbx->head > mbmem_len)
+ mbx->head -= mbmem_len;
+
+ return err;
+}
+
+/**
+ * fm10k_sm_mbx_transmit - Take message from Tx and put it in Tx mailbox FIFO
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @head: head index reported by the remote switch manager FIFO
+ *
+ * This function will dequeue one message from the Tx mailbox FIFO and place
+ * it in the Tx switch manager mailbox FIFO for processing by hardware.
+ **/
+STATIC void fm10k_sm_mbx_transmit(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx, u16 head)
+{
+ struct fm10k_mbx_fifo *fifo = &mbx->tx;
+ /* reduce length by 1 to convert to a mask */
+ u16 mbmem_len = mbx->mbmem_len - 1;
+ u16 tail_len, len = 0;
+ u32 *msg;
+
+ DEBUGFUNC("fm10k_sm_mbx_transmit");
+
+ /* push head behind tail */
+ if (mbx->tail < head)
+ head += mbmem_len;
+
+ fm10k_mbx_pull_head(hw, mbx, head);
+
+ /* determine msg aligned offset for end of buffer */
+ do {
+ msg = fifo->buffer + fm10k_fifo_head_offset(fifo, len);
+ tail_len = len;
+ len += FM10K_TLV_DWORD_LEN(*msg);
+ } while ((len <= mbx->tail_len) && (len < mbmem_len));
+
+ /* guarantee we stop on a message boundary */
+ if (mbx->tail_len > tail_len) {
+ mbx->tail = fm10k_mbx_tail_sub(mbx, mbx->tail_len - tail_len);
+ mbx->tail_len = tail_len;
+ }
+
+ /* clear any extra bits left over since index adds 1 extra bit */
+ if (mbx->tail > mbmem_len)
+ mbx->tail -= mbmem_len;
+}
+
+/**
+ * fm10k_sm_mbx_create_reply - Generate reply based on state and remote head
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @head: acknowledgement number
+ *
+ * This function will generate an outgoing message based on the current
+ * mailbox state and the remote FIFO head. It does not return a value;
+ * the reply is placed in the mailbox header for transmission.
+ **/
+STATIC void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx, u16 head)
+{
+ switch (mbx->state) {
+ case FM10K_STATE_OPEN:
+ case FM10K_STATE_DISCONNECT:
+ /* flush out Tx data */
+ fm10k_sm_mbx_transmit(hw, mbx, head);
+
+ /* generate new header based on data */
+ if (mbx->tail_len || (mbx->state == FM10K_STATE_OPEN)) {
+ fm10k_sm_mbx_create_data_hdr(mbx);
+ } else {
+ mbx->remote = 0;
+ fm10k_sm_mbx_create_connect_hdr(mbx, 0);
+ }
+ break;
+ case FM10K_STATE_CONNECT:
+ case FM10K_STATE_CLOSED:
+ fm10k_sm_mbx_create_connect_hdr(mbx, 0);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * fm10k_sm_mbx_process_reset - Process header with version == 0 (RESET)
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function is meant to respond to a request where the version data
+ * is set to 0. As such we will either terminate the connection or go
+ * into the connect state in order to re-establish the connection. This
+ * function can also be used to respond to an error as the connection
+ * resetting would also be a means of dealing with errors.
+ **/
+STATIC void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const enum fm10k_mbx_state state = mbx->state;
+
+ switch (state) {
+ case FM10K_STATE_DISCONNECT:
+ /* drop remote connections and disconnect */
+ mbx->state = FM10K_STATE_CLOSED;
+ mbx->remote = 0;
+ mbx->local = 0;
+ break;
+ case FM10K_STATE_OPEN:
+ /* flush any incomplete work */
+ fm10k_sm_mbx_connect_reset(mbx);
+ break;
+ case FM10K_STATE_CONNECT:
+ /* Update remote value to match local value */
+ mbx->remote = mbx->local;
+ default:
+ break;
+ }
+
+ fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail);
+}
+
+/**
+ * fm10k_sm_mbx_process_version_1 - Process header with version == 1
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function is meant to process messages received when the remote
+ * mailbox is active.
+ **/
+STATIC s32 fm10k_sm_mbx_process_version_1(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ const u32 *hdr = &mbx->mbx_hdr;
+ u16 head, tail;
+ s32 len;
+
+ /* pull all fields needed for verification */
+ tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL);
+ head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD);
+
+ /* if we are in connect and wanting version 1 then start up and go */
+ if (mbx->state == FM10K_STATE_CONNECT) {
+ if (!mbx->remote)
+ goto send_reply;
+ if (mbx->remote != 1)
+ return FM10K_MBX_ERR_SRC;
+
+ mbx->state = FM10K_STATE_OPEN;
+ }
+
+ do {
+ /* abort on message size errors */
+ len = fm10k_sm_mbx_receive(hw, mbx, tail);
+ if (len < 0)
+ return len;
+
+ /* continue until we have flushed the Rx FIFO */
+ } while (len);
+
+send_reply:
+ fm10k_sm_mbx_create_reply(hw, mbx, head);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_sm_mbx_process - Process mailbox switch mailbox interrupt
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ *
+ * This function will process incoming mailbox events and generate mailbox
+ * replies. It will return a value indicating the number of DWORDs
+ * transmitted excluding header on success or a negative value on error.
+ **/
+STATIC s32 fm10k_sm_mbx_process(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
+{
+ s32 err;
+
+ DEBUGFUNC("fm10k_sm_mbx_process");
+
+ /* we do not read mailbox if closed */
+ if (mbx->state == FM10K_STATE_CLOSED)
+ return FM10K_SUCCESS;
+
+ /* retrieve data from switch manager */
+ err = fm10k_mbx_read(hw, mbx);
+ if (err)
+ return err;
+
+ err = fm10k_sm_mbx_validate_fifo_hdr(mbx);
+ if (err < 0)
+ goto fifo_err;
+
+ if (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_ERR)) {
+ fm10k_sm_mbx_process_error(mbx);
+ goto fifo_err;
+ }
+
+ switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) {
+ case 0:
+ fm10k_sm_mbx_process_reset(hw, mbx);
+ break;
+ case FM10K_SM_MBX_VERSION:
+ err = fm10k_sm_mbx_process_version_1(hw, mbx);
+ break;
+ }
+
+fifo_err:
+ if (err < 0)
+ fm10k_sm_mbx_create_error_msg(mbx, err);
+
+ /* report data to switch manager */
+ fm10k_mbx_write(hw, mbx);
+
+ return err;
+}
+
+/**
+ * fm10k_sm_mbx_init - Initialize mailbox memory for PF/SM mailbox
+ * @hw: pointer to hardware structure
+ * @mbx: pointer to mailbox
+ * @msg_data: handlers for mailbox events
+ *
+ * This function initializes the mailbox memory for the PF/SM mailbox
+ **/
+s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
+ const struct fm10k_msg_data *msg_data)
+{
+ DEBUGFUNC("fm10k_sm_mbx_init");
+ UNREFERENCED_1PARAMETER(hw);
+
+ mbx->mbx_reg = FM10K_GMBX;
+ mbx->mbmem_reg = FM10K_MBMEM_PF(0);
+
+ /* start out in closed state */
+ mbx->state = FM10K_STATE_CLOSED;
+
+ /* validate layout of handlers before assigning them */
+ if (fm10k_mbx_validate_handlers(msg_data))
+ return FM10K_ERR_PARAM;
+
+ /* initialize the message handlers */
+ mbx->msg_data = msg_data;
+
+ /* start mailbox as timed out and let the reset_hw call
+ * set the timeout value to begin communications
+ */
+ mbx->timeout = 0;
+ mbx->usec_delay = FM10K_MBX_INIT_DELAY;
+
+ /* Split buffer for use by Tx/Rx FIFOs */
+ mbx->max_size = FM10K_MBX_MSG_MAX_SIZE;
+ mbx->mbmem_len = FM10K_MBMEM_PF_XOR;
+
+ /* initialize the FIFOs, sizes are in 4 byte increments */
+ fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE);
+ fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE],
+ FM10K_MBX_RX_BUFFER_SIZE);
+
+ /* initialize function pointers */
+ mbx->ops.connect = fm10k_sm_mbx_connect;
+ mbx->ops.disconnect = fm10k_sm_mbx_disconnect;
+ mbx->ops.rx_ready = fm10k_mbx_rx_ready;
+ mbx->ops.tx_ready = fm10k_mbx_tx_ready;
+ mbx->ops.tx_complete = fm10k_mbx_tx_complete;
+ mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx;
+ mbx->ops.process = fm10k_sm_mbx_process;
+ mbx->ops.register_handlers = fm10k_mbx_register_handlers;
+
+ return FM10K_SUCCESS;
+}
diff --git a/src/dpdk22/drivers/net/fm10k/base/fm10k_mbx.h b/src/dpdk22/drivers/net/fm10k/base/fm10k_mbx.h
new file mode 100644
index 00000000..4b22f0ef
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/base/fm10k_mbx.h
@@ -0,0 +1,331 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_MBX_H_
+#define _FM10K_MBX_H_
+
+/* forward declaration */
+struct fm10k_mbx_info;
+
+#include "fm10k_type.h"
+#include "fm10k_tlv.h"
+
+/* PF Mailbox Registers */
+#define FM10K_MBMEM(_n) ((_n) + 0x18000)
+#define FM10K_MBMEM_VF(_n, _m) (((_n) * 0x10) + (_m) + 0x18000)
+#define FM10K_MBMEM_SM(_n) ((_n) + 0x18400)
+#define FM10K_MBMEM_PF(_n) ((_n) + 0x18600)
+/* XOR provides means of switching from Tx to Rx FIFO */
+#define FM10K_MBMEM_PF_XOR (FM10K_MBMEM_SM(0) ^ FM10K_MBMEM_PF(0))
+#define FM10K_MBX(_n) ((_n) + 0x18800)
+#define FM10K_MBX_OWNER 0x00000001
+#define FM10K_MBX_REQ 0x00000002
+#define FM10K_MBX_ACK 0x00000004
+#define FM10K_MBX_REQ_INTERRUPT 0x00000008
+#define FM10K_MBX_ACK_INTERRUPT 0x00000010
+#define FM10K_MBX_INTERRUPT_ENABLE 0x00000020
+#define FM10K_MBX_INTERRUPT_DISABLE 0x00000040
+#define FM10K_MBICR(_n) ((_n) + 0x18840)
+#define FM10K_GMBX 0x18842
+
+/* VF Mailbox Registers */
+#define FM10K_VFMBX 0x00010
+#define FM10K_VFMBMEM(_n) ((_n) + 0x00020)
+#define FM10K_VFMBMEM_LEN 16
+#define FM10K_VFMBMEM_VF_XOR (FM10K_VFMBMEM_LEN / 2)
+
+/* Delays/timeouts */
+#define FM10K_MBX_DISCONNECT_TIMEOUT 500
+#define FM10K_MBX_POLL_DELAY 19
+#define FM10K_MBX_INT_DELAY 20
+
+#define FM10K_WRITE_MBX(hw, reg, value) FM10K_WRITE_REG(hw, reg, value)
+
+/* PF/VF Mailbox state machine
+ *
+ * +----------+ connect() +----------+
+ * | CLOSED | --------------> | CONNECT |
+ * +----------+ +----------+
+ * ^ ^ |
+ * | rcv: rcv: | | rcv:
+ * | Connect Disconnect | | Connect
+ * | Disconnect Error | | Data
+ * | | |
+ * | | V
+ * +----------+ disconnect() +----------+
+ * |DISCONNECT| <-------------- | OPEN |
+ * +----------+ +----------+
+ *
+ * The diagram above describes the PF/VF mailbox state machine. There
+ * are four main states to this machine.
+ * Closed: This state represents a mailbox that is in a standby state
+ * with interrupts disabled. In this state the mailbox should not
+ * read the mailbox or write any data. The only means of exiting
+ * this state is for the system to make the connect() call for the
+ * mailbox, it will then transition to the connect state.
+ * Connect: In this state the mailbox is seeking a connection. It will
+ * post a connect message with no specified destination and will
+ * wait for a reply from the other side of the mailbox. This state
+ * is exited when either a connect with the local mailbox as the
+ * destination is received or when a data message is received with
+ * a valid sequence number.
+ * Open: In this state the mailbox is able to transfer data between the local
+ * entity and the remote. It will fall back to connect in the event of
+ * receiving either an error message, or a disconnect message. It will
+ * transition to disconnect on a call to disconnect();
+ * Disconnect: In this state the mailbox is attempting to gracefully terminate
+ * the connection. It will do so at the first point where it knows
+ * that the remote endpoint is either done sending, or when the
+ * remote endpoint has fallen back into connect.
+ */
+enum fm10k_mbx_state {
+ FM10K_STATE_CLOSED,
+ FM10K_STATE_CONNECT,
+ FM10K_STATE_OPEN,
+ FM10K_STATE_DISCONNECT,
+};
+
+/* PF/VF Mailbox header format
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Size/Err_no/CRC | Rsvd0 | Head | Tail | Type |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * The layout above describes the format for the header used in the PF/VF
+ * mailbox. The header is broken out into the following fields:
+ * Type: There are 4 supported message types
+ * 0x8: Data header - used to transport message data
+ * 0xC: Connect header - used to establish connection
+ * 0xD: Disconnect header - used to tear down a connection
+ * 0xE: Error header - used to address message exceptions
+ * Tail: Tail index for local FIFO
+ * Tail index actually consists of two parts. The MSB of
+ * the head is a loop tracker, it is 0 on an even numbered
+ * loop through the FIFO, and 1 on the odd numbered loops.
+ * To get the actual mailbox offset based on the tail it
+ * is necessary to add bit 3 to bit 0 and clear bit 3. This
+ * gives us a valid range of 0x1 - 0xE.
+ * Head: Head index for remote FIFO
+ * Head index follows the same format as the tail index.
+ * Rsvd0: Reserved 0 portion of the mailbox header
+ * CRC: Running CRC for all data since connect plus current message header
+ * Size: Maximum message size - Applies only to connect headers
+ * The maximum message size is provided during connect to avoid
+ * jamming the mailbox with messages that do not fit.
+ * Err_no: Error number - Applies only to error headers
+ * The error number provides an indication of the type of error
+ * experienced.
+ */
+
+/* macros for retrieving and setting header values */
+#define FM10K_MSG_HDR_MASK(name) \
+ ((0x1u << FM10K_MSG_##name##_SIZE) - 1)
+#define FM10K_MSG_HDR_FIELD_SET(value, name) \
+ (((u32)(value) & FM10K_MSG_HDR_MASK(name)) << FM10K_MSG_##name##_SHIFT)
+#define FM10K_MSG_HDR_FIELD_GET(value, name) \
+ ((u16)((value) >> FM10K_MSG_##name##_SHIFT) & FM10K_MSG_HDR_MASK(name))
+
+/* offsets shared between all headers */
+#define FM10K_MSG_TYPE_SHIFT 0
+#define FM10K_MSG_TYPE_SIZE 4
+#define FM10K_MSG_TAIL_SHIFT 4
+#define FM10K_MSG_TAIL_SIZE 4
+#define FM10K_MSG_HEAD_SHIFT 8
+#define FM10K_MSG_HEAD_SIZE 4
+#define FM10K_MSG_RSVD0_SHIFT 12
+#define FM10K_MSG_RSVD0_SIZE 4
+
+/* offsets for data/disconnect headers */
+#define FM10K_MSG_CRC_SHIFT 16
+#define FM10K_MSG_CRC_SIZE 16
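+
+/* Worked example of the field macros: a data header with tail = 0x3,
+ * head = 0x5 and CRC = 0xBEEF packs as
+ *
+ *	FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DATA, TYPE) |
+ *	FM10K_MSG_HDR_FIELD_SET(0x3, TAIL) |
+ *	FM10K_MSG_HDR_FIELD_SET(0x5, HEAD) |
+ *	FM10K_MSG_HDR_FIELD_SET(0xBEEF, CRC)
+ *		= 0x8 | (0x3 << 4) | (0x5 << 8) | (0xBEEF << 16)
+ *		= 0xBEEF0538
+ *
+ * and FM10K_MSG_HDR_FIELD_GET(0xBEEF0538, HEAD) recovers 0x5.
+ */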
+
+/* offsets for connect headers */
+#define FM10K_MSG_CONNECT_SIZE_SHIFT 16
+#define FM10K_MSG_CONNECT_SIZE_SIZE 16
+
+/* offsets for error headers */
+#define FM10K_MSG_ERR_NO_SHIFT 16
+#define FM10K_MSG_ERR_NO_SIZE 16
+
+enum fm10k_msg_type {
+ FM10K_MSG_DATA = 0x8,
+ FM10K_MSG_CONNECT = 0xC,
+ FM10K_MSG_DISCONNECT = 0xD,
+ FM10K_MSG_ERROR = 0xE,
+};
+
+/* HNI/SM Mailbox FIFO format
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-------+-----------------------+-------+-----------------------+
+ * | Error | Remote Head |Version| Local Tail |
+ * +-------+-----------------------+-------+-----------------------+
+ * | |
+ * . Local FIFO Data .
+ * . .
+ * +-------+-----------------------+-------+-----------------------+
+ *
+ * The layout above describes the format for the FIFOs used by the host
+ * network interface and the switch manager to communicate messages back
+ * and forth. Both the HNI and the switch maintain one such FIFO. The
+ * layout in memory has the switch manager FIFO followed immediately by
+ * the HNI FIFO. For this reason I am using just the pointer to the
+ * HNI FIFO in the mailbox ops as the offset between the two is fixed.
+ *
+ * The header for the FIFO is broken out into the following fields:
+ * Local Tail: Offset into FIFO region for next DWORD to write.
+ * Version: Version info for mailbox, only values of 0/1 are supported.
+ * Remote Head: Offset into remote FIFO to indicate how much we have read.
+ * Error: Error indication, values TBD.
+ */
+
+/* version number for switch manager mailboxes */
+#define FM10K_SM_MBX_VERSION 1
+#define FM10K_SM_MBX_FIFO_LEN (FM10K_MBMEM_PF_XOR - 1)
+#define FM10K_SM_MBX_FIFO_HDR_LEN 1
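+
+/* Size sketch: FM10K_MBMEM_PF_XOR = FM10K_MBMEM_SM(0) ^ FM10K_MBMEM_PF(0)
+ * = 0x18400 ^ 0x18600 = 0x200, so each switch manager FIFO region spans
+ * 512 DWORDs and FM10K_SM_MBX_FIFO_LEN works out to 511, leaving room
+ * for the one-DWORD FIFO header.
+ */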
+
+/* offsets shared between all SM FIFO headers */
+#define FM10K_MSG_SM_TAIL_SHIFT 0
+#define FM10K_MSG_SM_TAIL_SIZE 12
+#define FM10K_MSG_SM_VER_SHIFT 12
+#define FM10K_MSG_SM_VER_SIZE 4
+#define FM10K_MSG_SM_HEAD_SHIFT 16
+#define FM10K_MSG_SM_HEAD_SIZE 12
+#define FM10K_MSG_SM_ERR_SHIFT 28
+#define FM10K_MSG_SM_ERR_SIZE 4
+
+/* All error messages returned by mailbox functions
+ * The value -511 is 0xFE01 viewed as a 16-bit value. The idea is to order the errors
+ * from 0xFE01 - 0xFEFF so error codes are easily visible in the mailbox
+ * messages. This also helps to avoid error number collisions as Linux
+ * doesn't appear to use error numbers 256 - 511.
+ */
+#define FM10K_MBX_ERR(_n) ((_n) - 512)
+#define FM10K_MBX_ERR_NO_MBX FM10K_MBX_ERR(0x01)
+#define FM10K_MBX_ERR_NO_MSG FM10K_MBX_ERR(0x02)
+#define FM10K_MBX_ERR_NO_SPACE FM10K_MBX_ERR(0x03)
+#define FM10K_MBX_ERR_LOCK FM10K_MBX_ERR(0x04)
+#define FM10K_MBX_ERR_TAIL FM10K_MBX_ERR(0x05)
+#define FM10K_MBX_ERR_HEAD FM10K_MBX_ERR(0x06)
+#define FM10K_MBX_ERR_DST FM10K_MBX_ERR(0x07)
+#define FM10K_MBX_ERR_SRC FM10K_MBX_ERR(0x08)
+#define FM10K_MBX_ERR_TYPE FM10K_MBX_ERR(0x09)
+#define FM10K_MBX_ERR_LEN FM10K_MBX_ERR(0x0A)
+#define FM10K_MBX_ERR_SIZE FM10K_MBX_ERR(0x0B)
+#define FM10K_MBX_ERR_BUSY FM10K_MBX_ERR(0x0C)
+#define FM10K_MBX_ERR_VALUE FM10K_MBX_ERR(0x0D)
+#define FM10K_MBX_ERR_RSVD0 FM10K_MBX_ERR(0x0E)
+#define FM10K_MBX_ERR_CRC FM10K_MBX_ERR(0x0F)
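+
+/* Example: FM10K_MBX_ERR_NO_MBX = FM10K_MBX_ERR(0x01) = 0x01 - 512
+ * = -511, which is 0xFE01 as a 16-bit value, and FM10K_MBX_ERR_CRC
+ * = 0x0F - 512 = -497 = 0xFE0F, keeping all mailbox errors in the
+ * easily recognizable 0xFE01 - 0xFEFF range.
+ */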
+
+#define FM10K_MBX_CRC_SEED 0xFFFF
+
+struct fm10k_mbx_ops {
+ s32 (*connect)(struct fm10k_hw *, struct fm10k_mbx_info *);
+ void (*disconnect)(struct fm10k_hw *, struct fm10k_mbx_info *);
+ bool (*rx_ready)(struct fm10k_mbx_info *);
+ bool (*tx_ready)(struct fm10k_mbx_info *, u16);
+ bool (*tx_complete)(struct fm10k_mbx_info *);
+ s32 (*enqueue_tx)(struct fm10k_hw *, struct fm10k_mbx_info *,
+ const u32 *);
+ s32 (*process)(struct fm10k_hw *, struct fm10k_mbx_info *);
+ s32 (*register_handlers)(struct fm10k_mbx_info *,
+ const struct fm10k_msg_data *);
+};
+
+struct fm10k_mbx_fifo {
+ u32 *buffer;
+ u16 head;
+ u16 tail;
+ u16 size;
+};
+
+/* size of buffer to be stored in mailbox for FIFOs */
+#define FM10K_MBX_TX_BUFFER_SIZE 512
+#define FM10K_MBX_RX_BUFFER_SIZE 128
+#define FM10K_MBX_BUFFER_SIZE \
+ (FM10K_MBX_TX_BUFFER_SIZE + FM10K_MBX_RX_BUFFER_SIZE)
+
+/* minimum and maximum message size in dwords */
+#define FM10K_MBX_MSG_MAX_SIZE \
+ ((FM10K_MBX_TX_BUFFER_SIZE - 1) & (FM10K_MBX_RX_BUFFER_SIZE - 1))
+#define FM10K_VFMBX_MSG_MTU ((FM10K_VFMBMEM_LEN / 2) - 1)
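+
+/* Size arithmetic: FM10K_MBX_MSG_MAX_SIZE = (512 - 1) & (128 - 1)
+ * = 0x1FF & 0x7F = 127 DWORDs (the largest message that fits in the
+ * smaller of the two FIFOs), and FM10K_VFMBX_MSG_MTU
+ * = (16 / 2) - 1 = 7 DWORDs for the VF mailbox memory.
+ */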
+
+#define FM10K_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define FM10K_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+struct fm10k_mbx_info {
+ /* function pointers for mailbox operations */
+ struct fm10k_mbx_ops ops;
+ const struct fm10k_msg_data *msg_data;
+
+ /* message FIFOs */
+ struct fm10k_mbx_fifo rx;
+ struct fm10k_mbx_fifo tx;
+
+ /* delay for handling timeouts */
+ u32 timeout;
+ u32 usec_delay;
+
+ /* mailbox state info */
+ u32 mbx_reg, mbmem_reg, mbx_lock, mbx_hdr;
+ u16 max_size, mbmem_len;
+ u16 tail, tail_len, pulled;
+ u16 head, head_len, pushed;
+ u16 local, remote;
+ enum fm10k_mbx_state state;
+
+ /* result of last mailbox test */
+ s32 test_result;
+
+ /* statistics */
+ u64 tx_busy;
+ u64 tx_dropped;
+ u64 tx_messages;
+ u64 tx_dwords;
+ u64 tx_mbmem_pulled;
+ u64 rx_messages;
+ u64 rx_dwords;
+ u64 rx_mbmem_pushed;
+ u64 rx_parse_err;
+
+ /* Buffer to store messages */
+ u32 buffer[FM10K_MBX_BUFFER_SIZE];
+};
+
+s32 fm10k_pfvf_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *,
+ const struct fm10k_msg_data *, u8);
+s32 fm10k_sm_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *,
+ const struct fm10k_msg_data *);
+
+#endif /* _FM10K_MBX_H_ */
diff --git a/src/dpdk22/drivers/net/fm10k/base/fm10k_osdep.h b/src/dpdk22/drivers/net/fm10k/base/fm10k_osdep.h
new file mode 100644
index 00000000..6852ef0b
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/base/fm10k_osdep.h
@@ -0,0 +1,158 @@
+/*******************************************************************************
+
+Copyright (c) 2013-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_OSDEP_H_
+#define _FM10K_OSDEP_H_
+
+#include <stdint.h>
+#include <string.h>
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include "../fm10k_logs.h"
+
+/* TODO: this does not look like it should be used... */
+#define ERROR_REPORT2(v1, v2, v3) do { } while (0)
+
+#ifndef BOULDER_RAPIDS_HW
+#define BOULDER_RAPIDS_HW
+#endif
+
+#define STATIC static
+#define DEBUGFUNC(F) DEBUGOUT(F "\n");
+#define DEBUGOUT(S, args...) PMD_DRV_LOG_RAW(DEBUG, S, ##args)
+#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args)
+
+#define FALSE 0
+#define TRUE 1
+#ifndef false
+#define false FALSE
+#endif
+#ifndef true
+#define true TRUE
+#endif
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef int64_t s64;
+typedef uint64_t u64;
+typedef int bool;
+
+#ifndef __le16
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+#endif
+#ifndef __be16
+#define __be16 u16
+#define __be32 u32
+#define __be64 u64
+#endif
+
+/* offsets are WORD offsets, not BYTE offsets */
+#define FM10K_WRITE_REG(hw, reg, val) \
+ ((((volatile uint32_t *)(hw)->hw_addr)[(reg)]) = ((uint32_t)(val)))
+#define FM10K_READ_REG(hw, reg) \
+ (((volatile uint32_t *)(hw)->hw_addr)[(reg)])
+#define FM10K_WRITE_FLUSH(a) FM10K_READ_REG(a, FM10K_CTRL)
+
+#define FM10K_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+
+#define FM10K_PCI_REG_WRITE(reg, value) do { \
+ FM10K_PCI_REG((reg)) = (value); \
+} while (0)
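+
+/* Example of the WORD-offset convention above: FM10K_READ_REG(hw, 0x10)
+ * indexes hw->hw_addr as a uint32_t array, so it reads the 32-bit
+ * register at BYTE offset 0x10 * 4 = 0x40 from the mapped BAR.
+ */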
+
+/* not implemented */
+#define FM10K_READ_PCI_WORD(hw, reg) 0
+
+#define FM10K_WRITE_MBX(hw, reg, value) FM10K_WRITE_REG(hw, reg, value)
+#define FM10K_READ_MBX(hw, reg) FM10K_READ_REG(hw, reg)
+
+#define FM10K_LE16_TO_CPU rte_le_to_cpu_16
+#define FM10K_LE32_TO_CPU rte_le_to_cpu_32
+#define FM10K_CPU_TO_LE32 rte_cpu_to_le_32
+#define FM10K_CPU_TO_LE16 rte_cpu_to_le_16
+#define le16_to_cpu rte_le_to_cpu_16
+
+#define FM10K_RMB rte_rmb
+#define FM10K_WMB rte_wmb
+
+#define usec_delay rte_delay_us
+
+#define FM10K_REMOVED(hw_addr) (!(hw_addr))
+
+#ifndef FM10K_IS_ZERO_ETHER_ADDR
+/* make certain address is not 0 */
+#define FM10K_IS_ZERO_ETHER_ADDR(addr) \
+(!((addr)[0] | (addr)[1] | (addr)[2] | (addr)[3] | (addr)[4] | (addr)[5]))
+#endif
+
+#ifndef FM10K_IS_MULTICAST_ETHER_ADDR
+#define FM10K_IS_MULTICAST_ETHER_ADDR(addr) ((addr)[0] & 0x1)
+#endif
+
+#ifndef FM10K_IS_VALID_ETHER_ADDR
+/* make certain address is not multicast or 0 */
+#define FM10K_IS_VALID_ETHER_ADDR(addr) \
+(!FM10K_IS_MULTICAST_ETHER_ADDR(addr) && !FM10K_IS_ZERO_ETHER_ADDR(addr))
+#endif
+
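+/* simplified stand-in for the Linux kernel's do_div(): it performs only
+ * the in-place division and, unlike the kernel macro, does not yield
+ * the remainder
+ */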
+#ifndef do_div
+#define do_div(n, base) ({\
+ (n) = (n) / (base);\
+})
+#endif /* do_div */
+
+/* DPDK can't access IOMEM directly */
+#ifndef FM10K_WRITE_SW_REG
+#define FM10K_WRITE_SW_REG(v1, v2, v3) do { } while (0)
+#endif
+
+#ifndef fm10k_read_reg
+#define fm10k_read_reg FM10K_READ_REG
+#endif
+
+#define FM10K_TSO_MINMSS \
+ (FM10K_DMA_CTRL_MINMSS_64 >> FM10K_DMA_CTRL_MINMSS_SHIFT)
+#define FM10K_TSO_MIN_HEADERLEN 54
+#define FM10K_TSO_MAX_HEADERLEN 192
+
+#endif /* _FM10K_OSDEP_H_ */
diff --git a/src/dpdk22/drivers/net/fm10k/base/fm10k_pf.c b/src/dpdk22/drivers/net/fm10k/base/fm10k_pf.c
new file mode 100644
index 00000000..6e6d71ed
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/base/fm10k_pf.c
@@ -0,0 +1,2094 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "fm10k_pf.h"
+#include "fm10k_vf.h"
+
+/**
+ * fm10k_reset_hw_pf - PF hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * This function should return the hardware to a state similar to the
+ * one it is in after being powered on.
+ **/
+STATIC s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
+{
+ s32 err;
+ u32 reg;
+ u16 i;
+
+ DEBUGFUNC("fm10k_reset_hw_pf");
+
+ /* Disable interrupts */
+ FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL));
+
+ /* Lock ITR2 reg 0 into itself and disable interrupt moderation */
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0), 0);
+ FM10K_WRITE_REG(hw, FM10K_INT_CTRL, 0);
+
+ /* We assume here Tx and Rx queue 0 are owned by the PF */
+
+ /* Shut off VF access to their queues forcing them to queue 0 */
+ for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(i), 0);
+ }
+
+ /* shut down all rings */
+ err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
+ if (err)
+ return err;
+
+ /* Verify that DMA is no longer active */
+ reg = FM10K_READ_REG(hw, FM10K_DMA_CTRL);
+ if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
+ return FM10K_ERR_DMA_PENDING;
+
+ /* verify the switch is ready for reset */
+ reg = FM10K_READ_REG(hw, FM10K_DMA_CTRL2);
+ if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY))
+ goto out;
+
+ /* Initiate data path reset */
+ reg |= FM10K_DMA_CTRL_DATAPATH_RESET;
+ FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, reg);
+
+ /* Flush write and allow 100us for reset to complete */
+ FM10K_WRITE_FLUSH(hw);
+ usec_delay(FM10K_RESET_TIMEOUT);
+
+ /* Verify we made it out of reset */
+ reg = FM10K_READ_REG(hw, FM10K_IP);
+ if (!(reg & FM10K_IP_NOTINRESET))
+ err = FM10K_ERR_RESET_FAILED;
+
+out:
+ return err;
+}
+
+/**
+ * fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support
+ * @hw: pointer to hardware structure
+ *
+ * Looks at the ARI hierarchy bit to determine whether ARI is supported or not.
+ **/
+STATIC bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw)
+{
+ u16 sriov_ctrl = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_SRIOV_CTRL);
+
+ DEBUGFUNC("fm10k_is_ari_hierarchy_pf");
+
+ return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI);
+}
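+
+/* Note: in this DPDK port FM10K_READ_PCI_WORD() is stubbed to return 0
+ * (see fm10k_osdep.h), so this check always reports no ARI hierarchy
+ * and fm10k_init_hw_pf() below sizes the pool at 7 VFs.
+ */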
+
+/**
+ * fm10k_init_hw_pf - PF hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * This function prepares the PF for use by configuring GLORT decode and
+ * map entries, interrupt throttle registers, queue rings, and the DMA
+ * engine.
+ **/
+STATIC s32 fm10k_init_hw_pf(struct fm10k_hw *hw)
+{
+ u32 dma_ctrl, txqctl;
+ u16 i;
+
+ DEBUGFUNC("fm10k_init_hw_pf");
+
+ /* Establish default VSI as valid */
+ FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0);
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(fm10k_dglort_default),
+ FM10K_DGLORTMAP_ANY);
+
+ /* Invalidate all other GLORT entries */
+ for (i = 1; i < FM10K_DGLORT_COUNT; i++)
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE);
+
+ /* reset ITR2(0) to point to itself */
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0), 0);
+
+ /* reset VF ITR2(0) to point to 0 to avoid PF registers */
+ FM10K_WRITE_REG(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0);
+
+ /* loop through all PF ITR2 registers pointing them to the previous */
+ for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++)
+ FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - 1);
+
+ /* Enable interrupt moderator if not already enabled */
+ FM10K_WRITE_REG(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
+
+ /* compute the default txqctl configuration */
+ txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW |
+ (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT);
+
+ for (i = 0; i < FM10K_MAX_QUEUES; i++) {
+ /* configure rings for 256 Queue / 32 Descriptor cache mode */
+ FM10K_WRITE_REG(hw, FM10K_TQDLOC(i),
+ (i * FM10K_TQDLOC_BASE_32_DESC) |
+ FM10K_TQDLOC_SIZE_32_DESC);
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), txqctl);
+
+ /* configure rings to provide TPH processing hints */
+ FM10K_WRITE_REG(hw, FM10K_TPH_TXCTRL(i),
+ FM10K_TPH_TXCTRL_DESC_TPHEN |
+ FM10K_TPH_TXCTRL_DESC_RROEN |
+ FM10K_TPH_TXCTRL_DESC_WROEN |
+ FM10K_TPH_TXCTRL_DATA_RROEN);
+ FM10K_WRITE_REG(hw, FM10K_TPH_RXCTRL(i),
+ FM10K_TPH_RXCTRL_DESC_TPHEN |
+ FM10K_TPH_RXCTRL_DESC_RROEN |
+ FM10K_TPH_RXCTRL_DATA_WROEN |
+ FM10K_TPH_RXCTRL_HDR_WROEN);
+ }
+
+ /* set max hold interval to align with 1.024 usec in all modes and
+ * store ITR scale
+ */
+ switch (hw->bus.speed) {
+ case fm10k_bus_speed_2500:
+ dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1;
+ hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN1;
+ break;
+ case fm10k_bus_speed_5000:
+ dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2;
+ hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN2;
+ break;
+ case fm10k_bus_speed_8000:
+ dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3;
+ hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
+ break;
+ default:
+ dma_ctrl = 0;
+ /* just in case, assume Gen3 ITR scale */
+ hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
+ break;
+ }
+
+ /* Configure TSO flags */
+ FM10K_WRITE_REG(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW);
+ FM10K_WRITE_REG(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI);
+
+ /* Enable DMA engine
+ * Set Rx Descriptor size to 32
+ * Set Minimum MSS to 64
+ * Set Maximum number of Rx queues to 256 / 32 Descriptor
+ */
+ dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE |
+ FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 |
+ FM10K_DMA_CTRL_32_DESC;
+
+ FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, dma_ctrl);
+
+ /* record maximum queue count, we limit ourselves to 128 */
+ hw->mac.max_queues = FM10K_MAX_QUEUES_PF;
+
+	/* We support either 64 VFs or 7 VFs depending on whether we have ARI */
+ hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 64 : 7;
+
+ return FM10K_SUCCESS;
+}
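+
+/* Sketch of the ITR2 linked list built above (added for clarity; the index
+ * values assume FM10K_ITR_REG_COUNT_PF == 256, which should be checked
+ * against the register definitions):
+ *
+ *	ITR2(0)   -> 0      terminates the PF list by pointing at itself
+ *	ITR2(i)   -> i - 1  for i = 1..255, each vector chains to the previous
+ *	ITR2(256) -> 0      parks the VF list head until VFs are assigned
+ *
+ * fm10k_update_int_moderator_pf() later repoints the list heads so that
+ * masked vectors are skipped.
+ */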
+
+/**
+ * fm10k_is_slot_appropriate_pf - Indicate appropriate slot for this SKU
+ * @hw: pointer to hardware structure
+ *
+ * Looks at the PCIe bus info to confirm whether or not this slot can support
+ * the necessary bandwidth for this device.
+ **/
+STATIC bool fm10k_is_slot_appropriate_pf(struct fm10k_hw *hw)
+{
+ DEBUGFUNC("fm10k_is_slot_appropriate_pf");
+
+ return (hw->bus.speed == hw->bus_caps.speed) &&
+ (hw->bus.width == hw->bus_caps.width);
+}
+
+/**
+ * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vid: VLAN ID to add to table
+ * @vsi: Index indicating VF ID or PF ID in table
+ * @set: Indicates if this is a set or clear operation
+ *
+ * This function adds or removes the corresponding VLAN ID from the VLAN
+ * filter table for the corresponding function. In addition to the
+ * standard set/clear that supports one bit, a multi-bit write is
+ * supported to set 64 bits at a time.
+ **/
+STATIC s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
+{
+ u32 vlan_table, reg, mask, bit, len;
+
+ /* verify the VSI index is valid */
+ if (vsi > FM10K_VLAN_TABLE_VSI_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* VLAN multi-bit write:
+ * The multi-bit write has several parts to it.
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RSVD0 | Length |C|RSVD0| VLAN ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * VLAN ID: VLAN starting value
+ * RSVD0: Reserved section, must be 0
+ * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message)
+ * Length: Number of times to repeat the bit being set
+ */
+ len = vid >> 16;
+ vid = (vid << 17) >> 17;
+
+ /* verify the reserved 0 fields are 0 */
+ if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* Loop through the table updating all required VLANs */
+ for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32;
+ len < FM10K_VLAN_TABLE_VID_MAX;
+ len -= 32 - bit, reg++, bit = 0) {
+ /* record the initial state of the register */
+ vlan_table = FM10K_READ_REG(hw, reg);
+
+ /* truncate mask if we are at the start or end of the run */
+ mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit;
+
+ /* make necessary modifications to the register */
+ mask &= set ? ~vlan_table : vlan_table;
+ if (mask)
+ FM10K_WRITE_REG(hw, reg, vlan_table ^ mask);
+ }
+
+ return FM10K_SUCCESS;
+}
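+
+/* A worked example of the multi-bit write described above (hypothetical
+ * values, for illustration only): to set VLAN IDs 100-163 for VSI 0 in a
+ * single call, encode the starting VID with a repeat length of 63:
+ *
+ *	fm10k_update_vlan_pf(hw, (63 << 16) | 100, 0, true);
+ *
+ * The loop starts at FM10K_VLAN_TABLE(0, 3) with bit 4 and sets 28, 32 and
+ * 4 bits across three consecutive registers before the length underflows
+ * and terminates the loop.
+ */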
+
+/**
+ * fm10k_read_mac_addr_pf - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the SM_AREA and stores the value.
+ **/
+STATIC s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw)
+{
+ u8 perm_addr[ETH_ALEN];
+ u32 serial_num;
+ int i;
+
+ DEBUGFUNC("fm10k_read_mac_addr_pf");
+
+ serial_num = FM10K_READ_REG(hw, FM10K_SM_AREA(1));
+
+ /* last byte should be all 1's */
+ if ((~serial_num) << 24)
+ return FM10K_ERR_INVALID_MAC_ADDR;
+
+ perm_addr[0] = (u8)(serial_num >> 24);
+ perm_addr[1] = (u8)(serial_num >> 16);
+ perm_addr[2] = (u8)(serial_num >> 8);
+
+ serial_num = FM10K_READ_REG(hw, FM10K_SM_AREA(0));
+
+ /* first byte should be all 1's */
+ if ((~serial_num) >> 24)
+ return FM10K_ERR_INVALID_MAC_ADDR;
+
+ perm_addr[3] = (u8)(serial_num >> 16);
+ perm_addr[4] = (u8)(serial_num >> 8);
+ perm_addr[5] = (u8)(serial_num);
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ hw->mac.perm_addr[i] = perm_addr[i];
+ hw->mac.addr[i] = perm_addr[i];
+ }
+
+ return FM10K_SUCCESS;
+}
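+
+/* Layout of the two SM_AREA words read above, illustrated with a
+ * hypothetical address a0:36:9f:12:34:56 (example values only):
+ *
+ *	SM_AREA(1) = 0xa0369fff   MAC bytes 0-2, low byte all 1's
+ *	SM_AREA(0) = 0xff123456   high byte all 1's, MAC bytes 3-5
+ *
+ * The all-1's marker bytes are what the two validity checks test for.
+ */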
+
+/**
+ * fm10k_glort_valid_pf - Validate that the provided glort is valid
+ * @hw: pointer to the HW structure
+ * @glort: base glort to be validated
+ *
+ * This function will return false if the provided glort is invalid
+ **/
+bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort)
+{
+ glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT;
+
+ return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE);
+}
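+
+/* Worked example for the check above (hypothetical map value): with
+ * hw->mac.dglort_map = 0xff000100 the mask is 0xff00 and the base glort
+ * is 0x0100, so glorts 0x0100 through 0x01ff validate and all others fail.
+ */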
+
+/**
+ * fm10k_update_xc_addr_pf - Update device addresses
+ * @hw: pointer to the HW structure
+ * @glort: base resource tag for this request
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ * @flags: flags field to indicate add and secure
+ *
+ * This function generates a message to the Switch API requesting
+ * that the given logical port add/remove the given L2 MAC/VLAN address.
+ **/
+STATIC s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
+ const u8 *mac, u16 vid, bool add, u8 flags)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ struct fm10k_mac_update mac_update;
+ u32 msg[5];
+
+ DEBUGFUNC("fm10k_update_xc_addr_pf");
+
+ /* clear set bit from VLAN ID */
+ vid &= ~FM10K_VLAN_CLEAR;
+
+ /* if glort or VLAN are not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* record fields */
+ mac_update.mac_lower = FM10K_CPU_TO_LE32(((u32)mac[2] << 24) |
+ ((u32)mac[3] << 16) |
+ ((u32)mac[4] << 8) |
+ ((u32)mac[5]));
+ mac_update.mac_upper = FM10K_CPU_TO_LE16(((u32)mac[0] << 8) |
+ ((u32)mac[1]));
+ mac_update.vlan = FM10K_CPU_TO_LE16(vid);
+ mac_update.glort = FM10K_CPU_TO_LE16(glort);
+ mac_update.action = add ? 0 : 1;
+ mac_update.flags = flags;
+
+ /* populate mac_update fields */
+ fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE);
+ fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE,
+ &mac_update, sizeof(mac_update));
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_update_uc_addr_pf - Update device unicast addresses
+ * @hw: pointer to the HW structure
+ * @glort: base resource tag for this request
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ * @flags: flags field to indicate add and secure
+ *
+ * This function is used to add or remove unicast addresses for
+ * the PF.
+ **/
+STATIC s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort,
+ const u8 *mac, u16 vid, bool add, u8 flags)
+{
+ DEBUGFUNC("fm10k_update_uc_addr_pf");
+
+ /* verify MAC address is valid */
+ if (!FM10K_IS_VALID_ETHER_ADDR(mac))
+ return FM10K_ERR_PARAM;
+
+ return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags);
+}
+
+/**
+ * fm10k_update_mc_addr_pf - Update device multicast addresses
+ * @hw: pointer to the HW structure
+ * @glort: base resource tag for this request
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ *
+ * This function is used to add or remove multicast MAC addresses for
+ * the PF.
+ **/
+STATIC s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort,
+ const u8 *mac, u16 vid, bool add)
+{
+ DEBUGFUNC("fm10k_update_mc_addr_pf");
+
+ /* verify multicast address is valid */
+ if (!FM10K_IS_MULTICAST_ETHER_ADDR(mac))
+ return FM10K_ERR_PARAM;
+
+ return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0);
+}
+
+/**
+ * fm10k_update_xcast_mode_pf - Request update of multicast mode
+ * @hw: pointer to hardware structure
+ * @glort: base resource tag for this request
+ * @mode: integer value indicating mode being requested
+ *
+ * This function will attempt to request a higher mode for the port
+ * so that it can enable either multicast, multicast promiscuous, or
+ * promiscuous mode of operation.
+ **/
+STATIC s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[3], xcast_mode;
+
+ DEBUGFUNC("fm10k_update_xcast_mode_pf");
+
+ if (mode > FM10K_XCAST_MODE_NONE)
+ return FM10K_ERR_PARAM;
+
+ /* if glort is not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort))
+ return FM10K_ERR_PARAM;
+
+ /* write xcast mode as a single u32 value,
+ * lower 16 bits: glort
+ * upper 16 bits: mode
+ */
+ xcast_mode = ((u32)mode << 16) | glort;
+
+ /* generate message requesting to change xcast mode */
+ fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES);
+ fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_update_int_moderator_pf - Update interrupt moderator linked list
+ * @hw: pointer to hardware structure
+ *
+ * This function walks through the MSI-X vector table to determine the
+ * number of active interrupts and based on that information updates the
+ * interrupt moderator linked list.
+ **/
+STATIC void fm10k_update_int_moderator_pf(struct fm10k_hw *hw)
+{
+ u32 i;
+
+ /* Disable interrupt moderator */
+ FM10K_WRITE_REG(hw, FM10K_INT_CTRL, 0);
+
+	/* loop through PF vectors from last to first looking for enabled vectors */
+ for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) {
+ if (!FM10K_READ_REG(hw, FM10K_MSIX_VECTOR_MASK(i)))
+ break;
+ }
+
+ /* always reset VFITR2[0] to point to last enabled PF vector */
+ FM10K_WRITE_REG(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i);
+
+ /* reset ITR2[0] to point to last enabled PF vector */
+ if (!hw->iov.num_vfs)
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0), i);
+
+ /* Enable interrupt moderator */
+ FM10K_WRITE_REG(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
+}
+
+/**
+ * fm10k_update_lport_state_pf - Notify the switch of a change in port state
+ * @hw: pointer to the HW structure
+ * @glort: base resource tag for this request
+ * @count: number of logical ports being updated
+ * @enable: boolean value indicating enable or disable
+ *
+ * This function is used to add/remove a logical port from the switch.
+ **/
+STATIC s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
+ u16 count, bool enable)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[3], lport_msg;
+
+ DEBUGFUNC("fm10k_lport_state_pf");
+
+ /* do nothing if we are being asked to create or destroy 0 ports */
+ if (!count)
+ return FM10K_SUCCESS;
+
+ /* if glort is not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort))
+ return FM10K_ERR_PARAM;
+
+ /* construct the lport message from the 2 pieces of data we have */
+ lport_msg = ((u32)count << 16) | glort;
+
+ /* generate lport create/delete message */
+ fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE :
+ FM10K_PF_MSG_ID_LPORT_DELETE);
+ fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_configure_dglort_map_pf - Configures GLORT entry and queues
+ * @hw: pointer to hardware structure
+ * @dglort: pointer to dglort configuration structure
+ *
+ * Reads the configuration structure contained in dglort_cfg and uses
+ * that information to then populate a DGLORTMAP/DEC entry and the queues
+ * to which it has been assigned.
+ **/
+STATIC s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
+ struct fm10k_dglort_cfg *dglort)
+{
+ u16 glort, queue_count, vsi_count, pc_count;
+ u16 vsi, queue, pc, q_idx;
+ u32 txqctl, dglortdec, dglortmap;
+
+ /* verify the dglort pointer */
+ if (!dglort)
+ return FM10K_ERR_PARAM;
+
+ /* verify the dglort values */
+ if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) ||
+ (dglort->vsi_l > 6) || (dglort->vsi_b > 64) ||
+ (dglort->queue_l > 8) || (dglort->queue_b >= 256))
+ return FM10K_ERR_PARAM;
+
+ /* determine count of VSIs and queues */
+ queue_count = 1 << (dglort->rss_l + dglort->pc_l);
+ vsi_count = 1 << (dglort->vsi_l + dglort->queue_l);
+ glort = dglort->glort;
+ q_idx = dglort->queue_b;
+
+ /* configure SGLORT for queues */
+ for (vsi = 0; vsi < vsi_count; vsi++, glort++) {
+ for (queue = 0; queue < queue_count; queue++, q_idx++) {
+ if (q_idx >= FM10K_MAX_QUEUES)
+ break;
+
+ FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(q_idx), glort);
+ FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(q_idx), glort);
+ }
+ }
+
+ /* determine count of PCs and queues */
+ queue_count = 1 << (dglort->queue_l + dglort->rss_l + dglort->vsi_l);
+ pc_count = 1 << dglort->pc_l;
+
+ /* configure PC for Tx queues */
+ for (pc = 0; pc < pc_count; pc++) {
+ q_idx = pc + dglort->queue_b;
+ for (queue = 0; queue < queue_count; queue++) {
+ if (q_idx >= FM10K_MAX_QUEUES)
+ break;
+
+ txqctl = FM10K_READ_REG(hw, FM10K_TXQCTL(q_idx));
+ txqctl &= ~FM10K_TXQCTL_PC_MASK;
+ txqctl |= pc << FM10K_TXQCTL_PC_SHIFT;
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(q_idx), txqctl);
+
+ q_idx += pc_count;
+ }
+ }
+
+ /* configure DGLORTDEC */
+ dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) |
+ ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) |
+ ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) |
+ ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) |
+ ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) |
+ ((u32)(dglort->queue_l));
+ if (dglort->inner_rss)
+ dglortdec |= FM10K_DGLORTDEC_INNERRSS_ENABLE;
+
+ /* configure DGLORTMAP */
+ dglortmap = (dglort->idx == fm10k_dglort_default) ?
+ FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO;
+ dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l;
+ dglortmap |= dglort->glort;
+
+ /* write values to hardware */
+ FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec);
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap);
+
+ return FM10K_SUCCESS;
+}
+
+u16 fm10k_queues_per_pool(struct fm10k_hw *hw)
+{
+ u16 num_pools = hw->iov.num_pools;
+
+ return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ?
+ 8 : FM10K_MAX_QUEUES_POOL;
+}
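+
+/* Sizing implemented above, assuming FM10K_MAX_QUEUES_POOL == 16 (its usual
+ * definition):
+ *
+ *	 1-8  pools -> 16 queues per pool
+ *	 9-16 pools ->  8 queues per pool
+ *	17-32 pools ->  4 queues per pool
+ *	33-64 pools ->  2 queues per pool
+ */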
+
+u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx)
+{
+ u16 num_vfs = hw->iov.num_vfs;
+ u16 vf_q_idx = FM10K_MAX_QUEUES;
+
+ vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx);
+
+ return vf_q_idx;
+}
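+
+/* Worked example (hypothetical configuration): with FM10K_MAX_QUEUES == 256,
+ * num_vfs = 4 and num_pools = 4 (so 16 queues per pool), the VF queue blocks
+ * are packed at the top of the queue space:
+ *
+ *	fm10k_vf_queue_index(hw, 0) == 256 - 16 * 4 == 192
+ *	fm10k_vf_queue_index(hw, 3) == 256 - 16 * 1 == 240
+ */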
+
+STATIC u16 fm10k_vectors_per_pool(struct fm10k_hw *hw)
+{
+ u16 num_pools = hw->iov.num_pools;
+
+ return (num_pools > 32) ? 8 : (num_pools > 16) ? 16 :
+ FM10K_MAX_VECTORS_POOL;
+}
+
+STATIC u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx)
+{
+ u16 vf_v_idx = FM10K_MAX_VECTORS_PF;
+
+ vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx;
+
+ return vf_v_idx;
+}
+
+/**
+ * fm10k_iov_assign_resources_pf - Assign pool resources for virtualization
+ * @hw: pointer to the HW structure
+ * @num_vfs: number of VFs to be allocated
+ * @num_pools: number of virtualization pools to be allocated
+ *
+ * Allocates queues and traffic classes to virtualization entities to prepare
+ * the PF for SR-IOV and VMDq
+ **/
+STATIC s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
+ u16 num_pools)
+{
+ u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx;
+ u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT;
+ int i, j;
+
+ /* hardware only supports up to 64 pools */
+ if (num_pools > 64)
+ return FM10K_ERR_PARAM;
+
+ /* the number of VFs cannot exceed the number of pools */
+ if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs))
+ return FM10K_ERR_PARAM;
+
+ /* record number of virtualization entities */
+ hw->iov.num_vfs = num_vfs;
+ hw->iov.num_pools = num_pools;
+
+ /* determine qmap offsets and counts */
+ qmap_stride = (num_vfs > 8) ? 32 : 256;
+ qpp = fm10k_queues_per_pool(hw);
+ vpp = fm10k_vectors_per_pool(hw);
+
+ /* calculate starting index for queues */
+ vf_q_idx = fm10k_vf_queue_index(hw, 0);
+ qmap_idx = 0;
+
+ /* establish TCs with -1 credits and no quanta to prevent transmit */
+ for (i = 0; i < num_vfs; i++) {
+ FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_TC_RATE(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(i),
+ FM10K_TC_CREDIT_CREDIT_MASK);
+ }
+
+ /* zero out all mbmem registers */
+ for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;)
+ FM10K_WRITE_REG(hw, FM10K_MBMEM(i), 0);
+
+ /* clear event notification of VF FLR */
+ FM10K_WRITE_REG(hw, FM10K_PFVFLREC(0), ~0);
+ FM10K_WRITE_REG(hw, FM10K_PFVFLREC(1), ~0);
+
+ /* loop through unallocated rings assigning them back to PF */
+ for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) {
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF |
+ FM10K_TXQCTL_UNLIMITED_BW | vid);
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF);
+ }
+
+ /* PF should have already updated VFITR2[0] */
+
+ /* update all ITR registers to flow to VFITR2[0] */
+ for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) {
+ if (!(i & (vpp - 1)))
+ FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - vpp);
+ else
+ FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - 1);
+ }
+
+ /* update PF ITR2[0] to reference the last vector */
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0),
+ fm10k_vf_vector_index(hw, num_vfs - 1));
+
+ /* loop through rings populating rings and TCs */
+ for (i = 0; i < num_vfs; i++) {
+ /* record index for VF queue 0 for use in end of loop */
+ vf_q_idx0 = vf_q_idx;
+
+ for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) {
+ /* assign VF and locked TC to queues */
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx),
+ (i << FM10K_TXQCTL_TC_SHIFT) | i |
+ FM10K_TXQCTL_VF | vid);
+ FM10K_WRITE_REG(hw, FM10K_RXDCTL(vf_q_idx),
+ FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
+ FM10K_RXDCTL_DROP_ON_EMPTY);
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(vf_q_idx),
+ FM10K_RXQCTL_VF |
+ (i << FM10K_RXQCTL_VF_SHIFT));
+
+ /* map queue pair to VF */
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), vf_q_idx);
+ }
+
+ /* repeat the first ring for all of the remaining VF rings */
+ for (; j < qmap_stride; j++, qmap_idx++) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0);
+ }
+ }
+
+ /* loop through remaining indexes assigning all to queue 0 */
+ while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), 0);
+ qmap_idx++;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_configure_tc_pf - Configure the shaping group for VF
+ * @hw: pointer to the HW structure
+ * @vf_idx: index of VF receiving GLORT
+ * @rate: Rate indicated in Mb/s
+ *
+ * Configures the TC for a given VF to allow only up to a given number
+ * of Mb/s of outgoing Tx throughput.
+ **/
+STATIC s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate)
+{
+ /* configure defaults */
+ u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3;
+ u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK;
+
+ /* verify vf is in range */
+ if (vf_idx >= hw->iov.num_vfs)
+ return FM10K_ERR_PARAM;
+
+ /* set interval to align with 4.096 usec in all modes */
+ switch (hw->bus.speed) {
+ case fm10k_bus_speed_2500:
+ interval = FM10K_TC_RATE_INTERVAL_4US_GEN1;
+ break;
+ case fm10k_bus_speed_5000:
+ interval = FM10K_TC_RATE_INTERVAL_4US_GEN2;
+ break;
+ default:
+ break;
+ }
+
+ if (rate) {
+ if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN)
+ return FM10K_ERR_PARAM;
+
+ /* The quanta is measured in Bytes per 4.096 or 8.192 usec
+ * The rate is provided in Mbits per second
+	 * To translate from rate to quanta we need to multiply the
+ * rate by 8.192 usec and divide by 8 bits/byte. To avoid
+ * dealing with floating point we can round the values up
+ * to the nearest whole number ratio which gives us 128 / 125.
+ */
+ tc_rate = (rate * 128) / 125;
+
+ /* try to keep the rate limiting accurate by increasing
+ * the number of credits and interval for rates less than 4Gb/s
+ */
+ if (rate < 4000)
+ interval <<= 1;
+ else
+ tc_rate >>= 1;
+ }
+
+ /* update rate limiter with new values */
+ FM10K_WRITE_REG(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval);
+ FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
+ FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
+
+ return FM10K_SUCCESS;
+}
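+
+/* Worked example for the quanta math above (hypothetical rates): for
+ * rate = 1000 Mb/s, tc_rate = 1000 * 128 / 125 = 1024 bytes, and since the
+ * rate is below 4000 the interval is doubled to the 8.192 usec window;
+ * 1024 bytes per 8.192 usec is exactly 1 Gb/s. For rate = 10000 Mb/s the
+ * quanta is instead halved to 5120 bytes per 4.096 usec, i.e. 10 Gb/s.
+ */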
+
+/**
+ * fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list
+ * @hw: pointer to the HW structure
+ * @vf_idx: index of VF receiving GLORT
+ *
+ * Update the interrupt moderator linked list to include any MSI-X
+ * interrupts which the VF has enabled in the MSI-X vector table.
+ **/
+STATIC s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
+{
+ u16 vf_v_idx, vf_v_limit, i;
+
+ /* verify vf is in range */
+ if (vf_idx >= hw->iov.num_vfs)
+ return FM10K_ERR_PARAM;
+
+ /* determine vector offset and count */
+ vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
+ vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
+
+ /* search for first vector that is not masked */
+ for (i = vf_v_limit - 1; i > vf_v_idx; i--) {
+ if (!FM10K_READ_REG(hw, FM10K_MSIX_VECTOR_MASK(i)))
+ break;
+ }
+
+ /* reset linked list so it now includes our active vectors */
+ if (vf_idx == (hw->iov.num_vfs - 1))
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0), i);
+ else
+ FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_limit), i);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF
+ * @hw: pointer to the HW structure
+ * @vf_info: pointer to VF information structure
+ *
+ * Assign a MAC address and default VLAN to a VF and notify it of the update
+ **/
+STATIC s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
+ struct fm10k_vf_info *vf_info)
+{
+ u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i;
+ u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0;
+ s32 err = FM10K_SUCCESS;
+ u16 vf_idx, vf_vid;
+
+ /* verify vf is in range */
+ if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs)
+ return FM10K_ERR_PARAM;
+
+ /* determine qmap offsets and counts */
+ qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
+ queues_per_pool = fm10k_queues_per_pool(hw);
+
+ /* calculate starting index for queues */
+ vf_idx = vf_info->vf_idx;
+ vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
+ qmap_idx = qmap_stride * vf_idx;
+
+ /* MAP Tx queue back to 0 temporarily, and disable it */
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0);
+
+ /* determine correct default VLAN ID */
+ if (vf_info->pf_vid)
+ vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR;
+ else
+ vf_vid = vf_info->sw_vid;
+
+ /* generate MAC_ADDR request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
+ fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
+ vf_info->mac, vf_vid);
+
+ /* load onto outgoing mailbox, ignore any errors on enqueue */
+ if (vf_info->mbx.ops.enqueue_tx)
+ vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+
+	/* verify the ring has been disabled before modifying base address registers */
+ txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx));
+ for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) {
+ /* limit ourselves to a 1ms timeout */
+ if (timeout == 10) {
+ err = FM10K_ERR_DMA_PENDING;
+ goto err_out;
+ }
+
+ usec_delay(100);
+ txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx));
+ }
+
+ /* Update base address registers to contain MAC address */
+ if (FM10K_IS_VALID_ETHER_ADDR(vf_info->mac)) {
+ tdbal = (((u32)vf_info->mac[3]) << 24) |
+ (((u32)vf_info->mac[4]) << 16) |
+ (((u32)vf_info->mac[5]) << 8);
+
+ tdbah = (((u32)0xFF) << 24) |
+ (((u32)vf_info->mac[0]) << 16) |
+ (((u32)vf_info->mac[1]) << 8) |
+ ((u32)vf_info->mac[2]);
+ }
+
+ /* Record the base address into queue 0 */
+ FM10K_WRITE_REG(hw, FM10K_TDBAL(vf_q_idx), tdbal);
+ FM10K_WRITE_REG(hw, FM10K_TDBAH(vf_q_idx), tdbah);
+
+ /* Provide the VF the ITR scale, using software-defined fields in TDLEN
+ * to pass the information during VF initialization
+ */
+ FM10K_WRITE_REG(hw, FM10K_TDLEN(vf_q_idx), hw->mac.itr_scale <<
+ FM10K_TDLEN_ITR_SCALE_SHIFT);
+
+err_out:
+ /* configure Queue control register */
+ txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
+ FM10K_TXQCTL_VID_MASK;
+ txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
+ FM10K_TXQCTL_VF | vf_idx;
+
+ /* assign VID */
+ for (i = 0; i < queues_per_pool; i++)
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
+
+ /* restore the queue back to VF ownership */
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
+ return err;
+}
+
+/**
+ * fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF
+ * @hw: pointer to the HW structure
+ * @vf_info: pointer to VF information structure
+ *
+ * Reassign the interrupts and queues to a VF following an FLR
+ **/
+STATIC s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
+ struct fm10k_vf_info *vf_info)
+{
+ u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx;
+ u32 tdbal = 0, tdbah = 0, txqctl, rxqctl;
+ u16 vf_v_idx, vf_v_limit, vf_vid;
+ u8 vf_idx = vf_info->vf_idx;
+ int i;
+
+ /* verify vf is in range */
+ if (vf_idx >= hw->iov.num_vfs)
+ return FM10K_ERR_PARAM;
+
+ /* clear event notification of VF FLR */
+ FM10K_WRITE_REG(hw, FM10K_PFVFLREC(vf_idx / 32), 1 << (vf_idx % 32));
+
+ /* force timeout and then disconnect the mailbox */
+ vf_info->mbx.timeout = 0;
+ if (vf_info->mbx.ops.disconnect)
+ vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);
+
+ /* determine vector offset and count */
+ vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
+ vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
+
+ /* determine qmap offsets and counts */
+ qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
+ queues_per_pool = fm10k_queues_per_pool(hw);
+ qmap_idx = qmap_stride * vf_idx;
+
+ /* make all the queues inaccessible to the VF */
+ for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(i), 0);
+ }
+
+ /* calculate starting index for queues */
+ vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
+
+ /* determine correct default VLAN ID */
+ if (vf_info->pf_vid)
+ vf_vid = vf_info->pf_vid;
+ else
+ vf_vid = vf_info->sw_vid;
+
+ /* configure Queue control register */
+ txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
+ (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
+ FM10K_TXQCTL_VF | vf_idx;
+ rxqctl = FM10K_RXQCTL_VF | (vf_idx << FM10K_RXQCTL_VF_SHIFT);
+
+ /* stop further DMA and reset queue ownership back to VF */
+ for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), 0);
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), txqctl);
+ FM10K_WRITE_REG(hw, FM10K_RXDCTL(i),
+ FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
+ FM10K_RXDCTL_DROP_ON_EMPTY);
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), rxqctl);
+ }
+
+ /* reset TC with -1 credits and no quanta to prevent transmit */
+ FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(vf_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_TC_RATE(vf_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(vf_idx),
+ FM10K_TC_CREDIT_CREDIT_MASK);
+
+ /* update our first entry in the table based on previous VF */
+ if (!vf_idx)
+ hw->mac.ops.update_int_moderator(hw);
+ else
+ hw->iov.ops.assign_int_moderator(hw, vf_idx - 1);
+
+ /* reset linked list so it now includes our active vectors */
+ if (vf_idx == (hw->iov.num_vfs - 1))
+ FM10K_WRITE_REG(hw, FM10K_ITR2(0), vf_v_idx);
+ else
+ FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_limit), vf_v_idx);
+
+ /* link remaining vectors so that next points to previous */
+ for (vf_v_idx++; vf_v_idx < vf_v_limit; vf_v_idx++)
+ FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1);
+
+ /* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */
+ for (i = FM10K_VFMBMEM_LEN; i--;)
+ FM10K_WRITE_REG(hw, FM10K_MBMEM_VF(vf_idx, i), 0);
+ for (i = FM10K_VLAN_TABLE_SIZE; i--;)
+ FM10K_WRITE_REG(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0);
+ for (i = FM10K_RETA_SIZE; i--;)
+ FM10K_WRITE_REG(hw, FM10K_RETA(vf_info->vsi, i), 0);
+ for (i = FM10K_RSSRK_SIZE; i--;)
+ FM10K_WRITE_REG(hw, FM10K_RSSRK(vf_info->vsi, i), 0);
+ FM10K_WRITE_REG(hw, FM10K_MRQC(vf_info->vsi), 0);
+
+ /* Update base address registers to contain MAC address */
+ if (FM10K_IS_VALID_ETHER_ADDR(vf_info->mac)) {
+ tdbal = (((u32)vf_info->mac[3]) << 24) |
+ (((u32)vf_info->mac[4]) << 16) |
+ (((u32)vf_info->mac[5]) << 8);
+ tdbah = (((u32)0xFF) << 24) |
+ (((u32)vf_info->mac[0]) << 16) |
+ (((u32)vf_info->mac[1]) << 8) |
+ ((u32)vf_info->mac[2]);
+ }
+
+ /* map queue pairs back to VF from last to first */
+ for (i = queues_per_pool; i--;) {
+ FM10K_WRITE_REG(hw, FM10K_TDBAL(vf_q_idx + i), tdbal);
+ FM10K_WRITE_REG(hw, FM10K_TDBAH(vf_q_idx + i), tdbah);
+ FM10K_WRITE_REG(hw, FM10K_TDLEN(vf_q_idx + i),
+ hw->mac.itr_scale <<
+ FM10K_TDLEN_ITR_SCALE_SHIFT);
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i);
+ }
+
+ /* repeat the first ring for all of the remaining VF rings */
+ for (i = queues_per_pool; i < qmap_stride; i++) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx);
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF
+ * @hw: pointer to hardware structure
+ * @vf_info: pointer to VF information structure
+ * @lport_idx: Logical port offset from the hardware glort
+ * @flags: Set of capability flags to extend port beyond basic functionality
+ *
+ * This function allows enabling a VF port by assigning it a GLORT and
+ * setting the flags so that it can enable an Rx mode.
+ **/
+STATIC s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw,
+ struct fm10k_vf_info *vf_info,
+ u16 lport_idx, u8 flags)
+{
+ u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE;
+
+ DEBUGFUNC("fm10k_iov_set_lport_state_pf");
+
+ /* if glort is not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort))
+ return FM10K_ERR_PARAM;
+
+ vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE;
+ vf_info->glort = glort;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_reset_lport_pf - Disable a logical port for a given VF
+ * @hw: pointer to hardware structure
+ * @vf_info: pointer to VF information structure
+ *
+ * This function disables a VF port by stripping it of a GLORT and
+ * setting the flags so that it cannot enable any Rx mode.
+ **/
+STATIC void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw,
+ struct fm10k_vf_info *vf_info)
+{
+ u32 msg[1];
+
+ DEBUGFUNC("fm10k_iov_reset_lport_state_pf");
+
+ /* need to disable the port if it is already enabled */
+ if (FM10K_VF_FLAG_ENABLED(vf_info)) {
+ /* notify switch that this port has been disabled */
+ fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false);
+
+ /* generate port state response to notify VF it is not ready */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
+ vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+ }
+
+ /* clear flags and glort if it exists */
+ vf_info->vf_flags = 0;
+ vf_info->glort = 0;
+}
+
+/**
+ * fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs
+ * @hw: pointer to hardware structure
+ * @q: stats for all queues of a VF
+ * @vf_idx: index of VF
+ *
+ * This function collects queue stats for VFs.
+ **/
+STATIC void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats_q *q,
+ u16 vf_idx)
+{
+ u32 idx, qpp;
+
+ /* get stats for all of the queues */
+ qpp = fm10k_queues_per_pool(hw);
+ idx = fm10k_vf_queue_index(hw, vf_idx);
+ fm10k_update_hw_stats_q(hw, q, idx, qpp);
+}
+
+/**
+ * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function is a default handler for MSI-X requests from the VF. The
+ * assumption is that in this case it is acceptable to just directly
+ * hand off the message from the VF to the underlying shared code.
+ **/
+s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
+ u8 vf_idx = vf_info->vf_idx;
+
+ UNREFERENCED_1PARAMETER(results);
+ DEBUGFUNC("fm10k_iov_msg_msix_pf");
+
+ return hw->iov.ops.assign_int_moderator(hw, vf_idx);
+}
+
+/**
+ * fm10k_iov_select_vid - Select correct default vid
+ * @vf_info: pointer to VF information structure
+ * @vid: VLAN ID to correct
+ *
+ * Will report an error if the vid conflicts with an administratively set
+ * pf_vid. For vid = 0, it will return either the pf_vid or sw_vid depending
+ * on which one is set.
+ */
+STATIC s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
+{
+ if (!vid)
+ return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
+ else if (vf_info->pf_vid && vid != vf_info->pf_vid)
+ return FM10K_ERR_PARAM;
+ else
+ return vid;
+}
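+
+/* Behavior of the helper above by case, e.g. with an administratively set
+ * pf_vid of 100 a VF request for VLAN 200 is rejected, while requests for
+ * 0 or 100 both resolve to 100:
+ *
+ *	vid == 0                    returns pf_vid if set, else sw_vid
+ *	pf_vid set, vid != pf_vid   returns FM10K_ERR_PARAM
+ *	otherwise                   returns vid unchanged
+ */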
+
+/**
+ * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function is a default handler for MAC/VLAN requests from the VF.
+ * The assumption is that in this case it is acceptable to just directly
+ * hand off the message from the VF to the underlying shared code.
+ **/
+s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
+ int err = FM10K_SUCCESS;
+ u8 mac[ETH_ALEN];
+ u32 *result;
+ bool set;
+ u16 vlan;
+ u32 vid;
+
+ DEBUGFUNC("fm10k_iov_msg_mac_vlan_pf");
+
+ /* we shouldn't be updating rules on a disabled interface */
+ if (!FM10K_VF_FLAG_ENABLED(vf_info))
+ err = FM10K_ERR_PARAM;
+
+ if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
+ result = results[FM10K_MAC_VLAN_MSG_VLAN];
+
+ /* record VLAN id requested */
+ err = fm10k_tlv_attr_get_u32(result, &vid);
+ if (err)
+ return err;
+
+ /* verify upper 16 bits are zero */
+ if (vid >> 16)
+ return FM10K_ERR_PARAM;
+
+ set = !(vid & FM10K_VLAN_CLEAR);
+ vid &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, (u16)vid);
+ if (err < 0)
+ return err;
+ else
+ vid = err;
+
+		/* update VSI info for VF with regard to VLAN table */
+ err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
+ }
+
+ if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
+ result = results[FM10K_MAC_VLAN_MSG_MAC];
+
+ /* record unicast MAC address requested */
+ err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
+ if (err)
+ return err;
+
+ /* block attempts to set MAC for a locked device */
+ if (FM10K_IS_VALID_ETHER_ADDR(vf_info->mac) &&
+ memcmp(mac, vf_info->mac, ETH_ALEN))
+ return FM10K_ERR_PARAM;
+
+ set = !(vlan & FM10K_VLAN_CLEAR);
+ vlan &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vlan);
+ if (err < 0)
+ return err;
+ else
+ vlan = (u16)err;
+
+ /* notify switch of request for new unicast address */
+ err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
+ mac, vlan, set, 0);
+ }
+
+ if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
+ result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
+
+ /* record multicast MAC address requested */
+ err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
+ if (err)
+ return err;
+
+ /* verify that the VF is allowed to request multicast */
+ if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
+ return FM10K_ERR_PARAM;
+
+ set = !(vlan & FM10K_VLAN_CLEAR);
+ vlan &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vlan);
+ if (err < 0)
+ return err;
+ else
+ vlan = (u16)err;
+
+ /* notify switch of request for new multicast address */
+ err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
+ mac, vlan, set);
+ }
+
+ return err;
+}
+
+/**
+ * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
+ * @vf_info: VF info structure containing capability flags
+ * @mode: Requested xcast mode
+ *
+ * This function outputs the mode that most closely matches the requested
+ * mode. If no modes match, it will request that we disable the port.
+ **/
+STATIC u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
+ u8 mode)
+{
+ u8 vf_flags = vf_info->vf_flags;
+
+ /* match up mode to capabilities as best as possible */
+ switch (mode) {
+ case FM10K_XCAST_MODE_PROMISC:
+ if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
+ return FM10K_XCAST_MODE_PROMISC;
+		/* fallthrough */
+ case FM10K_XCAST_MODE_ALLMULTI:
+ if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
+ return FM10K_XCAST_MODE_ALLMULTI;
+		/* fallthrough */
+ case FM10K_XCAST_MODE_MULTI:
+ if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
+ return FM10K_XCAST_MODE_MULTI;
+		/* fallthrough */
+ case FM10K_XCAST_MODE_NONE:
+ if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
+ return FM10K_XCAST_MODE_NONE;
+		/* fallthrough */
+ default:
+ break;
+ }
+
+ /* disable interface as it should not be able to request any */
+ return FM10K_XCAST_MODE_DISABLE;
+}
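+
+/* Demotion example for the fallthrough chain above: a VF whose flags carry
+ * only FM10K_VF_FLAG_MULTI_CAPABLE and FM10K_VF_FLAG_NONE_CAPABLE that
+ * requests FM10K_XCAST_MODE_PROMISC falls through the PROMISC and ALLMULTI
+ * cases and is granted FM10K_XCAST_MODE_MULTI instead.
+ */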
+
+/**
+ * fm10k_iov_msg_lport_state_pf - Message handler for port state requests
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function is a default handler for port state requests. The port
+ * state requests for now are basic and consist of enabling or disabling
+ * the port.
+ **/
+s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
+ u32 *result;
+ s32 err = FM10K_SUCCESS;
+ u32 msg[2];
+ u8 mode = 0;
+
+ DEBUGFUNC("fm10k_iov_msg_lport_state_pf");
+
+ /* verify VF is allowed to enable even minimal mode */
+ if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE))
+ return FM10K_ERR_PARAM;
+
+ if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
+ result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
+
+ /* XCAST mode update requested */
+ err = fm10k_tlv_attr_get_u8(result, &mode);
+ if (err)
+ return FM10K_ERR_PARAM;
+
+ /* prep for possible demotion depending on capabilities */
+ mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
+
+ /* if mode is not currently enabled, enable it */
+ if (!(FM10K_VF_FLAG_ENABLED(vf_info) & (1 << mode)))
+ fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
+
+ /* swap mode back to a bit flag */
+ mode = FM10K_VF_FLAG_SET_MODE(mode);
+ } else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) {
+ /* need to disable the port if it is already enabled */
+ if (FM10K_VF_FLAG_ENABLED(vf_info))
+ err = fm10k_update_lport_state_pf(hw, vf_info->glort,
+ 1, false);
+
+ /* need to clear VF_FLAG_ENABLED in order to ensure that we
+ * actually re-enable the lport state below. Note that this
+ * has no impact if VF is already disabled, as the flags are
+ * already zeroed.
+ */
+ if (!err)
+ vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info);
+
+ /* when enabling the port we should reset the rate limiters */
+ hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);
+
+ /* set mode for minimal functionality */
+ mode = FM10K_VF_FLAG_SET_MODE_NONE;
+
+ /* generate port state response to notify VF it is ready */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
+ fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY);
+ mbx->ops.enqueue_tx(hw, mbx, msg);
+ }
+
+ /* if enable state toggled note the update */
+ if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode))
+ err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1,
+ !!mode);
+
+ /* if state change succeeded, then update our stored state */
+ mode |= FM10K_VF_FLAG_CAPABLE(vf_info);
+ if (!err)
+ vf_info->vf_flags = mode;
+
+ return err;
+}
+
+const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = {
+ FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
+ FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
+ FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
+ FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
+ FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
+};
+
+/**
+ * fm10k_update_hw_stats_pf - Updates hardware related statistics of PF
+ * @hw: pointer to hardware structure
+ * @stats: pointer to the stats structure to update
+ *
+ * This function collects and aggregates global and per queue hardware
+ * statistics.
+ **/
+STATIC void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats)
+{
+ u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop;
+ u32 id, id_prev;
+
+ DEBUGFUNC("fm10k_update_hw_stats_pf");
+
+ /* Use Tx queue 0 as a canary to detect a reset */
+ id = FM10K_READ_REG(hw, FM10K_TXQCTL(0));
+
+ /* Read Global Statistics */
+ do {
+ timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT,
+ &stats->timeout);
+ ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur);
+ ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca);
+ um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um);
+ xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec);
+ vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP,
+ &stats->vlan_drop);
+ loopback_drop = fm10k_read_hw_stats_32b(hw,
+ FM10K_STATS_LOOPBACK_DROP,
+ &stats->loopback_drop);
+ nodesc_drop = fm10k_read_hw_stats_32b(hw,
+ FM10K_STATS_NODESC_DROP,
+ &stats->nodesc_drop);
+
+ /* if value has not changed then we have consistent data */
+ id_prev = id;
+ id = FM10K_READ_REG(hw, FM10K_TXQCTL(0));
+ } while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK);
+
+ /* drop non-ID bits and set VALID ID bit */
+ id &= FM10K_TXQCTL_ID_MASK;
+ id |= FM10K_STAT_VALID;
+
+ /* Update Global Statistics */
+ if (stats->stats_idx == id) {
+ stats->timeout.count += timeout;
+ stats->ur.count += ur;
+ stats->ca.count += ca;
+ stats->um.count += um;
+ stats->xec.count += xec;
+ stats->vlan_drop.count += vlan_drop;
+ stats->loopback_drop.count += loopback_drop;
+ stats->nodesc_drop.count += nodesc_drop;
+ }
+
+ /* Update bases and record current PF id */
+ fm10k_update_hw_base_32b(&stats->timeout, timeout);
+ fm10k_update_hw_base_32b(&stats->ur, ur);
+ fm10k_update_hw_base_32b(&stats->ca, ca);
+ fm10k_update_hw_base_32b(&stats->um, um);
+ fm10k_update_hw_base_32b(&stats->xec, xec);
+ fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop);
+ fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop);
+ fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop);
+ stats->stats_idx = id;
+
+ /* Update Queue Statistics */
+ fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
+}
+
+/**
+ * fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF
+ * @hw: pointer to hardware structure
+ * @stats: pointer to the stats structure to update
+ *
+ * This function resets the base for global and per queue hardware
+ * statistics.
+ **/
+STATIC void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats)
+{
+ DEBUGFUNC("fm10k_rebind_hw_stats_pf");
+
+ /* Unbind Global Statistics */
+ fm10k_unbind_hw_stats_32b(&stats->timeout);
+ fm10k_unbind_hw_stats_32b(&stats->ur);
+ fm10k_unbind_hw_stats_32b(&stats->ca);
+ fm10k_unbind_hw_stats_32b(&stats->um);
+ fm10k_unbind_hw_stats_32b(&stats->xec);
+ fm10k_unbind_hw_stats_32b(&stats->vlan_drop);
+ fm10k_unbind_hw_stats_32b(&stats->loopback_drop);
+ fm10k_unbind_hw_stats_32b(&stats->nodesc_drop);
+
+ /* Unbind Queue Statistics */
+ fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
+
+ /* Reinitialize bases for all stats */
+ fm10k_update_hw_stats_pf(hw, stats);
+}
+
+/**
+ * fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system
+ * @hw: pointer to hardware structure
+ * @dma_mask: 64 bit DMA mask required for platform
+ *
+ * This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order
+ * to limit the access to memory beyond what is physically in the system.
+ **/
+STATIC void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask)
+{
+ /* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */
+ u32 phyaddr = (u32)(dma_mask >> 32);
+
+ DEBUGFUNC("fm10k_set_dma_mask_pf");
+
+ FM10K_WRITE_REG(hw, FM10K_PHYADDR, phyaddr);
+}
+
+/**
+ * fm10k_get_fault_pf - Record a fault in one of the interface units
+ * @hw: pointer to hardware structure
+ * @type: pointer to fault type register offset
+ * @fault: pointer to memory location to record the fault
+ *
+ * Record the fault register contents to the fault data structure and
+ * clear the entry from the register.
+ *
+ * Returns ERR_PARAM if invalid register is specified or no error is present.
+ **/
+STATIC s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
+ struct fm10k_fault *fault)
+{
+ u32 func;
+
+ DEBUGFUNC("fm10k_get_fault_pf");
+
+ /* verify the fault register is in range and is aligned */
+ switch (type) {
+ case FM10K_PCA_FAULT:
+ case FM10K_THI_FAULT:
+ case FM10K_FUM_FAULT:
+ break;
+ default:
+ return FM10K_ERR_PARAM;
+ }
+
+ /* only service faults that are valid */
+ func = FM10K_READ_REG(hw, type + FM10K_FAULT_FUNC);
+ if (!(func & FM10K_FAULT_FUNC_VALID))
+ return FM10K_ERR_PARAM;
+
+ /* read remaining fields */
+ fault->address = FM10K_READ_REG(hw, type + FM10K_FAULT_ADDR_HI);
+ fault->address <<= 32;
+	fault->address |= FM10K_READ_REG(hw, type + FM10K_FAULT_ADDR_LO);
+ fault->specinfo = FM10K_READ_REG(hw, type + FM10K_FAULT_SPECINFO);
+
+ /* clear valid bit to allow for next error */
+ FM10K_WRITE_REG(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID);
+
+ /* Record which function triggered the error */
+ if (func & FM10K_FAULT_FUNC_PF)
+ fault->func = 0;
+ else
+ fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
+ FM10K_FAULT_FUNC_VF_SHIFT);
+
+ /* record fault type */
+ fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_request_lport_map_pf - Request LPORT map from the switch API
+ * @hw: pointer to hardware structure
+ *
+ **/
+STATIC s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[1];
+
+ DEBUGFUNC("fm10k_request_lport_pf");
+
+ /* issue request asking for LPORT map */
+ fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_get_host_state_pf - Returns the state of the switch and mailbox
+ * @hw: pointer to hardware structure
+ * @switch_ready: pointer to boolean value that will record switch state
+ *
+ * This function will check the DMA_CTRL2 register and mailbox in order
+ * to determine if the switch is ready for the PF to begin requesting
+ * addresses and mapping traffic to the local interface.
+ **/
+STATIC s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
+{
+ s32 ret_val = FM10K_SUCCESS;
+ u32 dma_ctrl2;
+
+ DEBUGFUNC("fm10k_get_host_state_pf");
+
+ /* verify the switch is ready for interaction */
+ dma_ctrl2 = FM10K_READ_REG(hw, FM10K_DMA_CTRL2);
+ if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
+ goto out;
+
+ /* retrieve generic host state info */
+ ret_val = fm10k_get_host_state_generic(hw, switch_ready);
+ if (ret_val)
+ goto out;
+
+ /* interface cannot receive traffic without logical ports */
+ if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
+ ret_val = fm10k_request_lport_map_pf(hw);
+
+out:
+ return ret_val;
+}
+
+/* This structure defines the attributes to be parsed below */
+const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
+ FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_msg_lport_map_pf - Message handler for lport_map message from SM
+ * @hw: Pointer to hardware structure
+ * @results: pointer array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler configures the lport mapping based on the reply from the
+ * switch API.
+ **/
+s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ u16 glort, mask;
+ u32 dglort_map;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_lport_map_pf");
+
+ err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP],
+ &dglort_map);
+ if (err)
+ return err;
+
+ /* extract values out of the header */
+ glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT);
+ mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK);
+
+ /* verify mask is set and none of the masked bits in glort are set */
+ if (!mask || (glort & ~mask))
+ return FM10K_ERR_PARAM;
+
+ /* verify the mask is contiguous, and that it is 1's followed by 0's */
+ if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE)
+ return FM10K_ERR_PARAM;
+
+ /* record the glort, mask, and port count */
+ hw->mac.dglort_map = dglort_map;
+
+ return FM10K_SUCCESS;
+}
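+
+/* The contiguity check above isolates the lowest set bit of the mask with
+ * ~(mask - 1) & mask and adds it back to the mask; for a valid run of 1's
+ * justified against bit 15 the sum carries out of the low 16 bits.
+ * Examples: mask = 0xff00 passes (0xff00 + 0x0100 = 0x10000) while
+ * mask = 0xf0f0 fails (0xf0f0 + 0x0010 = 0xf100).
+ */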
+
+const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
+ FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM
+ * @hw: Pointer to hardware structure
+ * @results: pointer array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler configures the default VLAN for the PF
+ **/
+s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ u16 glort, pvid;
+ u32 pvid_update;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_update_pvid_pf");
+
+ err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
+ &pvid_update);
+ if (err)
+ return err;
+
+ /* extract values from the pvid update */
+ glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
+ pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);
+
+ /* if glort is not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort))
+ return FM10K_ERR_PARAM;
+
+ /* verify VID is valid */
+ if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* record the port VLAN ID value */
+ hw->mac.default_vid = pvid;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_record_global_table_data - Move global table data to swapi table info
+ * @from: pointer to source table data structure
+ * @to: pointer to destination table info structure
+ *
+ * This function will copy table_data to the table_info contained in
+ * the hw struct.
+ **/
+static void fm10k_record_global_table_data(struct fm10k_global_table_data *from,
+ struct fm10k_swapi_table_info *to)
+{
+ /* convert from le32 struct to CPU byte ordered values */
+ to->used = FM10K_LE32_TO_CPU(from->used);
+ to->avail = FM10K_LE32_TO_CPU(from->avail);
+}
+
+const struct fm10k_tlv_attr fm10k_err_msg_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
+ sizeof(struct fm10k_swapi_error)),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_msg_err_pf - Message handler for error reply
+ * @hw: Pointer to hardware structure
+ * @results: pointer array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler will capture the data for any error replies to previous
+ * messages that the PF has sent.
+ **/
+s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_swapi_error err_msg;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_err_pf");
+
+ /* extract structure from message */
+ err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR],
+ &err_msg, sizeof(err_msg));
+ if (err)
+ return err;
+
+ /* record table status */
+ fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac);
+ fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop);
+ fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu);
+
+ /* record SW API status value */
+ hw->swapi.status = FM10K_LE32_TO_CPU(err_msg.status);
+
+ return FM10K_SUCCESS;
+}
+
+/* currently there is no shared 1588 timestamp handler */
+
+const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP,
+ sizeof(struct fm10k_swapi_1588_timestamp)),
+ FM10K_TLV_ATTR_LAST
+};
+
+const struct fm10k_tlv_attr fm10k_1588_clock_owner_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_CLOCK_OWNER,
+ sizeof(struct fm10k_swapi_1588_clock_owner)),
+ FM10K_TLV_ATTR_LAST
+};
+
+const struct fm10k_tlv_attr fm10k_master_clk_offset_attr[] = {
+ FM10K_TLV_ATTR_U64(FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_iov_notify_offset_pf - Notify VF of change in PTP offset
+ * @hw: pointer to hardware structure
+ * @vf_info: pointer to the vf info structure
+ * @offset: 64bit unsigned offset from hardware SYSTIME
+ *
+ * This function sends a message to a given VF to notify it of PTP offset
+ * changes.
+ **/
+STATIC void fm10k_iov_notify_offset_pf(struct fm10k_hw *hw,
+ struct fm10k_vf_info *vf_info,
+ u64 offset)
+{
+ u32 msg[4];
+
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588);
+ fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_CLK_OFFSET, offset);
+
+ if (vf_info->mbx.ops.enqueue_tx)
+ vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+}
+
+/**
+ * fm10k_msg_1588_clock_owner_pf - Message handler for clock ownership from SM
+ * @hw: pointer to hardware structure
+ * @results: pointer to array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler configures the FM10K_HW_FLAG_CLOCK_OWNER field for the PF
+ */
+s32 fm10k_msg_1588_clock_owner_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_swapi_1588_clock_owner msg;
+ u16 glort;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_1588_clock_owner");
+
+ err = fm10k_tlv_attr_get_le_struct(
+ results[FM10K_PF_ATTR_ID_1588_CLOCK_OWNER],
+ &msg, sizeof(msg));
+ if (err)
+ return err;
+
+ /* We own the clock iff the glort matches us and the enabled field is
+ * true. Otherwise, the clock must belong to some other port.
+ */
+ glort = le16_to_cpu(msg.glort);
+ if (fm10k_glort_valid_pf(hw, glort) && msg.enabled)
+ hw->flags |= FM10K_HW_FLAG_CLOCK_OWNER;
+ else
+ hw->flags &= ~FM10K_HW_FLAG_CLOCK_OWNER;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_adjust_systime_pf - Adjust systime frequency
+ * @hw: pointer to hardware structure
+ * @ppb: adjustment rate in parts per billion
+ *
+ * This function will adjust the SYSTIME_CFG register contained in BAR 4
+ * if this function is supported for BAR 4 access. The adjustment amount
+ * is based on the parts per billion value provided and adjusted to a
+ * value based on parts per 2^48 clock cycles.
+ *
+ * If adjustment is not supported or the requested value is too large
+ * we will return an error.
+ **/
+STATIC s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
+{
+ u64 systime_adjust;
+
+ DEBUGFUNC("fm10k_adjust_systime_pf");
+
+ /* ensure that we control the clock */
+ if (!(hw->flags & FM10K_HW_FLAG_CLOCK_OWNER))
+ return FM10K_ERR_DEVICE_NOT_SUPPORTED;
+
+ /* if sw_addr is not set we don't have switch register access */
+ if (!hw->sw_addr)
+ return ppb ? FM10K_ERR_PARAM : FM10K_SUCCESS;
+
+ /* we must convert the value from parts per billion to parts per
+ * 2^48 cycles. In addition I have opted to only use the 30 most
+ * significant bits of the adjustment value as the 8 least
+ * significant bits are located in another register and represent
+	 * a value significantly less than a part per billion. The result
+ * of dropping the 8 least significant bits is that the adjustment
+ * value is effectively multiplied by 2^8 when we write it.
+ *
+ * As a result of all this the math for this breaks down as follows:
+ * ppb / 10^9 == adjust * 2^8 / 2^48
+ * If we solve this for adjust, and simplify it comes out as:
+ * ppb * 2^31 / 5^9 == adjust
+ */
+ systime_adjust = (ppb < 0) ? -ppb : ppb;
+ systime_adjust <<= 31;
+ do_div(systime_adjust, 1953125);
+
+ /* verify the requested adjustment value is in range */
+ if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
+ return FM10K_ERR_PARAM;
+
+ if (ppb > 0)
+ systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
+
+ FM10K_WRITE_SW_REG(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
+
+ return FM10K_SUCCESS;
+}
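+
+/* Worked example for the conversion above: 1953125 is 5^9, so the code
+ * computes adjust = ppb * 2^31 / 5^9. For a hypothetical ppb of 100 this
+ * yields 100 * 2147483648 / 1953125 ~= 109951, which is then written along
+ * with the direction bit when the adjustment is positive.
+ */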
+
+/**
+ * fm10k_notify_offset_pf - Notify switch of change in PTP offset
+ * @hw: pointer to hardware structure
+ * @offset: 64bit unsigned offset of SYSTIME
+ *
+ * This function sends a message to the switch to indicate a change in the
+ * offset of the hardware SYSTIME registers. The switch manager is
+ * responsible for transmitting this message to other hosts.
+ */
+STATIC s32 fm10k_notify_offset_pf(struct fm10k_hw *hw, u64 offset)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[4];
+
+ DEBUGFUNC("fm10k_notify_offset_pf");
+
+ /* ensure that we control the clock */
+ if (!(hw->flags & FM10K_HW_FLAG_CLOCK_OWNER))
+ return FM10K_ERR_DEVICE_NOT_SUPPORTED;
+
+ fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_MASTER_CLK_OFFSET);
+ fm10k_tlv_attr_put_u64(msg, FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET, offset);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_read_systime_pf - Reads value of systime registers
+ * @hw: pointer to the hardware structure
+ *
+ * This function reads the contents of two registers which combine to
+ * represent a 64 bit value measured in nanoseconds. In order to guarantee
+ * the value is accurate, we check the 32 most significant bits both before
+ * and after reading the 32 least significant bits, to verify they didn't
+ * change while we were reading the registers.
+ **/
+static u64 fm10k_read_systime_pf(struct fm10k_hw *hw)
+{
+ u32 systime_l, systime_h, systime_tmp;
+
+ systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
+
+ do {
+ systime_tmp = systime_h;
+ systime_l = fm10k_read_reg(hw, FM10K_SYSTIME);
+ systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
+ } while (systime_tmp != systime_h);
+
+ return ((u64)systime_h << 32) | systime_l;
+}
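+
+/* Note on the hi/lo/hi read pattern above: if SYSTIME crosses a 2^32 ns
+ * boundary between the two halves of a single-pass read, the stale upper
+ * half would be paired with the wrapped lower half, yielding a value off by
+ * roughly 4.29 seconds. Re-reading the upper half and retrying on mismatch
+ * rules out such torn reads.
+ */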
+
+static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
+ FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
+ FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
+ FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
+ FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
+ FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
+ FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
+ FM10K_PF_MSG_1588_CLOCK_OWNER_HANDLER(fm10k_msg_1588_clock_owner_pf),
+ FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
+};
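+
+/* Note: fm10k_tlv_msg_parse() walks this table with an ascending linear
+ * scan, so entries must remain sorted by message ID and the table must end
+ * with the FM10K_TLV_ERROR entry, which also serves as the fallback handler
+ * for unrecognized message IDs.
+ */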
+
+/**
+ * fm10k_init_ops_pf - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for PF.
+ * Does not touch the hardware.
+ **/
+s32 fm10k_init_ops_pf(struct fm10k_hw *hw)
+{
+ struct fm10k_mac_info *mac = &hw->mac;
+ struct fm10k_iov_info *iov = &hw->iov;
+
+ DEBUGFUNC("fm10k_init_ops_pf");
+
+ fm10k_init_ops_generic(hw);
+
+ mac->ops.reset_hw = &fm10k_reset_hw_pf;
+ mac->ops.init_hw = &fm10k_init_hw_pf;
+ mac->ops.start_hw = &fm10k_start_hw_generic;
+ mac->ops.stop_hw = &fm10k_stop_hw_generic;
+ mac->ops.is_slot_appropriate = &fm10k_is_slot_appropriate_pf;
+ mac->ops.update_vlan = &fm10k_update_vlan_pf;
+ mac->ops.read_mac_addr = &fm10k_read_mac_addr_pf;
+ mac->ops.update_uc_addr = &fm10k_update_uc_addr_pf;
+ mac->ops.update_mc_addr = &fm10k_update_mc_addr_pf;
+ mac->ops.update_xcast_mode = &fm10k_update_xcast_mode_pf;
+ mac->ops.update_int_moderator = &fm10k_update_int_moderator_pf;
+ mac->ops.update_lport_state = &fm10k_update_lport_state_pf;
+ mac->ops.update_hw_stats = &fm10k_update_hw_stats_pf;
+ mac->ops.rebind_hw_stats = &fm10k_rebind_hw_stats_pf;
+ mac->ops.configure_dglort_map = &fm10k_configure_dglort_map_pf;
+ mac->ops.set_dma_mask = &fm10k_set_dma_mask_pf;
+ mac->ops.get_fault = &fm10k_get_fault_pf;
+ mac->ops.get_host_state = &fm10k_get_host_state_pf;
+ mac->ops.adjust_systime = &fm10k_adjust_systime_pf;
+ mac->ops.notify_offset = &fm10k_notify_offset_pf;
+ mac->ops.read_systime = &fm10k_read_systime_pf;
+
+ mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw);
+
+ iov->ops.assign_resources = &fm10k_iov_assign_resources_pf;
+ iov->ops.configure_tc = &fm10k_iov_configure_tc_pf;
+ iov->ops.assign_int_moderator = &fm10k_iov_assign_int_moderator_pf;
+ iov->ops.assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf;
+ iov->ops.reset_resources = &fm10k_iov_reset_resources_pf;
+ iov->ops.set_lport = &fm10k_iov_set_lport_pf;
+ iov->ops.reset_lport = &fm10k_iov_reset_lport_pf;
+ iov->ops.update_stats = &fm10k_iov_update_stats_pf;
+ iov->ops.notify_offset = &fm10k_iov_notify_offset_pf;
+
+ return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf);
+}
diff --git a/src/dpdk22/drivers/net/fm10k/base/fm10k_pf.h b/src/dpdk22/drivers/net/fm10k/base/fm10k_pf.h
new file mode 100644
index 00000000..44bd193f
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/base/fm10k_pf.h
@@ -0,0 +1,189 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_PF_H_
+#define _FM10K_PF_H_
+
+#include "fm10k_type.h"
+#include "fm10k_common.h"
+
+bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort);
+u16 fm10k_queues_per_pool(struct fm10k_hw *hw);
+u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx);
+
+enum fm10k_pf_tlv_msg_id_v1 {
+ FM10K_PF_MSG_ID_TEST = 0x000, /* msg ID reserved */
+ FM10K_PF_MSG_ID_XCAST_MODES = 0x001,
+ FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE = 0x002,
+ FM10K_PF_MSG_ID_LPORT_MAP = 0x100,
+ FM10K_PF_MSG_ID_LPORT_CREATE = 0x200,
+ FM10K_PF_MSG_ID_LPORT_DELETE = 0x201,
+ FM10K_PF_MSG_ID_CONFIG = 0x300,
+ FM10K_PF_MSG_ID_UPDATE_PVID = 0x400,
+ FM10K_PF_MSG_ID_CREATE_FLOW_TABLE = 0x501,
+ FM10K_PF_MSG_ID_DELETE_FLOW_TABLE = 0x502,
+ FM10K_PF_MSG_ID_UPDATE_FLOW = 0x503,
+ FM10K_PF_MSG_ID_DELETE_FLOW = 0x504,
+ FM10K_PF_MSG_ID_SET_FLOW_STATE = 0x505,
+ FM10K_PF_MSG_ID_GET_1588_INFO = 0x506,
+ FM10K_PF_MSG_ID_1588_TIMESTAMP = 0x701,
+ FM10K_PF_MSG_ID_1588_CLOCK_OWNER = 0x702,
+ FM10K_PF_MSG_ID_MASTER_CLK_OFFSET = 0x703,
+};
+
+enum fm10k_pf_tlv_attr_id_v1 {
+ FM10K_PF_ATTR_ID_ERR = 0x00,
+ FM10K_PF_ATTR_ID_LPORT_MAP = 0x01,
+ FM10K_PF_ATTR_ID_XCAST_MODE = 0x02,
+ FM10K_PF_ATTR_ID_MAC_UPDATE = 0x03,
+ FM10K_PF_ATTR_ID_VLAN_UPDATE = 0x04,
+ FM10K_PF_ATTR_ID_CONFIG = 0x05,
+ FM10K_PF_ATTR_ID_CREATE_FLOW_TABLE = 0x06,
+ FM10K_PF_ATTR_ID_DELETE_FLOW_TABLE = 0x07,
+ FM10K_PF_ATTR_ID_UPDATE_FLOW = 0x08,
+ FM10K_PF_ATTR_ID_FLOW_STATE = 0x09,
+ FM10K_PF_ATTR_ID_FLOW_HANDLE = 0x0A,
+ FM10K_PF_ATTR_ID_DELETE_FLOW = 0x0B,
+ FM10K_PF_ATTR_ID_PORT = 0x0C,
+ FM10K_PF_ATTR_ID_UPDATE_PVID = 0x0D,
+ FM10K_PF_ATTR_ID_1588_TIMESTAMP = 0x10,
+ FM10K_PF_ATTR_ID_1588_CLOCK_OWNER = 0x12,
+ FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET = 0x14,
+};
+
+#define FM10K_MSG_LPORT_MAP_GLORT_SHIFT 0
+#define FM10K_MSG_LPORT_MAP_GLORT_SIZE 16
+#define FM10K_MSG_LPORT_MAP_MASK_SHIFT 16
+#define FM10K_MSG_LPORT_MAP_MASK_SIZE 16
+
+#define FM10K_MSG_UPDATE_PVID_GLORT_SHIFT 0
+#define FM10K_MSG_UPDATE_PVID_GLORT_SIZE 16
+#define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16
+#define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16
+
+/* The following data structures are overlaid directly onto TLV mailbox
+ * messages, and must not have gaps between their values. They must line up
+ * exactly with the TLV definitions.
+ */
+#ifdef C99
+#pragma pack(push, 1)
+#else
+#pragma pack(1)
+#endif /* C99 */
+
+struct fm10k_mac_update {
+ __le32 mac_lower;
+ __le16 mac_upper;
+ __le16 vlan;
+ __le16 glort;
+ u8 flags;
+ u8 action;
+};
+
+struct fm10k_global_table_data {
+ __le32 used;
+ __le32 avail;
+};
+
+struct fm10k_swapi_error {
+ __le32 status;
+ struct fm10k_global_table_data mac;
+ struct fm10k_global_table_data nexthop;
+ struct fm10k_global_table_data ffu;
+};
+
+struct fm10k_swapi_1588_timestamp {
+ __le64 egress;
+ __le64 ingress;
+ __le16 dglort;
+ __le16 sglort;
+};
+
+struct fm10k_swapi_1588_clock_owner {
+ __le16 glort;
+ __le16 enabled;
+};
+
+#ifdef C99
+#pragma pack(pop)
+#else
+#pragma pack()
+#endif /* C99 */
+
+#define FM10K_PF_MSG_LPORT_CREATE_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_CREATE, NULL, func)
+#define FM10K_PF_MSG_LPORT_DELETE_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_DELETE, NULL, func)
+s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
+#define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_MAP, \
+ fm10k_lport_map_msg_attr, func)
+s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[];
+#define FM10K_PF_MSG_UPDATE_PVID_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_UPDATE_PVID, \
+ fm10k_update_pvid_msg_attr, func)
+
+s32 fm10k_msg_err_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
+#define FM10K_PF_MSG_ERR_HANDLER(msg, func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func)
+
+extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[];
+#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \
+ fm10k_1588_timestamp_msg_attr, func)
+
+s32 fm10k_msg_1588_clock_owner_pf(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_1588_clock_owner_attr[];
+#define FM10K_PF_MSG_1588_CLOCK_OWNER_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_CLOCK_OWNER, \
+ fm10k_1588_clock_owner_attr, func)
+
+extern const struct fm10k_tlv_attr fm10k_master_clk_offset_attr[];
+#define FM10K_PF_MSG_MASTER_CLK_OFFSET_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_MASTER_CLK_OFFSET, \
+ fm10k_master_clk_offset_attr, func)
+
+s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
+s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+extern const struct fm10k_msg_data fm10k_iov_msg_data_pf[];
+
+s32 fm10k_init_ops_pf(struct fm10k_hw *hw);
+#endif /* _FM10K_PF_H_ */
diff --git a/src/dpdk22/drivers/net/fm10k/base/fm10k_tlv.c b/src/dpdk22/drivers/net/fm10k/base/fm10k_tlv.c
new file mode 100644
index 00000000..1d9d7d88
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/base/fm10k_tlv.c
@@ -0,0 +1,914 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "fm10k_tlv.h"
+
+/**
+ * fm10k_tlv_msg_init - Initialize message block for TLV data storage
+ * @msg: Pointer to message block
+ * @msg_id: Message ID indicating message type
+ *
+ * This function returns success if provided with a valid message pointer
+ **/
+s32 fm10k_tlv_msg_init(u32 *msg, u16 msg_id)
+{
+ DEBUGFUNC("fm10k_tlv_msg_init");
+
+ /* verify pointer is not NULL */
+ if (!msg)
+ return FM10K_ERR_PARAM;
+
+ *msg = (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT) | msg_id;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_put_null_string - Place null terminated string on message
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ * @string: Pointer to string to be stored in attribute
+ *
+ * This function will reorder a string to be CPU endian and store it in
+ * the attribute buffer. It will return success if provided with valid
+ * pointers.
+ **/
+s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id,
+ const unsigned char *string)
+{
+ u32 attr_data = 0, len = 0;
+ u32 *attr;
+
+ DEBUGFUNC("fm10k_tlv_attr_put_null_string");
+
+ /* verify pointers are not NULL */
+ if (!string || !msg)
+ return FM10K_ERR_PARAM;
+
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+
+ /* copy string into local variable and then write to msg */
+ do {
+ /* write data to message */
+ if (len && !(len % 4)) {
+ attr[len / 4] = attr_data;
+ attr_data = 0;
+ }
+
+ /* record character to offset location */
+ attr_data |= (u32)(*string) << (8 * (len % 4));
+ len++;
+
+ /* test for NULL and then increment */
+ } while (*(string++));
+
+ /* write last piece of data to message */
+ attr[(len + 3) / 4] = attr_data;
+
+ /* record attribute header, update message length */
+ len <<= FM10K_TLV_LEN_SHIFT;
+ attr[0] = len | attr_id;
+
+ /* add header length to length */
+ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+ *msg += FM10K_TLV_LEN_ALIGN(len);
+
+ return FM10K_SUCCESS;
+}
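+
+/* For example, storing the string "fm10k" (six bytes including the
+ * terminating NUL) produces a length field of 6 and two data words:
+ * attr[1] = 0x30316d66 ('f' 'm' '1' '0' packed little endian) and
+ * attr[2] = 0x0000006b ('k' followed by the NUL).
+ */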
+
+/**
+ * fm10k_tlv_attr_get_null_string - Get null terminated string from attribute
+ * @attr: Pointer to attribute
+ * @string: Pointer to location of destination string
+ *
+ * This function pulls the string back out of the attribute and will place
+ * it in the array pointed to by string. It will return success if provided
+ * with valid pointers.
+ **/
+s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string)
+{
+ u32 len;
+
+ DEBUGFUNC("fm10k_tlv_attr_get_null_string");
+
+ /* verify pointers are not NULL */
+ if (!string || !attr)
+ return FM10K_ERR_PARAM;
+
+ len = *attr >> FM10K_TLV_LEN_SHIFT;
+ attr++;
+
+ while (len--)
+ string[len] = (u8)(attr[len / 4] >> (8 * (len % 4)));
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_put_mac_vlan - Store MAC/VLAN attribute in message
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ * @mac_addr: MAC address to be stored
+ *
+ * This function will reorder a MAC address to be CPU endian and store it
+ * in the attribute buffer. It will return success if provided with
+ * valid pointers.
+ **/
+s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id,
+ const u8 *mac_addr, u16 vlan)
+{
+ u32 len = ETH_ALEN << FM10K_TLV_LEN_SHIFT;
+ u32 *attr;
+
+ DEBUGFUNC("fm10k_tlv_attr_put_mac_vlan");
+
+ /* verify pointers are not NULL */
+ if (!msg || !mac_addr)
+ return FM10K_ERR_PARAM;
+
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+
+ /* record attribute header, update message length */
+ attr[0] = len | attr_id;
+
+ /* copy value into local variable and then write to msg */
+ attr[1] = FM10K_LE32_TO_CPU(*(const __le32 *)&mac_addr[0]);
+ attr[2] = FM10K_LE16_TO_CPU(*(const __le16 *)&mac_addr[4]);
+ attr[2] |= (u32)vlan << 16;
+
+ /* add header length to length */
+ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+ *msg += FM10K_TLV_LEN_ALIGN(len);
+
+ return FM10K_SUCCESS;
+}
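+
+/* For example, MAC 12:34:56:78:9a:bc with VLAN 0x0fed is encoded as
+ * attr[0] = (6 << FM10K_TLV_LEN_SHIFT) | attr_id, attr[1] = 0x78563412,
+ * and attr[2] = 0x0fedbc9a, with the VLAN in the upper 16 bits.
+ */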
+
+/**
+ * fm10k_tlv_attr_get_mac_vlan - Get MAC/VLAN stored in attribute
+ * @attr: Pointer to attribute
+ * @attr_id: Attribute ID
+ * @mac_addr: location of buffer to store MAC address
+ *
+ * This function pulls the MAC address back out of the attribute and will
+ * place it in the array pointed to by mac_addr. It will return success
+ * if provided with valid pointers.
+ **/
+s32 fm10k_tlv_attr_get_mac_vlan(u32 *attr, u8 *mac_addr, u16 *vlan)
+{
+ DEBUGFUNC("fm10k_tlv_attr_get_mac_vlan");
+
+ /* verify pointers are not NULL */
+ if (!mac_addr || !attr)
+ return FM10K_ERR_PARAM;
+
+ *(__le32 *)&mac_addr[0] = FM10K_CPU_TO_LE32(attr[1]);
+ *(__le16 *)&mac_addr[4] = FM10K_CPU_TO_LE16((u16)(attr[2]));
+ *vlan = (u16)(attr[2] >> 16);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_put_bool - Add header indicating value "true"
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ *
+ * This function will simply add an attribute header; the presence
+ * of the header means the attribute value is true, and its absence
+ * means it is false. The function will return success if provided
+ * with a valid pointer.
+ **/
+s32 fm10k_tlv_attr_put_bool(u32 *msg, u16 attr_id)
+{
+ DEBUGFUNC("fm10k_tlv_attr_put_bool");
+
+ /* verify pointers are not NULL */
+ if (!msg)
+ return FM10K_ERR_PARAM;
+
+ /* record attribute header */
+ msg[FM10K_TLV_DWORD_LEN(*msg)] = attr_id;
+
+ /* add header length to length */
+ *msg += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_put_value - Store integer value attribute in message
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ * @value: Value to be written
+ * @len: Size of value
+ *
+ * This function will place an integer value of up to 8 bytes in size
+ * in a message attribute. The function will return success provided
+ * that msg is a valid pointer, and len is 1, 2, 4, or 8.
+ **/
+s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len)
+{
+ u32 *attr;
+
+ DEBUGFUNC("fm10k_tlv_attr_put_value");
+
+ /* verify non-null msg and len is 1, 2, 4, or 8 */
+ if (!msg || !len || len > 8 || (len & (len - 1)))
+ return FM10K_ERR_PARAM;
+
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+
+ if (len < 4) {
+ attr[1] = (u32)value & ((0x1ul << (8 * len)) - 1);
+ } else {
+ attr[1] = (u32)value;
+ if (len > 4)
+ attr[2] = (u32)(value >> 32);
+ }
+
+ /* record attribute header, update message length */
+ len <<= FM10K_TLV_LEN_SHIFT;
+ attr[0] = len | attr_id;
+
+ /* add header length to length */
+ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+ *msg += FM10K_TLV_LEN_ALIGN(len);
+
+ return FM10K_SUCCESS;
+}
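+
+/* For example, fm10k_tlv_attr_put_u32(msg, id, 0x87654321) appends
+ * attr[0] = (4 << FM10K_TLV_LEN_SHIFT) | id and attr[1] = 0x87654321,
+ * then grows the message length field by 8 bytes (header plus value).
+ */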
+
+/**
+ * fm10k_tlv_attr_get_value - Get integer value stored in attribute
+ * @attr: Pointer to attribute
+ * @value: Pointer to destination buffer
+ * @len: Size of value
+ *
+ * This function will place an integer value of up to 8 bytes in size
+ * in the offset pointed to by value. The function will return success
+ * provided that pointers are valid and the len value matches the
+ * attribute length.
+ **/
+s32 fm10k_tlv_attr_get_value(u32 *attr, void *value, u32 len)
+{
+ DEBUGFUNC("fm10k_tlv_attr_get_value");
+
+ /* verify pointers are not NULL */
+ if (!attr || !value)
+ return FM10K_ERR_PARAM;
+
+ if ((*attr >> FM10K_TLV_LEN_SHIFT) != len)
+ return FM10K_ERR_PARAM;
+
+ if (len == 8)
+ *(u64 *)value = ((u64)attr[2] << 32) | attr[1];
+ else if (len == 4)
+ *(u32 *)value = attr[1];
+ else if (len == 2)
+ *(u16 *)value = (u16)attr[1];
+ else
+ *(u8 *)value = (u8)attr[1];
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_put_le_struct - Store little endian structure in message
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ * @le_struct: Pointer to structure to be written
+ * @len: Size of le_struct
+ *
+ * This function will place a little endian structure value in a message
+ * attribute. The function will return success provided that all pointers
+ * are valid and length is a non-zero multiple of 4.
+ **/
+s32 fm10k_tlv_attr_put_le_struct(u32 *msg, u16 attr_id,
+ const void *le_struct, u32 len)
+{
+ const __le32 *le32_ptr = (const __le32 *)le_struct;
+ u32 *attr;
+ u32 i;
+
+ DEBUGFUNC("fm10k_tlv_attr_put_le_struct");
+
+ /* verify non-null msg and len is in 32 bit words */
+ if (!msg || !len || (len % 4))
+ return FM10K_ERR_PARAM;
+
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+
+ /* copy le32 structure into host byte order at 32b boundaries */
+ for (i = 0; i < (len / 4); i++)
+ attr[i + 1] = FM10K_LE32_TO_CPU(le32_ptr[i]);
+
+ /* record attribute header, update message length */
+ len <<= FM10K_TLV_LEN_SHIFT;
+ attr[0] = len | attr_id;
+
+ /* add header length to length */
+ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+ *msg += FM10K_TLV_LEN_ALIGN(len);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_get_le_struct - Get little endian struct from attribute
+ * @attr: Pointer to attribute
+ * @le_struct: Pointer to structure to be written
+ * @len: Size of structure
+ *
+ * This function will place a little endian structure in the buffer
+ * pointed to by le_struct. The function will return success
+ * provided that pointers are valid and the len value matches the
+ * attribute length.
+ **/
+s32 fm10k_tlv_attr_get_le_struct(u32 *attr, void *le_struct, u32 len)
+{
+ __le32 *le32_ptr = (__le32 *)le_struct;
+ u32 i;
+
+ DEBUGFUNC("fm10k_tlv_attr_get_le_struct");
+
+ /* verify pointers are not NULL */
+ if (!le_struct || !attr)
+ return FM10K_ERR_PARAM;
+
+ if ((*attr >> FM10K_TLV_LEN_SHIFT) != len)
+ return FM10K_ERR_PARAM;
+
+ attr++;
+
+ for (i = 0; len; i++, len -= 4)
+ le32_ptr[i] = FM10K_CPU_TO_LE32(attr[i]);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_nest_start - Start a set of nested attributes
+ * @msg: Pointer to message block
+ * @attr_id: Attribute ID
+ *
+ * This function will mark off a new nested region for encapsulating
+ * a given set of attributes. The idea is if you wish to place a secondary
+ * structure within the message this mechanism allows for that. The
+ * function will return NULL on failure, and a pointer to the start
+ * of the nested attributes on success.
+ **/
+u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id)
+{
+ u32 *attr;
+
+ DEBUGFUNC("fm10k_tlv_attr_nest_start");
+
+ /* verify pointer is not NULL */
+ if (!msg)
+ return NULL;
+
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+
+ attr[0] = attr_id;
+
+ /* return pointer to nest header */
+ return attr;
+}
+
+/**
+ * fm10k_tlv_attr_nest_stop - Stop a set of nested attributes
+ * @msg: Pointer to message block
+ *
+ * This function closes off an existing set of nested attributes. The
+ * message pointer should be pointing to the parent of the nest. So in
+ * the case of a nest within the nest this would be the outer nest pointer.
+ * This function will return success provided all pointers are valid.
+ **/
+s32 fm10k_tlv_attr_nest_stop(u32 *msg)
+{
+ u32 *attr;
+ u32 len;
+
+ DEBUGFUNC("fm10k_tlv_attr_nest_stop");
+
+ /* verify pointer is not NULL */
+ if (!msg)
+ return FM10K_ERR_PARAM;
+
+ /* locate the nested header and retrieve its length */
+ attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
+ len = (attr[0] >> FM10K_TLV_LEN_SHIFT) << FM10K_TLV_LEN_SHIFT;
+
+ /* only include nest if data was added to it */
+ if (len) {
+ len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
+ *msg += len;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_attr_validate - Validate attribute metadata
+ * @attr: Pointer to attribute
+ * @tlv_attr: Type and length info for attribute
+ *
+ * This function does some basic validation of the input TLV. It
+ * verifies the length, and in the case of null terminated strings
+ * it verifies that the last byte is null. The function will
+ * return FM10K_ERR_PARAM if any attribute is malformed,
+ * FM10K_NOT_IMPLEMENTED if the attribute ID is not recognized,
+ * and 0 otherwise.
+ **/
+STATIC s32 fm10k_tlv_attr_validate(u32 *attr,
+ const struct fm10k_tlv_attr *tlv_attr)
+{
+ u32 attr_id = *attr & FM10K_TLV_ID_MASK;
+ u16 len = *attr >> FM10K_TLV_LEN_SHIFT;
+
+ DEBUGFUNC("fm10k_tlv_attr_validate");
+
+ /* verify this is an attribute and not a message */
+ if (*attr & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT))
+ return FM10K_ERR_PARAM;
+
+ /* search through the list of attributes to find a matching ID */
+ while (tlv_attr->id < attr_id)
+ tlv_attr++;
+
+ /* if we didn't find a match then we should exit */
+ if (tlv_attr->id != attr_id)
+ return FM10K_NOT_IMPLEMENTED;
+
+ /* move to start of attribute data */
+ attr++;
+
+ switch (tlv_attr->type) {
+ case FM10K_TLV_NULL_STRING:
+ if (!len ||
+ (attr[(len - 1) / 4] & (0xFF << (8 * ((len - 1) % 4)))))
+ return FM10K_ERR_PARAM;
+ if (len > tlv_attr->len)
+ return FM10K_ERR_PARAM;
+ break;
+ case FM10K_TLV_MAC_ADDR:
+ if (len != ETH_ALEN)
+ return FM10K_ERR_PARAM;
+ break;
+ case FM10K_TLV_BOOL:
+ if (len)
+ return FM10K_ERR_PARAM;
+ break;
+ case FM10K_TLV_UNSIGNED:
+ case FM10K_TLV_SIGNED:
+ if (len != tlv_attr->len)
+ return FM10K_ERR_PARAM;
+ break;
+ case FM10K_TLV_LE_STRUCT:
+ /* struct must be 4 byte aligned */
+ if ((len % 4) || len != tlv_attr->len)
+ return FM10K_ERR_PARAM;
+ break;
+ case FM10K_TLV_NESTED:
+ /* nested attributes must be 4 byte aligned */
+ if (len % 4)
+ return FM10K_ERR_PARAM;
+ break;
+ default:
+ /* attribute id is mapped to bad value */
+ return FM10K_ERR_PARAM;
+ }
+
+ return FM10K_SUCCESS;
+}
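+
+/* Note: like the message tables, a struct fm10k_tlv_attr array passed to
+ * the parser must be sorted by ascending attribute ID and terminated with
+ * FM10K_TLV_ATTR_LAST, whose ID of FM10K_TLV_ERROR (~0u) guarantees the
+ * linear scan above always terminates.
+ */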
+
+/**
+ * fm10k_tlv_attr_parse - Parses stream of attribute data
+ * @attr: Pointer to attribute list
+ * @results: Pointer array to store pointers to attributes
+ * @tlv_attr: Type and length info for attributes
+ *
+ * This function validates a stream of attributes and parses them
+ * into an array of pointers stored in results. The function will
+ * return FM10K_ERR_PARAM on any input or message error; attributes
+ * that are unrecognized or fall outside the results array are skipped
+ * rather than stored, and 0 is returned on success.
+ **/
+s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
+ const struct fm10k_tlv_attr *tlv_attr)
+{
+ u32 i, attr_id, offset = 0;
+ s32 err = 0;
+ u16 len;
+
+ DEBUGFUNC("fm10k_tlv_attr_parse");
+
+ /* verify pointers are not NULL */
+ if (!attr || !results)
+ return FM10K_ERR_PARAM;
+
+ /* initialize results to NULL */
+ for (i = 0; i < FM10K_TLV_RESULTS_MAX; i++)
+ results[i] = NULL;
+
+ /* pull length from the message header */
+ len = *attr >> FM10K_TLV_LEN_SHIFT;
+
+ /* no attributes to parse if there is no length */
+ if (!len)
+ return FM10K_SUCCESS;
+
+ /* no attributes to parse, just raw data, message becomes attribute */
+ if (!tlv_attr) {
+ results[0] = attr;
+ return FM10K_SUCCESS;
+ }
+
+ /* move to start of attribute data */
+ attr++;
+
+ /* run through list parsing all attributes */
+ while (offset < len) {
+ attr_id = *attr & FM10K_TLV_ID_MASK;
+
+ if (attr_id < FM10K_TLV_RESULTS_MAX)
+ err = fm10k_tlv_attr_validate(attr, tlv_attr);
+ else
+ err = FM10K_NOT_IMPLEMENTED;
+
+ if (err < 0)
+ return err;
+ if (!err)
+ results[attr_id] = attr;
+
+ /* update offset */
+ offset += FM10K_TLV_DWORD_LEN(*attr) * 4;
+
+ /* move to next attribute */
+ attr = &attr[FM10K_TLV_DWORD_LEN(*attr)];
+ }
+
+ /* we should find ourselves at the end of the list */
+ if (offset != len)
+ return FM10K_ERR_PARAM;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_tlv_msg_parse - Parses message header and calls function handler
+ * @hw: Pointer to hardware structure
+ * @msg: Pointer to message
+ * @mbx: Pointer to mailbox information structure
+ * @func: Function array containing list of message handling functions
+ *
+ * This function should be the first function called upon receiving a
+ * message. The handler will identify the message type and call the correct
+ * handler for the given message. It will return the value from the function
+ * call on a recognized message type, otherwise it will return
+ * FM10K_NOT_IMPLEMENTED on an unrecognized type.
+ **/
+s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg,
+ struct fm10k_mbx_info *mbx,
+ const struct fm10k_msg_data *data)
+{
+ u32 *results[FM10K_TLV_RESULTS_MAX];
+ u32 msg_id;
+ s32 err;
+
+ DEBUGFUNC("fm10k_tlv_msg_parse");
+
+ /* verify pointers are not NULL */
+ if (!msg || !data)
+ return FM10K_ERR_PARAM;
+
+ /* verify this is a message and not an attribute */
+ if (!(*msg & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT)))
+ return FM10K_ERR_PARAM;
+
+ /* grab message ID */
+ msg_id = *msg & FM10K_TLV_ID_MASK;
+
+ while (data->id < msg_id)
+ data++;
+
+ /* if we didn't find it then pass it up as an error */
+ if (data->id != msg_id) {
+ while (data->id != FM10K_TLV_ERROR)
+ data++;
+ }
+
+ /* parse the attributes into the results list */
+ err = fm10k_tlv_attr_parse(msg, results, data->attr);
+ if (err < 0)
+ return err;
+
+ return data->func(hw, results, mbx);
+}
+
+/**
+ * fm10k_tlv_msg_error - Default handler for unrecognized TLV message IDs
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Unused mailbox pointer
+ *
+ * This function is a default handler for unrecognized messages. At a
+ * minimum it just indicates that the message requested was
+ * unimplemented.
+ **/
+s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ UNREFERENCED_3PARAMETER(hw, results, mbx);
+ DEBUGOUT1("Unknown message ID %u\n", **results & FM10K_TLV_ID_MASK);
+ return FM10K_NOT_IMPLEMENTED;
+}
+
+STATIC const unsigned char test_str[] = "fm10k";
+STATIC const unsigned char test_mac[ETH_ALEN] = { 0x12, 0x34, 0x56,
+ 0x78, 0x9a, 0xbc };
+STATIC const u16 test_vlan = 0x0FED;
+STATIC const u64 test_u64 = 0xfedcba9876543210ull;
+STATIC const u32 test_u32 = 0x87654321;
+STATIC const u16 test_u16 = 0x8765;
+STATIC const u8 test_u8 = 0x87;
+STATIC const s64 test_s64 = -0x123456789abcdef0ll;
+STATIC const s32 test_s32 = -0x1235678;
+STATIC const s16 test_s16 = -0x1234;
+STATIC const s8 test_s8 = -0x12;
+STATIC const __le32 test_le[2] = { FM10K_CPU_TO_LE32(0x12345678),
+ FM10K_CPU_TO_LE32(0x9abcdef0)};
+
+/* The message below is meant to be used as a test message to demonstrate
+ * how to use the TLV interface and to test the types. Normally this code
+ * would be compiled out by stripping the code wrapped in FM10K_TLV_TEST_MSG
+ */
+const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = {
+ FM10K_TLV_ATTR_NULL_STRING(FM10K_TEST_MSG_STRING, 80),
+ FM10K_TLV_ATTR_MAC_ADDR(FM10K_TEST_MSG_MAC_ADDR),
+ FM10K_TLV_ATTR_U8(FM10K_TEST_MSG_U8),
+ FM10K_TLV_ATTR_U16(FM10K_TEST_MSG_U16),
+ FM10K_TLV_ATTR_U32(FM10K_TEST_MSG_U32),
+ FM10K_TLV_ATTR_U64(FM10K_TEST_MSG_U64),
+ FM10K_TLV_ATTR_S8(FM10K_TEST_MSG_S8),
+ FM10K_TLV_ATTR_S16(FM10K_TEST_MSG_S16),
+ FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_S32),
+ FM10K_TLV_ATTR_S64(FM10K_TEST_MSG_S64),
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_TEST_MSG_LE_STRUCT, 8),
+ FM10K_TLV_ATTR_NESTED(FM10K_TEST_MSG_NESTED),
+ FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_RESULT),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_tlv_msg_test_generate_data - Stuff message with data
+ * @msg: Pointer to message
+ * @attr_flags: List of flags indicating what attributes to add
+ *
+ * This function is meant to load a message buffer with attribute data
+ **/
+STATIC void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags)
+{
+ DEBUGFUNC("fm10k_tlv_msg_test_generate_data");
+
+ if (attr_flags & (1 << FM10K_TEST_MSG_STRING))
+ fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING,
+ test_str);
+ if (attr_flags & (1 << FM10K_TEST_MSG_MAC_ADDR))
+ fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR,
+ test_mac, test_vlan);
+ if (attr_flags & (1 << FM10K_TEST_MSG_U8))
+ fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8);
+ if (attr_flags & (1 << FM10K_TEST_MSG_U16))
+ fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16);
+ if (attr_flags & (1 << FM10K_TEST_MSG_U32))
+ fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32);
+ if (attr_flags & (1 << FM10K_TEST_MSG_U64))
+ fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64);
+ if (attr_flags & (1 << FM10K_TEST_MSG_S8))
+ fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8);
+ if (attr_flags & (1 << FM10K_TEST_MSG_S16))
+ fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16);
+ if (attr_flags & (1 << FM10K_TEST_MSG_S32))
+ fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32);
+ if (attr_flags & (1 << FM10K_TEST_MSG_S64))
+ fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64);
+ if (attr_flags & (1 << FM10K_TEST_MSG_LE_STRUCT))
+ fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT,
+ test_le, 8);
+}
+
+/**
+ * fm10k_tlv_msg_test_create - Create a test message testing all attributes
+ * @msg: Pointer to message
+ * @attr_flags: List of flags indicating what attributes to add
+ *
+ * This function is meant to load a message buffer with all attribute types
+ * including a nested attribute.
+ **/
+void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags)
+{
+ u32 *nest = NULL;
+
+ DEBUGFUNC("fm10k_tlv_msg_test_create");
+
+ fm10k_tlv_msg_init(msg, FM10K_TLV_MSG_ID_TEST);
+
+ fm10k_tlv_msg_test_generate_data(msg, attr_flags);
+
+ /* check for nested attributes */
+ attr_flags >>= FM10K_TEST_MSG_NESTED;
+
+ if (attr_flags) {
+ nest = fm10k_tlv_attr_nest_start(msg, FM10K_TEST_MSG_NESTED);
+
+ fm10k_tlv_msg_test_generate_data(nest, attr_flags);
+
+ fm10k_tlv_attr_nest_stop(msg);
+ }
+}
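+
+/* Minimal usage sketch (illustrative; the buffer size here is an arbitrary
+ * choice for the example):
+ *
+ *	u32 msg[64];
+ *
+ *	fm10k_tlv_msg_test_create(msg, (1 << FM10K_TEST_MSG_U32) |
+ *				       (1 << (FM10K_TEST_MSG_NESTED +
+ *					      FM10K_TEST_MSG_U32)));
+ *
+ * builds a test message carrying one u32 attribute and a nested copy of it.
+ */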
+
+/**
+ * fm10k_tlv_msg_test - Validate all results on test message receive
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to attributes in the message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function does a check to verify all attributes match what the test
+ * message placed in the message buffer. It is the default handler
+ * for TLV test messages.
+ **/
+s32 fm10k_tlv_msg_test(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ u32 *nest_results[FM10K_TLV_RESULTS_MAX];
+ unsigned char result_str[80];
+ unsigned char result_mac[ETH_ALEN];
+ s32 err = FM10K_SUCCESS;
+ __le32 result_le[2];
+ u16 result_vlan;
+ u64 result_u64;
+ u32 result_u32;
+ u16 result_u16;
+ u8 result_u8;
+ s64 result_s64;
+ s32 result_s32;
+ s16 result_s16;
+ s8 result_s8;
+ u32 reply[3];
+
+ DEBUGFUNC("fm10k_tlv_msg_test");
+
+ /* retrieve results of a previous test */
+ if (!!results[FM10K_TEST_MSG_RESULT])
+ return fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_RESULT],
+ &mbx->test_result);
+
+parse_nested:
+ if (!!results[FM10K_TEST_MSG_STRING]) {
+ err = fm10k_tlv_attr_get_null_string(
+ results[FM10K_TEST_MSG_STRING],
+ result_str);
+ if (!err && memcmp(test_str, result_str, sizeof(test_str)))
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_MAC_ADDR]) {
+ err = fm10k_tlv_attr_get_mac_vlan(
+ results[FM10K_TEST_MSG_MAC_ADDR],
+ result_mac, &result_vlan);
+ if (!err && memcmp(test_mac, result_mac, ETH_ALEN))
+ err = FM10K_ERR_INVALID_VALUE;
+ if (!err && test_vlan != result_vlan)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_U8]) {
+ err = fm10k_tlv_attr_get_u8(results[FM10K_TEST_MSG_U8],
+ &result_u8);
+ if (!err && test_u8 != result_u8)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_U16]) {
+ err = fm10k_tlv_attr_get_u16(results[FM10K_TEST_MSG_U16],
+ &result_u16);
+ if (!err && test_u16 != result_u16)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_U32]) {
+ err = fm10k_tlv_attr_get_u32(results[FM10K_TEST_MSG_U32],
+ &result_u32);
+ if (!err && test_u32 != result_u32)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_U64]) {
+ err = fm10k_tlv_attr_get_u64(results[FM10K_TEST_MSG_U64],
+ &result_u64);
+ if (!err && test_u64 != result_u64)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_S8]) {
+ err = fm10k_tlv_attr_get_s8(results[FM10K_TEST_MSG_S8],
+ &result_s8);
+ if (!err && test_s8 != result_s8)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_S16]) {
+ err = fm10k_tlv_attr_get_s16(results[FM10K_TEST_MSG_S16],
+ &result_s16);
+ if (!err && test_s16 != result_s16)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_S32]) {
+ err = fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_S32],
+ &result_s32);
+ if (!err && test_s32 != result_s32)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_S64]) {
+ err = fm10k_tlv_attr_get_s64(results[FM10K_TEST_MSG_S64],
+ &result_s64);
+ if (!err && test_s64 != result_s64)
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+ if (!!results[FM10K_TEST_MSG_LE_STRUCT]) {
+ err = fm10k_tlv_attr_get_le_struct(
+ results[FM10K_TEST_MSG_LE_STRUCT],
+ result_le,
+ sizeof(result_le));
+ if (!err && memcmp(test_le, result_le, sizeof(test_le)))
+ err = FM10K_ERR_INVALID_VALUE;
+ if (err)
+ goto report_result;
+ }
+
+ if (!!results[FM10K_TEST_MSG_NESTED]) {
+ /* clear any pointers */
+ memset(nest_results, 0, sizeof(nest_results));
+
+ /* parse the nested attributes into the nest results list */
+ err = fm10k_tlv_attr_parse(results[FM10K_TEST_MSG_NESTED],
+ nest_results,
+ fm10k_tlv_msg_test_attr);
+ if (err)
+ goto report_result;
+
+ /* loop back through to the start */
+ results = nest_results;
+ goto parse_nested;
+ }
+
+report_result:
+ /* generate reply with test result */
+ fm10k_tlv_msg_init(reply, FM10K_TLV_MSG_ID_TEST);
+ fm10k_tlv_attr_put_s32(reply, FM10K_TEST_MSG_RESULT, err);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, reply);
+}
diff --git a/src/dpdk22/drivers/net/fm10k/base/fm10k_tlv.h b/src/dpdk22/drivers/net/fm10k/base/fm10k_tlv.h
new file mode 100644
index 00000000..ad972363
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/base/fm10k_tlv.h
@@ -0,0 +1,199 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_TLV_H_
+#define _FM10K_TLV_H_
+
+/* forward declaration */
+struct fm10k_msg_data;
+
+#include "fm10k_type.h"
+
+/* Message / Argument header format
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Length | Flags | Type / ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * The message header format described here is used for messages that are
+ * passed between the PF and the VF. To allow for messages larger than the
+ * mailbox size we will provide a message with the above header, and it
+ * will be segmented and transported through the mailbox to the other side,
+ * where it is reassembled. The header contains the following fields:
+ * Length: Length of the message in bytes excluding the message header
+ * Flags: TBD
+ * Type/ID: The message/argument type we are passing
+ */
+/* message data header */
+#define FM10K_TLV_ID_SHIFT 0
+#define FM10K_TLV_ID_SIZE 16
+#define FM10K_TLV_ID_MASK ((1u << FM10K_TLV_ID_SIZE) - 1)
+#define FM10K_TLV_FLAGS_SHIFT 16
+#define FM10K_TLV_FLAGS_MSG 0x1
+#define FM10K_TLV_FLAGS_SIZE 4
+#define FM10K_TLV_LEN_SHIFT 20
+#define FM10K_TLV_LEN_SIZE 12
+
+#define FM10K_TLV_HDR_LEN 4ul
+#define FM10K_TLV_LEN_ALIGN_MASK \
+ ((FM10K_TLV_HDR_LEN - 1) << FM10K_TLV_LEN_SHIFT)
+#define FM10K_TLV_LEN_ALIGN(tlv) \
+ (((tlv) + FM10K_TLV_LEN_ALIGN_MASK) & ~FM10K_TLV_LEN_ALIGN_MASK)
+#define FM10K_TLV_DWORD_LEN(tlv) \
+ ((u16)((FM10K_TLV_LEN_ALIGN(tlv)) >> (FM10K_TLV_LEN_SHIFT + 2)) + 1)
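+
+/* For example, fm10k_tlv_msg_init() for the 0x702 clock owner message
+ * stores (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT) | 0x702
+ * = 0x00010702, and for a MAC attribute whose length field is 6,
+ * FM10K_TLV_DWORD_LEN() rounds 6 up to 8 and returns 8/4 + 1 = 3 dwords:
+ * one header dword plus two data dwords.
+ */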
+
+#define FM10K_TLV_RESULTS_MAX 32
+
+enum fm10k_tlv_type {
+ FM10K_TLV_NULL_STRING,
+ FM10K_TLV_MAC_ADDR,
+ FM10K_TLV_BOOL,
+ FM10K_TLV_UNSIGNED,
+ FM10K_TLV_SIGNED,
+ FM10K_TLV_LE_STRUCT,
+ FM10K_TLV_NESTED,
+ FM10K_TLV_MAX_TYPE
+};
+
+#define FM10K_TLV_ERROR (~0u)
+
+struct fm10k_tlv_attr {
+ unsigned int id;
+ enum fm10k_tlv_type type;
+ u16 len;
+};
+
+#define FM10K_TLV_ATTR_NULL_STRING(id, len) { id, FM10K_TLV_NULL_STRING, len }
+#define FM10K_TLV_ATTR_MAC_ADDR(id) { id, FM10K_TLV_MAC_ADDR, 6 }
+#define FM10K_TLV_ATTR_BOOL(id) { id, FM10K_TLV_BOOL, 0 }
+#define FM10K_TLV_ATTR_U8(id) { id, FM10K_TLV_UNSIGNED, 1 }
+#define FM10K_TLV_ATTR_U16(id) { id, FM10K_TLV_UNSIGNED, 2 }
+#define FM10K_TLV_ATTR_U32(id) { id, FM10K_TLV_UNSIGNED, 4 }
+#define FM10K_TLV_ATTR_U64(id) { id, FM10K_TLV_UNSIGNED, 8 }
+#define FM10K_TLV_ATTR_S8(id) { id, FM10K_TLV_SIGNED, 1 }
+#define FM10K_TLV_ATTR_S16(id) { id, FM10K_TLV_SIGNED, 2 }
+#define FM10K_TLV_ATTR_S32(id) { id, FM10K_TLV_SIGNED, 4 }
+#define FM10K_TLV_ATTR_S64(id) { id, FM10K_TLV_SIGNED, 8 }
+#define FM10K_TLV_ATTR_LE_STRUCT(id, len) { id, FM10K_TLV_LE_STRUCT, len }
+#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED }
+#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR }
+
+struct fm10k_msg_data {
+ unsigned int id;
+ const struct fm10k_tlv_attr *attr;
+ s32 (*func)(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+};
+
+#define FM10K_MSG_HANDLER(id, attr, func) { id, attr, func }
+
+s32 fm10k_tlv_msg_init(u32 *, u16);
+s32 fm10k_tlv_attr_put_null_string(u32 *, u16, const unsigned char *);
+s32 fm10k_tlv_attr_get_null_string(u32 *, unsigned char *);
+s32 fm10k_tlv_attr_put_mac_vlan(u32 *, u16, const u8 *, u16);
+s32 fm10k_tlv_attr_get_mac_vlan(u32 *, u8 *, u16 *);
+s32 fm10k_tlv_attr_put_bool(u32 *, u16);
+s32 fm10k_tlv_attr_put_value(u32 *, u16, s64, u32);
+#define fm10k_tlv_attr_put_u8(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 1)
+#define fm10k_tlv_attr_put_u16(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 2)
+#define fm10k_tlv_attr_put_u32(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 4)
+#define fm10k_tlv_attr_put_u64(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 8)
+#define fm10k_tlv_attr_put_s8(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 1)
+#define fm10k_tlv_attr_put_s16(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 2)
+#define fm10k_tlv_attr_put_s32(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 4)
+#define fm10k_tlv_attr_put_s64(msg, attr_id, val) \
+ fm10k_tlv_attr_put_value(msg, attr_id, val, 8)
+s32 fm10k_tlv_attr_get_value(u32 *, void *, u32);
+#define fm10k_tlv_attr_get_u8(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(u8))
+#define fm10k_tlv_attr_get_u16(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(u16))
+#define fm10k_tlv_attr_get_u32(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(u32))
+#define fm10k_tlv_attr_get_u64(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(u64))
+#define fm10k_tlv_attr_get_s8(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(s8))
+#define fm10k_tlv_attr_get_s16(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(s16))
+#define fm10k_tlv_attr_get_s32(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(s32))
+#define fm10k_tlv_attr_get_s64(attr, ptr) \
+ fm10k_tlv_attr_get_value(attr, ptr, sizeof(s64))
+s32 fm10k_tlv_attr_put_le_struct(u32 *, u16, const void *, u32);
+s32 fm10k_tlv_attr_get_le_struct(u32 *, void *, u32);
+u32 *fm10k_tlv_attr_nest_start(u32 *, u16);
+s32 fm10k_tlv_attr_nest_stop(u32 *);
+s32 fm10k_tlv_attr_parse(u32 *, u32 **, const struct fm10k_tlv_attr *);
+s32 fm10k_tlv_msg_parse(struct fm10k_hw *, u32 *, struct fm10k_mbx_info *,
+ const struct fm10k_msg_data *);
+s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *);
+
+#define FM10K_TLV_MSG_ID_TEST 0
+
+enum fm10k_tlv_test_attr_id {
+ FM10K_TEST_MSG_UNSET,
+ FM10K_TEST_MSG_STRING,
+ FM10K_TEST_MSG_MAC_ADDR,
+ FM10K_TEST_MSG_U8,
+ FM10K_TEST_MSG_U16,
+ FM10K_TEST_MSG_U32,
+ FM10K_TEST_MSG_U64,
+ FM10K_TEST_MSG_S8,
+ FM10K_TEST_MSG_S16,
+ FM10K_TEST_MSG_S32,
+ FM10K_TEST_MSG_S64,
+ FM10K_TEST_MSG_LE_STRUCT,
+ FM10K_TEST_MSG_NESTED,
+ FM10K_TEST_MSG_RESULT,
+ FM10K_TEST_MSG_MAX
+};
+
+extern const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[];
+void fm10k_tlv_msg_test_create(u32 *, u32);
+s32 fm10k_tlv_msg_test(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
+
+#define FM10K_TLV_MSG_TEST_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_TLV_MSG_ID_TEST, fm10k_tlv_msg_test_attr, func)
+#define FM10K_TLV_MSG_ERROR_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_TLV_ERROR, NULL, func)
+#endif /* _FM10K_TLV_H_ */
diff --git a/src/dpdk22/drivers/net/fm10k/base/fm10k_type.h b/src/dpdk22/drivers/net/fm10k/base/fm10k_type.h
new file mode 100644
index 00000000..df1d2761
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/base/fm10k_type.h
@@ -0,0 +1,964 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_TYPE_H_
+#define _FM10K_TYPE_H_
+
+/* forward declaration */
+struct fm10k_hw;
+
+#include "fm10k_osdep.h"
+#include "fm10k_mbx.h"
+
+#define FM10K_INTEL_VENDOR_ID 0x8086
+#define FM10K_DEV_ID_PF 0x15A4
+#define FM10K_DEV_ID_VF 0x15A5
+#ifdef BOULDER_RAPIDS_HW
+#define FM10K_DEV_ID_SDI_FM10420_QDA2 0x15D0
+#endif /* BOULDER_RAPIDS_HW */
+#ifdef ATWOOD_CHANNEL_HW
+#define FM10K_DEV_ID_SDI_FM10420_DA2 0x15D5
+#endif /* ATWOOD_CHANNEL_HW */
+
+#define FM10K_MAX_QUEUES 256
+#define FM10K_MAX_QUEUES_PF 128
+#define FM10K_MAX_QUEUES_POOL 16
+
+#define FM10K_48_BIT_MASK 0x0000FFFFFFFFFFFFull
+#define FM10K_STAT_VALID 0x80000000
+
+/* PCI Bus Info */
+#define FM10K_PCIE_LINK_CAP 0x7C
+#define FM10K_PCIE_LINK_STATUS 0x82
+#define FM10K_PCIE_LINK_WIDTH 0x3F0
+#define FM10K_PCIE_LINK_WIDTH_1 0x10
+#define FM10K_PCIE_LINK_WIDTH_2 0x20
+#define FM10K_PCIE_LINK_WIDTH_4 0x40
+#define FM10K_PCIE_LINK_WIDTH_8 0x80
+#define FM10K_PCIE_LINK_SPEED 0xF
+#define FM10K_PCIE_LINK_SPEED_2500 0x1
+#define FM10K_PCIE_LINK_SPEED_5000 0x2
+#define FM10K_PCIE_LINK_SPEED_8000 0x3
+
+/* PCIe payload size */
+#define FM10K_PCIE_DEV_CAP 0x74
+#define FM10K_PCIE_DEV_CAP_PAYLOAD 0x07
+#define FM10K_PCIE_DEV_CAP_PAYLOAD_128 0x00
+#define FM10K_PCIE_DEV_CAP_PAYLOAD_256 0x01
+#define FM10K_PCIE_DEV_CAP_PAYLOAD_512 0x02
+#define FM10K_PCIE_DEV_CTRL 0x78
+#define FM10K_PCIE_DEV_CTRL_PAYLOAD 0xE0
+#define FM10K_PCIE_DEV_CTRL_PAYLOAD_128 0x00
+#define FM10K_PCIE_DEV_CTRL_PAYLOAD_256 0x20
+#define FM10K_PCIE_DEV_CTRL_PAYLOAD_512 0x40
+
+/* PCIe MSI-X Capability info */
+#define FM10K_PCI_MSIX_MSG_CTRL 0xB2
+#define FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK 0x7FF
+#define FM10K_MAX_MSIX_VECTORS 256
+#define FM10K_MAX_VECTORS_PF 256
+#define FM10K_MAX_VECTORS_POOL 32
+
+/* PCIe SR-IOV Info */
+#define FM10K_PCIE_SRIOV_CTRL 0x190
+#define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10
+
+#define FM10K_SUCCESS 0
+#define FM10K_ERR_DEVICE_NOT_SUPPORTED -1
+#define FM10K_ERR_PARAM -2
+#define FM10K_ERR_NO_RESOURCES -3
+#define FM10K_ERR_REQUESTS_PENDING -4
+#define FM10K_ERR_RESET_REQUESTED -5
+#define FM10K_ERR_DMA_PENDING -6
+#define FM10K_ERR_RESET_FAILED -7
+#define FM10K_ERR_INVALID_MAC_ADDR -8
+#define FM10K_ERR_INVALID_VALUE -9
+#define FM10K_NOT_IMPLEMENTED 0x7FFFFFFF
+
+#define UNREFERENCED_XPARAMETER
+#define UNREFERENCED_1PARAMETER(_p) (_p)
+#define UNREFERENCED_2PARAMETER(_p, _q) do { (_p); (_q); } while (0)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { (_p); (_q); (_r); } while (0)
+
+/* Start of PF registers */
+#define FM10K_CTRL 0x0000
+#define FM10K_CTRL_BAR4_ALLOWED 0x00000004
+
+#define FM10K_CTRL_EXT 0x0001
+#define FM10K_CTRL_EXT_NS_DIS 0x00000001
+#define FM10K_CTRL_EXT_RO_DIS 0x00000002
+#define FM10K_CTRL_EXT_SWITCH_LOOPBACK 0x00000004
+#define FM10K_EXVET 0x0002
+#define FM10K_EXVET_ETHERTYPE_MASK 0x000000FF
+#define FM10K_EXVET_TAG_SIZE_SHIFT 16
+#define FM10K_EXVET_AFTER_VLAN 0x00040000
+#define FM10K_GCR 0x0003
+#define FM10K_FACTPS 0x0004
+#define FM10K_GCR_EXT 0x0005
+
+/* Interrupt control registers */
+#define FM10K_EICR 0x0006
+#define FM10K_EICR_PCA_FAULT 0x00000001
+#define FM10K_EICR_THI_FAULT 0x00000004
+#define FM10K_EICR_FUM_FAULT 0x00000020
+#define FM10K_EICR_FAULT_MASK 0x0000003F
+#define FM10K_EICR_MAILBOX 0x00000040
+#define FM10K_EICR_SWITCHREADY 0x00000080
+#define FM10K_EICR_SWITCHNOTREADY 0x00000100
+#define FM10K_EICR_SWITCHINTERRUPT 0x00000200
+#define FM10K_EICR_SRAMERROR 0x00000400
+#define FM10K_EICR_VFLR 0x00000800
+#define FM10K_EICR_MAXHOLDTIME 0x00001000
+#define FM10K_EIMR 0x0007
+#define FM10K_EIMR_PCA_FAULT 0x00000001
+#define FM10K_EIMR_THI_FAULT 0x00000010
+#define FM10K_EIMR_FUM_FAULT 0x00000400
+#define FM10K_EIMR_MAILBOX 0x00001000
+#define FM10K_EIMR_SWITCHREADY 0x00004000
+#define FM10K_EIMR_SWITCHNOTREADY 0x00010000
+#define FM10K_EIMR_SWITCHINTERRUPT 0x00040000
+#define FM10K_EIMR_SRAMERROR 0x00100000
+#define FM10K_EIMR_VFLR 0x00400000
+#define FM10K_EIMR_MAXHOLDTIME 0x01000000
+#define FM10K_EIMR_ALL 0x55555555
+#define FM10K_EIMR_DISABLE(NAME) ((FM10K_EIMR_ ## NAME) << 0)
+#define FM10K_EIMR_ENABLE(NAME) ((FM10K_EIMR_ ## NAME) << 1)
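+
+/* Per the macro names, each interrupt source owns a two-bit field in EIMR:
+ * the low bit requests a disable and the high bit an enable, so
+ * FM10K_EIMR_DISABLE(MAILBOX) is 0x00001000 and FM10K_EIMR_ENABLE(MAILBOX)
+ * is 0x00002000, while FM10K_EIMR_ALL (0x55555555) covers the low bit of
+ * every field at once.
+ */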
+#define FM10K_FAULT_ADDR_LO 0x0
+#define FM10K_FAULT_ADDR_HI 0x1
+#define FM10K_FAULT_SPECINFO 0x2
+#define FM10K_FAULT_FUNC 0x3
+#define FM10K_FAULT_SIZE 0x4
+#define FM10K_FAULT_FUNC_VALID 0x00008000
+#define FM10K_FAULT_FUNC_PF 0x00004000
+#define FM10K_FAULT_FUNC_VF_MASK 0x00003F00
+#define FM10K_FAULT_FUNC_VF_SHIFT 8
+#define FM10K_FAULT_FUNC_TYPE_MASK 0x000000FF
+
+#define FM10K_PCA_FAULT 0x0008
+#define FM10K_THI_FAULT 0x0010
+#define FM10K_FUM_FAULT 0x001C
+
+/* Rx queue timeout indicator */
+#define FM10K_MAXHOLDQ(_n) ((_n) + 0x0020)
+
+/* Switch Manager info */
+#define FM10K_SM_AREA(_n) ((_n) + 0x0028)
+
+/* GLORT mapping registers */
+#define FM10K_DGLORTMAP(_n) ((_n) + 0x0030)
+#define FM10K_DGLORT_COUNT 8
+#define FM10K_DGLORTMAP_MASK_SHIFT 16
+#define FM10K_DGLORTMAP_ANY 0x00000000
+#define FM10K_DGLORTMAP_NONE 0x0000FFFF
+#define FM10K_DGLORTMAP_ZERO 0xFFFF0000
+#define FM10K_DGLORTDEC(_n) ((_n) + 0x0038)
+#define FM10K_DGLORTDEC_VSILENGTH_SHIFT 4
+#define FM10K_DGLORTDEC_VSIBASE_SHIFT 7
+#define FM10K_DGLORTDEC_PCLENGTH_SHIFT 14
+#define FM10K_DGLORTDEC_QBASE_SHIFT 16
+#define FM10K_DGLORTDEC_RSSLENGTH_SHIFT 24
+#define FM10K_DGLORTDEC_INNERRSS_ENABLE 0x08000000
+#define FM10K_TUNNEL_CFG 0x0040
+#define FM10K_TUNNEL_CFG_NVGRE_SHIFT 16
+#define FM10K_TUNNEL_CFG_GENEVE 0x0041
+#define FM10K_SWPRI_MAP(_n) ((_n) + 0x0050)
+#define FM10K_SWPRI_MAX 16
+#define FM10K_RSSRK(_n, _m) (((_n) * 0x10) + (_m) + 0x0800)
+#define FM10K_RSSRK_SIZE 10
+#define FM10K_RSSRK_ENTRIES_PER_REG 4
+#define FM10K_RETA(_n, _m) (((_n) * 0x20) + (_m) + 0x1000)
+#define FM10K_RETA_SIZE 32
+#define FM10K_RETA_ENTRIES_PER_REG 4
+#define FM10K_MAX_RSS_INDICES 128
+
+/* Rate limiting registers */
+#define FM10K_TC_CREDIT(_n) ((_n) + 0x2000)
+#define FM10K_TC_CREDIT_CREDIT_MASK 0x001FFFFF
+#define FM10K_TC_MAXCREDIT(_n) ((_n) + 0x2040)
+#define FM10K_TC_MAXCREDIT_64K 0x00010000
+#define FM10K_TC_RATE(_n) ((_n) + 0x2080)
+#define FM10K_TC_RATE_QUANTA_MASK 0x0000FFFF
+#define FM10K_TC_RATE_INTERVAL_4US_GEN1 0x00020000
+#define FM10K_TC_RATE_INTERVAL_4US_GEN2 0x00040000
+#define FM10K_TC_RATE_INTERVAL_4US_GEN3 0x00080000
+#define FM10K_TC_RATE_STATUS 0x20C0
+#define FM10K_PAUSE 0x20C2
+
+/* DMA control registers */
+#define FM10K_DMA_CTRL 0x20C3
+#define FM10K_DMA_CTRL_TX_ENABLE 0x00000001
+#define FM10K_DMA_CTRL_TX_HOST_PENDING 0x00000002
+#define FM10K_DMA_CTRL_TX_DATA 0x00000004
+#define FM10K_DMA_CTRL_TX_ACTIVE 0x00000008
+#define FM10K_DMA_CTRL_RX_ENABLE 0x00000010
+#define FM10K_DMA_CTRL_RX_HOST_PENDING 0x00000020
+#define FM10K_DMA_CTRL_RX_DATA 0x00000040
+#define FM10K_DMA_CTRL_RX_ACTIVE 0x00000080
+#define FM10K_DMA_CTRL_RX_DESC_SIZE 0x00000100
+#define FM10K_DMA_CTRL_MINMSS_SHIFT 9
+#define FM10K_DMA_CTRL_MINMSS_64 0x00008000
+#define FM10K_DMA_CTRL_MAX_HOLD_TIME_SHIFT 23
+#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3 0x04800000
+#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2 0x04000000
+#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1 0x03800000
+#define FM10K_DMA_CTRL_DATAPATH_RESET 0x20000000
+#define FM10K_DMA_CTRL_MAXNUMOFQ_MASK 0xC0000000
+#define FM10K_DMA_CTRL_32_DESC 0x00000000
+#define FM10K_DMA_CTRL_64_DESC 0x40000000
+#define FM10K_DMA_CTRL_128_DESC 0x80000000
+
+#define FM10K_DMA_CTRL2 0x20C4
+#define FM10K_DMA_CTRL2_TX_FRAME_SPACING_SHIFT 5
+#define FM10K_DMA_CTRL2_SWITCH_READY 0x00002000
+#define FM10K_DMA_CTRL2_RX_DESC_READ_PRIO_SHIFT 14
+#define FM10K_DMA_CTRL2_TX_DESC_READ_PRIO_SHIFT 17
+#define FM10K_DMA_CTRL2_TX_DATA_READ_PRIO_SHIFT 20
+
+/* TSO flags configuration
+ * First packet contains all flags except for fin and psh
+ * Middle packet contains only urg and ack
+ * Last packet contains urg, ack, fin, and psh
+ */
+#define FM10K_TSO_FLAGS_LOW 0x00300FF6
+#define FM10K_TSO_FLAGS_HI 0x00000039
+#define FM10K_DTXTCPFLGL 0x20C5
+#define FM10K_DTXTCPFLGH 0x20C6
+
+#define FM10K_TPH_CTRL 0x20C7
+#define FM10K_TPH_CTRL_DISABLE_READ_HINT 0x00000080
+#define FM10K_MRQC(_n) ((_n) + 0x2100)
+#define FM10K_MRQC_TCP_IPV4 0x00000001
+#define FM10K_MRQC_IPV4 0x00000002
+#define FM10K_MRQC_IPV6 0x00000010
+#define FM10K_MRQC_TCP_IPV6 0x00000020
+#define FM10K_MRQC_UDP_IPV4 0x00000040
+#define FM10K_MRQC_UDP_IPV6 0x00000080
+
+#define FM10K_TQMAP(_n) ((_n) + 0x2800)
+#define FM10K_TQMAP_TABLE_SIZE 2048
+#define FM10K_RQMAP(_n) ((_n) + 0x3000)
+#define FM10K_RQMAP_TABLE_SIZE 2048
+
+/* Hardware Statistics */
+#define FM10K_STATS_TIMEOUT 0x3800
+#define FM10K_STATS_UR 0x3801
+#define FM10K_STATS_CA 0x3802
+#define FM10K_STATS_UM 0x3803
+#define FM10K_STATS_XEC 0x3804
+#define FM10K_STATS_VLAN_DROP 0x3805
+#define FM10K_STATS_LOOPBACK_DROP 0x3806
+#define FM10K_STATS_NODESC_DROP 0x3807
+
+/* Timesync registers */
+#define FM10K_RRTIME_CFG 0x3808
+#define FM10K_RRTIME_LIMIT(_n) ((_n) + 0x380C)
+#define FM10K_RRTIME_COUNT(_n) ((_n) + 0x3810)
+#define FM10K_SYSTIME 0x3814
+#define FM10K_SYSTIME0 0x3816
+#define FM10K_SYSTIME_CFG 0x3818
+#define FM10K_SYSTIME_CFG_STEP_MASK 0x0000000F
+
+/* PCIe state registers */
+#define FM10K_PFVFBME(_n) ((_n) + 0x381A)
+#define FM10K_PHYADDR 0x381C
+
+/* Rx ring registers */
+#define FM10K_RDBAL(_n) ((0x40 * (_n)) + 0x4000)
+#define FM10K_RDBAH(_n) ((0x40 * (_n)) + 0x4001)
+#define FM10K_RDLEN(_n) ((0x40 * (_n)) + 0x4002)
+#define FM10K_TPH_RXCTRL(_n) ((0x40 * (_n)) + 0x4003)
+#define FM10K_TPH_RXCTRL_DESC_TPHEN 0x00000020
+#define FM10K_TPH_RXCTRL_HDR_TPHEN 0x00000040
+#define FM10K_TPH_RXCTRL_DATA_TPHEN 0x00000080
+#define FM10K_TPH_RXCTRL_DESC_RROEN 0x00000200
+#define FM10K_TPH_RXCTRL_DATA_WROEN 0x00002000
+#define FM10K_TPH_RXCTRL_HDR_WROEN 0x00008000
+#define FM10K_RDH(_n) ((0x40 * (_n)) + 0x4004)
+#define FM10K_RDT(_n) ((0x40 * (_n)) + 0x4005)
+#define FM10K_RXQCTL(_n) ((0x40 * (_n)) + 0x4006)
+#define FM10K_RXQCTL_ENABLE 0x00000001
+#define FM10K_RXQCTL_PF 0x000000FC
+#define FM10K_RXQCTL_VF_SHIFT 2
+#define FM10K_RXQCTL_VF 0x00000100
+#define FM10K_RXQCTL_ID_MASK (FM10K_RXQCTL_PF | FM10K_RXQCTL_VF)
+#define FM10K_RXDCTL(_n) ((0x40 * (_n)) + 0x4007)
+#define FM10K_RXDCTL_WRITE_BACK_MIN_DELAY 0x00000001
+#define FM10K_RXDCTL_WRITE_BACK_IMM 0x00000100
+#define FM10K_RXDCTL_DROP_ON_EMPTY 0x00000200
+#define FM10K_RXINT(_n) ((0x40 * (_n)) + 0x4008)
+#define FM10K_RXINT_TIMER_SHIFT 8
+#define FM10K_SRRCTL(_n) ((0x40 * (_n)) + 0x4009)
+#define FM10K_SRRCTL_BSIZEPKT_SHIFT 8 /* shift _right_ */
+#define FM10K_SRRCTL_BSIZEHDR_SHIFT 2 /* shift _left_ */
+#define FM10K_SRRCTL_BSIZEHDR_MASK 0x00003F00
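+/* Illustrative, inferred from the shift above (not in the original source):
+ * the packet-buffer size is programmed in 256B units, so a 2048B buffer
+ * would be written as 2048 >> FM10K_SRRCTL_BSIZEPKT_SHIFT = 8.
+ */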
+#define FM10K_SRRCTL_DESCTYPE_HDR_SPLIT 0x00004000
+#define FM10K_SRRCTL_DESCTYPE_SIZE_SPLIT 0x00008000
+#define FM10K_SRRCTL_PSRTYPE_INNER_TCPHDR 0x00010000
+#define FM10K_SRRCTL_PSRTYPE_INNER_UDPHDR 0x00020000
+#define FM10K_SRRCTL_PSRTYPE_INNER_IPV4HDR 0x00040000
+#define FM10K_SRRCTL_PSRTYPE_INNER_IPV6HDR 0x00080000
+#define FM10K_SRRCTL_PSRTYPE_INNER_L2HDR 0x00100000
+#define FM10K_SRRCTL_PSRTYPE_ENCAPHDR 0x00200000
+#define FM10K_SRRCTL_PSRTYPE_TCPHDR 0x00400000
+#define FM10K_SRRCTL_PSRTYPE_UDPHDR 0x00800000
+#define FM10K_SRRCTL_PSRTYPE_IPV4HDR 0x01000000
+#define FM10K_SRRCTL_PSRTYPE_IPV6HDR 0x02000000
+#define FM10K_SRRCTL_PSRTYPE_L2HDR 0x04000000
+#define FM10K_SRRCTL_LOOPBACK_SUPPRESS 0x40000000
+#define FM10K_SRRCTL_BUFFER_CHAINING_EN 0x80000000
+
+/* Rx Statistics */
+#define FM10K_QPRC(_n) ((0x40 * (_n)) + 0x400A)
+#define FM10K_QPRDC(_n) ((0x40 * (_n)) + 0x400B)
+#define FM10K_QBRC_L(_n) ((0x40 * (_n)) + 0x400C)
+#define FM10K_QBRC_H(_n) ((0x40 * (_n)) + 0x400D)
+
+/* Rx GLORT register */
+#define FM10K_RX_SGLORT(_n) ((0x40 * (_n)) + 0x400E)
+
+/* Tx ring registers */
+#define FM10K_TDBAL(_n) ((0x40 * (_n)) + 0x8000)
+#define FM10K_TDBAH(_n) ((0x40 * (_n)) + 0x8001)
+#define FM10K_TDLEN(_n) ((0x40 * (_n)) + 0x8002)
+#define FM10K_TDLEN_ITR_SCALE_SHIFT 9
+#define FM10K_TDLEN_ITR_SCALE_MASK 0x00000E00
+#define FM10K_TDLEN_ITR_SCALE_GEN1 4
+#define FM10K_TDLEN_ITR_SCALE_GEN2 2
+#define FM10K_TDLEN_ITR_SCALE_GEN3 1
+#define FM10K_TPH_TXCTRL(_n) ((0x40 * (_n)) + 0x8003)
+#define FM10K_TPH_TXCTRL_DESC_TPHEN 0x00000020
+#define FM10K_TPH_TXCTRL_DESC_RROEN 0x00000200
+#define FM10K_TPH_TXCTRL_DESC_WROEN 0x00000800
+#define FM10K_TPH_TXCTRL_DATA_RROEN 0x00002000
+#define FM10K_TDH(_n) ((0x40 * (_n)) + 0x8004)
+#define FM10K_TDT(_n) ((0x40 * (_n)) + 0x8005)
+#define FM10K_TXDCTL(_n) ((0x40 * (_n)) + 0x8006)
+#define FM10K_TXDCTL_ENABLE 0x00004000
+#define FM10K_TXDCTL_MAX_TIME_SHIFT 16
+#define FM10K_TXDCTL_PUSH_DESC 0x10000000
+#define FM10K_TXQCTL(_n) ((0x40 * (_n)) + 0x8007)
+#define FM10K_TXQCTL_PF 0x0000003F
+#define FM10K_TXQCTL_VF 0x00000040
+#define FM10K_TXQCTL_ID_MASK (FM10K_TXQCTL_PF | FM10K_TXQCTL_VF)
+#define FM10K_TXQCTL_PC_SHIFT 7
+#define FM10K_TXQCTL_PC_MASK 0x00000380
+#define FM10K_TXQCTL_TC_SHIFT 10
+#define FM10K_TXQCTL_TC_MASK 0x0000FC00
+#define FM10K_TXQCTL_VID_SHIFT 16
+#define FM10K_TXQCTL_VID_MASK 0x0FFF0000
+#define FM10K_TXQCTL_UNLIMITED_BW 0x10000000
+#define FM10K_TXQCTL_PUSHMODEDIS 0x20000000
+#define FM10K_TXINT(_n) ((0x40 * (_n)) + 0x8008)
+#define FM10K_TXINT_TIMER_SHIFT 8
+
+/* Tx Statistics */
+#define FM10K_QPTC(_n) ((0x40 * (_n)) + 0x8009)
+#define FM10K_QBTC_L(_n) ((0x40 * (_n)) + 0x800A)
+#define FM10K_QBTC_H(_n) ((0x40 * (_n)) + 0x800B)
+
+/* Tx Push registers */
+#define FM10K_TQDLOC(_n) ((0x40 * (_n)) + 0x800C)
+#define FM10K_TQDLOC_BASE_32_DESC 0x08
+#define FM10K_TQDLOC_BASE_64_DESC 0x10
+#define FM10K_TQDLOC_BASE_128_DESC 0x20
+#define FM10K_TQDLOC_SIZE_32_DESC 0x00050000
+#define FM10K_TQDLOC_SIZE_64_DESC 0x00060000
+#define FM10K_TQDLOC_SIZE_128_DESC 0x00070000
+#define FM10K_TQDLOC_SIZE_SHIFT 16
+#define FM10K_TX_DCACHE(_n, _m) ((0x400 * (_n)) + (0x4 * (_m)) + 0x40000)
+
+/* Tx GLORT registers */
+#define FM10K_TX_SGLORT(_n) ((0x40 * (_n)) + 0x800D)
+#define FM10K_PFVTCTL(_n) ((0x40 * (_n)) + 0x800E)
+#define FM10K_PFVTCTL_FTAG_DESC_ENABLE 0x00000001
+
+/* Interrupt moderation and control registers */
+#define FM10K_PBACL(_n) ((_n) + 0x10000)
+#define FM10K_INT_MAP(_n) ((_n) + 0x10080)
+#define FM10K_INT_MAP_TIMER0 0x00000000
+#define FM10K_INT_MAP_TIMER1 0x00000100
+#define FM10K_INT_MAP_IMMEDIATE 0x00000200
+#define FM10K_INT_MAP_DISABLE 0x00000300
+#define FM10K_MSIX_VECTOR_ADDR_LO(_n) ((0x4 * (_n)) + 0x11000)
+#define FM10K_MSIX_VECTOR_ADDR_HI(_n) ((0x4 * (_n)) + 0x11001)
+#define FM10K_MSIX_VECTOR_DATA(_n) ((0x4 * (_n)) + 0x11002)
+#define FM10K_MSIX_VECTOR_MASK(_n) ((0x4 * (_n)) + 0x11003)
+#define FM10K_INT_CTRL 0x12000
+#define FM10K_INT_CTRL_ENABLEMODERATOR 0x00000400
+#define FM10K_ITR(_n) ((_n) + 0x12400)
+#define FM10K_ITR_INTERVAL1_SHIFT 12
+#define FM10K_ITR_TIMER0_EXPIRED 0x01000000
+#define FM10K_ITR_TIMER1_EXPIRED 0x02000000
+#define FM10K_ITR_PENDING0 0x04000000
+#define FM10K_ITR_PENDING1 0x08000000
+#define FM10K_ITR_PENDING2 0x10000000
+#define FM10K_ITR_AUTOMASK 0x20000000
+#define FM10K_ITR_MASK_SET 0x40000000
+#define FM10K_ITR_MASK_CLEAR 0x80000000
+#define FM10K_ITR2(_n) ((0x2 * (_n)) + 0x12800)
+#define FM10K_ITR2_LP(_n) ((0x2 * (_n)) + 0x12801)
+#define FM10K_ITR_REG_COUNT 768
+#define FM10K_ITR_REG_COUNT_PF 256
+
+/* Switch manager interrupt registers */
+#define FM10K_IP 0x13000
+#define FM10K_IP_HOT_RESET 0x00000001
+#define FM10K_IP_DEVICE_STATE_CHANGE 0x00000002
+#define FM10K_IP_MAILBOX 0x00000004
+#define FM10K_IP_VPD_REQUEST 0x00000008
+#define FM10K_IP_SRAMERROR 0x00000010
+#define FM10K_IP_PFLR 0x00000020
+#define FM10K_IP_DATAPATHRESET 0x00000040
+#define FM10K_IP_OUTOFRESET 0x00000080
+#define FM10K_IP_NOTINRESET 0x00000100
+#define FM10K_IP_TIMEOUT 0x00000200
+#define FM10K_IP_VFLR 0x00000400
+#define FM10K_IM 0x13001
+#define FM10K_IB 0x13002
+#define FM10K_SRAM_IP 0x13003
+#define FM10K_SRAM_IM 0x13004
+
+/* VLAN registers */
+#define FM10K_VLAN_TABLE(_n, _m) ((0x80 * (_n)) + (_m) + 0x14000)
+#define FM10K_VLAN_TABLE_SIZE 128
+
+/* VLAN specific message offsets */
+#define FM10K_VLAN_TABLE_VID_MAX 4096
+#define FM10K_VLAN_TABLE_VSI_MAX 64
+#define FM10K_VLAN_LENGTH_SHIFT 16
+#define FM10K_VLAN_CLEAR (1 << 15)
+#define FM10K_VLAN_ALL \
+ ((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT)
+
+/* VF FLR event notification registers */
+#define FM10K_PFVFLRE(_n) ((0x1 * (_n)) + 0x18844)
+#define FM10K_PFVFLREC(_n) ((0x1 * (_n)) + 0x18846)
+
+/* Defines for size of uncacheable and write-combining memories */
+#define FM10K_UC_ADDR_START 0x000000 /* start of standard regs */
+#define FM10K_WC_ADDR_START 0x100000 /* start of Tx Desc Cache */
+#define FM10K_DBI_ADDR_START 0x200000 /* start of debug registers */
+#define FM10K_UC_ADDR_SIZE (FM10K_WC_ADDR_START - FM10K_UC_ADDR_START)
+#define FM10K_WC_ADDR_SIZE (FM10K_DBI_ADDR_START - FM10K_WC_ADDR_START)
+
+/* Define timeouts for resets and disables */
+#define FM10K_QUEUE_DISABLE_TIMEOUT 100
+#define FM10K_RESET_TIMEOUT 150
+
+/* Maximum supported combined inner and outer header length for encapsulation */
+#define FM10K_TUNNEL_HEADER_LENGTH 184
+
+/* VF registers */
+#define FM10K_VFCTRL 0x00000
+#define FM10K_VFCTRL_RST 0x00000008
+#define FM10K_VFINT_MAP 0x00030
+#define FM10K_VFSYSTIME 0x00040
+#define FM10K_VFITR(_n) ((_n) + 0x00060)
+#define FM10K_VFPBACL(_n) ((_n) + 0x00008)
+
+/* Registers contained in BAR 4 for Switch management */
+#define FM10K_SW_SYSTIME_CFG 0x0224C
+#define FM10K_SW_SYSTIME_CFG_STEP_SHIFT 4
+#define FM10K_SW_SYSTIME_CFG_ADJUST_MASK 0xFF000000
+#define FM10K_SW_SYSTIME_ADJUST 0x0224D
+#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF
+#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE 0x80000000
+#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252)
+
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif /* ETH_ALEN */
+
+#ifndef FM10K_IS_ZERO_ETHER_ADDR
+/* make certain address is not 0 */
+#define FM10K_IS_ZERO_ETHER_ADDR(addr) \
+(!((addr)[0] | (addr)[1] | (addr)[2] | (addr)[3] | (addr)[4] | (addr)[5]))
+#endif
+
+#ifndef FM10K_IS_MULTICAST_ETHER_ADDR
+#define FM10K_IS_MULTICAST_ETHER_ADDR(addr) ((addr)[0] & 0x1)
+#endif
+
+#ifndef FM10K_IS_VALID_ETHER_ADDR
+/* make certain address is not multicast or 0 */
+#define FM10K_IS_VALID_ETHER_ADDR(addr) \
+(!FM10K_IS_MULTICAST_ETHER_ADDR(addr) && !FM10K_IS_ZERO_ETHER_ADDR(addr))
+#endif
+
+enum fm10k_int_source {
+ fm10k_int_Mailbox = 0,
+ fm10k_int_PCIeFault = 1,
+ fm10k_int_SwitchUpDown = 2,
+ fm10k_int_SwitchEvent = 3,
+ fm10k_int_SRAM = 4,
+ fm10k_int_VFLR = 5,
+ fm10k_int_MaxHoldTime = 6,
+ fm10k_int_sources_max_pf
+};
+
+/* PCIe bus speeds */
+enum fm10k_bus_speed {
+ fm10k_bus_speed_unknown = 0,
+ fm10k_bus_speed_2500 = 2500,
+ fm10k_bus_speed_5000 = 5000,
+ fm10k_bus_speed_8000 = 8000,
+ fm10k_bus_speed_reserved
+};
+
+/* PCIe bus widths */
+enum fm10k_bus_width {
+ fm10k_bus_width_unknown = 0,
+ fm10k_bus_width_pcie_x1 = 1,
+ fm10k_bus_width_pcie_x2 = 2,
+ fm10k_bus_width_pcie_x4 = 4,
+ fm10k_bus_width_pcie_x8 = 8,
+ fm10k_bus_width_reserved
+};
+
+/* PCIe payload sizes */
+enum fm10k_bus_payload {
+ fm10k_bus_payload_unknown = 0,
+ fm10k_bus_payload_128 = 1,
+ fm10k_bus_payload_256 = 2,
+ fm10k_bus_payload_512 = 3,
+ fm10k_bus_payload_reserved
+};
+
+/* Bus parameters */
+struct fm10k_bus_info {
+ enum fm10k_bus_speed speed;
+ enum fm10k_bus_width width;
+ enum fm10k_bus_payload payload;
+};
+
+/* Statistics related declarations */
+struct fm10k_hw_stat {
+ u64 count;
+ u32 base_l;
+ u32 base_h;
+};
+
+struct fm10k_hw_stats_q {
+ struct fm10k_hw_stat tx_bytes;
+ struct fm10k_hw_stat tx_packets;
+#define tx_stats_idx tx_packets.base_h
+ struct fm10k_hw_stat rx_bytes;
+ struct fm10k_hw_stat rx_packets;
+#define rx_stats_idx rx_packets.base_h
+ struct fm10k_hw_stat rx_drops;
+};
+
+struct fm10k_hw_stats {
+ struct fm10k_hw_stat timeout;
+#define stats_idx timeout.base_h
+ struct fm10k_hw_stat ur;
+ struct fm10k_hw_stat ca;
+ struct fm10k_hw_stat um;
+ struct fm10k_hw_stat xec;
+ struct fm10k_hw_stat vlan_drop;
+ struct fm10k_hw_stat loopback_drop;
+ struct fm10k_hw_stat nodesc_drop;
+ struct fm10k_hw_stats_q q[FM10K_MAX_QUEUES_PF];
+};
+
+/* Establish DGLORT feature priority */
+enum fm10k_dglortdec_idx {
+ fm10k_dglort_default = 0,
+ fm10k_dglort_vf_rsvd0 = 1,
+ fm10k_dglort_vf_rss = 2,
+ fm10k_dglort_pf_rsvd0 = 3,
+ fm10k_dglort_pf_queue = 4,
+ fm10k_dglort_pf_vsi = 5,
+ fm10k_dglort_pf_rsvd1 = 6,
+ fm10k_dglort_pf_rss = 7
+};
+
+struct fm10k_dglort_cfg {
+ u16 glort; /* GLORT base */
+ u16 queue_b; /* Base value for queue */
+ u8 vsi_b; /* Base value for VSI */
+ u8 idx; /* index of DGLORTDEC entry */
+ u8 rss_l; /* RSS indices */
+ u8 pc_l; /* Priority Class indices */
+ u8 vsi_l; /* Number of bits from GLORT used to determine VSI */
+ u8 queue_l; /* Number of bits from GLORT used to determine queue */
+ u8 shared_l; /* Ignored bits from GLORT resulting in shared VSI */
+ u8 inner_rss; /* Boolean value if inner header is used for RSS */
+};
+
+enum fm10k_pca_fault {
+ PCA_NO_FAULT,
+ PCA_UNMAPPED_ADDR,
+ PCA_BAD_QACCESS_PF,
+ PCA_BAD_QACCESS_VF,
+ PCA_MALICIOUS_REQ,
+ PCA_POISONED_TLP,
+ PCA_TLP_ABORT,
+ __PCA_MAX
+};
+
+enum fm10k_thi_fault {
+ THI_NO_FAULT,
+ THI_MAL_DIS_Q_FAULT,
+ __THI_MAX
+};
+
+enum fm10k_fum_fault {
+ FUM_NO_FAULT,
+ FUM_UNMAPPED_ADDR,
+ FUM_POISONED_TLP,
+ FUM_BAD_VF_QACCESS,
+ FUM_ADD_DECODE_ERR,
+ FUM_RO_ERROR,
+ FUM_QPRC_CRC_ERROR,
+ FUM_CSR_TIMEOUT,
+ FUM_INVALID_TYPE,
+ FUM_INVALID_LENGTH,
+ FUM_INVALID_BE,
+ FUM_INVALID_ALIGN,
+ __FUM_MAX
+};
+
+struct fm10k_fault {
+ u64 address; /* Address at the time fault was detected */
+ u32 specinfo; /* Extra info on this fault (fault dependent) */
+ u8 type; /* Fault value dependent on subunit */
+ u8 func; /* Function number of the fault */
+};
+
+struct fm10k_mac_ops {
+ /* basic bring-up and tear-down */
+ s32 (*reset_hw)(struct fm10k_hw *);
+ s32 (*init_hw)(struct fm10k_hw *);
+ s32 (*start_hw)(struct fm10k_hw *);
+ s32 (*stop_hw)(struct fm10k_hw *);
+ s32 (*get_bus_info)(struct fm10k_hw *);
+ s32 (*get_host_state)(struct fm10k_hw *, bool *);
+ bool (*is_slot_appropriate)(struct fm10k_hw *);
+ s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool);
+ s32 (*read_mac_addr)(struct fm10k_hw *);
+ s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *,
+ u16, bool, u8);
+ s32 (*update_mc_addr)(struct fm10k_hw *, u16, const u8 *, u16, bool);
+ s32 (*update_xcast_mode)(struct fm10k_hw *, u16, u8);
+ void (*update_int_moderator)(struct fm10k_hw *);
+ s32 (*update_lport_state)(struct fm10k_hw *, u16, u16, bool);
+ void (*update_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *);
+ void (*rebind_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *);
+ s32 (*configure_dglort_map)(struct fm10k_hw *,
+ struct fm10k_dglort_cfg *);
+ void (*set_dma_mask)(struct fm10k_hw *, u64);
+ s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *);
+ void (*request_lport_map)(struct fm10k_hw *);
+ s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb);
+ s32 (*notify_offset)(struct fm10k_hw *, u64 offset);
+ u64 (*read_systime)(struct fm10k_hw *);
+};
+
+enum fm10k_mac_type {
+ fm10k_mac_unknown = 0,
+ fm10k_mac_pf,
+ fm10k_mac_vf,
+ fm10k_num_macs
+};
+
+struct fm10k_mac_info {
+ struct fm10k_mac_ops ops;
+ enum fm10k_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u16 default_vid;
+ u16 max_msix_vectors;
+ u16 max_queues;
+ bool vlan_override;
+ bool get_host_state;
+ bool tx_ready;
+ u32 dglort_map;
+ u8 itr_scale;
+};
+
+struct fm10k_swapi_table_info {
+ u32 used;
+ u32 avail;
+};
+
+struct fm10k_swapi_info {
+ u32 status;
+ struct fm10k_swapi_table_info mac;
+ struct fm10k_swapi_table_info nexthop;
+ struct fm10k_swapi_table_info ffu;
+};
+
+enum fm10k_xcast_modes {
+ FM10K_XCAST_MODE_ALLMULTI = 0,
+ FM10K_XCAST_MODE_MULTI = 1,
+ FM10K_XCAST_MODE_PROMISC = 2,
+ FM10K_XCAST_MODE_NONE = 3,
+ FM10K_XCAST_MODE_DISABLE = 4
+};
+
+enum fm10k_timestamp_modes {
+ FM10K_TIMESTAMP_MODE_NONE = 0,
+ FM10K_TIMESTAMP_MODE_PEP_TO_PEP = 1,
+ FM10K_TIMESTAMP_MODE_PEP_TO_ANY = 2,
+};
+
+#define FM10K_VF_TC_MAX 100000 /* 100,000 Mb/s aka 100Gb/s */
+#define FM10K_VF_TC_MIN 1 /* 1 Mb/s is the slowest rate */
+
+struct fm10k_vf_info {
+	/* mbx must be the first field in the struct unless all default IOV
+	 * message handlers are redone, since they assume vf_info starts at
+	 * the same offset as the mailbox
+	 */
+ struct fm10k_mbx_info mbx; /* PF side of VF mailbox */
+ int rate; /* Tx BW cap as defined by OS */
+ u16 glort; /* resource tag for this VF */
+ u16 sw_vid; /* Switch API assigned VLAN */
+ u16 pf_vid; /* PF assigned Default VLAN */
+ u8 mac[ETH_ALEN]; /* PF Default MAC address */
+ u8 vsi; /* VSI identifier */
+ u8 vf_idx; /* which VF this is */
+ u8 vf_flags; /* flags indicating what modes
+ * are supported for the port
+ */
+};
+
+#define FM10K_VF_FLAG_ALLMULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_ALLMULTI)
+#define FM10K_VF_FLAG_MULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_MULTI)
+#define FM10K_VF_FLAG_PROMISC_CAPABLE ((u8)1 << FM10K_XCAST_MODE_PROMISC)
+#define FM10K_VF_FLAG_NONE_CAPABLE ((u8)1 << FM10K_XCAST_MODE_NONE)
+#define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF)
+#define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4)
+#define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode))
+#define FM10K_VF_FLAG_ENABLED_MODE_SHIFT 4
+#define FM10K_VF_FLAG_SET_MODE_MASK ((u8)0xF0)
+#define FM10K_VF_FLAG_SET_MODE_NONE \
+ FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_NONE)
+#define FM10K_VF_FLAG_MULTI_ENABLED \
+ (FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_ALLMULTI) | \
+ FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_MULTI) | \
+ FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_PROMISC))
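+/* Illustrative example (not in the original source): the lower nibble of
+ * vf_flags records which xcast modes the port is capable of and the upper
+ * nibble holds a one-hot encoding of the mode currently enabled, e.g.
+ * FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_MULTI) = 0x10 << 1 = 0x20.
+ */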
+
+struct fm10k_iov_ops {
+ /* IOV related bring-up and tear-down */
+ s32 (*assign_resources)(struct fm10k_hw *, u16, u16);
+ s32 (*configure_tc)(struct fm10k_hw *, u16, int);
+ s32 (*assign_int_moderator)(struct fm10k_hw *, u16);
+ s32 (*assign_default_mac_vlan)(struct fm10k_hw *,
+ struct fm10k_vf_info *);
+ s32 (*reset_resources)(struct fm10k_hw *,
+ struct fm10k_vf_info *);
+ s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8);
+ void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *);
+ void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, u16);
+ void (*notify_offset)(struct fm10k_hw *, struct fm10k_vf_info*, u64);
+};
+
+struct fm10k_iov_info {
+ struct fm10k_iov_ops ops;
+ u16 total_vfs;
+ u16 num_vfs;
+ u16 num_pools;
+};
+
+struct fm10k_hw {
+ u32 *hw_addr;
+ u32 *sw_addr;
+ void *back;
+ struct fm10k_mac_info mac;
+ struct fm10k_bus_info bus;
+ struct fm10k_bus_info bus_caps;
+ struct fm10k_iov_info iov;
+ struct fm10k_mbx_info mbx;
+ struct fm10k_swapi_info swapi;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u32 flags;
+#define FM10K_HW_FLAG_CLOCK_OWNER (u32)(1 << 0)
+};
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define FM10K_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define FM10K_REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Transmit Descriptor */
+struct fm10k_tx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 buflen; /* Length of data to be DMAed */
+ __le16 vlan; /* VLAN_ID and VPRI to be inserted in FTAG */
+ __le16 mss; /* MSS for segmentation offload */
+ u8 hdrlen; /* Header size for segmentation offload */
+ u8 flags; /* Status and offload request flags */
+};
+
+/* Transmit Descriptor Cache Structure */
+struct fm10k_tx_desc_cache {
+ struct fm10k_tx_desc tx_desc[256];
+};
+
+#define FM10K_TXD_FLAG_INT 0x01
+#define FM10K_TXD_FLAG_TIME 0x02
+#define FM10K_TXD_FLAG_CSUM 0x04
+#define FM10K_TXD_FLAG_CSUM2 0x08
+#define FM10K_TXD_FLAG_FTAG 0x10
+#define FM10K_TXD_FLAG_RS 0x20
+#define FM10K_TXD_FLAG_LAST 0x40
+#define FM10K_TXD_FLAG_DONE 0x80
+
+#define FM10K_TXD_VLAN_PRI_SHIFT 12
+
+/* These macros enable optimal placement of the RS and INT bits:
+ * FM10K_TXD_WB_IDX points to the last descriptor in the cache for either
+ * the start of the packet or the end of the packet. If the index is
+ * actually at the start of the FIFO, it points to the offset of the last
+ * index in the FIFO to prevent an unnecessary write.
+ */
+#define FM10K_TXD_WB_FIFO_SIZE 4
+#define FM10K_TXD_WB_IDX(idx) \
+ (((idx) - 1) | (FM10K_TXD_WB_FIFO_SIZE - 1))
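+/* Illustrative example (not in the original source): with a write-back FIFO
+ * of 4 descriptors, FM10K_TXD_WB_IDX(5) = ((5 - 1) | 3) = 7 rounds the index
+ * up to the last slot of its group, while FM10K_TXD_WB_IDX(4) =
+ * ((4 - 1) | 3) = 3 points back to the previous group's last slot, avoiding
+ * an unnecessary write.
+ */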
+
+/* Receive Descriptor - 32B */
+union fm10k_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ __le64 reserved; /* Empty space, RSS hash */
+ __le64 timestamp;
+ } q; /* Read, Writeback, 64b quad-words */
+ struct {
+ __le32 data; /* RSS and header data */
+ __le32 rss; /* RSS Hash */
+ __le32 staterr;
+ __le32 vlan_len;
+ __le32 glort; /* sglort/dglort */
+ } d; /* Writeback, 32b double-words */
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen, xC */
+ __le16 rss_lower;
+ __le16 rss_upper;
+ __le16 status; /* status/error */
+ __le16 csum_err; /* checksum or extended error value */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ __le16 dglort;
+ __le16 sglort;
+ } w; /* Writeback, 16b words */
+};
+
+#define FM10K_RXD_RSSTYPE_MASK 0x000F
+enum fm10k_rdesc_rss_type {
+ FM10K_RSSTYPE_NONE = 0x0,
+ FM10K_RSSTYPE_IPV4_TCP = 0x1,
+ FM10K_RSSTYPE_IPV4 = 0x2,
+ FM10K_RSSTYPE_IPV6_TCP = 0x3,
+ /* Reserved 0x4 */
+ FM10K_RSSTYPE_IPV6 = 0x5,
+ /* Reserved 0x6 */
+ FM10K_RSSTYPE_IPV4_UDP = 0x7,
+ FM10K_RSSTYPE_IPV6_UDP = 0x8
+ /* Reserved 0x9 - 0xF */
+};
+
+#define FM10K_RXD_PKTTYPE_MASK 0x03F0
+#define FM10K_RXD_PKTTYPE_MASK_L3 0x0070
+#define FM10K_RXD_PKTTYPE_MASK_L4 0x0380
+#define FM10K_RXD_PKTTYPE_SHIFT 4
+#define FM10K_RXD_PKTTYPE_INNER_MASK_L3 0x1C00
+#define FM10K_RXD_PKTTYPE_INNER_MASK_L4 0xE000
+#define FM10K_RXD_PKTTYPE_INNER_SHIFT 10
+enum fm10k_rdesc_pkt_type {
+ /* L3 type */
+ FM10K_PKTTYPE_OTHER = 0x00,
+ FM10K_PKTTYPE_IPV4 = 0x01,
+ FM10K_PKTTYPE_IPV4_EX = 0x02,
+ FM10K_PKTTYPE_IPV6 = 0x03,
+ FM10K_PKTTYPE_IPV6_EX = 0x04,
+
+ /* L4 type */
+ FM10K_PKTTYPE_TCP = 0x08,
+ FM10K_PKTTYPE_UDP = 0x10,
+ FM10K_PKTTYPE_GRE = 0x18,
+ FM10K_PKTTYPE_VXLAN = 0x20,
+ FM10K_PKTTYPE_NVGRE = 0x28,
+ FM10K_PKTTYPE_GENEVE = 0x30
+};
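+/* Illustrative example (not in the original source): an IPv4 TCP packet
+ * would carry (FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_TCP) <<
+ * FM10K_RXD_PKTTYPE_SHIFT = 0x09 << 4 = 0x090 in the descriptor's pkt_info
+ * field, with the L3 part falling under FM10K_RXD_PKTTYPE_MASK_L3 and the
+ * L4 part under FM10K_RXD_PKTTYPE_MASK_L4.
+ */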
+
+#define FM10K_RXD_HDR_INFO_XC_MASK 0x0006
+enum fm10k_rxdesc_xc {
+ FM10K_XC_UNICAST = 0x0,
+ FM10K_XC_MULTICAST = 0x4,
+ FM10K_XC_BROADCAST = 0x6
+};
+
+#define FM10K_RXD_HDR_INFO_LEN_SHIFT 5
+#define FM10K_RXD_HDR_INFO_SPH 0x8000
+
+#define FM10K_RXD_STATUS_DD 0x0001 /* Descriptor done */
+#define FM10K_RXD_STATUS_EOP 0x0002 /* End of packet */
+#define FM10K_RXD_STATUS_VEXT 0x0004 /* A VLAN tag is present */
+#define FM10K_RXD_STATUS_IPCS 0x0008 /* Indicates IPv4 csum */
+#define FM10K_RXD_STATUS_L4CS 0x0010 /* Indicates an L4 csum */
+#define FM10K_RXD_STATUS_IPCS2 0x0020 /* Inner header IPv4 csum */
+#define FM10K_RXD_STATUS_L4CS2 0x0040 /* Inner header L4 csum */
+#define FM10K_RXD_STATUS_IPFRAG_MASK 0x0180 /* Fragment mask */
+#define FM10K_RXD_STATUS_IPFRAG_CSUM 0x0100 /* Fragment w/ CSUM field */
+#define FM10K_RXD_STATUS_VEXT2 0x0200 /* A custom tag is present */
+#define FM10K_RXD_STATUS_HBO 0x0400 /* header buffer overrun */
+#define FM10K_RXD_STATUS_L4E2 0x0800 /* Inner header L4 csum err */
+#define FM10K_RXD_STATUS_IPE2 0x1000 /* Inner header IPv4 csum err */
+#define FM10K_RXD_STATUS_RXE 0x2000 /* Generic Rx error */
+#define FM10K_RXD_STATUS_L4E 0x4000 /* L4 csum error */
+#define FM10K_RXD_STATUS_IPE 0x8000 /* IPv4 csum error */
+
+#define FM10K_RXD_ERR_SWITCH_ERROR 0x0001 /* Switch found bad packet */
+#define FM10K_RXD_ERR_NO_DESCRIPTOR 0x0002 /* No descriptor available */
+#define FM10K_RXD_ERR_PP_ERROR 0x0004 /* RAM error during processing */
+#define FM10K_RXD_ERR_SWITCH_READY 0x0008 /* Link transition mid-packet */
+#define FM10K_RXD_ERR_TOO_BIG 0x0010 /* Pkt too big for single buf */
+
+#define FM10K_RXD_VLAN_ID_MASK 0x0FFF
+#define FM10K_RXD_VLAN_PRI_SHIFT FM10K_TXD_VLAN_PRI_SHIFT
+
+struct fm10k_ftag {
+ __be16 swpri_type_user;
+ __be16 vlan;
+ __be16 sglort;
+ __be16 dglort;
+};
+
+#endif /* _FM10K_TYPE_H */
diff --git a/src/dpdk22/drivers/net/fm10k/base/fm10k_vf.c b/src/dpdk22/drivers/net/fm10k/base/fm10k_vf.c
new file mode 100644
index 00000000..a46b4886
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/base/fm10k_vf.c
@@ -0,0 +1,659 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "fm10k_vf.h"
+
+/**
+ * fm10k_stop_hw_vf - Stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ **/
+STATIC s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
+{
+ u8 *perm_addr = hw->mac.perm_addr;
+ u32 bal = 0, bah = 0, tdlen;
+ s32 err;
+ u16 i;
+
+ DEBUGFUNC("fm10k_stop_hw_vf");
+
+ /* we need to disable the queues before taking further steps */
+ err = fm10k_stop_hw_generic(hw);
+ if (err)
+ return err;
+
+ /* If permanent address is set then we need to restore it */
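+	/* The address is parked in the queue base address registers while the
+	 * queues are down: MAC bytes 3-5 occupy bits 31:8 of TDBAL/RDBAL and
+	 * bytes 0-2 occupy bits 23:0 of TDBAH/RDBAH beneath a 0xFF marker
+	 * byte, from which fm10k_read_mac_addr_vf() later recovers them.
+	 */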
+ if (FM10K_IS_VALID_ETHER_ADDR(perm_addr)) {
+ bal = (((u32)perm_addr[3]) << 24) |
+ (((u32)perm_addr[4]) << 16) |
+ (((u32)perm_addr[5]) << 8);
+ bah = (((u32)0xFF) << 24) |
+ (((u32)perm_addr[0]) << 16) |
+ (((u32)perm_addr[1]) << 8) |
+ ((u32)perm_addr[2]);
+ }
+
+ /* restore default itr_scale for next VF initialization */
+ tdlen = hw->mac.itr_scale << FM10K_TDLEN_ITR_SCALE_SHIFT;
+
+ /* The queues have already been disabled so we just need to
+ * update their base address registers
+ */
+ for (i = 0; i < hw->mac.max_queues; i++) {
+ FM10K_WRITE_REG(hw, FM10K_TDBAL(i), bal);
+ FM10K_WRITE_REG(hw, FM10K_TDBAH(i), bah);
+ FM10K_WRITE_REG(hw, FM10K_RDBAL(i), bal);
+ FM10K_WRITE_REG(hw, FM10K_RDBAH(i), bah);
+ FM10K_WRITE_REG(hw, FM10K_TDLEN(i), tdlen);
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_reset_hw_vf - VF hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * This function should return the hardware to a state similar to the
+ * one it is in after just being initialized.
+ **/
+STATIC s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
+{
+ s32 err;
+
+ DEBUGFUNC("fm10k_reset_hw_vf");
+
+ /* shut down queues we own and reset DMA configuration */
+ err = fm10k_stop_hw_vf(hw);
+ if (err)
+ return err;
+
+	/* Initiate VF reset */
+ FM10K_WRITE_REG(hw, FM10K_VFCTRL, FM10K_VFCTRL_RST);
+
+	/* Flush write and allow FM10K_RESET_TIMEOUT us for reset to complete */
+ FM10K_WRITE_FLUSH(hw);
+ usec_delay(FM10K_RESET_TIMEOUT);
+
+ /* Clear reset bit and verify it was cleared */
+ FM10K_WRITE_REG(hw, FM10K_VFCTRL, 0);
+ if (FM10K_READ_REG(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST)
+ err = FM10K_ERR_RESET_FAILED;
+
+ return err;
+}
+
+/**
+ * fm10k_init_hw_vf - VF hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ **/
+STATIC s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
+{
+ u32 tqdloc, tqdloc0 = ~FM10K_READ_REG(hw, FM10K_TQDLOC(0));
+ s32 err;
+ u16 i;
+
+ DEBUGFUNC("fm10k_init_hw_vf");
+
+ /* verify we have at least 1 queue */
+ if (!~FM10K_READ_REG(hw, FM10K_TXQCTL(0)) ||
+ !~FM10K_READ_REG(hw, FM10K_RXQCTL(0)))
+ return FM10K_ERR_NO_RESOURCES;
+
+ /* determine how many queues we have */
+ for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) {
+ /* verify the Descriptor cache offsets are increasing */
+ tqdloc = ~FM10K_READ_REG(hw, FM10K_TQDLOC(i));
+ if (!tqdloc || (tqdloc == tqdloc0))
+ break;
+
+		/* verify the PF doesn't own any of our queues */
+ if (!~FM10K_READ_REG(hw, FM10K_TXQCTL(i)) ||
+ !~FM10K_READ_REG(hw, FM10K_RXQCTL(i)))
+ break;
+ }
+
+ /* shut down queues we own and reset DMA configuration */
+ err = fm10k_disable_queues_generic(hw, i);
+ if (err)
+ return err;
+
+ /* record maximum queue count */
+ hw->mac.max_queues = i;
+
+ /* fetch default VLAN and ITR scale */
+ hw->mac.default_vid = (FM10K_READ_REG(hw, FM10K_TXQCTL(0)) &
+ FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT;
+ hw->mac.itr_scale = (FM10K_READ_REG(hw, FM10K_TDLEN(0)) &
+ FM10K_TDLEN_ITR_SCALE_MASK) >>
+ FM10K_TDLEN_ITR_SCALE_SHIFT;
+
+ /* ensure a non-zero itr scale */
+ if (!hw->mac.itr_scale)
+ hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_is_slot_appropriate_vf - Indicate appropriate slot for this SKU
+ * @hw: pointer to hardware structure
+ *
+ * Looks at the PCIe bus info to confirm whether or not this slot can support
+ * the necessary bandwidth for this device. Since the VF has no control over
+ * the "slot" it is in, always indicate that the slot is appropriate.
+ **/
+STATIC bool fm10k_is_slot_appropriate_vf(struct fm10k_hw *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ DEBUGFUNC("fm10k_is_slot_appropriate_vf");
+
+ return TRUE;
+}
+
+/* This structure defines the attributes to be parsed below */
+const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = {
+ FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN),
+ FM10K_TLV_ATTR_BOOL(FM10K_MAC_VLAN_MSG_SET),
+ FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MAC),
+ FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_DEFAULT_MAC),
+ FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MULTICAST),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_update_vlan_vf - Update status of VLAN ID in VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vid: VLAN ID to add to table
+ * @vsi: Reserved, should always be 0
+ * @set: Indicates if this is a set or clear operation
+ *
+ * This function adds or removes the corresponding VLAN ID from the VLAN
+ * filter table for this VF.
+ **/
+STATIC s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[4];
+
+ /* verify the index is not set */
+ if (vsi)
+ return FM10K_ERR_PARAM;
+
+	/* verify the upper 4 bits of both the VLAN ID (bits 12-15) and the
+	 * length field (bits 28-31) are 0; OR-ing vid into both 16-bit halves
+	 * lets a single shift test both fields at once
+	 */
+ if ((vid << 16 | vid) >> 28)
+ return FM10K_ERR_PARAM;
+
+ /* encode set bit into the VLAN ID */
+ if (!set)
+ vid |= FM10K_VLAN_CLEAR;
+
+ /* generate VLAN request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
+ fm10k_tlv_attr_put_u32(msg, FM10K_MAC_VLAN_MSG_VLAN, vid);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_msg_mac_vlan_vf - Read device MAC address from mailbox message
+ * @hw: pointer to the HW structure
+ * @results: Attributes for message
+ * @mbx: unused mailbox data
+ *
+ * This function should determine the MAC address for the VF
+ **/
+s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ u8 perm_addr[ETH_ALEN];
+ u16 vid;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_mac_vlan_vf");
+
+ /* record MAC address requested */
+ err = fm10k_tlv_attr_get_mac_vlan(
+ results[FM10K_MAC_VLAN_MSG_DEFAULT_MAC],
+ perm_addr, &vid);
+ if (err)
+ return err;
+
+ memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN);
+ hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1);
+ hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_read_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * This function should determine the MAC address for the VF
+ **/
+STATIC s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw)
+{
+ u8 perm_addr[ETH_ALEN];
+ u32 base_addr;
+
+ DEBUGFUNC("fm10k_read_mac_addr_vf");
+
+ base_addr = FM10K_READ_REG(hw, FM10K_TDBAL(0));
+
+ /* last byte should be 0 */
+ if (base_addr << 24)
+ return FM10K_ERR_INVALID_MAC_ADDR;
+
+ perm_addr[3] = (u8)(base_addr >> 24);
+ perm_addr[4] = (u8)(base_addr >> 16);
+ perm_addr[5] = (u8)(base_addr >> 8);
+
+ base_addr = FM10K_READ_REG(hw, FM10K_TDBAH(0));
+
+ /* first byte should be all 1's */
+ if ((~base_addr) >> 24)
+ return FM10K_ERR_INVALID_MAC_ADDR;
+
+ perm_addr[0] = (u8)(base_addr >> 16);
+ perm_addr[1] = (u8)(base_addr >> 8);
+ perm_addr[2] = (u8)(base_addr);
+
+ memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN);
+ memcpy(hw->mac.addr, perm_addr, ETH_ALEN);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_update_uc_addr_vf - Update device unicast addresses
+ * @hw: pointer to the HW structure
+ * @glort: unused
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ * @flags: flags field to indicate add and secure - unused
+ *
+ * This function is used to add or remove unicast MAC addresses for
+ * the VF.
+ **/
+STATIC s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
+ const u8 *mac, u16 vid, bool add, u8 flags)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[7];
+
+ DEBUGFUNC("fm10k_update_uc_addr_vf");
+
+ UNREFERENCED_2PARAMETER(glort, flags);
+
+ /* verify VLAN ID is valid */
+ if (vid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* verify MAC address is valid */
+ if (!FM10K_IS_VALID_ETHER_ADDR(mac))
+ return FM10K_ERR_PARAM;
+
+ /* verify we are not locked down on the MAC address */
+ if (FM10K_IS_VALID_ETHER_ADDR(hw->mac.perm_addr) &&
+ memcmp(hw->mac.perm_addr, mac, ETH_ALEN))
+ return FM10K_ERR_PARAM;
+
+ /* add bit to notify us if this is a set or clear operation */
+ if (!add)
+ vid |= FM10K_VLAN_CLEAR;
+
+ /* generate VLAN request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
+ fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MAC, mac, vid);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_update_mc_addr_vf - Update device multicast addresses
+ * @hw: pointer to the HW structure
+ * @glort: unused
+ * @mac: MAC address to add/remove from table
+ * @vid: VLAN ID to add/remove from table
+ * @add: Indicates if this is an add or remove operation
+ *
+ * This function is used to add or remove multicast MAC addresses for
+ * the VF.
+ **/
+STATIC s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
+ const u8 *mac, u16 vid, bool add)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[7];
+
+ DEBUGFUNC("fm10k_update_uc_addr_vf");
+
+ UNREFERENCED_1PARAMETER(glort);
+
+ /* verify VLAN ID is valid */
+ if (vid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* verify multicast address is valid */
+ if (!FM10K_IS_MULTICAST_ETHER_ADDR(mac))
+ return FM10K_ERR_PARAM;
+
+ /* add bit to notify us if this is a set or clear operation */
+ if (!add)
+ vid |= FM10K_VLAN_CLEAR;
+
+ /* generate VLAN request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
+ fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MULTICAST,
+ mac, vid);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_update_int_moderator_vf - Request update of interrupt moderator list
+ * @hw: pointer to hardware structure
+ *
+ * This function will issue a request to the PF to rescan our MSI-X table
+ * and to update the interrupt moderator linked list.
+ **/
+STATIC void fm10k_update_int_moderator_vf(struct fm10k_hw *hw)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[1];
+
+ /* generate MSI-X request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MSIX);
+
+ /* load onto outgoing mailbox */
+ mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/* This structure defines the attributes to be parsed below */
+const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = {
+ FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_DISABLE),
+ FM10K_TLV_ATTR_U8(FM10K_LPORT_STATE_MSG_XCAST_MODE),
+ FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_READY),
+ FM10K_TLV_ATTR_LAST
+};
+
+extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[];
+
+/**
+ * fm10k_msg_lport_state_vf - Message handler for lport_state message from PF
+ * @hw: Pointer to hardware structure
+ * @results: pointer array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler is meant to capture the indication from the PF that we
+ * are ready to bring up the interface.
+ **/
+s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_lport_state_vf");
+
+ hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ?
+ FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_update_lport_state_vf - Update device state in lower device
+ * @hw: pointer to the HW structure
+ * @glort: unused
+ * @count: number of logical ports to enable - unused (always 1)
+ * @enable: boolean value indicating if this is an enable or disable request
+ *
+ * Notify the lower device of a state change. If the lower device is
+ * enabled, we can add filters; if it is disabled, all filters for this
+ * logical port are flushed.
+ **/
+STATIC s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort,
+ u16 count, bool enable)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[2];
+
+ UNREFERENCED_2PARAMETER(glort, count);
+ DEBUGFUNC("fm10k_update_lport_state_vf");
+
+ /* reset glort mask 0 as we have to wait to be enabled */
+ hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
+
+ /* generate port state request */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
+ if (!enable)
+ fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_DISABLE);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_update_xcast_mode_vf - Request update of multicast mode
+ * @hw: pointer to hardware structure
+ * @glort: unused
+ * @mode: integer value indicating mode being requested
+ *
+ * This function will attempt to request a higher mode for the port
+ * so that it can enable either multicast, multicast promiscuous, or
+ * promiscuous mode of operation.
+ **/
+STATIC s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
+{
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ u32 msg[3];
+
+ UNREFERENCED_1PARAMETER(glort);
+ DEBUGFUNC("fm10k_update_xcast_mode_vf");
+
+ if (mode > FM10K_XCAST_MODE_NONE)
+ return FM10K_ERR_PARAM;
+
+ /* generate message requesting to change xcast mode */
+ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
+ fm10k_tlv_attr_put_u8(msg, FM10K_LPORT_STATE_MSG_XCAST_MODE, mode);
+
+ /* load onto outgoing mailbox */
+ return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = {
+ FM10K_TLV_ATTR_U64(FM10K_1588_MSG_CLK_OFFSET),
+ FM10K_TLV_ATTR_LAST
+};
+
+/* currently there is no shared 1588 message handler */
+
+/**
+ * fm10k_update_hw_stats_vf - Updates hardware related statistics of VF
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ *
+ * This function collects and aggregates per queue hardware statistics.
+ **/
+STATIC void fm10k_update_hw_stats_vf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats)
+{
+ DEBUGFUNC("fm10k_update_hw_stats_vf");
+
+ fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
+}
+
+/**
+ * fm10k_rebind_hw_stats_vf - Resets base for hardware statistics of VF
+ * @hw: pointer to hardware structure
+ * @stats: pointer to the stats structure to update
+ *
+ * This function resets the base for queue hardware statistics.
+ **/
+STATIC void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
+ struct fm10k_hw_stats *stats)
+{
+ DEBUGFUNC("fm10k_rebind_hw_stats_vf");
+
+ /* Unbind Queue Statistics */
+ fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
+
+ /* Reinitialize bases for all stats */
+ fm10k_update_hw_stats_vf(hw, stats);
+}
+
+/**
+ * fm10k_configure_dglort_map_vf - Configures GLORT entry and queues
+ * @hw: pointer to hardware structure
+ * @dglort: pointer to dglort configuration structure
+ *
+ * Reads the configuration structure contained in dglort_cfg and uses that
+ * information to populate a DGLORTMAP/DEC entry and the queues assigned
+ * to it.
+ **/
+STATIC s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw,
+ struct fm10k_dglort_cfg *dglort)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ DEBUGFUNC("fm10k_configure_dglort_map_vf");
+
+ /* verify the dglort pointer */
+ if (!dglort)
+ return FM10K_ERR_PARAM;
+
+ /* stub for now until we determine correct message for this */
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_adjust_systime_vf - Adjust systime frequency
+ * @hw: pointer to hardware structure
+ * @ppb: adjustment rate in parts per billion
+ *
+ * This function takes an adjustment rate in parts per billion and verifies
+ * that the value is 0, as the VF cannot adjust the systime clock.
+ *
+ * If the ppb value is non-zero, FM10K_ERR_PARAM is returned; otherwise
+ * FM10K_SUCCESS is returned.
+ **/
+STATIC s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ DEBUGFUNC("fm10k_adjust_systime_vf");
+
+	/* The VF cannot adjust the clock frequency; however, it should
+	 * already have a syntonic clock with whichever host interface is
+	 * running as the master for the host interface clock domain, so
+	 * no frequency adjustment should be necessary.
+	 */
+ return ppb ? FM10K_ERR_PARAM : FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_read_systime_vf - Reads value of systime registers
+ * @hw: pointer to the hardware structure
+ *
+ * This function reads the contents of two registers that combine to form a
+ * 64-bit value measured in nanoseconds. To guarantee the value is accurate,
+ * we check the 32 most significant bits both before and after reading the
+ * 32 least significant bits, verifying they did not change while the
+ * registers were being read.
+ **/
+static u64 fm10k_read_systime_vf(struct fm10k_hw *hw)
+{
+ u32 systime_l, systime_h, systime_tmp;
+
+ systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
+
+ do {
+ systime_tmp = systime_h;
+ systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME);
+ systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
+ } while (systime_tmp != systime_h);
+
+ return ((u64)systime_h << 32) | systime_l;
+}
+
+static const struct fm10k_msg_data fm10k_msg_data_vf[] = {
+ FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
+ FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
+ FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
+ FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
+};
+
+/**
+ * fm10k_init_ops_vf - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for VF.
+ * Does not touch the hardware.
+ **/
+s32 fm10k_init_ops_vf(struct fm10k_hw *hw)
+{
+ struct fm10k_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("fm10k_init_ops_vf");
+
+ fm10k_init_ops_generic(hw);
+
+ mac->ops.reset_hw = &fm10k_reset_hw_vf;
+ mac->ops.init_hw = &fm10k_init_hw_vf;
+ mac->ops.start_hw = &fm10k_start_hw_generic;
+ mac->ops.stop_hw = &fm10k_stop_hw_vf;
+ mac->ops.is_slot_appropriate = &fm10k_is_slot_appropriate_vf;
+ mac->ops.update_vlan = &fm10k_update_vlan_vf;
+ mac->ops.read_mac_addr = &fm10k_read_mac_addr_vf;
+ mac->ops.update_uc_addr = &fm10k_update_uc_addr_vf;
+ mac->ops.update_mc_addr = &fm10k_update_mc_addr_vf;
+ mac->ops.update_xcast_mode = &fm10k_update_xcast_mode_vf;
+ mac->ops.update_int_moderator = &fm10k_update_int_moderator_vf;
+ mac->ops.update_lport_state = &fm10k_update_lport_state_vf;
+ mac->ops.update_hw_stats = &fm10k_update_hw_stats_vf;
+ mac->ops.rebind_hw_stats = &fm10k_rebind_hw_stats_vf;
+ mac->ops.configure_dglort_map = &fm10k_configure_dglort_map_vf;
+ mac->ops.get_host_state = &fm10k_get_host_state_generic;
+ mac->ops.adjust_systime = &fm10k_adjust_systime_vf;
+ mac->ops.read_systime = &fm10k_read_systime_vf;
+
+ mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw);
+
+ return fm10k_pfvf_mbx_init(hw, &hw->mbx, fm10k_msg_data_vf, 0);
+}
diff --git a/src/dpdk22/drivers/net/fm10k/base/fm10k_vf.h b/src/dpdk22/drivers/net/fm10k/base/fm10k_vf.h
new file mode 100644
index 00000000..116c56fc
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/base/fm10k_vf.h
@@ -0,0 +1,92 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _FM10K_VF_H_
+#define _FM10K_VF_H_
+
+#include "fm10k_type.h"
+#include "fm10k_common.h"
+
+enum fm10k_vf_tlv_msg_id {
+ FM10K_VF_MSG_ID_TEST = 0, /* msg ID reserved for testing */
+ FM10K_VF_MSG_ID_MSIX,
+ FM10K_VF_MSG_ID_MAC_VLAN,
+ FM10K_VF_MSG_ID_LPORT_STATE,
+ FM10K_VF_MSG_ID_1588,
+ FM10K_VF_MSG_ID_MAX,
+};
+
+enum fm10k_tlv_mac_vlan_attr_id {
+ FM10K_MAC_VLAN_MSG_VLAN,
+ FM10K_MAC_VLAN_MSG_SET,
+ FM10K_MAC_VLAN_MSG_MAC,
+ FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
+ FM10K_MAC_VLAN_MSG_MULTICAST,
+ FM10K_MAC_VLAN_MSG_ID_MAX
+};
+
+enum fm10k_tlv_lport_state_attr_id {
+ FM10K_LPORT_STATE_MSG_DISABLE,
+ FM10K_LPORT_STATE_MSG_XCAST_MODE,
+ FM10K_LPORT_STATE_MSG_READY,
+ FM10K_LPORT_STATE_MSG_MAX
+};
+
+enum fm10k_tlv_1588_attr_id {
+ FM10K_1588_MSG_TIMESTAMP = 0, /* deprecated */
+ FM10K_1588_MSG_CLK_OFFSET,
+ FM10K_1588_MSG_MAX
+};
+
+#define FM10K_VF_MSG_MSIX_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func)
+
+s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[];
+#define FM10K_VF_MSG_MAC_VLAN_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MAC_VLAN, \
+ fm10k_mac_vlan_msg_attr, func)
+
+s32 fm10k_msg_lport_state_vf(struct fm10k_hw *, u32 **,
+ struct fm10k_mbx_info *);
+extern const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[];
+#define FM10K_VF_MSG_LPORT_STATE_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \
+ fm10k_lport_state_msg_attr, func)
+
+extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[];
+#define FM10K_VF_MSG_1588_HANDLER(func) \
+ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func)
+
+s32 fm10k_init_ops_vf(struct fm10k_hw *hw);
+#endif /* _FM10K_VF_H */
diff --git a/src/dpdk22/drivers/net/fm10k/fm10k.h b/src/dpdk22/drivers/net/fm10k/fm10k.h
new file mode 100644
index 00000000..cd38af2d
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/fm10k.h
@@ -0,0 +1,362 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FM10K_H_
+#define _FM10K_H_
+
+#include <stdint.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_spinlock.h>
+#include "fm10k_logs.h"
+#include "base/fm10k_type.h"
+
+/* descriptor ring base addresses must be aligned to the following */
+#define FM10K_ALIGN_RX_DESC 128
+#define FM10K_ALIGN_TX_DESC 128
+
+/* The maximum packet size that FM10K supports */
+#define FM10K_MAX_PKT_SIZE (15 * 1024)
+
+/* Minimum size of RX buffer FM10K supported */
+#define FM10K_MIN_RX_BUF_SIZE 256
+
+/* The maximum of SRIOV VFs per port supported */
+#define FM10K_MAX_VF_NUM 64
+
+/* number of descriptors must be a multiple of the following */
+#define FM10K_MULT_RX_DESC FM10K_REQ_RX_DESCRIPTOR_MULTIPLE
+#define FM10K_MULT_TX_DESC FM10K_REQ_TX_DESCRIPTOR_MULTIPLE
+
+/* maximum size of descriptor rings */
+#define FM10K_MAX_RX_RING_SZ (512 * 1024)
+#define FM10K_MAX_TX_RING_SZ (512 * 1024)
+
+/* minimum and maximum number of descriptors in a ring */
+#define FM10K_MIN_RX_DESC 32
+#define FM10K_MIN_TX_DESC 32
+#define FM10K_MAX_RX_DESC (FM10K_MAX_RX_RING_SZ / sizeof(union fm10k_rx_desc))
+#define FM10K_MAX_TX_DESC (FM10K_MAX_TX_RING_SZ / sizeof(struct fm10k_tx_desc))
+
+/*
+ * byte alignment for HW RX data buffers
+ * The datasheet requires that RX buffer addresses either be 512-byte aligned
+ * or be 8-byte aligned without crossing host memory pages (4KB alignment
+ * boundaries). We satisfy the first option.
+ */
+#define FM10K_RX_DATABUF_ALIGN 512
+
+/*
+ * threshold default, min, max, and divisor constraints
+ * the configured values must satisfy the following:
+ * MIN <= value <= MAX
+ * DIV % value == 0
+ */
+#define FM10K_RX_FREE_THRESH_DEFAULT(rxq) 32
+#define FM10K_RX_FREE_THRESH_MIN(rxq) 1
+#define FM10K_RX_FREE_THRESH_MAX(rxq) ((rxq)->nb_desc - 1)
+#define FM10K_RX_FREE_THRESH_DIV(rxq) ((rxq)->nb_desc)
+
+#define FM10K_TX_FREE_THRESH_DEFAULT(txq) 32
+#define FM10K_TX_FREE_THRESH_MIN(txq) 1
+#define FM10K_TX_FREE_THRESH_MAX(txq) ((txq)->nb_desc - 3)
+#define FM10K_TX_FREE_THRESH_DIV(txq) 0
+
+#define FM10K_DEFAULT_RX_PTHRESH 8
+#define FM10K_DEFAULT_RX_HTHRESH 8
+#define FM10K_DEFAULT_RX_WTHRESH 0
+
+#define FM10K_DEFAULT_TX_PTHRESH 32
+#define FM10K_DEFAULT_TX_HTHRESH 0
+#define FM10K_DEFAULT_TX_WTHRESH 0
+
+#define FM10K_TX_RS_THRESH_DEFAULT(txq) 32
+#define FM10K_TX_RS_THRESH_MIN(txq) 1
+#define FM10K_TX_RS_THRESH_MAX(txq) \
+ RTE_MIN(((txq)->nb_desc - 2), (txq)->free_thresh)
+#define FM10K_TX_RS_THRESH_DIV(txq) ((txq)->nb_desc)
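+/* Illustrative check (not in the original source): with nb_desc = 512, an
+ * rx_free_thresh of 32 satisfies both constraints above, since
+ * 1 <= 32 <= 511 and 512 % 32 == 0, while 33 would fail the divisor
+ * constraint (512 % 33 != 0).
+ */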
+
+#define FM10K_VLAN_TAG_SIZE 4
+
+/* Maximum number of MAC addresses per PF/VF */
+#define FM10K_MAX_MACADDR_NUM 64
+
+#define FM10K_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t))
+#define FM10K_VFTA_SIZE (4096 / FM10K_UINT32_BIT_SIZE)
+
+/* vlan_id is a 12-bit number.
+ * The VFTA array is effectively a 4096-bit array of 128 32-bit elements.
+ * Since 2^5 = 32, the lower 5 bits of vlan_id select the bit within a 32-bit
+ * element, and the upper 7 bits select the element's index in the array.
+ */
+#define FM10K_VFTA_BIT(vlan_id) (1 << ((vlan_id) & 0x1F))
+#define FM10K_VFTA_IDX(vlan_id) ((vlan_id) >> 5)
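+/* Illustrative example (not in the original source): for vlan_id 0x123
+ * (291), FM10K_VFTA_IDX(0x123) = 0x123 >> 5 = 9 selects vfta[9], and
+ * FM10K_VFTA_BIT(0x123) = 1 << (0x123 & 0x1F) = 1 << 3 selects bit 3 of
+ * that element.
+ */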
+
+#define RTE_FM10K_RXQ_REARM_THRESH 32
+#define RTE_FM10K_VPMD_TX_BURST 32
+#define RTE_FM10K_MAX_RX_BURST RTE_FM10K_RXQ_REARM_THRESH
+#define RTE_FM10K_TX_MAX_FREE_BUF_SZ 64
+#define RTE_FM10K_DESCS_PER_LOOP 4
+
+#define FM10K_SIMPLE_TX_FLAG ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
+ ETH_TXQ_FLAGS_NOOFFLOADS)
+
+struct fm10k_macvlan_filter_info {
+ uint16_t vlan_num; /* Total VLAN number */
+ uint16_t mac_num; /* Total mac number */
+ uint16_t nb_queue_pools; /* Active queue pools number */
+ /* VMDQ ID for each MAC address */
+ uint8_t mac_vmdq_id[FM10K_MAX_MACADDR_NUM];
+ uint32_t vfta[FM10K_VFTA_SIZE]; /* VLAN bitmap */
+};
+
+struct fm10k_dev_info {
+ volatile uint32_t enable;
+ volatile uint32_t glort;
+ /* Protect the mailbox to avoid race condition */
+ rte_spinlock_t mbx_lock;
+ struct fm10k_macvlan_filter_info macvlan;
+ /* Flag to indicate if RX vector conditions satisfied */
+ bool rx_vec_allowed;
+};
+
+/*
+ * Structure to store private data for each driver instance.
+ */
+struct fm10k_adapter {
+ struct fm10k_hw hw;
+ struct fm10k_hw_stats stats;
+ struct fm10k_dev_info info;
+};
+
+#define FM10K_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct fm10k_adapter *)adapter)->hw)
+
+#define FM10K_DEV_PRIVATE_TO_STATS(adapter) \
+ (&((struct fm10k_adapter *)adapter)->stats)
+
+#define FM10K_DEV_PRIVATE_TO_INFO(adapter) \
+ (&((struct fm10k_adapter *)adapter)->info)
+
+#define FM10K_DEV_PRIVATE_TO_MBXLOCK(adapter) \
+ (&(((struct fm10k_adapter *)adapter)->info.mbx_lock))
+
+#define FM10K_DEV_PRIVATE_TO_MACVLAN(adapter) \
+ (&(((struct fm10k_adapter *)adapter)->info.macvlan))
+
+struct fm10k_rx_queue {
+ struct rte_mempool *mp;
+ struct rte_mbuf **sw_ring;
+ volatile union fm10k_rx_desc *hw_ring;
+ struct rte_mbuf *pkt_first_seg; /* First segment of current packet. */
+ struct rte_mbuf *pkt_last_seg; /* Last segment of current packet. */
+ uint64_t hw_ring_phys_addr;
+ uint64_t mbuf_initializer; /* value to init mbufs */
+	/* dummy mbuf allocated for wraparound when scanning the hw ring */
+ struct rte_mbuf fake_mbuf;
+ uint16_t next_dd;
+ uint16_t next_alloc;
+ uint16_t next_trigger;
+ uint16_t alloc_thresh;
+ volatile uint32_t *tail_ptr;
+ uint16_t nb_desc;
+ /* Number of faked desc added at the tail for Vector RX function */
+ uint16_t nb_fake_desc;
+ uint16_t queue_id;
+ /* Below 2 fields only valid in case vPMD is applied. */
+ uint16_t rxrearm_nb; /* number of remaining to be re-armed */
+ uint16_t rxrearm_start; /* the idx we start the re-arming from */
+ uint16_t rx_using_sse; /* indicates that vector RX is in use */
+ uint8_t port_id;
+ uint8_t drop_en;
+ uint8_t rx_deferred_start; /* don't start this queue in dev start. */
+};
+
+/*
+ * a FIFO is used to track which descriptors have their RS bit set for Tx
+ * queues which are configured to allow multiple descriptors per packet
+ */
+struct fifo {
+ uint16_t *list;
+ uint16_t *head;
+ uint16_t *tail;
+ uint16_t *endp;
+};
+
+struct fm10k_txq_ops;
+
+struct fm10k_tx_queue {
+ struct rte_mbuf **sw_ring;
+ struct fm10k_tx_desc *hw_ring;
+ uint64_t hw_ring_phys_addr;
+ struct fifo rs_tracker;
+ const struct fm10k_txq_ops *ops; /* txq ops */
+ uint16_t last_free;
+ uint16_t next_free;
+ uint16_t nb_free;
+ uint16_t nb_used;
+ uint16_t free_thresh;
+ uint16_t rs_thresh;
+ /* Below 2 fields only valid in case vPMD is applied. */
+ uint16_t next_rs; /* Next pos to set RS flag */
+ uint16_t next_dd; /* Next pos to check DD flag */
+ volatile uint32_t *tail_ptr;
+ uint32_t txq_flags; /* Holds flags for this TXq */
+ uint16_t nb_desc;
+ uint8_t port_id;
+ uint8_t tx_deferred_start; /* don't start this queue in dev start. */
+ uint16_t queue_id;
+};
+
+struct fm10k_txq_ops {
+ void (*reset)(struct fm10k_tx_queue *txq);
+};
+
+#define MBUF_DMA_ADDR(mb) \
+ ((uint64_t) ((mb)->buf_physaddr + (mb)->data_off))
+
+/* enforce 512B alignment on default Rx DMA addresses */
+#define MBUF_DMA_ADDR_DEFAULT(mb) \
+ ((uint64_t) RTE_ALIGN(((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM),\
+ FM10K_RX_DATABUF_ALIGN))
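+/*
+ * Worked example (illustrative): with buf_physaddr = 0x7f0000100 and
+ * a 128B RTE_PKTMBUF_HEADROOM, MBUF_DMA_ADDR_DEFAULT rounds
+ * 0x7f0000180 up to the next FM10K_RX_DATABUF_ALIGN (512B) boundary,
+ * yielding 0x7f0000200.
+ */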
+
+static inline void fifo_reset(struct fifo *fifo, uint32_t len)
+{
+ fifo->head = fifo->tail = fifo->list;
+ fifo->endp = fifo->list + len;
+}
+
+static inline void fifo_insert(struct fifo *fifo, uint16_t val)
+{
+ *fifo->head = val;
+ if (++fifo->head == fifo->endp)
+ fifo->head = fifo->list;
+}
+
+/* do not worry about list being empty since we only check it once we know
+ * we have used enough descriptors to set the RS bit at least once */
+static inline uint16_t fifo_peek(struct fifo *fifo)
+{
+ return *fifo->tail;
+}
+
+static inline uint16_t fifo_remove(struct fifo *fifo)
+{
+ uint16_t val;
+ val = *fifo->tail;
+ if (++fifo->tail == fifo->endp)
+ fifo->tail = fifo->list;
+ return val;
+}
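+/*
+ * Usage sketch (illustrative, not from the original source): a TX
+ * queue with 512 descriptors and rs_thresh 32 reserves
+ * (512 + 1) / 32 = 16 slots, then records the position of every
+ * descriptor that had its RS bit set:
+ *
+ *   fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
+ *   fifo_insert(&q->rs_tracker, rs_desc_idx);
+ *   ...
+ *   last_rs = fifo_remove(&q->rs_tracker);
+ */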
+
+static inline void
+fm10k_pktmbuf_reset(struct rte_mbuf *mb, uint8_t in_port)
+{
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->next = NULL;
+ mb->nb_segs = 1;
+
+ /* enforce 512B alignment on default Rx virtual addresses */
+ mb->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb->buf_addr +
+ RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN)
+ - (char *)mb->buf_addr);
+ mb->port = in_port;
+}
+
+/*
+ * Verify Rx packet buffer alignment is valid.
+ *
+ * Hardware requires specific alignment for Rx packet buffers. At
+ * least one of the following two conditions must be satisfied.
+ * 1. Address is 512B aligned
+ * 2. Address is 8B aligned and buffer does not cross 4K boundary.
+ *
+ * Return 1 if buffer alignment satisfies at least one condition,
+ * otherwise return 0.
+ *
+ * Note: Alignment is checked by the driver when the Rx queue is reset. It
+ * is assumed that if an entire descriptor ring can be filled with
+ * buffers containing valid alignment, then all buffers in that mempool
+ * have valid address alignment. It is the responsibility of the user
+ * to ensure all buffers have valid alignment, as it is the user who
+ * creates the mempool.
+ * Note: It is assumed the buffer needs only to store a maximum size Ethernet
+ * frame.
+ */
+static inline int
+fm10k_addr_alignment_valid(struct rte_mbuf *mb)
+{
+ uint64_t addr = MBUF_DMA_ADDR_DEFAULT(mb);
+ uint64_t boundary1, boundary2;
+
+ /* 512B aligned? */
+ if (RTE_ALIGN(addr, FM10K_RX_DATABUF_ALIGN) == addr)
+ return 1;
+
+ /* 8B aligned, and max Ethernet frame would not cross a 4KB boundary? */
+ if (RTE_ALIGN(addr, 8) == addr) {
+ boundary1 = RTE_ALIGN_FLOOR(addr, 4096);
+ boundary2 = RTE_ALIGN_FLOOR(addr + ETHER_MAX_VLAN_FRAME_LEN,
+ 4096);
+ if (boundary1 == boundary2)
+ return 1;
+ }
+
+ PMD_INIT_LOG(ERR, "Error: Invalid buffer alignment!");
+
+ return 0;
+}
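+/*
+ * Worked example (illustrative): addr = 0x1008 is 8B but not 512B
+ * aligned; with ETHER_MAX_VLAN_FRAME_LEN = 1522 the buffer ends at
+ * 0x15FA, and both ends floor to the same 4KB page base (0x1000), so
+ * the address passes the second condition.
+ */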
+
+/* Rx and Tx prototypes */
+uint16_t fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t fm10k_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+int fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq);
+int fm10k_rx_vec_condition_check(struct rte_eth_dev *);
+void fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq);
+uint16_t fm10k_recv_pkts_vec(void *, struct rte_mbuf **, uint16_t);
+uint16_t fm10k_recv_scattered_pkts_vec(void *, struct rte_mbuf **,
+ uint16_t);
+uint16_t fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+void fm10k_txq_vec_setup(struct fm10k_tx_queue *txq);
+int fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq);
+
+#endif
diff --git a/src/dpdk22/drivers/net/fm10k/fm10k_ethdev.c b/src/dpdk22/drivers/net/fm10k/fm10k_ethdev.c
new file mode 100644
index 00000000..e4aed94b
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/fm10k_ethdev.c
@@ -0,0 +1,2780 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_string_fns.h>
+#include <rte_dev.h>
+#include <rte_spinlock.h>
+
+#include "fm10k.h"
+#include "base/fm10k_api.h"
+
+/* Default delay to acquire mailbox lock */
+#define FM10K_MBXLOCK_DELAY_US 20
+#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
+
+#define MAIN_VSI_POOL_NUMBER 0
+
+/* Max try times to acquire switch status */
+#define MAX_QUERY_SWITCH_STATE_TIMES 10
+/* Wait interval to get switch status */
+#define WAIT_SWITCH_MSG_US 100000
+/* Number of chars per uint32 type */
+#define CHARS_PER_UINT32 (sizeof(uint32_t))
+#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
+
+static void fm10k_close_mbx_service(struct fm10k_hw *hw);
+static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static inline int fm10k_glort_valid(struct fm10k_hw *hw);
+static int
+fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
+static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
+ const u8 *mac, bool add, uint32_t pool);
+static void fm10k_tx_queue_release(void *queue);
+static void fm10k_rx_queue_release(void *queue);
+static void fm10k_set_rx_function(struct rte_eth_dev *dev);
+static void fm10k_set_tx_function(struct rte_eth_dev *dev);
+
+struct fm10k_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
+ {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
+ {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
+ {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
+ {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
+ {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
+ {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
+ {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
+ {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
+ nodesc_drop)},
+};
+
+#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
+ sizeof(fm10k_hw_stats_strings[0]))
+
+struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
+ {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
+ {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
+ {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
+};
+
+#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
+ sizeof(fm10k_hw_stats_rx_q_strings[0]))
+
+struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
+ {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
+ {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
+};
+
+#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
+ sizeof(fm10k_hw_stats_tx_q_strings[0]))
+
+#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
+ (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
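+/*
+ * Illustrative arithmetic: with the 8 global strings above plus 3 RX
+ * and 2 TX per-queue strings, FM10K_NB_XSTATS expands to
+ * 8 + FM10K_MAX_QUEUES_PF * 5 entries.
+ */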
+
+static void
+fm10k_mbx_initlock(struct fm10k_hw *hw)
+{
+ rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
+}
+
+static void
+fm10k_mbx_lock(struct fm10k_hw *hw)
+{
+ while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
+ rte_delay_us(FM10K_MBXLOCK_DELAY_US);
+}
+
+static void
+fm10k_mbx_unlock(struct fm10k_hw *hw)
+{
+ rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
+}
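+/*
+ * Note: fm10k_mbx_lock() spins on rte_spinlock_trylock() with a
+ * FM10K_MBXLOCK_DELAY_US pause instead of blocking in
+ * rte_spinlock_lock(), likely to avoid hammering the lock's cache
+ * line while a long mailbox transaction holds it.
+ */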
+
+/* Stubs needed for linkage when vPMD is disabled */
+int __attribute__((weak))
+fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
+{
+ return -1;
+}
+
+uint16_t __attribute__((weak))
+fm10k_recv_pkts_vec(
+ __rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+fm10k_recv_scattered_pkts_vec(
+ __rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+int __attribute__((weak))
+fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
+{
+ return -1;
+}
+
+void __attribute__((weak))
+fm10k_rx_queue_release_mbufs_vec(
+ __rte_unused struct fm10k_rx_queue *rxq)
+{
+ return;
+}
+
+void __attribute__((weak))
+fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
+{
+ return;
+}
+
+int __attribute__((weak))
+fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
+{
+ return -1;
+}
+
+uint16_t __attribute__((weak))
+fm10k_xmit_pkts_vec(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+/*
+ * reset queue to initial state, allocate software buffers used when starting
+ * device.
+ * return 0 on success
+ * return -ENOMEM if buffers cannot be allocated
+ * return -EINVAL if buffers do not satisfy alignment condition
+ */
+static inline int
+rx_queue_reset(struct fm10k_rx_queue *q)
+{
+ static const union fm10k_rx_desc zero = {{0} };
+ uint64_t dma_addr;
+ int i, diag;
+ PMD_INIT_FUNC_TRACE();
+
+ diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
+ if (diag != 0)
+ return -ENOMEM;
+
+ for (i = 0; i < q->nb_desc; ++i) {
+ fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
+ if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
+ rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
+ q->nb_desc);
+ return -EINVAL;
+ }
+ dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
+ q->hw_ring[i].q.pkt_addr = dma_addr;
+ q->hw_ring[i].q.hdr_addr = dma_addr;
+ }
+
+ /* initialize extra software ring entries. Space for these extra
+ * entries is always allocated.
+ */
+ memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
+ for (i = 0; i < q->nb_fake_desc; ++i) {
+ q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
+ q->hw_ring[q->nb_desc + i] = zero;
+ }
+
+ q->next_dd = 0;
+ q->next_alloc = 0;
+ q->next_trigger = q->alloc_thresh - 1;
+ FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
+ q->rxrearm_start = 0;
+ q->rxrearm_nb = 0;
+
+ return 0;
+}
+
+/*
+ * clean queue, descriptor rings, free software buffers used when stopping
+ * device.
+ */
+static inline void
+rx_queue_clean(struct fm10k_rx_queue *q)
+{
+ union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
+ uint32_t i;
+ PMD_INIT_FUNC_TRACE();
+
+ /* zero descriptor rings */
+ for (i = 0; i < q->nb_desc; ++i)
+ q->hw_ring[i] = zero;
+
+ /* zero faked descriptors */
+ for (i = 0; i < q->nb_fake_desc; ++i)
+ q->hw_ring[q->nb_desc + i] = zero;
+
+ /* vPMD driver has a different way of releasing mbufs. */
+ if (q->rx_using_sse) {
+ fm10k_rx_queue_release_mbufs_vec(q);
+ return;
+ }
+
+ /* free software buffers */
+ for (i = 0; i < q->nb_desc; ++i) {
+ if (q->sw_ring[i]) {
+ rte_pktmbuf_free_seg(q->sw_ring[i]);
+ q->sw_ring[i] = NULL;
+ }
+ }
+}
+
+/*
+ * free all queue memory; used when releasing the queue (e.g. on re-configuration)
+ */
+static inline void
+rx_queue_free(struct fm10k_rx_queue *q)
+{
+ PMD_INIT_FUNC_TRACE();
+ if (q) {
+ PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
+ rx_queue_clean(q);
+ if (q->sw_ring) {
+ rte_free(q->sw_ring);
+ q->sw_ring = NULL;
+ }
+ rte_free(q);
+ q = NULL;
+ }
+}
+
+/*
+ * disable RX queue, wait until HW finishes the necessary flush operation
+ */
+static inline int
+rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
+{
+ uint32_t reg, i;
+
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
+ reg & ~FM10K_RXQCTL_ENABLE);
+
+ /* Wait 100us at most */
+ for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
+ rte_delay_us(1);
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
+ if (!(reg & FM10K_RXQCTL_ENABLE))
+ break;
+ }
+
+ if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * reset queue to initial state, allocate software buffers used when starting
+ * device
+ */
+static inline void
+tx_queue_reset(struct fm10k_tx_queue *q)
+{
+ PMD_INIT_FUNC_TRACE();
+ q->last_free = 0;
+ q->next_free = 0;
+ q->nb_used = 0;
+ q->nb_free = q->nb_desc - 1;
+ fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
+ FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
+}
+
+/*
+ * clean queue, descriptor rings, free software buffers used when stopping
+ * device
+ */
+static inline void
+tx_queue_clean(struct fm10k_tx_queue *q)
+{
+ struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
+ uint32_t i;
+ PMD_INIT_FUNC_TRACE();
+
+ /* zero descriptor rings */
+ for (i = 0; i < q->nb_desc; ++i)
+ q->hw_ring[i] = zero;
+
+ /* free software buffers */
+ for (i = 0; i < q->nb_desc; ++i) {
+ if (q->sw_ring[i]) {
+ rte_pktmbuf_free_seg(q->sw_ring[i]);
+ q->sw_ring[i] = NULL;
+ }
+ }
+}
+
+/*
+ * free all queue memory; used when releasing the queue (e.g. on re-configuration)
+ */
+static inline void
+tx_queue_free(struct fm10k_tx_queue *q)
+{
+ PMD_INIT_FUNC_TRACE();
+ if (q) {
+ PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
+ tx_queue_clean(q);
+ if (q->rs_tracker.list) {
+ rte_free(q->rs_tracker.list);
+ q->rs_tracker.list = NULL;
+ }
+ if (q->sw_ring) {
+ rte_free(q->sw_ring);
+ q->sw_ring = NULL;
+ }
+ rte_free(q);
+ q = NULL;
+ }
+}
+
+/*
+ * disable TX queue, wait until HW finishes the necessary flush operation
+ */
+static inline int
+tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
+{
+ uint32_t reg, i;
+
+ reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
+ reg & ~FM10K_TXDCTL_ENABLE);
+
+ /* Wait 100us at most */
+ for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
+ rte_delay_us(1);
+ reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
+ if (!(reg & FM10K_TXDCTL_ENABLE))
+ break;
+ }
+
+ if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
+ return -1;
+
+ return 0;
+}
+
+static int
+fm10k_check_mq_mode(struct rte_eth_dev *dev)
+{
+ enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_vmdq_rx_conf *vmdq_conf;
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+
+ vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+ if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ PMD_INIT_LOG(ERR, "DCB mode is not supported.");
+ return -EINVAL;
+ }
+
+ if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+ return 0;
+
+ if (hw->mac.type == fm10k_mac_vf) {
+ PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
+ return -EINVAL;
+ }
+
+ /* Check VMDQ queue pool number */
+ if (vmdq_conf->nb_queue_pools >
+ sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
+ vmdq_conf->nb_queue_pools > nb_rx_q) {
+ PMD_INIT_LOG(ERR, "Too many of queue pools: %d",
+ vmdq_conf->nb_queue_pools);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct fm10k_txq_ops def_txq_ops = {
+ .reset = tx_queue_reset,
+};
+
+static int
+fm10k_dev_configure(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
+ PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
+ /* multipe queue mode checking */
+ ret = fm10k_check_mq_mode(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* fls = find last set bit = 32 minus the number of leading zeros */
+#ifndef fls
+#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
+#endif
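+/*
+ * Example (illustrative): fls(8) = 32 - __builtin_clz(8) =
+ * 32 - 28 = 4, i.e. the last set bit of 0b1000 is bit 4 when
+ * counting from 1.
+ */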
+
+static void
+fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_vmdq_rx_conf *vmdq_conf;
+ uint32_t i;
+
+ vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+ for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
+ if (!vmdq_conf->pool_map[i].pools)
+ continue;
+ fm10k_mbx_lock(hw);
+ fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
+ fm10k_mbx_unlock(hw);
+ }
+}
+
+static void
+fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Add default mac address */
+ fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+ MAIN_VSI_POOL_NUMBER);
+}
+
+static void
+fm10k_dev_rss_configure(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ uint32_t mrqc, *key, i, reta, j;
+ uint64_t hf;
+
+#define RSS_KEY_SIZE 40
+ static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+ };
+
+ if (dev->data->nb_rx_queues == 1 ||
+ dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
+ dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
+ return;
+
+ /* RSS key is rss_intel_key (default) or user-provided (rss_key) */
+ if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
+ key = (uint32_t *)rss_intel_key;
+ else
+ key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
+
+ /* Now fill our hash function seeds, 4 bytes at a time */
+ for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
+ FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
+
+ /*
+ * Fill in redirection table
+ * The byte-swap is needed because NIC registers are in
+ * little-endian order.
+ */
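+ /*
+ * Worked example (illustrative): with 3 RX queues the loop cycles
+ * j = 0,1,2,0,...; after the first four entries reta holds
+ * 0x00010200, and rte_bswap32() stores 0x00020100 so the first
+ * entry lands in the lowest byte of the register.
+ */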
+ reta = 0;
+ for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
+ if (j == dev->data->nb_rx_queues)
+ j = 0;
+ reta = (reta << CHAR_BIT) | j;
+ if ((i & 3) == 3)
+ FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
+ rte_bswap32(reta));
+ }
+
+ /*
+ * Generate RSS hash based on packet types, TCP/UDP
+ * port numbers and/or IPv4/v6 src and dst addresses
+ */
+ hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
+ mrqc = 0;
+ mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
+
+ if (mrqc == 0) {
+ PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not"
+ "supported", hf);
+ return;
+ }
+
+ FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
+}
+
+static void
+fm10k_dev_logic_port_update(struct rte_eth_dev *dev,
+ uint16_t nb_lport_old, uint16_t nb_lport_new)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t i;
+
+ fm10k_mbx_lock(hw);
+ /* Disable previous logic ports */
+ if (nb_lport_old)
+ hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
+ nb_lport_old, false);
+ /* Enable new logic ports */
+ hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
+ nb_lport_new, true);
+ fm10k_mbx_unlock(hw);
+
+ for (i = 0; i < nb_lport_new; i++) {
+ /* Set unicast mode by default. The app can switch to
+ * another mode via other API functions.
+ */
+ fm10k_mbx_lock(hw);
+ hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
+ FM10K_XCAST_MODE_NONE);
+ fm10k_mbx_unlock(hw);
+ }
+}
+
+static void
+fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_vmdq_rx_conf *vmdq_conf;
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct fm10k_macvlan_filter_info *macvlan;
+ uint16_t nb_queue_pools = 0; /* pool number in configuration */
+ uint16_t nb_lport_new, nb_lport_old;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+ fm10k_dev_rss_configure(dev);
+
+ /* only PF supports VMDQ */
+ if (hw->mac.type != fm10k_mac_pf)
+ return;
+
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ nb_queue_pools = vmdq_conf->nb_queue_pools;
+
+ /* no pool number change, no need to update logic port and VLAN/MAC */
+ if (macvlan->nb_queue_pools == nb_queue_pools)
+ return;
+
+ nb_lport_old = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
+ nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
+ fm10k_dev_logic_port_update(dev, nb_lport_old, nb_lport_new);
+
+ /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
+ memset(dev->data->mac_addrs, 0,
+ ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
+ ether_addr_copy((const struct ether_addr *)hw->mac.addr,
+ &dev->data->mac_addrs[0]);
+ memset(macvlan, 0, sizeof(*macvlan));
+ macvlan->nb_queue_pools = nb_queue_pools;
+
+ if (nb_queue_pools)
+ fm10k_dev_vmdq_rx_configure(dev);
+ else
+ fm10k_dev_pf_main_vsi_reset(dev);
+}
+
+static int
+fm10k_dev_tx_init(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i, ret;
+ struct fm10k_tx_queue *txq;
+ uint64_t base_addr;
+ uint32_t size;
+
+ /* Disable TXINT to avoid possible interrupt */
+ for (i = 0; i < hw->mac.max_queues; i++)
+ FM10K_WRITE_REG(hw, FM10K_TXINT(i),
+ 3 << FM10K_TXINT_TIMER_SHIFT);
+
+ /* Setup TX queue */
+ for (i = 0; i < dev->data->nb_tx_queues; ++i) {
+ txq = dev->data->tx_queues[i];
+ base_addr = txq->hw_ring_phys_addr;
+ size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
+
+ /* disable queue to avoid issues while updating state */
+ ret = tx_queue_disable(hw, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
+ return -1;
+ }
+
+ /* set location and size for descriptor ring */
+ FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
+ base_addr & UINT64_LOWER_32BITS_MASK);
+ FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
+ base_addr >> (CHAR_BIT * sizeof(uint32_t)));
+ FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
+ }
+
+ /* set up vector or scalar TX function as appropriate */
+ fm10k_set_tx_function(dev);
+
+ return 0;
+}
+
+static int
+fm10k_dev_rx_init(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i, ret;
+ struct fm10k_rx_queue *rxq;
+ uint64_t base_addr;
+ uint32_t size;
+ uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
+ uint16_t buf_size;
+
+ /* Disable RXINT to avoid possible interrupt */
+ for (i = 0; i < hw->mac.max_queues; i++)
+ FM10K_WRITE_REG(hw, FM10K_RXINT(i),
+ 3 << FM10K_RXINT_TIMER_SHIFT);
+
+ /* Setup RX queues */
+ for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+ rxq = dev->data->rx_queues[i];
+ base_addr = rxq->hw_ring_phys_addr;
+ size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
+
+ /* disable queue to avoid issues while updating state */
+ ret = rx_queue_disable(hw, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
+ return -1;
+ }
+
+ /* Setup the Base and Length of the Rx Descriptor Ring */
+ FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
+ base_addr & UINT64_LOWER_32BITS_MASK);
+ FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
+ base_addr >> (CHAR_BIT * sizeof(uint32_t)));
+ FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
+
+ /* Configure the Rx buffer size for one buff without split */
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
+ /* Since the RX buffer is aligned to 512B within the mbuf, up to
+ * 511B may be reserved for that alignment. But the SRR register
+ * assumes all buffers have the same size, so to close the gap we
+ * must assume the worst case and treat 512B as reserved. If we
+ * don't, HW could overwrite data in the next mbuf.
+ */
+ buf_size -= FM10K_RX_DATABUF_ALIGN;
+
+ FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
+ buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
+
+ /* Add dual VLAN tag length to support dual VLAN */
+ if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
+ dev->data->dev_conf.rxmode.enable_scatter) {
+ uint32_t reg;
+ dev->data->scattered_rx = 1;
+ reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
+ reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
+ FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
+ }
+
+ /* Enable drop on empty, it's RO for VF */
+ if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
+ rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
+
+ FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
+ FM10K_WRITE_FLUSH(hw);
+ }
+
+ /* Configure VMDQ/RSS if applicable */
+ fm10k_dev_mq_rx_configure(dev);
+
+ /* Decide the best RX function */
+ fm10k_set_rx_function(dev);
+
+ return 0;
+}
+
+static int
+fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int err = -1;
+ uint32_t reg;
+ struct fm10k_rx_queue *rxq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+ err = rx_queue_reset(rxq);
+ if (err == -ENOMEM) {
+ PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
+ return err;
+ } else if (err == -EINVAL) {
+ PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
+ " %d", err);
+ return err;
+ }
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled on real
+ * hardware, but BEFORE the queue is enabled when using the
+ * emulation platform. Do it in both places for now and remove
+ * this comment and the following two register writes when the
+ * emulation platform is no longer being used.
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+
+ /* Set PF ownership flag for PF devices */
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
+ if (hw->mac.type == fm10k_mac_pf)
+ reg |= FM10K_RXQCTL_PF;
+ reg |= FM10K_RXQCTL_ENABLE;
+ /* enable RX queue */
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
+ FM10K_WRITE_FLUSH(hw);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return err;
+}
+
+static int
+fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ /* Disable RX queue */
+ rx_queue_disable(hw, rx_queue_id);
+
+ /* Free mbuf and clean HW ring */
+ rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;
+}
+
+static int
+fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ /** @todo - this should be defined in the shared code */
+#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
+ uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
+
+ q->ops->reset(q);
+
+ /* reset head and tail pointers */
+ FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
+
+ /* enable TX queue */
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
+ FM10K_TXDCTL_ENABLE | txdctl);
+ FM10K_WRITE_FLUSH(hw);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ } else
+ err = -1;
+
+ return err;
+}
+
+static int
+fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ tx_queue_disable(hw, tx_queue_id);
+ tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;
+}
+
+static inline int fm10k_glort_valid(struct fm10k_hw *hw)
+{
+ return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
+ != FM10K_DGLORTMAP_NONE);
+}
+
+static void
+fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int status;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Return if it didn't acquire valid glort range */
+ if (!fm10k_glort_valid(hw))
+ return;
+
+ fm10k_mbx_lock(hw);
+ status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ FM10K_XCAST_MODE_PROMISC);
+ fm10k_mbx_unlock(hw);
+
+ if (status != FM10K_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
+}
+
+static void
+fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint8_t mode;
+ int status;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Return if it didn't acquire valid glort range */
+ if (!fm10k_glort_valid(hw))
+ return;
+
+ if (dev->data->all_multicast == 1)
+ mode = FM10K_XCAST_MODE_ALLMULTI;
+ else
+ mode = FM10K_XCAST_MODE_NONE;
+
+ fm10k_mbx_lock(hw);
+ status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ mode);
+ fm10k_mbx_unlock(hw);
+
+ if (status != FM10K_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
+}
+
+static void
+fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int status;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Return if it didn't acquire valid glort range */
+ if (!fm10k_glort_valid(hw))
+ return;
+
+ /* If promiscuous mode is enabled, switching to allmulticast (and
+ * thereby leaving promiscuous) makes no sense, since fm10k can
+ * select only one of the modes.
+ */
+ if (dev->data->promiscuous) {
+ PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
+ "needn't enable allmulticast");
+ return;
+ }
+
+ fm10k_mbx_lock(hw);
+ status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ FM10K_XCAST_MODE_ALLMULTI);
+ fm10k_mbx_unlock(hw);
+
+ if (status != FM10K_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
+}
+
+static void
+fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int status;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Return if it didn't acquire valid glort range */
+ if (!fm10k_glort_valid(hw))
+ return;
+
+ if (dev->data->promiscuous) {
+ PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
+ "since promisc mode is enabled");
+ return;
+ }
+
+ fm10k_mbx_lock(hw);
+ /* Change mode to unicast mode */
+ status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ FM10K_XCAST_MODE_NONE);
+ fm10k_mbx_unlock(hw);
+
+ if (status != FM10K_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
+}
+
+static void
+fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t dglortdec, pool_len, rss_len, i;
+ uint16_t nb_queue_pools;
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ nb_queue_pools = macvlan->nb_queue_pools;
+ pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
+ rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
+ dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
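+ /*
+ * Worked example (illustrative): 4 VMDQ pools with 16 RX queues
+ * gives pool_len = fls(3) = 2 and rss_len = fls(15) - 2 = 2, i.e.
+ * dglortdec encodes 4 RSS queues per pool.
+ */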
+
+ /* Establish only MAP 0 as valid */
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);
+
+ /* Configure VMDQ/RSS DGlort Decoder */
+ FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
+
+ /* Invalidate all other GLORT entries */
+ for (i = 1; i < FM10K_DGLORT_COUNT; i++)
+ FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
+ FM10K_DGLORTMAP_NONE);
+}
+
+#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
+static int
+fm10k_dev_start(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i, diag;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* stop, init, then start the hw */
+ diag = fm10k_stop_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
+ return -EIO;
+ }
+
+ diag = fm10k_init_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
+ return -EIO;
+ }
+
+ diag = fm10k_start_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
+ return -EIO;
+ }
+
+ diag = fm10k_dev_tx_init(dev);
+ if (diag) {
+ PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
+ return diag;
+ }
+
+ diag = fm10k_dev_rx_init(dev);
+ if (diag) {
+ PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
+ return diag;
+ }
+
+ if (hw->mac.type == fm10k_mac_pf)
+ fm10k_dev_dglort_map_configure(dev);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct fm10k_rx_queue *rxq;
+ rxq = dev->data->rx_queues[i];
+
+ if (rxq->rx_deferred_start)
+ continue;
+ diag = fm10k_dev_rx_queue_start(dev, i);
+ if (diag != 0) {
+ int j;
+ for (j = 0; j < i; ++j)
+ rx_queue_clean(dev->data->rx_queues[j]);
+ return diag;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct fm10k_tx_queue *txq;
+ txq = dev->data->tx_queues[i];
+
+ if (txq->tx_deferred_start)
+ continue;
+ diag = fm10k_dev_tx_queue_start(dev, i);
+ if (diag != 0) {
+ int j;
+ for (j = 0; j < i; ++j)
+ tx_queue_clean(dev->data->tx_queues[j]);
+ for (j = 0; j < dev->data->nb_rx_queues; ++j)
+ rx_queue_clean(dev->data->rx_queues[j]);
+ return diag;
+ }
+ }
+
+ /* Update default vlan when not in VMDQ mode */
+ if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+ fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
+
+ return 0;
+}
+
+static void
+fm10k_dev_stop(struct rte_eth_dev *dev)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->tx_queues)
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ fm10k_dev_tx_queue_stop(dev, i);
+
+ if (dev->data->rx_queues)
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ fm10k_dev_rx_queue_stop(dev, i);
+}
+
+static void
+fm10k_dev_queue_release(struct rte_eth_dev *dev)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->tx_queues) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
+
+ tx_queue_free(txq);
+ }
+ }
+
+ if (dev->data->rx_queues) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ fm10k_rx_queue_release(dev->data->rx_queues[i]);
+ }
+}
+
+static void
+fm10k_dev_close(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t nb_lport;
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ PMD_INIT_FUNC_TRACE();
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ nb_lport = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
+ fm10k_mbx_lock(hw);
+ hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
+ nb_lport, false);
+ fm10k_mbx_unlock(hw);
+
+ /* Stop mailbox service first */
+ fm10k_close_mbx_service(hw);
+ fm10k_dev_stop(dev);
+ fm10k_dev_queue_release(dev);
+ fm10k_stop_hw(hw);
+}
+
+static int
+fm10k_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ /* The host-interface link is always up. The speed is ~50Gbps per Gen3
+ * x8 PCIe interface. For now, we leave the speed undefined since there
+ * is no 50Gbps Ethernet. */
+ dev->data->dev_link.link_speed = 0;
+ dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ dev->data->dev_link.link_status = 1;
+
+ return 0;
+}
+
+static int
+fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ unsigned n)
+{
+ struct fm10k_hw_stats *hw_stats =
+ FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ unsigned i, q, count = 0;
+
+ if (n < FM10K_NB_XSTATS)
+ return FM10K_NB_XSTATS;
+
+ /* Global stats */
+ for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "%s", fm10k_hw_stats_strings[count].name);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ fm10k_hw_stats_strings[count].offset);
+ count++;
+ }
+
+ /* PF queue stats */
+ for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
+ for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "rx_q%u_%s", q,
+ fm10k_hw_stats_rx_q_strings[i].name);
+ xstats[count].value =
+ *(uint64_t *)(((char *)&hw_stats->q[q]) +
+ fm10k_hw_stats_rx_q_strings[i].offset);
+ count++;
+ }
+ for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "tx_q%u_%s", q,
+ fm10k_hw_stats_tx_q_strings[i].name);
+ xstats[count].value =
+ *(uint64_t *)(((char *)&hw_stats->q[q]) +
+ fm10k_hw_stats_tx_q_strings[i].offset);
+ count++;
+ }
+ }
+
+ return FM10K_NB_XSTATS;
+}
+
+static void
+fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ uint64_t ipackets, opackets, ibytes, obytes;
+ struct fm10k_hw *hw =
+ FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_hw_stats *hw_stats =
+ FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fm10k_update_hw_stats(hw, hw_stats);
+
+ ipackets = opackets = ibytes = obytes = 0;
+ for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
+ (i < hw->mac.max_queues); ++i) {
+ stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
+ stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
+ stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
+ stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
+ ipackets += stats->q_ipackets[i];
+ opackets += stats->q_opackets[i];
+ ibytes += stats->q_ibytes[i];
+ obytes += stats->q_obytes[i];
+ }
+ stats->ipackets = ipackets;
+ stats->opackets = opackets;
+ stats->ibytes = ibytes;
+ stats->obytes = obytes;
+}
+
+static void
+fm10k_stats_reset(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_hw_stats *hw_stats =
+ FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(hw_stats, 0, sizeof(*hw_stats));
+ fm10k_rebind_hw_stats(hw, hw_stats);
+}
+
+static void
+fm10k_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
+ dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
+ dev_info->max_rx_queues = hw->mac.max_queues;
+ dev_info->max_tx_queues = hw->mac.max_queues;
+ dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
+ dev_info->max_hash_mac_addrs = 0;
+ dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->vmdq_pool_base = 0;
+ dev_info->vmdq_queue_base = 0;
+ dev_info->max_vmdq_pools = ETH_32_POOLS;
+ dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+
+ dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
+ dev_info->reta_size = FM10K_MAX_RSS_INDICES;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = FM10K_DEFAULT_RX_PTHRESH,
+ .hthresh = FM10K_DEFAULT_RX_HTHRESH,
+ .wthresh = FM10K_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = FM10K_DEFAULT_TX_PTHRESH,
+ .hthresh = FM10K_DEFAULT_TX_HTHRESH,
+ .wthresh = FM10K_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
+ .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
+ .txq_flags = FM10K_SIMPLE_TX_FLAG,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = FM10K_MAX_RX_DESC,
+ .nb_min = FM10K_MIN_RX_DESC,
+ .nb_align = FM10K_MULT_RX_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = FM10K_MAX_TX_DESC,
+ .nb_min = FM10K_MIN_TX_DESC,
+ .nb_align = FM10K_MULT_TX_DESC,
+ };
+}
+
+static int
+fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ s32 result;
+ uint16_t mac_num = 0;
+ uint32_t vid_idx, vid_bit, mac_index;
+ struct fm10k_hw *hw;
+ struct fm10k_macvlan_filter_info *macvlan;
+ struct rte_eth_dev_data *data = dev->data;
+
+ hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+
+ if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
+ PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
+ return (-EINVAL);
+ }
+
+ if (vlan_id > ETH_VLAN_ID_MAX) {
+ PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
+ return (-EINVAL);
+ }
+
+ vid_idx = FM10K_VFTA_IDX(vlan_id);
+ vid_bit = FM10K_VFTA_BIT(vlan_id);
+ /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
+ if (on && (macvlan->vfta[vid_idx] & vid_bit))
+ return 0;
+ /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
+ if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
+ PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
+ "in the VLAN filter table");
+ return (-EINVAL);
+ }
+
+ fm10k_mbx_lock(hw);
+ result = fm10k_update_vlan(hw, vlan_id, 0, on);
+ fm10k_mbx_unlock(hw);
+ if (result != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
+ return (-EIO);
+ }
+
+ for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
+ (result == FM10K_SUCCESS); mac_index++) {
+ if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
+ continue;
+ if (mac_num > macvlan->mac_num - 1) {
+ PMD_INIT_LOG(ERR, "MAC address number "
+ "not match");
+ break;
+ }
+ fm10k_mbx_lock(hw);
+ result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
+ data->mac_addrs[mac_index].addr_bytes,
+ vlan_id, on, 0);
+ fm10k_mbx_unlock(hw);
+ mac_num++;
+ }
+ if (result != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
+ return (-EIO);
+ }
+
+ if (on) {
+ macvlan->vlan_num++;
+ macvlan->vfta[vid_idx] |= vid_bit;
+ } else {
+ macvlan->vlan_num--;
+ macvlan->vfta[vid_idx] &= ~vid_bit;
+ }
+ return 0;
+}
+
+static void
+fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
+ PMD_INIT_LOG(ERR, "VLAN stripping is "
+ "always on in fm10k");
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ PMD_INIT_LOG(ERR, "VLAN QinQ is not "
+ "supported in fm10k");
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
+ PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
+ }
+}
+
+/* Add/Remove a MAC address, and update filters to main VSI */
+static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
+ const u8 *mac, bool add, uint32_t pool)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_macvlan_filter_info *macvlan;
+ uint32_t i, j, k;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+
+ if (pool != MAIN_VSI_POOL_NUMBER) {
+ PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
+ "mac to pool %u", pool);
+ return;
+ }
+ for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
+ if (!macvlan->vfta[j])
+ continue;
+ for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
+ if (!(macvlan->vfta[j] & (1 << k)))
+ continue;
+ if (i + 1 > macvlan->vlan_num) {
+ PMD_INIT_LOG(ERR, "vlan number not match");
+ return;
+ }
+ fm10k_mbx_lock(hw);
+ fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
+ j * FM10K_UINT32_BIT_SIZE + k, add, 0);
+ fm10k_mbx_unlock(hw);
+ i++;
+ }
+ }
+}
+
+/* Add/Remove a MAC address, and update filters to VMDQ */
+static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
+ const u8 *mac, bool add, uint32_t pool)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_macvlan_filter_info *macvlan;
+ struct rte_eth_vmdq_rx_conf *vmdq_conf;
+ uint32_t i;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+ if (pool > macvlan->nb_queue_pools) {
+ PMD_DRV_LOG(ERR, "Pool number %u invalid."
+ " Max pool is %u",
+ pool, macvlan->nb_queue_pools);
+ return;
+ }
+ for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
+ if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
+ continue;
+ fm10k_mbx_lock(hw);
+ fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
+ vmdq_conf->pool_map[i].vlan_id, add, 0);
+ fm10k_mbx_unlock(hw);
+ }
+}
+
+/* Add/Remove a MAC address, and update filters */
+static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
+ const u8 *mac, bool add, uint32_t pool)
+{
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+
+ if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
+ fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
+ else
+ fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
+
+ if (add)
+ macvlan->mac_num++;
+ else
+ macvlan->mac_num--;
+}
+
+/* Add a MAC address, and update filters */
+static void
+fm10k_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool)
+{
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
+ macvlan->mac_vmdq_id[index] = pool;
+}
+
+/* Remove a MAC address, and update filters */
+static void
+fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
+ FALSE, macvlan->mac_vmdq_id[index]);
+ macvlan->mac_vmdq_id[index] = 0;
+}
+
+static inline int
+check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
+{
+ if ((request < min) || (request > max) || ((request % mult) != 0))
+ return -1;
+ else
+ return 0;
+}
+
+
+static inline int
+check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
+{
+ if ((request < min) || (request > max) || ((div % request) != 0))
+ return -1;
+ else
+ return 0;
+}
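+/*
+ * Example (illustrative): with div = 512 descriptors, a request of 32
+ * passes check_thresh() because 512 % 32 == 0, while 48 fails even
+ * though it lies inside [min, max].
+ */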
+
+static inline int
+handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
+{
+ uint16_t rx_free_thresh;
+
+ if (conf->rx_free_thresh == 0)
+ rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
+ else
+ rx_free_thresh = conf->rx_free_thresh;
+
+ /* make sure the requested threshold satisfies the constraints */
+ if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
+ FM10K_RX_FREE_THRESH_MAX(q),
+ FM10K_RX_FREE_THRESH_DIV(q),
+ rx_free_thresh)) {
+ PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
+ "less than or equal to %u, "
+ "greater than or equal to %u, "
+ "and a divisor of %u",
+ rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
+ FM10K_RX_FREE_THRESH_MIN(q),
+ FM10K_RX_FREE_THRESH_DIV(q));
+ return (-EINVAL);
+ }
+
+ q->alloc_thresh = rx_free_thresh;
+ q->drop_en = conf->rx_drop_en;
+ q->rx_deferred_start = conf->rx_deferred_start;
+
+ return 0;
+}
+
+/*
+ * Hardware requires specific alignment for Rx packet buffers. At
+ * least one of the following two conditions must be satisfied.
+ * 1. Address is 512B aligned
+ * 2. Address is 8B aligned and buffer does not cross 4K boundary.
+ *
+ * As such, the driver may need to adjust the DMA address within the
+ * buffer by up to 512B.
+ *
+ * return 1 if the element size is valid, otherwise return 0.
+ */
+static int
+mempool_element_size_valid(struct rte_mempool *mp)
+{
+ uint32_t min_size;
+
+ /* elt_size includes mbuf header and headroom */
+ min_size = mp->elt_size - sizeof(struct rte_mbuf) -
+ RTE_PKTMBUF_HEADROOM;
+
+ /* account for up to 512B of alignment */
+ min_size -= FM10K_RX_DATABUF_ALIGN;
+
+ /* sanity check: if the subtractions wrapped below zero,
+ * min_size will now exceed elt_size
+ */
+ if (min_size > mp->elt_size)
+ return 0;
+
+ /* size is valid */
+ return 1;
+}
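+/*
+ * Worked example (illustrative, assuming default mbuf sizes): a pool
+ * created with RTE_MBUF_DEFAULT_BUF_SIZE leaves about 2048B beyond
+ * the mbuf header and headroom; subtracting the 512B alignment
+ * reserve leaves 1536B with no underflow, so the pool is accepted.
+ */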
+
+static int
+fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+ struct fm10k_rx_queue *q;
+ const struct rte_memzone *mz;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* make sure the mempool element size can account for alignment. */
+ if (!mempool_element_size_valid(mp)) {
+ PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
+ return (-EINVAL);
+ }
+
+ /* make sure a valid number of descriptors have been requested */
+ if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
+ FM10K_MULT_RX_DESC, nb_desc)) {
+ PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
+ "less than or equal to %"PRIu32", "
+ "greater than or equal to %u, "
+ "and a multiple of %u",
+ nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
+ FM10K_MULT_RX_DESC);
+ return (-EINVAL);
+ }
+
+ /*
+ * if this queue already exists, free the associated memory. The
+ * queue cannot be reused in case we need to allocate memory on a
+ * different socket than was previously used.
+ */
+ if (dev->data->rx_queues[queue_id] != NULL) {
+ rx_queue_free(dev->data->rx_queues[queue_id]);
+ dev->data->rx_queues[queue_id] = NULL;
+ }
+
+ /* allocate memory for the queue structure */
+ q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (q == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
+ return (-ENOMEM);
+ }
+
+ /* setup queue */
+ q->mp = mp;
+ q->nb_desc = nb_desc;
+ q->nb_fake_desc = FM10K_MULT_RX_DESC;
+ q->port_id = dev->data->port_id;
+ q->queue_id = queue_id;
+ q->tail_ptr = (volatile uint32_t *)
+ &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
+ if (handle_rxconf(q, conf))
+ return (-EINVAL);
+
+ /* allocate memory for the software ring */
+ q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
+ (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (q->sw_ring == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate software ring");
+ rte_free(q);
+ return (-ENOMEM);
+ }
+
+ /*
+ * allocate memory for the hardware descriptor ring. A memzone large
+ * enough to hold the maximum ring size is requested to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
+ FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
+ socket_id);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
+ rte_free(q->sw_ring);
+ rte_free(q);
+ return (-ENOMEM);
+ }
+ q->hw_ring = mz->addr;
+ q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+
+ /* Check if number of descs satisfied Vector requirement */
+ if (!rte_is_power_of_2(nb_desc)) {
+ PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
+ "preconditions - canceling the feature for "
+ "the whole port[%d]",
+ q->queue_id, q->port_id);
+ dev_info->rx_vec_allowed = false;
+ } else
+ fm10k_rxq_vec_setup(q);
+
+ dev->data->rx_queues[queue_id] = q;
+ return 0;
+}
+
+static void
+fm10k_rx_queue_release(void *queue)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rx_queue_free(queue);
+}
+
+static inline int
+handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
+{
+ uint16_t tx_free_thresh;
+ uint16_t tx_rs_thresh;
+
+ /* the constraint macros require that tx_free_thresh is configured
+ * before tx_rs_thresh */
+ if (conf->tx_free_thresh == 0)
+ tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
+ else
+ tx_free_thresh = conf->tx_free_thresh;
+
+ /* make sure the requested threshold satisfies the constraints */
+ if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
+ FM10K_TX_FREE_THRESH_MAX(q),
+ FM10K_TX_FREE_THRESH_DIV(q),
+ tx_free_thresh)) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
+ "less than or equal to %u, "
+ "greater than or equal to %u, "
+ "and a divisor of %u",
+ tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
+ FM10K_TX_FREE_THRESH_MIN(q),
+ FM10K_TX_FREE_THRESH_DIV(q));
+ return (-EINVAL);
+ }
+
+ q->free_thresh = tx_free_thresh;
+
+ if (conf->tx_rs_thresh == 0)
+ tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
+ else
+ tx_rs_thresh = conf->tx_rs_thresh;
+
+ q->tx_deferred_start = conf->tx_deferred_start;
+
+ /* make sure the requested threshold satisfies the constraints */
+ if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
+ FM10K_TX_RS_THRESH_MAX(q),
+ FM10K_TX_RS_THRESH_DIV(q),
+ tx_rs_thresh)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
+ "less than or equal to %u, "
+ "greater than or equal to %u, "
+ "and a divisor of %u",
+ tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
+ FM10K_TX_RS_THRESH_MIN(q),
+ FM10K_TX_RS_THRESH_DIV(q));
+ return (-EINVAL);
+ }
+
+ q->rs_thresh = tx_rs_thresh;
+
+ return 0;
+}
+
+static int
+fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *conf)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_tx_queue *q;
+ const struct rte_memzone *mz;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* make sure a valid number of descriptors have been requested */
+ if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
+ FM10K_MULT_TX_DESC, nb_desc)) {
+ PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
+ "less than or equal to %"PRIu32", "
+ "greater than or equal to %u, "
+ "and a multiple of %u",
+ nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
+ FM10K_MULT_TX_DESC);
+ return (-EINVAL);
+ }
+
+ /*
+ * if this queue already exists, free the associated memory. The
+ * queue cannot be reused in case we need to allocate memory on a
+ * different socket than was previously used.
+ */
+ if (dev->data->tx_queues[queue_id] != NULL) {
+ struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
+
+ tx_queue_free(txq);
+ dev->data->tx_queues[queue_id] = NULL;
+ }
+
+ /* allocate memory for the queue structure */
+ q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (q == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
+ return (-ENOMEM);
+ }
+
+ /* setup queue */
+ q->nb_desc = nb_desc;
+ q->port_id = dev->data->port_id;
+ q->queue_id = queue_id;
+ q->txq_flags = conf->txq_flags;
+ q->ops = &def_txq_ops;
+ q->tail_ptr = (volatile uint32_t *)
+ &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
+ if (handle_txconf(q, conf))
+ return (-EINVAL);
+
+ /* allocate memory for the software ring */
+ q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
+ nb_desc * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (q->sw_ring == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate software ring");
+ rte_free(q);
+ return (-ENOMEM);
+ }
+
+ /*
+ * allocate memory for the hardware descriptor ring. A memzone large
+ * enough to hold the maximum ring size is requested to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
+ FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
+ socket_id);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
+ rte_free(q->sw_ring);
+ rte_free(q);
+ return (-ENOMEM);
+ }
+ q->hw_ring = mz->addr;
+ q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+
+ /*
+ * allocate memory for the RS bit tracker. Enough slots are required
+ * to hold the descriptor index for every RS bit that will be set.
+ */
+ q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
+ ((nb_desc + 1) / q->rs_thresh) *
+ sizeof(uint16_t),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (q->rs_tracker.list == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
+ rte_free(q->sw_ring);
+ rte_free(q);
+ return (-ENOMEM);
+ }
+
+ dev->data->tx_queues[queue_id] = q;
+ return 0;
+}
+
+static void
+fm10k_tx_queue_release(void *queue)
+{
+ struct fm10k_tx_queue *q = queue;
+ PMD_INIT_FUNC_TRACE();
+
+ tx_queue_free(q);
+}
+
+static int
+fm10k_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t i, j, idx, shift;
+ uint8_t mask;
+ uint32_t reta;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (reta_size > FM10K_MAX_RSS_INDICES) {
+ PMD_INIT_LOG(ERR, "The size of the configured hash lookup "
+ "table (%d) exceeds the maximum the hardware supports "
+ "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
+ return -EINVAL;
+ }
+
+ /*
+ * Update Redirection Table RETA[n], n=0..31. The redirection table
+ * holds 128 entries packed into 32 registers, 4 entries per register.
+ */
+ for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ BIT_MASK_PER_UINT32);
+ if (mask == 0)
+ continue;
+
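+ /* Read-modify-write: when only some of the 4 entries in this
+ * register are selected, read the current value first so the
+ * unselected entries are preserved.
+ */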
+ reta = 0;
+ if (mask != BIT_MASK_PER_UINT32)
+ reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
+
+ for (j = 0; j < CHARS_PER_UINT32; j++) {
+ if (mask & (0x1 << j)) {
+ if (mask != BIT_MASK_PER_UINT32)
+ reta &= ~(UINT8_MAX << CHAR_BIT * j);
+ reta |= reta_conf[idx].reta[shift + j] <<
+ (CHAR_BIT * j);
+ }
+ }
+ FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
+ }
+
+ return 0;
+}
+
+static int
+fm10k_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t i, j, idx, shift;
+ uint8_t mask;
+ uint32_t reta;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (reta_size < FM10K_MAX_RSS_INDICES) {
+ PMD_INIT_LOG(ERR, "The size of the configured hash lookup "
+ "table (%d) is smaller than the number of entries the "
+ "hardware holds (%d)", reta_size, FM10K_MAX_RSS_INDICES);
+ return -EINVAL;
+ }
+
+ /*
+ * Read Redirection Table RETA[n], n=0..31. The redirection table
+ * holds 128 entries packed into 32 registers, 4 entries per register.
+ */
+ for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ BIT_MASK_PER_UINT32);
+ if (mask == 0)
+ continue;
+
+ reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
+ for (j = 0; j < CHARS_PER_UINT32; j++) {
+ if (mask & (0x1 << j))
+ reta_conf[idx].reta[shift + j] = ((reta >>
+ CHAR_BIT * j) & UINT8_MAX);
+ }
+ }
+
+ return 0;
+}
+
+static int
+fm10k_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *key = (uint32_t *)rss_conf->rss_key;
+ uint32_t mrqc;
+ uint64_t hf = rss_conf->rss_hf;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
+ FM10K_RSSRK_ENTRIES_PER_REG)
+ return -EINVAL;
+
+ if (hf == 0)
+ return -EINVAL;
+
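+ /* Translate the generic ETH_RSS_* hash flags into fm10k MRQC bits.
+ * The hardware does not distinguish native from extended IPv6
+ * headers, so both map onto the same IPv6 MRQC bits.
+ */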
+ mrqc = 0;
+ mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
+ mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
+ mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
+
+ /* If none of the requested hash types is supported, return */
+ if (mrqc == 0)
+ return -EINVAL;
+
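+ /* Program the RSS key one 32-bit register at a time, then latch the
+ * hash-field selection into MRQC.
+ */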
+ if (key != NULL)
+ for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
+ FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
+
+ FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
+
+ return 0;
+}
+
+static int
+fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *key = (uint32_t *)rss_conf->rss_key;
+ uint32_t mrqc;
+ uint64_t hf;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
+ FM10K_RSSRK_ENTRIES_PER_REG)
+ return -EINVAL;
+
+ if (key != NULL)
+ for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
+ key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
+
+ mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
+ hf = 0;
+ hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
+ hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
+ hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
+ hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
+ hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
+ hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
+ hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
+ hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
+ hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
+
+ rss_conf->rss_hf = hf;
+
+ return 0;
+}
+
+static void
+fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
+
+ /* Bind all local non-queue interrupts to vector 0 */
+ int_map |= 0;
+
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
+
+ /* Enable misc causes */
+ FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
+ FM10K_EIMR_ENABLE(THI_FAULT) |
+ FM10K_EIMR_ENABLE(FUM_FAULT) |
+ FM10K_EIMR_ENABLE(MAILBOX) |
+ FM10K_EIMR_ENABLE(SWITCHREADY) |
+ FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
+ FM10K_EIMR_ENABLE(SRAMERROR) |
+ FM10K_EIMR_ENABLE(VFLR));
+
+ /* Enable ITR 0 */
+ FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ FM10K_WRITE_FLUSH(hw);
+}
+
+static void
+fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t int_map = FM10K_INT_MAP_DISABLE;
+
+ int_map |= 0;
+
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
+ FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
+
+ /* Disable misc causes */
+ FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
+ FM10K_EIMR_DISABLE(THI_FAULT) |
+ FM10K_EIMR_DISABLE(FUM_FAULT) |
+ FM10K_EIMR_DISABLE(MAILBOX) |
+ FM10K_EIMR_DISABLE(SWITCHREADY) |
+ FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
+ FM10K_EIMR_DISABLE(SRAMERROR) |
+ FM10K_EIMR_DISABLE(VFLR));
+
+ /* Disable ITR 0 */
+ FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
+ FM10K_WRITE_FLUSH(hw);
+}
+
+static void
+fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
+
+ /* Bind all local non-queue interrupts to vector 0 */
+ int_map |= 0;
+
+ /* Only INT 0 is available; the other 15 are reserved. */
+ FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
+
+ /* Enable ITR 0 */
+ FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ FM10K_WRITE_FLUSH(hw);
+}
+
+static void
+fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t int_map = FM10K_INT_MAP_DISABLE;
+
+ int_map |= 0;
+
+ /* Only INT 0 is available; the other 15 are reserved. */
+ FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
+
+ /* Disable ITR 0 */
+ FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
+ FM10K_WRITE_FLUSH(hw);
+}
+
+static int
+fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
+{
+ struct fm10k_fault fault;
+ int err;
+ const char *estr = "Unknown error";
+
+ /* Process PCA fault */
+ if (eicr & FM10K_EICR_PCA_FAULT) {
+ err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
+ if (err)
+ goto error;
+ switch (fault.type) {
+ case PCA_NO_FAULT:
+ estr = "PCA_NO_FAULT"; break;
+ case PCA_UNMAPPED_ADDR:
+ estr = "PCA_UNMAPPED_ADDR"; break;
+ case PCA_BAD_QACCESS_PF:
+ estr = "PCA_BAD_QACCESS_PF"; break;
+ case PCA_BAD_QACCESS_VF:
+ estr = "PCA_BAD_QACCESS_VF"; break;
+ case PCA_MALICIOUS_REQ:
+ estr = "PCA_MALICIOUS_REQ"; break;
+ case PCA_POISONED_TLP:
+ estr = "PCA_POISONED_TLP"; break;
+ case PCA_TLP_ABORT:
+ estr = "PCA_TLP_ABORT"; break;
+ default:
+ goto error;
+ }
+ PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
+ estr, fault.func ? "VF" : "PF", fault.func,
+ fault.address, fault.specinfo);
+ }
+
+ /* Process THI fault */
+ if (eicr & FM10K_EICR_THI_FAULT) {
+ err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
+ if (err)
+ goto error;
+ switch (fault.type) {
+ case THI_NO_FAULT:
+ estr = "THI_NO_FAULT"; break;
+ case THI_MAL_DIS_Q_FAULT:
+ estr = "THI_MAL_DIS_Q_FAULT"; break;
+ default:
+ goto error;
+ }
+ PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
+ estr, fault.func ? "VF" : "PF", fault.func,
+ fault.address, fault.specinfo);
+ }
+
+ /* Process FUM fault */
+ if (eicr & FM10K_EICR_FUM_FAULT) {
+ err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
+ if (err)
+ goto error;
+ switch (fault.type) {
+ case FUM_NO_FAULT:
+ estr = "FUM_NO_FAULT"; break;
+ case FUM_UNMAPPED_ADDR:
+ estr = "FUM_UNMAPPED_ADDR"; break;
+ case FUM_POISONED_TLP:
+ estr = "FUM_POISONED_TLP"; break;
+ case FUM_BAD_VF_QACCESS:
+ estr = "FUM_BAD_VF_QACCESS"; break;
+ case FUM_ADD_DECODE_ERR:
+ estr = "FUM_ADD_DECODE_ERR"; break;
+ case FUM_RO_ERROR:
+ estr = "FUM_RO_ERROR"; break;
+ case FUM_QPRC_CRC_ERROR:
+ estr = "FUM_QPRC_CRC_ERROR"; break;
+ case FUM_CSR_TIMEOUT:
+ estr = "FUM_CSR_TIMEOUT"; break;
+ case FUM_INVALID_TYPE:
+ estr = "FUM_INVALID_TYPE"; break;
+ case FUM_INVALID_LENGTH:
+ estr = "FUM_INVALID_LENGTH"; break;
+ case FUM_INVALID_BE:
+ estr = "FUM_INVALID_BE"; break;
+ case FUM_INVALID_ALIGN:
+ estr = "FUM_INVALID_ALIGN"; break;
+ default:
+ goto error;
+ }
+ PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
+ estr, fault.func ? "VF" : "PF", fault.func,
+ fault.address, fault.specinfo);
+ }
+
+ return 0;
+error:
+ PMD_INIT_LOG(ERR, "Failed to handle fault event.");
+ return err;
+}
+
+/**
+ * PF interrupt handler triggered by NIC for handling specific interrupt.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ *  The address of the parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+fm10k_dev_interrupt_handler_pf(
+ __rte_unused struct rte_intr_handle *handle,
+ void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t cause, status;
+
+ if (hw->mac.type != fm10k_mac_pf)
+ return;
+
+ cause = FM10K_READ_REG(hw, FM10K_EICR);
+
+ /* Handle PCI fault cases */
+ if (cause & FM10K_EICR_FAULT_MASK) {
+ PMD_INIT_LOG(ERR, "INT: find fault!");
+ fm10k_dev_handle_fault(hw, cause);
+ }
+
+ /* Handle switch up/down */
+ if (cause & FM10K_EICR_SWITCHNOTREADY)
+ PMD_INIT_LOG(ERR, "INT: Switch is not ready");
+
+ if (cause & FM10K_EICR_SWITCHREADY)
+ PMD_INIT_LOG(INFO, "INT: Switch is ready");
+
+ /* Handle mailbox message */
+ fm10k_mbx_lock(hw);
+ hw->mbx.ops.process(hw, &hw->mbx);
+ fm10k_mbx_unlock(hw);
+
+ /* Handle SRAM error */
+ if (cause & FM10K_EICR_SRAMERROR) {
+ PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
+
+ status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
+ /* Write to clear pending bits */
+ FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
+
+ /* Todo: print out error message after shared code updates */
+ }
+
+ /* Clear these 3 events if any are pending */
+ cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
+ FM10K_EICR_SWITCHREADY;
+ if (cause)
+ FM10K_WRITE_REG(hw, FM10K_EICR, cause);
+
+ /* Re-enable interrupt from device side */
+ FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ /* Re-enable interrupt from host side */
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
+}
+
+/**
+ * VF interrupt handler triggered by NIC for handling specific interrupt.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ *  The address of the parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+fm10k_dev_interrupt_handler_vf(
+ __rte_unused struct rte_intr_handle *handle,
+ void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != fm10k_mac_vf)
+ return;
+
+ /* Process mailbox messages while holding the mailbox lock */
+ fm10k_mbx_lock(hw);
+ hw->mbx.ops.process(hw, &hw->mbx);
+ fm10k_mbx_unlock(hw);
+
+ /* Re-enable interrupt from device side */
+ FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ /* Re-enable interrupt from host side */
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
+}
+
+/* Mailbox message handler in VF */
+static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
+ FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
+ FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
+ FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
+ FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
+};
+
+/* Mailbox message handler in PF */
+static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
+ FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
+ FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
+ FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
+ FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
+ FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
+ FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
+ FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
+};
+
+static int
+fm10k_setup_mbx_service(struct fm10k_hw *hw)
+{
+ int err;
+
+ /* Initialize mailbox lock */
+ fm10k_mbx_initlock(hw);
+
+ /* Replace default message handler with new ones */
+ if (hw->mac.type == fm10k_mac_pf)
+ err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
+ else
+ err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
+
+ if (err) {
+ PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d",
+ err);
+ return err;
+ }
+ /* Connect to SM for PF device or PF for VF device */
+ return hw->mbx.ops.connect(hw, &hw->mbx);
+}
+
+static void
+fm10k_close_mbx_service(struct fm10k_hw *hw)
+{
+ /* Disconnect from SM for PF device or PF for VF device */
+ hw->mbx.ops.disconnect(hw, &hw->mbx);
+}
+
+static const struct eth_dev_ops fm10k_eth_dev_ops = {
+ .dev_configure = fm10k_dev_configure,
+ .dev_start = fm10k_dev_start,
+ .dev_stop = fm10k_dev_stop,
+ .dev_close = fm10k_dev_close,
+ .promiscuous_enable = fm10k_dev_promiscuous_enable,
+ .promiscuous_disable = fm10k_dev_promiscuous_disable,
+ .allmulticast_enable = fm10k_dev_allmulticast_enable,
+ .allmulticast_disable = fm10k_dev_allmulticast_disable,
+ .stats_get = fm10k_stats_get,
+ .xstats_get = fm10k_xstats_get,
+ .stats_reset = fm10k_stats_reset,
+ .xstats_reset = fm10k_stats_reset,
+ .link_update = fm10k_link_update,
+ .dev_infos_get = fm10k_dev_infos_get,
+ .vlan_filter_set = fm10k_vlan_filter_set,
+ .vlan_offload_set = fm10k_vlan_offload_set,
+ .mac_addr_add = fm10k_macaddr_add,
+ .mac_addr_remove = fm10k_macaddr_remove,
+ .rx_queue_start = fm10k_dev_rx_queue_start,
+ .rx_queue_stop = fm10k_dev_rx_queue_stop,
+ .tx_queue_start = fm10k_dev_tx_queue_start,
+ .tx_queue_stop = fm10k_dev_tx_queue_stop,
+ .rx_queue_setup = fm10k_rx_queue_setup,
+ .rx_queue_release = fm10k_rx_queue_release,
+ .tx_queue_setup = fm10k_tx_queue_setup,
+ .tx_queue_release = fm10k_tx_queue_release,
+ .reta_update = fm10k_reta_update,
+ .reta_query = fm10k_reta_query,
+ .rss_hash_update = fm10k_rss_hash_update,
+ .rss_hash_conf_get = fm10k_rss_hash_conf_get,
+};
+
+static void __attribute__((cold))
+fm10k_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct fm10k_tx_queue *txq;
+ int i;
+ int use_sse = 1;
+
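+ /* Vector Tx is all-or-nothing: if any single queue fails the vector
+ * condition check, every queue falls back to the scalar Tx path.
+ */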
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ /* Check if Vector Tx is satisfied */
+ if (fm10k_tx_vec_condition_check(txq)) {
+ use_sse = 0;
+ break;
+ }
+ }
+
+ if (use_sse) {
+ PMD_INIT_LOG(DEBUG, "Use vector Tx func");
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ fm10k_txq_vec_setup(txq);
+ }
+ dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
+ } else {
+ dev->tx_pkt_burst = fm10k_xmit_pkts;
+ PMD_INIT_LOG(DEBUG, "Use regular Tx func");
+ }
+}
+
+static void __attribute__((cold))
+fm10k_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+ uint16_t i, rx_using_sse;
+
+ /* In order to allow Vector Rx there are a few configuration
+ * conditions to be met.
+ */
+ if (!fm10k_rx_vec_condition_check(dev) && dev_info->rx_vec_allowed) {
+ if (dev->data->scattered_rx)
+ dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
+ else
+ dev->rx_pkt_burst = fm10k_recv_pkts_vec;
+ } else if (dev->data->scattered_rx)
+ dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
+ else
+ dev->rx_pkt_burst = fm10k_recv_pkts;
+
+ rx_using_sse =
+ (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == fm10k_recv_pkts_vec);
+
+ if (rx_using_sse)
+ PMD_INIT_LOG(DEBUG, "Use vector Rx func");
+ else
+ PMD_INIT_LOG(DEBUG, "Use regular Rx func");
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
+
+ rxq->rx_using_sse = rx_using_sse;
+ }
+}
+
+static void
+fm10k_params_init(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+
+ /* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
+ * there is no way to get link status without reading BAR4. Until this
+ * works, assume we have maximum bandwidth.
+ * @todo - fix bus info
+ */
+ hw->bus_caps.speed = fm10k_bus_speed_8000;
+ hw->bus_caps.width = fm10k_bus_width_pcie_x8;
+ hw->bus_caps.payload = fm10k_bus_payload_512;
+ hw->bus.speed = fm10k_bus_speed_8000;
+ hw->bus.width = fm10k_bus_width_pcie_x8;
+ hw->bus.payload = fm10k_bus_payload_256;
+
+ info->rx_vec_allowed = true;
+}
+
+static int
+eth_fm10k_dev_init(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int diag;
+ struct fm10k_macvlan_filter_info *macvlan;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev->dev_ops = &fm10k_eth_dev_ops;
+ dev->rx_pkt_burst = &fm10k_recv_pkts;
+ dev->tx_pkt_burst = &fm10k_xmit_pkts;
+
+ /* only initialize in the primary process */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ rte_eth_copy_pci_info(dev, dev->pci_dev);
+
+ macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
+ memset(macvlan, 0, sizeof(*macvlan));
+ /* Vendor and Device ID need to be set before init of shared code */
+ memset(hw, 0, sizeof(*hw));
+ hw->device_id = dev->pci_dev->id.device_id;
+ hw->vendor_id = dev->pci_dev->id.vendor_id;
+ hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
+ hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
+ hw->revision_id = 0;
+ hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
+ if (hw->hw_addr == NULL) {
+ PMD_INIT_LOG(ERR, "Bad mem resource."
+ " Try to blacklist unused devices.");
+ return -EIO;
+ }
+
+ /* Store fm10k_adapter pointer */
+ hw->back = dev->data->dev_private;
+
+ /* Initialize the shared code */
+ diag = fm10k_init_shared_code(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
+ return -EIO;
+ }
+
+ /* Initialize parameters */
+ fm10k_params_init(dev);
+
+ /* Initialize the hw */
+ diag = fm10k_init_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
+ return -EIO;
+ }
+
+ /* Initialize MAC address(es) */
+ dev->data->mac_addrs = rte_zmalloc("fm10k",
+ ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
+ if (dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
+ return -ENOMEM;
+ }
+
+ diag = fm10k_read_mac_addr(hw);
+
+ ether_addr_copy((const struct ether_addr *)hw->mac.addr,
+ &dev->data->mac_addrs[0]);
+
+ if (diag != FM10K_SUCCESS ||
+ !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
+
+ /* Generate a random addr */
+ eth_random_addr(hw->mac.addr);
+ memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
+ ether_addr_copy((const struct ether_addr *)hw->mac.addr,
+ &dev->data->mac_addrs[0]);
+ }
+
+ /* Reset the hw statistics */
+ fm10k_stats_reset(dev);
+
+ /* Reset the hw */
+ diag = fm10k_reset_hw(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
+ return -EIO;
+ }
+
+ /* Setup mailbox service */
+ diag = fm10k_setup_mbx_service(hw);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
+ return -EIO;
+ }
+
+ /* PF and VF have different interrupt handling mechanisms */
+ if (hw->mac.type == fm10k_mac_pf) {
+ /* register callback func to eal lib */
+ rte_intr_callback_register(&(dev->pci_dev->intr_handle),
+ fm10k_dev_interrupt_handler_pf, (void *)dev);
+
+ /* enable MISC interrupt */
+ fm10k_dev_enable_intr_pf(dev);
+ } else { /* VF */
+ rte_intr_callback_register(&(dev->pci_dev->intr_handle),
+ fm10k_dev_interrupt_handler_vf, (void *)dev);
+
+ fm10k_dev_enable_intr_vf(dev);
+ }
+
+ /* Enable uio intr after callback registered */
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
+
+ hw->mac.ops.update_int_moderator(hw);
+
+ /* Make sure Switch Manager is ready before going forward. */
+ if (hw->mac.type == fm10k_mac_pf) {
+ int switch_ready = 0;
+ int i;
+
+ for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
+ fm10k_mbx_lock(hw);
+ hw->mac.ops.get_host_state(hw, &switch_ready);
+ fm10k_mbx_unlock(hw);
+ if (switch_ready)
+ break;
+ /* Delay some time to acquire async LPORT_MAP info. */
+ rte_delay_us(WAIT_SWITCH_MSG_US);
+ }
+
+ if (switch_ready == 0) {
+ PMD_INIT_LOG(ERR, "switch is not ready");
+ return -1;
+ }
+ }
+
+ /*
+ * The calls below operate on the mailbox, so acquire the lock to
+ * avoid racing with the interrupt handler. Writing to the mailbox
+ * FIFO raises an interrupt to the PF/SM, whose handler processes it
+ * and raises an interrupt back to our side, which touches the
+ * mailbox FIFO again.
+ */
+ fm10k_mbx_lock(hw);
+ /* Enable port first */
+ hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1);
+
+ /* Set unicast mode by default. The application can switch to another
+ * mode through other API functions.
+ */
+ hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ FM10K_XCAST_MODE_NONE);
+
+ fm10k_mbx_unlock(hw);
+
+ /* Add default mac address */
+ fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+ MAIN_VSI_POOL_NUMBER);
+
+ return 0;
+}
+
+static int
+eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* only uninitialize in the primary process */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* safe to close dev here */
+ fm10k_dev_close(dev);
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ /* disable uio/vfio intr */
+ rte_intr_disable(&(dev->pci_dev->intr_handle));
+
+ /* PF and VF have different interrupt handling mechanisms */
+ if (hw->mac.type == fm10k_mac_pf) {
+ /* disable interrupt */
+ fm10k_dev_disable_intr_pf(dev);
+
+ /* unregister callback func to eal lib */
+ rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
+ fm10k_dev_interrupt_handler_pf, (void *)dev);
+ } else {
+ /* disable interrupt */
+ fm10k_dev_disable_intr_vf(dev);
+
+ rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
+ fm10k_dev_interrupt_handler_vf, (void *)dev);
+ }
+
+ /* free mac memory */
+ if (dev->data->mac_addrs) {
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+ }
+
+ memset(hw, 0, sizeof(*hw));
+
+ return 0;
+}
+
+/*
+ * The set of PCI devices this driver supports. This driver will enable both PF
+ * and SRIOV-VF devices.
+ */
+static const struct rte_pci_id pci_id_fm10k_map[] = {
+#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
+#define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
+#include "rte_pci_dev_ids.h"
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static struct eth_driver rte_pmd_fm10k = {
+ .pci_drv = {
+ .name = "rte_pmd_fm10k",
+ .id_table = pci_id_fm10k_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+ },
+ .eth_dev_init = eth_fm10k_dev_init,
+ .eth_dev_uninit = eth_fm10k_dev_uninit,
+ .dev_private_size = sizeof(struct fm10k_adapter),
+};
+
+/*
+ * Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
+ */
+static int
+rte_pmd_fm10k_init(__rte_unused const char *name,
+ __rte_unused const char *params)
+{
+ PMD_INIT_FUNC_TRACE();
+ rte_eth_driver_register(&rte_pmd_fm10k);
+ return 0;
+}
+
+static struct rte_driver rte_fm10k_driver = {
+ .type = PMD_PDEV,
+ .init = rte_pmd_fm10k_init,
+};
+
+PMD_REGISTER_DRIVER(rte_fm10k_driver);
diff --git a/src/dpdk_lib18/librte_power/channel_commands.h b/src/dpdk22/drivers/net/fm10k/fm10k_logs.h
index 7e78a8b3..31384c9e 100755..100644
--- a/src/dpdk_lib18/librte_power/channel_commands.h
+++ b/src/dpdk22/drivers/net/fm10k/fm10k_logs.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,47 +31,49 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CHANNEL_COMMANDS_H_
-#define CHANNEL_COMMANDS_H_
+#ifndef _FM10K_LOGS_H_
+#define _FM10K_LOGS_H_
-#ifdef __cplusplus
-extern "C" {
-#endif
+#include <rte_log.h>
-#include <stdint.h>
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
-/* Maximum number of CPUs */
-#define CHANNEL_CMDS_MAX_CPUS 64
-#if CHANNEL_CMDS_MAX_CPUS > 64
-#error Maximum number of cores is 64, overflow is guaranteed to \
- cause problems with VM Power Management
+#ifdef RTE_LIBRTE_FM10K_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
#endif
-/* Maximum number of channels per VM */
-#define CHANNEL_CMDS_MAX_VM_CHANNELS 64
-
-/* Maximum number of channels per VM */
-#define CHANNEL_CMDS_MAX_VM_CHANNELS 64
-
-/* Valid Commands */
-#define CPU_POWER 1
-#define CPU_POWER_CONNECT 2
-
-/* CPU Power Command Scaling */
-#define CPU_POWER_SCALE_UP 1
-#define CPU_POWER_SCALE_DOWN 2
-#define CPU_POWER_SCALE_MAX 3
-#define CPU_POWER_SCALE_MIN 4
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
-struct channel_packet {
- uint64_t resource_id; /**< core_num, device */
- uint32_t unit; /**< scale down/up/min/max */
- uint32_t command; /**< Power, IO, etc */
-};
+#ifdef RTE_LIBRTE_FM10K_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+#ifdef RTE_LIBRTE_FM10K_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+#endif
-#ifdef __cplusplus
-}
+#ifdef RTE_LIBRTE_FM10K_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
#endif
-#endif /* CHANNEL_COMMANDS_H_ */
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _FM10K_LOGS_H_ */
diff --git a/src/dpdk22/drivers/net/fm10k/fm10k_rxtx.c b/src/dpdk22/drivers/net/fm10k/fm10k_rxtx.c
new file mode 100644
index 00000000..e9588659
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/fm10k_rxtx.c
@@ -0,0 +1,517 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+#include "fm10k.h"
+#include "base/fm10k_type.h"
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while (0)
+#endif
+
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+static inline void dump_rxd(union fm10k_rx_desc *rxd)
+{
+ PMD_RX_LOG(DEBUG, "+----------------|----------------+");
+ PMD_RX_LOG(DEBUG, "| GLORT | PKT HDR & TYPE |");
+ PMD_RX_LOG(DEBUG, "| 0x%08x | 0x%08x |", rxd->d.glort,
+ rxd->d.data);
+ PMD_RX_LOG(DEBUG, "+----------------|----------------+");
+ PMD_RX_LOG(DEBUG, "| VLAN & LEN | STATUS |");
+ PMD_RX_LOG(DEBUG, "| 0x%08x | 0x%08x |", rxd->d.vlan_len,
+ rxd->d.staterr);
+ PMD_RX_LOG(DEBUG, "+----------------|----------------+");
+ PMD_RX_LOG(DEBUG, "| RESERVED | RSS_HASH |");
+ PMD_RX_LOG(DEBUG, "| 0x%08x | 0x%08x |", 0, rxd->d.rss);
+ PMD_RX_LOG(DEBUG, "+----------------|----------------+");
+ PMD_RX_LOG(DEBUG, "| TIME TAG |");
+ PMD_RX_LOG(DEBUG, "| 0x%016"PRIx64" |", rxd->q.timestamp);
+ PMD_RX_LOG(DEBUG, "+----------------|----------------+");
+}
+#endif
+
+static inline void
+rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
+{
+ static const uint32_t
+ ptype_table[FM10K_RXD_PKTTYPE_MASK >> FM10K_RXD_PKTTYPE_SHIFT]
+ __rte_cache_aligned = {
+ [FM10K_PKTTYPE_OTHER] = RTE_PTYPE_L2_ETHER,
+ [FM10K_PKTTYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
+ [FM10K_PKTTYPE_IPV4_EX] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT,
+ [FM10K_PKTTYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
+ [FM10K_PKTTYPE_IPV6_EX] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT,
+ [FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ };
+
+ m->packet_type = ptype_table[(d->w.pkt_info & FM10K_RXD_PKTTYPE_MASK)
+ >> FM10K_RXD_PKTTYPE_SHIFT];
+
+ if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
+ m->ol_flags |= PKT_RX_RSS_HASH;
+
+ if (unlikely((d->d.staterr &
+ (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
+ (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
+ m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if (unlikely((d->d.staterr &
+ (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
+ (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
+ m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+
+ if (unlikely(d->d.staterr & FM10K_RXD_STATUS_HBO))
+ m->ol_flags |= PKT_RX_HBUF_OVERFLOW;
+
+ if (unlikely(d->d.staterr & FM10K_RXD_STATUS_RXE))
+ m->ol_flags |= PKT_RX_RECIP_ERR;
+}
+
+uint16_t
+fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf *mbuf;
+ union fm10k_rx_desc desc;
+ struct fm10k_rx_queue *q = rx_queue;
+ uint16_t count = 0;
+ int alloc = 0;
+ uint16_t next_dd;
+ int ret;
+
+ next_dd = q->next_dd;
+
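+ /* Never return more packets per call than one bulk mempool
+ * allocation can replenish, so the software ring is always refilled
+ * in fixed batches of alloc_thresh mbufs.
+ */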
+ nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh);
+ for (count = 0; count < nb_pkts; ++count) {
+ mbuf = q->sw_ring[next_dd];
+ desc = q->hw_ring[next_dd];
+ if (!(desc.d.staterr & FM10K_RXD_STATUS_DD))
+ break;
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+ dump_rxd(&desc);
+#endif
+ rte_pktmbuf_pkt_len(mbuf) = desc.w.length;
+ rte_pktmbuf_data_len(mbuf) = desc.w.length;
+
+ mbuf->ol_flags = 0;
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+ rx_desc_to_ol_flags(mbuf, &desc);
+#endif
+
+ mbuf->hash.rss = desc.d.rss;
+ /**
+ * Packets from an fm10k device always carry at least one VLAN tag;
+ * packets arriving without one get the port's default VLAN tag.
+ * The PKT_RX_VLAN_PKT flag is therefore always set, and vlan_tci
+ * is valid for every Rx packet's mbuf.
+ */
+ mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+ mbuf->vlan_tci = desc.w.vlan;
+
+ rx_pkts[count] = mbuf;
+ if (++next_dd == q->nb_desc) {
+ next_dd = 0;
+ alloc = 1;
+ }
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_prefetch0(q->sw_ring[next_dd]);
+
+ /*
+ * When the next RX descriptor sits on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((next_dd & 0x3) == 0) {
+ rte_prefetch0(&q->hw_ring[next_dd]);
+ rte_prefetch0(&q->sw_ring[next_dd]);
+ }
+ }
+
+ q->next_dd = next_dd;
+
+ if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
+ ret = rte_mempool_get_bulk(q->mp,
+ (void **)&q->sw_ring[q->next_alloc],
+ q->alloc_thresh);
+
+ if (unlikely(ret != 0)) {
+ uint8_t port = q->port_id;
+ PMD_RX_LOG(ERR, "Failed to alloc mbuf");
+ /*
+ * Need to restore next_dd if we cannot allocate new
+ * buffers to replenish the old ones.
+ */
+ q->next_dd = (q->next_dd + q->nb_desc - count) %
+ q->nb_desc;
+ rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
+ return 0;
+ }
+
+ for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
+ mbuf = q->sw_ring[q->next_alloc];
+
+ /* setup static mbuf fields */
+ fm10k_pktmbuf_reset(mbuf, q->port_id);
+
+ /* write descriptor */
+ desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+ desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+ q->hw_ring[q->next_alloc] = desc;
+ }
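+ /* Hand the refilled descriptors to the HW by bumping the tail
+ * pointer, then advance the trigger by one allocation batch,
+ * wrapping to the first batch boundary at the end of the ring.
+ */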
+ FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
+ q->next_trigger += q->alloc_thresh;
+ if (q->next_trigger >= q->nb_desc) {
+ q->next_trigger = q->alloc_thresh - 1;
+ q->next_alloc = 0;
+ }
+ }
+
+ return count;
+}
+
+uint16_t
+fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf *mbuf;
+ union fm10k_rx_desc desc;
+ struct fm10k_rx_queue *q = rx_queue;
+ uint16_t count = 0;
+ uint16_t nb_rcv, nb_seg;
+ int alloc = 0;
+ uint16_t next_dd;
+ struct rte_mbuf *first_seg = q->pkt_first_seg;
+ struct rte_mbuf *last_seg = q->pkt_last_seg;
+ int ret;
+
+ next_dd = q->next_dd;
+ nb_rcv = 0;
+
+ nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh);
+ for (count = 0; count < nb_seg; count++) {
+ mbuf = q->sw_ring[next_dd];
+ desc = q->hw_ring[next_dd];
+ if (!(desc.d.staterr & FM10K_RXD_STATUS_DD))
+ break;
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+ dump_rxd(&desc);
+#endif
+
+ if (++next_dd == q->nb_desc) {
+ next_dd = 0;
+ alloc = 1;
+ }
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_prefetch0(q->sw_ring[next_dd]);
+
+ /*
+ * When the next RX descriptor sits on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((next_dd & 0x3) == 0) {
+ rte_prefetch0(&q->hw_ring[next_dd]);
+ rte_prefetch0(&q->sw_ring[next_dd]);
+ }
+
+ /* Fill data length */
+ rte_pktmbuf_data_len(mbuf) = desc.w.length;
+
+ /*
+ * If this is the first buffer of the received packet,
+ * set the pointer to the first mbuf of the packet and
+ * initialize its context.
+ * Otherwise, update the total length and the number of segments
+ * of the current scattered packet, and update the pointer to
+ * the last mbuf of the current packet.
+ */
+ if (!first_seg) {
+ first_seg = mbuf;
+ first_seg->pkt_len = desc.w.length;
+ } else {
+ first_seg->pkt_len =
+ (uint16_t)(first_seg->pkt_len +
+ rte_pktmbuf_data_len(mbuf));
+ first_seg->nb_segs++;
+ last_seg->next = mbuf;
+ }
+
+ /*
+ * If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (!(desc.d.staterr & FM10K_RXD_STATUS_EOP)) {
+ last_seg = mbuf;
+ continue;
+ }
+
+ first_seg->ol_flags = 0;
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+ rx_desc_to_ol_flags(first_seg, &desc);
+#endif
+ first_seg->hash.rss = desc.d.rss;
+ /**
+ * Packets from an fm10k device always carry at least one VLAN tag;
+ * packets arriving without one get the port's default VLAN tag.
+ * The PKT_RX_VLAN_PKT flag is therefore always set, and vlan_tci
+ * is valid for every Rx packet's mbuf.
+ */
+ first_seg->ol_flags |= PKT_RX_VLAN_PKT;
+ first_seg->vlan_tci = desc.w.vlan;
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rcv++] = first_seg;
+
+ /*
+ * Setup receipt context for a new packet.
+ */
+ first_seg = NULL;
+ }
+
+ q->next_dd = next_dd;
+
+ if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
+ ret = rte_mempool_get_bulk(q->mp,
+ (void **)&q->sw_ring[q->next_alloc],
+ q->alloc_thresh);
+
+ if (unlikely(ret != 0)) {
+ uint8_t port = q->port_id;
+ PMD_RX_LOG(ERR, "Failed to alloc mbuf");
+ /*
+ * Need to restore next_dd if we cannot allocate new
+ * buffers to replenish the old ones.
+ */
+ q->next_dd = (q->next_dd + q->nb_desc - count) %
+ q->nb_desc;
+ rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
+ return 0;
+ }
+
+ for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
+ mbuf = q->sw_ring[q->next_alloc];
+
+ /* setup static mbuf fields */
+ fm10k_pktmbuf_reset(mbuf, q->port_id);
+
+ /* write descriptor */
+ desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+ desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+ q->hw_ring[q->next_alloc] = desc;
+ }
+ FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
+ q->next_trigger += q->alloc_thresh;
+ if (q->next_trigger >= q->nb_desc) {
+ q->next_trigger = q->alloc_thresh - 1;
+ q->next_alloc = 0;
+ }
+ }
+
+ q->pkt_first_seg = first_seg;
+ q->pkt_last_seg = last_seg;
+
+ return nb_rcv;
+}
+
+static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
+{
+ uint16_t next_rs, count = 0;
+
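+ /* Peek at the oldest descriptor index that was sent with the RS bit
+ * set; the HW marks it DONE only after all descriptors up to it have
+ * completed, so everything up to next_rs can then be freed.
+ */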
+ next_rs = fifo_peek(&q->rs_tracker);
+ if (!(q->hw_ring[next_rs].flags & FM10K_TXD_FLAG_DONE))
+ return;
+
+ /* the DONE flag is set on this descriptor so remove the ID
+ * from the RS bit tracker and free the buffers */
+ fifo_remove(&q->rs_tracker);
+
+ /* wrap around? if so, free buffers from last_free up to but NOT
+ * including nb_desc */
+ if (q->last_free > next_rs) {
+ count = q->nb_desc - q->last_free;
+ while (q->last_free < q->nb_desc) {
+ rte_pktmbuf_free_seg(q->sw_ring[q->last_free]);
+ q->sw_ring[q->last_free] = NULL;
+ ++q->last_free;
+ }
+ q->last_free = 0;
+ }
+
+ /* adjust free descriptor count before the next loop */
+ q->nb_free += count + (next_rs + 1 - q->last_free);
+
+ /* free buffers from last_free, up to and including next_rs */
+ while (q->last_free <= next_rs) {
+ rte_pktmbuf_free_seg(q->sw_ring[q->last_free]);
+ q->sw_ring[q->last_free] = NULL;
+ ++q->last_free;
+ }
+
+ if (q->last_free == q->nb_desc)
+ q->last_free = 0;
+}
+
+static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
+{
+ uint16_t last_id;
+ uint8_t flags, hdrlen;
+
+ /* always set the LAST flag on the last descriptor used to
+ * transmit the packet */
+ flags = FM10K_TXD_FLAG_LAST;
+ last_id = q->next_free + mb->nb_segs - 1;
+ if (last_id >= q->nb_desc)
+ last_id = last_id - q->nb_desc;
+
+ /* but only set the RS flag on the last descriptor if rs_thresh
+ * descriptors will be used since the RS flag was last set */
+ if ((q->nb_used + mb->nb_segs) >= q->rs_thresh) {
+ flags |= FM10K_TXD_FLAG_RS;
+ fifo_insert(&q->rs_tracker, last_id);
+ q->nb_used = 0;
+ } else {
+ q->nb_used = q->nb_used + mb->nb_segs;
+ }
+
+ q->nb_free -= mb->nb_segs;
+
+ q->hw_ring[q->next_free].flags = 0;
+ /* set checksum flags on first descriptor of packet. SCTP checksum
+ * offload is not supported, but we do not explicitly check for this
+ * case in favor of greatly simplified processing. */
+ if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
+ q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;
+
+ /* set vlan if requested */
+ if (mb->ol_flags & PKT_TX_VLAN_PKT)
+ q->hw_ring[q->next_free].vlan = mb->vlan_tci;
+
+ q->sw_ring[q->next_free] = mb;
+ q->hw_ring[q->next_free].buffer_addr =
+ rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
+ q->hw_ring[q->next_free].buflen =
+ rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
+
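+ /* TSO: program the per-packet MSS and total header length, but only
+ * when both fall inside the HW-supported window; otherwise the
+ * descriptor is left untouched and the packet goes out unsegmented.
+ */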
+ if (mb->ol_flags & PKT_TX_TCP_SEG) {
+ hdrlen = mb->outer_l2_len + mb->outer_l3_len + mb->l2_len +
+ mb->l3_len + mb->l4_len;
+ if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG)
+ hdrlen += sizeof(struct fm10k_ftag);
+
+ if (likely((hdrlen >= FM10K_TSO_MIN_HEADERLEN) &&
+ (hdrlen <= FM10K_TSO_MAX_HEADERLEN) &&
+ (mb->tso_segsz >= FM10K_TSO_MINMSS))) {
+ q->hw_ring[q->next_free].mss = mb->tso_segsz;
+ q->hw_ring[q->next_free].hdrlen = hdrlen;
+ }
+ }
+
+ if (++q->next_free == q->nb_desc)
+ q->next_free = 0;
+
+ /* fill up the rings */
+ for (mb = mb->next; mb != NULL; mb = mb->next) {
+ q->sw_ring[q->next_free] = mb;
+ q->hw_ring[q->next_free].buffer_addr =
+ rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
+ q->hw_ring[q->next_free].buflen =
+ rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
+ q->hw_ring[q->next_free].flags = 0;
+ if (++q->next_free == q->nb_desc)
+ q->next_free = 0;
+ }
+
+ q->hw_ring[last_id].flags |= flags;
+}
+
+uint16_t
+fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct fm10k_tx_queue *q = tx_queue;
+ struct rte_mbuf *mb;
+ uint16_t count;
+
+ for (count = 0; count < nb_pkts; ++count) {
+ mb = tx_pkts[count];
+
+ /* running low on descriptors? try to free some... */
+ if (q->nb_free < q->free_thresh)
+ tx_free_descriptors(q);
+
+ /* make sure there are enough free descriptors to transmit the
+ * entire packet before doing anything */
+ if (q->nb_free < mb->nb_segs)
+ break;
+
+ /* sanity check to make sure the mbuf is valid */
+ if ((mb->nb_segs == 0) ||
+ ((mb->nb_segs > 1) && (mb->next == NULL)))
+ break;
+
+ /* process the packet */
+ tx_xmit_pkt(q, mb);
+ }
+
+ /* update the tail pointer if any packets were processed */
+ if (likely(count > 0))
+ FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_free);
+
+ return count;
+}
diff --git a/src/dpdk22/drivers/net/fm10k/fm10k_rxtx_vec.c b/src/dpdk22/drivers/net/fm10k/fm10k_rxtx_vec.c
new file mode 100644
index 00000000..2a57eef8
--- /dev/null
+++ b/src/dpdk22/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -0,0 +1,829 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+
+#include <rte_ethdev.h>
+#include <rte_common.h>
+#include "fm10k.h"
+#include "base/fm10k_type.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static void
+fm10k_reset_tx_queue(struct fm10k_tx_queue *txq);
+
+/* Handling the offload flags (ol_flags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * its processing when it is not needed. This improves performance, at
+ * the cost of losing the offload info in the received packet.
+ */
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+
+/* Vlan present flag shift */
+#define VP_SHIFT (2)
+/* L3 type shift */
+#define L3TYPE_SHIFT (4)
+/* L4 type shift */
+#define L4TYPE_SHIFT (7)
+
+static inline void
+fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i ptype0, ptype1, vtag0, vtag1;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ const __m128i pkttype_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
+ PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);
+
+ /* mask everything except rss type */
+ const __m128i rsstype_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x000F, 0x000F, 0x000F, 0x000F);
+
+ /* map rss type to rss hash flag */
+ const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
+ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
+
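+ /* Interleave the four descriptors so the 16-bit words carrying the
+ * RSS type and the VLAN-present bit from all four packets end up in
+ * single XMM registers, ready for masking and shuffling.
+ */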
+ ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
+ ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
+ vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
+ vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);
+
+ ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
+ ptype0 = _mm_and_si128(ptype0, rsstype_msk);
+ ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);
+
+ vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
+ vtag1 = _mm_srli_epi16(vtag1, VP_SHIFT);
+ vtag1 = _mm_and_si128(vtag1, pkttype_msk);
+
+ vtag1 = _mm_or_si128(ptype0, vtag1);
+ vol.dword = _mm_cvtsi128_si64(vtag1);
+
+ rx_pkts[0]->ol_flags = vol.e[0];
+ rx_pkts[1]->ol_flags = vol.e[1];
+ rx_pkts[2]->ol_flags = vol.e[2];
+ rx_pkts[3]->ol_flags = vol.e[3];
+}
+
+static inline void
+fm10k_desc_to_pktype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i l3l4type0, l3l4type1, l3type, l4type;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ /* L3 pkt type mask Bit4 to Bit6 */
+ const __m128i l3type_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0070, 0x0070, 0x0070, 0x0070);
+
+ /* L4 pkt type mask Bit7 to Bit9 */
+ const __m128i l4type_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0380, 0x0380, 0x0380, 0x0380);
+
+ /* convert RRC l3 type to mbuf format */
+ const __m128i l3type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L3_IPV6, RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV4, 0);
+
+ /* Convert RRC l4 type to mbuf format. The l4type_flags values are
+ * stored pre-shifted right by 8 bits so each fits into a single
+ * byte; they are shifted back left after the shuffle.
+ */
+ const __m128i l4type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
+ RTE_PTYPE_TUNNEL_GENEVE >> 8,
+ RTE_PTYPE_TUNNEL_NVGRE >> 8,
+ RTE_PTYPE_TUNNEL_VXLAN >> 8,
+ RTE_PTYPE_TUNNEL_GRE >> 8,
+ RTE_PTYPE_L4_UDP >> 8,
+ RTE_PTYPE_L4_TCP >> 8,
+ 0);
+
+ l3l4type0 = _mm_unpacklo_epi16(descs[0], descs[1]);
+ l3l4type1 = _mm_unpacklo_epi16(descs[2], descs[3]);
+ l3l4type0 = _mm_unpacklo_epi32(l3l4type0, l3l4type1);
+
+ l3type = _mm_and_si128(l3l4type0, l3type_msk);
+ l4type = _mm_and_si128(l3l4type0, l4type_msk);
+
+ l3type = _mm_srli_epi16(l3type, L3TYPE_SHIFT);
+ l4type = _mm_srli_epi16(l4type, L4TYPE_SHIFT);
+
+ l3type = _mm_shuffle_epi8(l3type_flags, l3type);
+ l4type = _mm_shuffle_epi8(l4type_flags, l4type);
+
+ /* the table values were pre-shifted right by 8 bits; shift back */
+ l4type = _mm_slli_epi16(l4type, 8);
+ l3l4type0 = _mm_or_si128(l3type, l4type);
+ vol.dword = _mm_cvtsi128_si64(l3l4type0);
+
+ rx_pkts[0]->packet_type = vol.e[0];
+ rx_pkts[1]->packet_type = vol.e[1];
+ rx_pkts[2]->packet_type = vol.e[2];
+ rx_pkts[3]->packet_type = vol.e[3];
+}
+#else
+#define fm10k_desc_to_olflags_v(desc, rx_pkts) do {} while (0)
+#define fm10k_desc_to_pktype_v(desc, rx_pkts) do {} while (0)
+#endif
+
+int __attribute__((cold))
+fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+ /* without Rx ol_flags, the VP (VLAN present) flag cannot be reported */
+ if (rxmode->hw_vlan_extend != 0)
+ return -1;
+#endif
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ /* - no csum error report support
+ * - no header split support
+ */
+ if (rxmode->hw_ip_checksum == 1 ||
+ rxmode->header_split == 1)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}
+
+int __attribute__((cold))
+fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ /* data_off will be adjusted to 512-byte alignment after each new
+ * mbuf is allocated.
+ */
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
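+ /* Capture the template's rearm_data area as a single 64-bit word;
+ * the rearm path replays this word to reset each recycled mbuf with
+ * one store instead of several per-field writes.
+ */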
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
+
+static inline void
+fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union fm10k_rx_desc *rxdp;
+ struct rte_mbuf **mb_alloc = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ __m128i head_off = _mm_set_epi64x(
+ RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1,
+ RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1);
+ __m128i dma_addr0, dma_addr1;
+ /* Rx buffers need to be aligned on a 512-byte boundary */
+ const __m128i hba_msk = _mm_set_epi64x(0,
+ UINT64_MAX - FM10K_RX_DATABUF_ALIGN + 1);
+
+ rxdp = rxq->hw_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)mb_alloc,
+ RTE_FM10K_RXQ_REARM_THRESH) < 0) {
+ dma_addr0 = _mm_setzero_si128();
+ /* Clean up all the HW/SW ring content */
+ for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i++) {
+ mb_alloc[i] = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].q,
+ dma_addr0);
+ }
+
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_FM10K_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, processing 2 mbufs per iteration */
+ for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i += 2, mb_alloc += 2) {
+ __m128i vaddr0, vaddr1;
+ uintptr_t p0, p1;
+
+ mb0 = mb_alloc[0];
+ mb1 = mb_alloc[1];
+
+ /* Flush mbuf with the packet template.
+ * The data to be rearmed is only 6 bytes long, but RX will
+ * overwrite ol_flags (which comes next) anyway, so write the whole
+ * 8 bytes with one store: 6 bytes of rearm_data plus the first
+ * 2 bytes of ol_flags.
+ */
+ p0 = (uintptr_t)&mb0->rearm_data;
+ *(uint64_t *)p0 = rxq->mbuf_initializer;
+ p1 = (uintptr_t)&mb1->rearm_data;
+ *(uint64_t *)p1 = rxq->mbuf_initializer;
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, head_off);
+ dma_addr1 = _mm_add_epi64(dma_addr1, head_off);
+
+ /* Align to 512 bytes to satisfy the HW requirement and, at the
+ * same time, zero the Header Buffer Address.
+ */
+ dma_addr0 = _mm_and_si128(dma_addr0, hba_msk);
+ dma_addr1 = _mm_and_si128(dma_addr1, hba_msk);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->q, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->q, dma_addr1);
+
+ /* enforce 512B alignment on default Rx virtual addresses */
+ mb0->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb0->buf_addr
+ + RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN)
+ - (char *)mb0->buf_addr);
+ mb1->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb1->buf_addr
+ + RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN)
+ - (char *)mb1->buf_addr);
+ }
+
+ rxq->rxrearm_start += RTE_FM10K_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_FM10K_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ FM10K_PCI_REG_WRITE(rxq->tail_ptr, rx_id);
+}
+
+void __attribute__((cold))
+fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq)
+{
+ const unsigned mask = rxq->nb_desc - 1;
+ unsigned i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ for (i = rxq->next_dd; i != rxq->rxrearm_start; i = (i + 1) & mask)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ rxq->rxrearm_nb = rxq->nb_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_desc);
+}
+
+static inline uint16_t
+fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union fm10k_rx_desc *rxdp;
+ struct rte_mbuf **mbufp;
+ uint16_t nb_pkts_recd;
+ int pos;
+ struct fm10k_rx_queue *rxq = rx_queue;
+ uint64_t var;
+ __m128i shuf_msk;
+ __m128i dd_check, eop_check;
+ uint16_t next_dd;
+
+ next_dd = rxq->next_dd;
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->hw_ring + next_dd;
+
+ _mm_prefetch((const void *)rxdp, _MM_HINT_T0);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_FM10K_RXQ_REARM_THRESH)
+ fm10k_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->d.staterr & FM10K_RXD_STATUS_DD))
+ return 0;
+
+ /* Vector RX processes 4 packets at a time; strip the unaligned
+ * tail in case nb_pkts is not a multiple of 4.
+ */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_FM10K_DESCS_PER_LOOP);
+
+ /* 4 packets DD mask */
+ dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+ /* 4 packets EOP mask */
+ eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = _mm_set_epi8(
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 15, 14, /* octet 14~15, low 16 bits vlan_macip */
+ 13, 12, /* octet 12~13, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 13, 12, /* octet 12~13, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_type */
+ 0xFF, 0xFF /* Skip pkt_type field in shuffle operation */
+ );
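+ /* After the shuffle, bytes land in rx_descriptor_fields1 as:
+ * pkt_type zeroed, pkt_len (low 16 bits from the descriptor
+ * length, high 16 zeroed), data_len, vlan_tci, then the
+ * 32-bit RSS hash taken from descriptor octets 4~7.
+ */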
+
+ /* Point at the sw_ring entries matching the descriptors we
+ * are about to scan.
+ */
+ mbufp = &rxq->sw_ring[next_dd];
+
+ /* A. load 4 packets' descs in one loop
+ * [A*. mask out 4 unused dirty fields in desc]
+ * B. copy 4 mbuf pointers from sw_ring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_FM10K_DESCS_PER_LOOP,
+ rxdp += RTE_FM10K_DESCS_PER_LOOP) {
+ __m128i descs0[RTE_FM10K_DESCS_PER_LOOP];
+ __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+ __m128i mbp1, mbp2; /* two mbuf pointers in one XMM reg. */
+
+ /* B.1 load 2 mbuf pointers */
+ mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]);
+
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+
+ /* B.2 copy 2 mbuf pointers into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+ /* B.1 load 2 mbuf pointers */
+ mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
+
+ descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ /* A.1 load 2 pkts descs */
+ descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+ /* B.2 copy 2 mbuf pointers into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ if (split_packet) {
+ rte_prefetch0(&rx_pkts[pos]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+ }
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = _mm_shuffle_epi8(descs0[3], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs0[2], shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = _mm_unpackhi_epi32(descs0[3], descs0[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = _mm_unpackhi_epi32(descs0[1], descs0[0]);
+
+ /* set ol_flags with vlan packet type */
+ fm10k_desc_to_olflags_v(descs0, &rx_pkts[pos]);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = _mm_shuffle_epi8(descs0[1], shuf_msk);
+ pkt_mb1 = _mm_shuffle_epi8(descs0[0], shuf_msk);
+
+ /* C.2 get 4 pkts staterr value */
+ zero = _mm_xor_si128(dd_check, dd_check);
+ staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
+ pkt_mb4);
+ _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ __m128i eop_shuf_mask = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ );
+
+ /* and with mask to extract bits, flipping 1-0 */
+ __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+ /* the staterr values are not in order, which the count of
+ * DD bits doesn't care about. However, for end-of-packet
+ * tracking we do care, so shuffle. This also compresses the
+ * 32-bit values to 8-bit
+ */
+ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+ split_packet += RTE_FM10K_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = _mm_and_si128(staterr, dd_check);
+ staterr = _mm_packs_epi32(staterr, zero);
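+ /* Each 16-bit lane of the low quadword now holds 0 or 1 for
+ * one packet's DD bit, so a single popcount at C.4 yields
+ * the number of completed descriptors.
+ */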
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
+ pkt_mb2);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+
+ fm10k_desc_to_pktype_v(descs0, &rx_pkts[pos]);
+
+ /* C.4 calc available number of desc */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_FM10K_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->next_dd = (uint16_t)(rxq->next_dd + nb_pkts_recd);
+ rxq->next_dd = (uint16_t)(rxq->next_dd & (rxq->nb_desc - 1));
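+ /* Masking with nb_desc - 1 assumes a power-of-two ring size,
+ * which the vector RX path relies on throughout.
+ */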
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+/* vPMD receive routine
+ *
+ * Notice:
+ * - ol_flags for RSS and checksum errors are not supported
+ */
+uint16_t
+fm10k_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return fm10k_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+static inline uint16_t
+fm10k_reassemble_packets(struct fm10k_rx_queue *rxq,
+ struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[RTE_FM10K_MAX_RX_BURST]; /* finished pkts */
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
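+ /* compact the completed packets back into the caller's array */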
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
+
+/*
+ * vPMD receive routine that reassembles scattered packets
+ *
+ * Notice:
+ * - ol_flags for RSS and checksum errors are not supported
+ * - if nb_pkts > RTE_FM10K_MAX_RX_BURST, only RTE_FM10K_MAX_RX_BURST
+ *   packets' DD bits are scanned
+ */
+uint16_t
+fm10k_recv_scattered_pkts_vec(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct fm10k_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_FM10K_MAX_RX_BURST] = {0};
+ unsigned i = 0;
+
+ /* split_flags can only support up to RTE_FM10K_MAX_RX_BURST packets */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_FM10K_MAX_RX_BURST);
+ /* get some new buffers */
+ uint16_t nb_bufs = fm10k_recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly */
+ if (rxq->pkt_first_seg == NULL) {
+ /* find the first split flag and only reassemble from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + fm10k_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static const struct fm10k_txq_ops vec_txq_ops = {
+ .reset = fm10k_reset_tx_queue,
+};
+
+void __attribute__((cold))
+fm10k_txq_vec_setup(struct fm10k_tx_queue *txq)
+{
+ txq->ops = &vec_txq_ops;
+}
+
+int __attribute__((cold))
+fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq)
+{
+ /* Vector TX can't offload any features yet */
+ if ((txq->txq_flags & FM10K_SIMPLE_TX_FLAG) != FM10K_SIMPLE_TX_FLAG)
+ return -1;
+
+ return 0;
+}
+
+static inline void
+vtx1(volatile struct fm10k_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
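+ /* Build the 16-byte descriptor in one store: the low quadword
+ * is the buffer DMA address, the high quadword packs data_len
+ * in bits 0-15, vlan_tci in bits 16-31 and the flag byte in
+ * bits 56-63.
+ */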
+ __m128i descriptor = _mm_set_epi64x(flags << 56 |
+ pkt->vlan_tci << 16 | pkt->data_len,
+ MBUF_DMA_ADDR(pkt));
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct fm10k_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+static inline int __attribute__((always_inline))
+fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
+{
+ struct rte_mbuf **txep;
+ uint8_t flags;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bit on threshold descriptor */
+ flags = txq->hw_ring[txq->next_dd].flags;
+ if (!(flags & FM10K_TXD_FLAG_DONE))
+ return 0;
+
+ n = txq->rs_thresh;
+
+ /* First buffer to free from S/W ring is at index
+ * next_dd - (rs_thresh-1)
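+ * (next_dd carries the DONE writeback for the whole rs_thresh
+ * burst, so the burst's first buffer is rs_thresh - 1 slots
+ * earlier)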
+ */
+ txep = &txq->sw_ring[txq->next_dd - (n - 1)];
+ m = __rte_pktmbuf_prefree_seg(txep[0]);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i]);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool))
+ free[nb_free++] = m;
+ else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i]);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
+ txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
+ if (txq->next_dd >= txq->nb_desc)
+ txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
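+ /* next_dd wraps back to rs_thresh - 1: RS flags are placed
+ * every rs_thresh descriptors starting at that offset, as set
+ * up in fm10k_reset_tx_queue().
+ */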
+
+ return txq->rs_thresh;
+}
+
+static inline void __attribute__((always_inline))
+tx_backlog_entry(struct rte_mbuf **txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i] = tx_pkts[i];
+}
+
+uint16_t
+fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
+ volatile struct fm10k_tx_desc *txdp;
+ struct rte_mbuf **txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = FM10K_TXD_FLAG_LAST;
+ uint64_t rs = FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_LAST;
+ int i;
+
+ /* crossing the rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);
+
+ if (txq->nb_free < txq->free_thresh)
+ fm10k_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->next_free;
+ txdp = &txq->hw_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+
+ /* avoid reaching the end of the ring */
+ txdp = &(txq->hw_ring[tx_id]);
+ txep = &txq->sw_ring[tx_id];
+ }
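+ /* If we wrapped above, txdp/txep now point back at slot 0 and
+ * nb_commit holds the remaining part of the burst.
+ */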
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->next_rs) {
+ txq->hw_ring[txq->next_rs].flags |= FM10K_TXD_FLAG_RS;
+ txq->next_rs = (uint16_t)(txq->next_rs + txq->rs_thresh);
+ }
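+ /* Setting RS on the next_rs descriptor asks the HW to write
+ * back a DONE flag there, which fm10k_tx_free_bufs() later
+ * polls to reclaim rs_thresh buffers at a time.
+ */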
+
+ txq->next_free = tx_id;
+
+ FM10K_PCI_REG_WRITE(txq->tail_ptr, txq->next_free);
+
+ return nb_pkts;
+}
+
+static void __attribute__((cold))
+fm10k_reset_tx_queue(struct fm10k_tx_queue *txq)
+{
+ static const struct fm10k_tx_desc zeroed_desc = {0};
+ struct rte_mbuf **txe = txq->sw_ring;
+ uint16_t i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_desc; i++)
+ txq->hw_ring[i] = zeroed_desc;
+
+ /* Initialize SW ring entries */
+ for (i = 0; i < txq->nb_desc; i++)
+ txe[i] = NULL;
+
+ txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+ txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+
+ txq->next_free = 0;
+ txq->nb_used = 0;
+ /* Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->nb_free = (uint16_t)(txq->nb_desc - 1);
+ FM10K_PCI_REG_WRITE(txq->tail_ptr, 0);
+}
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq.c b/src/dpdk22/drivers/net/i40e/base/i40e_adminq.c
index e098ed69..998582cd 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq.c
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_adminq.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -37,7 +37,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "i40e_adminq.h"
#include "i40e_prototype.h"
-#ifndef VF_DRIVER
+#ifdef PF_DRIVER
/**
* i40e_is_nvm_update_op - return true if this is an NVM update operation
* @desc: API request descriptor
@@ -48,7 +48,7 @@ STATIC INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
}
-#endif /* VF_DRIVER */
+#endif /* PF_DRIVER */
/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
@@ -58,7 +58,7 @@ STATIC INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
/* set head and tail registers in our local struct */
- if (hw->mac.type == I40E_MAC_VF) {
+ if (i40e_is_vf(hw)) {
hw->aq.asq.tail = I40E_VF_ATQT1;
hw->aq.asq.head = I40E_VF_ATQH1;
hw->aq.asq.len = I40E_VF_ATQLEN1;
@@ -69,6 +69,7 @@ STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
hw->aq.arq.len = I40E_VF_ARQLEN1;
hw->aq.arq.bal = I40E_VF_ARQBAL1;
hw->aq.arq.bah = I40E_VF_ARQBAH1;
+#ifdef PF_DRIVER
} else {
hw->aq.asq.tail = I40E_PF_ATQT;
hw->aq.asq.head = I40E_PF_ATQH;
@@ -80,6 +81,7 @@ STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
hw->aq.arq.len = I40E_PF_ARQLEN;
hw->aq.arq.bal = I40E_PF_ARQBAL;
hw->aq.arq.bah = I40E_PF_ARQBAH;
+#endif
}
}
@@ -316,8 +318,26 @@ STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
wr32(hw, hw->aq.asq.tail, 0);
/* set starting point */
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+ if (!i40e_is_vf(hw))
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+#else
wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
+#ifdef INTEGRATED_VF
+ if (i40e_is_vf(hw))
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
+#else
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* VF_DRIVER */
wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
@@ -345,8 +365,26 @@ STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
wr32(hw, hw->aq.arq.tail, 0);
/* set starting point */
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+ if (!i40e_is_vf(hw))
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+#else
wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
+#ifdef INTEGRATED_VF
+ if (i40e_is_vf(hw))
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
+#else
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* VF_DRIVER */
wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
@@ -489,8 +527,12 @@ enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
- if (hw->aq.asq.count == 0)
- return I40E_ERR_NOT_READY;
+ i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ if (hw->aq.asq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_asq_out;
+ }
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.asq.head, 0);
@@ -499,16 +541,13 @@ enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
wr32(hw, hw->aq.asq.bal, 0);
wr32(hw, hw->aq.asq.bah, 0);
- /* make sure spinlock is available */
- i40e_acquire_spinlock(&hw->aq.asq_spinlock);
-
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
i40e_free_asq_bufs(hw);
+shutdown_asq_out:
i40e_release_spinlock(&hw->aq.asq_spinlock);
-
return ret_code;
}
@@ -522,8 +561,12 @@ enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
- if (hw->aq.arq.count == 0)
- return I40E_ERR_NOT_READY;
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->aq.arq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_arq_out;
+ }
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.arq.head, 0);
@@ -532,16 +575,13 @@ enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
wr32(hw, hw->aq.arq.bal, 0);
wr32(hw, hw->aq.arq.bah, 0);
- /* make sure spinlock is available */
- i40e_acquire_spinlock(&hw->aq.arq_spinlock);
-
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
i40e_free_arq_bufs(hw);
+shutdown_arq_out:
i40e_release_spinlock(&hw->aq.arq_spinlock);
-
return ret_code;
}
@@ -559,11 +599,11 @@ enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
enum i40e_status_code ret_code;
-#ifndef VF_DRIVER
+#ifdef PF_DRIVER
u16 eetrack_lo, eetrack_hi;
+ u16 cfg_ptr, oem_hi, oem_lo;
int retry = 0;
#endif
-
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.num_asq_entries == 0) ||
@@ -593,7 +633,12 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
if (ret_code != I40E_SUCCESS)
goto init_adminq_free_asq;
-#ifndef VF_DRIVER
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+ /* VF has no need of firmware */
+ if (i40e_is_vf(hw))
+ goto init_adminq_exit;
+#endif
/* There are some cases where the firmware may not be quite ready
* for AdminQ operations, so we retry the AdminQ setup a few times
* if we see timeouts in this first AQ call.
@@ -602,6 +647,7 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
ret_code = i40e_aq_get_firmware_version(hw,
&hw->aq.fw_maj_ver,
&hw->aq.fw_min_ver,
+ &hw->aq.fw_build,
&hw->aq.api_maj_ver,
&hw->aq.api_min_ver,
NULL);
@@ -615,10 +661,17 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
goto init_adminq_free_arq;
/* get the NVM version info */
- i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
+ &hw->nvm.version);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
+ i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
+ &oem_hi);
+ i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
+ &oem_lo);
+ hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
@@ -627,19 +680,20 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
- hw->aq.nvm_busy = false;
+ hw->aq.nvm_release_on_done = false;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
ret_code = i40e_aq_set_hmc_resource_profile(hw,
I40E_HMC_PROFILE_DEFAULT,
0,
NULL);
+#endif /* PF_DRIVER */
ret_code = I40E_SUCCESS;
-#endif /* VF_DRIVER */
/* success! */
goto init_adminq_exit;
-#ifndef VF_DRIVER
+#ifdef PF_DRIVER
init_adminq_free_arq:
i40e_shutdown_arq(hw);
#endif
@@ -671,6 +725,9 @@ enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
i40e_destroy_spinlock(&hw->aq.asq_spinlock);
i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+ if (hw->nvm_buff.va)
+ i40e_free_virt_mem(hw, &hw->nvm_buff);
+
return ret_code;
}
@@ -690,16 +747,16 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
+
while (rd32(hw, hw->aq.asq.head) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
- "%s: ntc %d head %d.\n", __FUNCTION__, ntc,
- rd32(hw, hw->aq.asq.head));
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
(I40E_ADMINQ_CALLBACK)details->callback;
- i40e_memcpy(&desc_cb, desc,
- sizeof(struct i40e_aq_desc), I40E_DMA_TO_DMA);
+ i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
+ I40E_DMA_TO_DMA);
cb_func(hw, &desc_cb);
}
i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
@@ -757,29 +814,25 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
u16 retval = 0;
u32 val = 0;
- val = rd32(hw, hw->aq.asq.head);
- if (val >= hw->aq.num_asq_entries) {
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
- "AQTX: head overrun at %d\n", val);
- status = I40E_ERR_QUEUE_EMPTY;
- goto asq_send_command_exit;
- }
+ i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ hw->aq.asq_last_status = I40E_AQ_RC_OK;
if (hw->aq.asq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: Admin queue not initialized.\n");
status = I40E_ERR_QUEUE_EMPTY;
- goto asq_send_command_exit;
+ goto asq_send_command_error;
}
-#ifndef VF_DRIVER
- if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
- status = I40E_ERR_NVM;
- goto asq_send_command_exit;
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: head overrun at %d\n", val);
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
}
-#endif
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
i40e_memcpy(details,
@@ -807,8 +860,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
desc->flags &= ~CPU_TO_LE16(details->flags_dis);
desc->flags |= CPU_TO_LE16(details->flags_ena);
- i40e_acquire_spinlock(&hw->aq.asq_spinlock);
-
if (buff_size > hw->aq.asq_buf_size) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
@@ -922,6 +973,11 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
"AQTX: desc and buffer writeback:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+ /* save writeback aq if requested */
+ if (details->wb_desc)
+ i40e_memcpy(details->wb_desc, desc_on_ring,
+ sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -931,14 +987,8 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
}
-#ifndef VF_DRIVER
- if (!status && i40e_is_nvm_update_op(desc))
- hw->aq.nvm_busy = true;
-
-#endif /* VF_DRIVER */
asq_send_command_error:
i40e_release_spinlock(&hw->aq.asq_spinlock);
-asq_send_command_exit:
return status;
}
@@ -986,12 +1036,24 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
i40e_acquire_spinlock(&hw->aq.arq_spinlock);
/* set next_to_use to head */
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+ if (!i40e_is_vf(hw))
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+#else
ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+#endif /* INTEGRATED_VF */
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
+#ifdef INTEGRATED_VF
+ if (i40e_is_vf(hw))
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
+#else
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
+#endif /* INTEGRATED_VF */
+#endif /* VF_DRIVER */
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
- "AQRX: Queue is empty.\n");
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
goto clean_arq_element_out;
}
@@ -1053,13 +1115,25 @@ clean_arq_element_out:
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
i40e_release_spinlock(&hw->aq.arq_spinlock);
-#ifndef VF_DRIVER
+#ifdef PF_DRIVER
if (i40e_is_nvm_update_op(&e->desc)) {
- hw->aq.nvm_busy = false;
if (hw->aq.nvm_release_on_done) {
i40e_release_nvm(hw);
hw->aq.nvm_release_on_done = false;
}
+
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
}
#endif
@@ -1072,9 +1146,6 @@ void i40e_resume_aq(struct i40e_hw *hw)
hw->aq.asq.next_to_use = 0;
hw->aq.asq.next_to_clean = 0;
-#if (I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK)
-#error I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK
-#endif
i40e_config_asq_regs(hw);
hw->aq.arq.next_to_use = 0;
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq.h b/src/dpdk22/drivers/net/i40e/base/i40e_adminq.h
index ea611bd9..40c86d9d 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq.h
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_adminq.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define _I40E_ADMINQ_H_
#include "i40e_osdep.h"
+#include "i40e_status.h"
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
@@ -75,6 +76,7 @@ struct i40e_asq_cmd_details {
u16 flags_dis;
bool async;
bool postpone;
+ struct i40e_aq_desc *wb_desc;
};
#define I40E_ADMINQ_DETAILS(R, i) \
@@ -99,9 +101,9 @@ struct i40e_adminq_info {
u16 asq_buf_size; /* send queue buffer size */
u16 fw_maj_ver; /* firmware major version */
u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_busy;
bool nvm_release_on_done;
struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
@@ -114,9 +116,10 @@ struct i40e_adminq_info {
/**
* i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_rc: AdminQ error code to convert
+ * aq_ret: AdminQ handler error code can override aq_rc
+ * aq_rc: AdminQ firmware error code to convert
**/
-STATIC inline int i40e_aq_rc_to_posix(u16 aq_rc)
+STATIC INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
{
int aq_to_posix[] = {
0, /* I40E_AQ_RC_OK */
@@ -144,12 +147,22 @@ STATIC inline int i40e_aq_rc_to_posix(u16 aq_rc)
-EFBIG, /* I40E_AQ_RC_EFBIG */
};
+ /* aq_rc is invalid if AQ timed out */
+ if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ return -EAGAIN;
+
+ if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
+ return -ERANGE;
+
return aq_to_posix[aq_rc];
}
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+#ifdef I40E_ESS_SUPPORT
+#define I40E_ASQ_CMD_TIMEOUT_ESS 50000 /* msecs */
+#endif
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq_cmd.h b/src/dpdk22/drivers/net/i40e/base/i40e_adminq_cmd.h
index 5ea9b7db..18746535 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq_cmd.h
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -41,7 +41,7 @@ POSSIBILITY OF SUCH DAMAGE.
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0002
+#define I40E_FW_API_VERSION_MINOR 0x0004
struct i40e_aq_desc {
__le16 flags;
@@ -139,12 +139,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
- i40e_aqc_opc_set_cppm_configuration = 0x0103,
- i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
- i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
-
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -239,6 +234,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_nvm_update = 0x0703,
i40e_aqc_opc_nvm_config_read = 0x0704,
i40e_aqc_opc_nvm_config_write = 0x0705,
+ i40e_aqc_opc_oem_post_update = 0x0720,
/* virtualization commands */
i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -262,11 +258,27 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
i40e_aqc_opc_lldp_stop = 0x0A05,
i40e_aqc_opc_lldp_start = 0x0A06,
+ i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
+ i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+#ifdef X722_SUPPORT
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
+
+ /* WoL commands */
+ i40e_aqc_opc_set_wol_filter = 0x0120,
+ i40e_aqc_opc_get_wake_reason = 0x0121,
+#endif
+
+ /* Proxy commands */
+ i40e_aqc_opc_set_proxy_config = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@@ -274,15 +286,14 @@ enum i40e_admin_queue_opc {
/* OEM commands */
i40e_aqc_opc_oem_parameter_change = 0xFE00,
i40e_aqc_opc_oem_device_status_change = 0xFE01,
+ i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
/* debug commands */
- i40e_aqc_opc_debug_get_deviceid = 0xFF00,
- i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
i40e_aqc_opc_debug_dump_internals = 0xFF08,
- i40e_aqc_opc_debug_modify_internals = 0xFF09,
};
/* command structures and indirect data structures */
@@ -416,6 +427,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_VSI 0x0017
#define I40E_AQ_CAP_ID_DCB 0x0018
#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_ISCSI 0x0022
#define I40E_AQ_CAP_ID_RSS 0x0040
#define I40E_AQ_CAP_ID_RXQ 0x0041
#define I40E_AQ_CAP_ID_TXQ 0x0042
@@ -460,8 +472,11 @@ struct i40e_aqc_arp_proxy_data {
__le32 pfpm_proxyfc;
__le32 ip_addr;
u8 mac_addr[6];
+ u8 reserved[2];
};
+I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
+
/* Set NS Proxy Table Entry Command (indirect 0x0105) */
struct i40e_aqc_ns_proxy_data {
__le16 table_idx_mac_addr_0;
@@ -487,6 +502,8 @@ struct i40e_aqc_ns_proxy_data {
u8 ipv6_addr_1[16];
};
+I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
+
/* Manage LAA Command (0x0106) - obsolete */
struct i40e_aqc_mng_laa {
__le16 command_flags;
@@ -497,6 +514,8 @@ struct i40e_aqc_mng_laa {
u8 reserved2[6];
};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
+
/* Manage MAC Address Read Command (indirect 0x0107) */
struct i40e_aqc_mac_address_read {
__le16 command_flags;
@@ -504,7 +523,8 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_SAN_ADDR_VALID 0x20
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
-#define I40E_AQC_ADDR_VALID_MASK 0xf0
+#define I40E_AQC_MC_MAG_EN_VALID 0x100
+#define I40E_AQC_ADDR_VALID_MASK 0x1F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -527,7 +547,9 @@ struct i40e_aqc_mac_address_write {
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
-#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
+#define I40E_AQC_WRITE_TYPE_MASK 0xC000
+
__le16 mac_sah;
__le32 mac_sal;
u8 reserved[8];
@@ -568,6 +590,8 @@ struct i40e_aqc_get_switch_config_header_resp {
u8 reserved[12];
};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
+
struct i40e_aqc_switch_config_element_resp {
u8 element_type;
#define I40E_AQ_SW_ELEM_TYPE_MAC 1
@@ -593,6 +617,8 @@ struct i40e_aqc_switch_config_element_resp {
__le16 element_info;
};
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
+
/* Get Switch Configuration (indirect 0x0200)
* an array of elements are returned in the response buffer
* the first in the array is the header, remainder are elements
@@ -602,6 +628,8 @@ struct i40e_aqc_get_switch_config_resp {
struct i40e_aqc_switch_config_element_resp element[1];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
+
/* Add Statistics (direct 0x0201)
* Remove Statistics (direct 0x0202)
*/
@@ -667,6 +695,8 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
u8 reserved2[6];
};
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
+
/* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes)
@@ -815,6 +845,10 @@ struct i40e_aqc_vsi_properties_data {
u8 queueing_opt_flags;
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#ifdef X722_SUPPORT
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
+#endif
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
@@ -1055,6 +1089,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
u8 reserved[8];
};
@@ -1098,6 +1133,8 @@ struct i40e_aqc_remove_tag {
u8 reserved[12];
};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
+
/* Add multicast E-Tag (direct 0x0257)
* del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
* and no external data
@@ -1213,7 +1250,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
} ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
/* 0x0000 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
@@ -1246,7 +1283,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 reserved[4];
__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
u8 reserved2[14];
/* response section */
@@ -1365,6 +1402,8 @@ struct i40e_aqc_configure_vsi_ets_sla_bw_data {
u8 reserved1[28];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
+
/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
* responds with i40e_aqc_qs_handles_resp
*/
@@ -1376,6 +1415,8 @@ struct i40e_aqc_configure_vsi_tc_bw_data {
__le16 qs_handles[8];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
+
/* Query vsi bw configuration (indirect 0x0408) */
struct i40e_aqc_query_vsi_bw_config_resp {
u8 tc_valid_bits;
@@ -1389,6 +1430,8 @@ struct i40e_aqc_query_vsi_bw_config_resp {
u8 reserved3[23];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
+
/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
struct i40e_aqc_query_vsi_ets_sla_config_resp {
u8 tc_valid_bits;
@@ -1400,6 +1443,8 @@ struct i40e_aqc_query_vsi_ets_sla_config_resp {
__le16 tc_bw_max[2];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
+
/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
struct i40e_aqc_configure_switching_comp_bw_limit {
__le16 seid;
@@ -1427,6 +1472,8 @@ struct i40e_aqc_configure_switching_comp_ets_data {
u8 reserved2[96];
};
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
+
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
u8 tc_valid_bits;
@@ -1438,6 +1485,8 @@ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
u8 reserved1[28];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+
/* Configure Switching Component Bandwidth Allocation per Tc
* (indirect 0x0417)
*/
@@ -1449,6 +1498,8 @@ struct i40e_aqc_configure_switching_comp_bw_config_data {
u8 reserved1[20];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
+
/* Query Switching Component Configuration (indirect 0x0418) */
struct i40e_aqc_query_switching_comp_ets_config_resp {
u8 tc_valid_bits;
@@ -1459,6 +1510,8 @@ struct i40e_aqc_query_switching_comp_ets_config_resp {
u8 reserved2[23];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
+
/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
struct i40e_aqc_query_port_ets_config_resp {
u8 reserved[4];
@@ -1474,6 +1527,8 @@ struct i40e_aqc_query_port_ets_config_resp {
u8 reserved3[32];
};
+I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
+
/* Query Switching Component Bandwidth Allocation per Traffic Type
* (indirect 0x041A)
*/
@@ -1488,6 +1543,8 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
__le16 tc_bw_max[2];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
+
/* Suspend/resume port TX traffic
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
@@ -1501,6 +1558,8 @@ struct i40e_aqc_configure_partition_bw_data {
u8 max_bw[16]; /* bandwidth limit */
};
+I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@@ -1583,6 +1642,8 @@ struct i40e_aqc_module_desc {
u8 reserved2[8];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
+
struct i40e_aq_get_phy_abilities_resp {
__le32 phy_type; /* bitmap using the above enum for offsets */
u8 link_speed; /* bitmap using the above enum bit patterns */
@@ -1611,6 +1672,8 @@ struct i40e_aq_get_phy_abilities_resp {
struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
};
+I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
+
/* Set PHY Config (direct 0x0601) */
struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 phy_type;
@@ -1676,11 +1739,13 @@ struct i40e_aqc_get_link_status {
u8 phy_type; /* i40e_aq_phy_type */
u8 link_speed; /* i40e_aq_link_speed */
u8 link_info;
-#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_UP 0x01 /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION 0x01
#define I40E_AQ_LINK_FAULT 0x02
#define I40E_AQ_LINK_FAULT_TX 0x04
#define I40E_AQ_LINK_FAULT_RX 0x08
#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_LINK_UP_PORT 0x20
#define I40E_AQ_MEDIA_AVAILABLE 0x40
#define I40E_AQ_SIGNAL_DETECT 0x80
u8 an_info;
@@ -1794,12 +1859,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
/* NVM Config Read (indirect 0x0704) */
struct i40e_aqc_nvm_config_read {
__le16 cmd_flags;
-#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
-#define ANVM_READ_SINGLE_FEATURE 0
-#define ANVM_READ_MULTIPLE_FEATURES 1
+#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
+#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
__le16 element_count;
- __le16 element_id; /* Feature/field ID */
- u8 reserved[2];
+ __le16 element_id; /* Feature/field ID */
+ __le16 element_id_msw; /* MSWord of field ID */
__le32 address_high;
__le32 address_low;
};
@@ -1817,21 +1882,51 @@ struct i40e_aqc_nvm_config_write {
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+/* Used for 0x0704 as well as for 0x0705 commands */
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE 0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
- __le16 instance_id;
+#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
+#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
+#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
__le16 feature_options;
__le16 feature_selection;
};
+I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
+
struct i40e_aqc_nvm_config_data_immediate_field {
-#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
- __le16 field_id;
- __le16 instance_id;
+ __le32 field_id;
+ __le32 field_value;
__le16 field_options;
- __le16 field_value;
+ __le16 reserved;
};
+I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
+
+/* OEM Post Update (indirect 0x0720)
+ * no command data struct used
+ */
+ struct i40e_aqc_nvm_oem_post_update {
+#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01
+ u8 sel_data;
+ u8 reserved[7];
+};
+
+I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
+
+struct i40e_aqc_nvm_oem_post_update_buffer {
+ u8 str_len;
+ u8 dev_addr;
+ __le16 eeprom_addr;
+ u8 data[36];
+};
+
+I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
+
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
@@ -1994,9 +2089,99 @@ struct i40e_aqc_lldp_start {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
-/* Apply MIB changes (0x0A07)
- * uses the generic struc as it contains no data
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * returns below as indirect response
+ */
+
+#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
+#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
+#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
+#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+
+#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
+#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
+#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xB
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct. Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
+struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
+ u8 reserved1;
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 reserved2;
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ u8 reserved3[2];
+ __le16 oper_app_prio;
+ u8 reserved4[2];
+ __le16 tlv_status;
+};
+
+I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp);
+
+struct i40e_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+ __le32 tlv_status;
+ u8 reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent. e.g. DCBx
*/
+struct i40e_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
+ u8 type;
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting specific LLDP agent. e.g. DCBx
+ */
+struct i40e_aqc_lldp_stop_start_specific_agent {
+#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
+#define I40E_AQC_START_SPECIFIC_AGENT_MASK (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+ u8 command;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
@@ -2041,6 +2226,48 @@ struct i40e_aqc_del_udp_tunnel_completion {
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+#ifdef X722_SUPPORT
+
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+#endif
/* tunnel key structure 0x0B10 */
@@ -2072,7 +2299,8 @@ struct i40e_aqc_oem_param_change {
#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
#define I40E_AQ_OEM_PARAM_MAC 2
__le32 param_value1;
- u8 param_value2[8];
+ __le16 param_value2;
+ u8 reserved[6];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
@@ -2086,6 +2314,28 @@ struct i40e_aqc_oem_state_change {
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+/* Initialize OCSD (0xFE02, direct) */
+struct i40e_aqc_opc_oem_ocsd_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocsd_memory_block_addr_high;
+ __le32 ocsd_memory_block_addr_low;
+ __le32 requested_update_interval;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
+
+/* Initialize OCBB (0xFE03, direct) */
+struct i40e_aqc_opc_oem_ocbb_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocbb_memory_block_addr_high;
+ __le32 ocbb_memory_block_addr_low;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
+
/* debug commands */
/* get device id (0xFF00) uses the generic structure */
@@ -2176,4 +2426,84 @@ struct i40e_aqc_debug_modify_internals {
I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+#ifdef X722_SUPPORT
+struct i40e_aqc_set_proxy_config {
+ u8 reserved_1[4];
+ u8 reserved_2[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_proxy_config);
+
+struct i40e_aqc_set_proxy_config_resp {
+ u8 reserved[8];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_proxy_config_resp);
+
+struct i40e_aqc_set_ns_proxy_table_entry {
+ u8 reserved_1[4];
+ u8 reserved_2[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_ns_proxy_table_entry);
+
+struct i40e_aqc_set_ns_proxy_table_entry_resp {
+ u8 reserved_1[4];
+ u8 reserved_2[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_ns_proxy_table_entry_resp);
+
+struct i40e_aqc_set_wol_filter {
+ __le16 filter_index;
+#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
+ __le16 cmd_flags;
+#define I40E_AQC_SET_WOL_FILTER 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+ __le16 valid_flags;
+#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+struct i40e_aqc_set_wol_filter_resp {
+ u8 reserved[8];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter_resp);
+
+struct i40e_aqc_get_wol_wake_reason {
+ u8 reserved[16];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wol_wake_reason);
+
+struct i40e_aqc_get_wake_reason_completion {
+ u8 reserved_1[2];
+ __le16 wake_reason;
+ u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
+
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
#endif
+#endif /* _I40E_ADMINQ_CMD_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_alloc.h b/src/dpdk22/drivers/net/i40e/base/i40e_alloc.h
index 6e81cd5b..38c2f655 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_alloc.h
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_alloc.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_common.c b/src/dpdk22/drivers/net/i40e/base/i40e_common.c
index ffaa777e..d7c940d3 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_common.c
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_common.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -36,6 +36,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "i40e_prototype.h"
#include "i40e_virtchnl.h"
+
/**
* i40e_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
@@ -43,7 +44,7 @@ POSSIBILITY OF SUCH DAMAGE.
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
**/
-#ifdef VF_DRIVER
+#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
#else
STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
@@ -64,8 +65,27 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_20G_KR2:
+ case I40E_DEV_ID_20G_KR2_A:
hw->mac.type = I40E_MAC_XL710;
break;
+#ifdef X722_SUPPORT
+#ifdef X722_A0_SUPPORT
+ case I40E_DEV_ID_X722_A0:
+#endif
+ case I40E_DEV_ID_SFP_X722:
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ hw->mac.type = I40E_MAC_X722;
+ break;
+#endif
+#ifdef X722_SUPPORT
+ case I40E_DEV_ID_X722_VF:
+ case I40E_DEV_ID_X722_VF_HV:
+ hw->mac.type = I40E_MAC_X722_VF;
+ break;
+#endif
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
@@ -83,6 +103,214 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
return status;
}
+#ifndef I40E_NDIS_SUPPORT
+/**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case I40E_AQ_RC_OK:
+ return "OK";
+ case I40E_AQ_RC_EPERM:
+ return "I40E_AQ_RC_EPERM";
+ case I40E_AQ_RC_ENOENT:
+ return "I40E_AQ_RC_ENOENT";
+ case I40E_AQ_RC_ESRCH:
+ return "I40E_AQ_RC_ESRCH";
+ case I40E_AQ_RC_EINTR:
+ return "I40E_AQ_RC_EINTR";
+ case I40E_AQ_RC_EIO:
+ return "I40E_AQ_RC_EIO";
+ case I40E_AQ_RC_ENXIO:
+ return "I40E_AQ_RC_ENXIO";
+ case I40E_AQ_RC_E2BIG:
+ return "I40E_AQ_RC_E2BIG";
+ case I40E_AQ_RC_EAGAIN:
+ return "I40E_AQ_RC_EAGAIN";
+ case I40E_AQ_RC_ENOMEM:
+ return "I40E_AQ_RC_ENOMEM";
+ case I40E_AQ_RC_EACCES:
+ return "I40E_AQ_RC_EACCES";
+ case I40E_AQ_RC_EFAULT:
+ return "I40E_AQ_RC_EFAULT";
+ case I40E_AQ_RC_EBUSY:
+ return "I40E_AQ_RC_EBUSY";
+ case I40E_AQ_RC_EEXIST:
+ return "I40E_AQ_RC_EEXIST";
+ case I40E_AQ_RC_EINVAL:
+ return "I40E_AQ_RC_EINVAL";
+ case I40E_AQ_RC_ENOTTY:
+ return "I40E_AQ_RC_ENOTTY";
+ case I40E_AQ_RC_ENOSPC:
+ return "I40E_AQ_RC_ENOSPC";
+ case I40E_AQ_RC_ENOSYS:
+ return "I40E_AQ_RC_ENOSYS";
+ case I40E_AQ_RC_ERANGE:
+ return "I40E_AQ_RC_ERANGE";
+ case I40E_AQ_RC_EFLUSHED:
+ return "I40E_AQ_RC_EFLUSHED";
+ case I40E_AQ_RC_BAD_ADDR:
+ return "I40E_AQ_RC_BAD_ADDR";
+ case I40E_AQ_RC_EMODE:
+ return "I40E_AQ_RC_EMODE";
+ case I40E_AQ_RC_EFBIG:
+ return "I40E_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
+{
+ switch (stat_err) {
+ case I40E_SUCCESS:
+ return "OK";
+ case I40E_ERR_NVM:
+ return "I40E_ERR_NVM";
+ case I40E_ERR_NVM_CHECKSUM:
+ return "I40E_ERR_NVM_CHECKSUM";
+ case I40E_ERR_PHY:
+ return "I40E_ERR_PHY";
+ case I40E_ERR_CONFIG:
+ return "I40E_ERR_CONFIG";
+ case I40E_ERR_PARAM:
+ return "I40E_ERR_PARAM";
+ case I40E_ERR_MAC_TYPE:
+ return "I40E_ERR_MAC_TYPE";
+ case I40E_ERR_UNKNOWN_PHY:
+ return "I40E_ERR_UNKNOWN_PHY";
+ case I40E_ERR_LINK_SETUP:
+ return "I40E_ERR_LINK_SETUP";
+ case I40E_ERR_ADAPTER_STOPPED:
+ return "I40E_ERR_ADAPTER_STOPPED";
+ case I40E_ERR_INVALID_MAC_ADDR:
+ return "I40E_ERR_INVALID_MAC_ADDR";
+ case I40E_ERR_DEVICE_NOT_SUPPORTED:
+ return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+ case I40E_ERR_MASTER_REQUESTS_PENDING:
+ return "I40E_ERR_MASTER_REQUESTS_PENDING";
+ case I40E_ERR_INVALID_LINK_SETTINGS:
+ return "I40E_ERR_INVALID_LINK_SETTINGS";
+ case I40E_ERR_AUTONEG_NOT_COMPLETE:
+ return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+ case I40E_ERR_RESET_FAILED:
+ return "I40E_ERR_RESET_FAILED";
+ case I40E_ERR_SWFW_SYNC:
+ return "I40E_ERR_SWFW_SYNC";
+ case I40E_ERR_NO_AVAILABLE_VSI:
+ return "I40E_ERR_NO_AVAILABLE_VSI";
+ case I40E_ERR_NO_MEMORY:
+ return "I40E_ERR_NO_MEMORY";
+ case I40E_ERR_BAD_PTR:
+ return "I40E_ERR_BAD_PTR";
+ case I40E_ERR_RING_FULL:
+ return "I40E_ERR_RING_FULL";
+ case I40E_ERR_INVALID_PD_ID:
+ return "I40E_ERR_INVALID_PD_ID";
+ case I40E_ERR_INVALID_QP_ID:
+ return "I40E_ERR_INVALID_QP_ID";
+ case I40E_ERR_INVALID_CQ_ID:
+ return "I40E_ERR_INVALID_CQ_ID";
+ case I40E_ERR_INVALID_CEQ_ID:
+ return "I40E_ERR_INVALID_CEQ_ID";
+ case I40E_ERR_INVALID_AEQ_ID:
+ return "I40E_ERR_INVALID_AEQ_ID";
+ case I40E_ERR_INVALID_SIZE:
+ return "I40E_ERR_INVALID_SIZE";
+ case I40E_ERR_INVALID_ARP_INDEX:
+ return "I40E_ERR_INVALID_ARP_INDEX";
+ case I40E_ERR_INVALID_FPM_FUNC_ID:
+ return "I40E_ERR_INVALID_FPM_FUNC_ID";
+ case I40E_ERR_QP_INVALID_MSG_SIZE:
+ return "I40E_ERR_QP_INVALID_MSG_SIZE";
+ case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+ return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+ case I40E_ERR_INVALID_FRAG_COUNT:
+ return "I40E_ERR_INVALID_FRAG_COUNT";
+ case I40E_ERR_QUEUE_EMPTY:
+ return "I40E_ERR_QUEUE_EMPTY";
+ case I40E_ERR_INVALID_ALIGNMENT:
+ return "I40E_ERR_INVALID_ALIGNMENT";
+ case I40E_ERR_FLUSHED_QUEUE:
+ return "I40E_ERR_FLUSHED_QUEUE";
+ case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+ case I40E_ERR_INVALID_IMM_DATA_SIZE:
+ return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+ case I40E_ERR_TIMEOUT:
+ return "I40E_ERR_TIMEOUT";
+ case I40E_ERR_OPCODE_MISMATCH:
+ return "I40E_ERR_OPCODE_MISMATCH";
+ case I40E_ERR_CQP_COMPL_ERROR:
+ return "I40E_ERR_CQP_COMPL_ERROR";
+ case I40E_ERR_INVALID_VF_ID:
+ return "I40E_ERR_INVALID_VF_ID";
+ case I40E_ERR_INVALID_HMCFN_ID:
+ return "I40E_ERR_INVALID_HMCFN_ID";
+ case I40E_ERR_BACKING_PAGE_ERROR:
+ return "I40E_ERR_BACKING_PAGE_ERROR";
+ case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case I40E_ERR_INVALID_PBLE_INDEX:
+ return "I40E_ERR_INVALID_PBLE_INDEX";
+ case I40E_ERR_INVALID_SD_INDEX:
+ return "I40E_ERR_INVALID_SD_INDEX";
+ case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+ return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+ case I40E_ERR_INVALID_SD_TYPE:
+ return "I40E_ERR_INVALID_SD_TYPE";
+ case I40E_ERR_MEMCPY_FAILED:
+ return "I40E_ERR_MEMCPY_FAILED";
+ case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+ return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+ case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+ return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+ case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+ case I40E_ERR_SRQ_ENABLED:
+ return "I40E_ERR_SRQ_ENABLED";
+ case I40E_ERR_ADMIN_QUEUE_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_ERROR";
+ case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+ case I40E_ERR_BUF_TOO_SHORT:
+ return "I40E_ERR_BUF_TOO_SHORT";
+ case I40E_ERR_ADMIN_QUEUE_FULL:
+ return "I40E_ERR_ADMIN_QUEUE_FULL";
+ case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+ return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+ case I40E_ERR_BAD_IWARP_CQE:
+ return "I40E_ERR_BAD_IWARP_CQE";
+ case I40E_ERR_NVM_BLANK_MODE:
+ return "I40E_ERR_NVM_BLANK_MODE";
+ case I40E_ERR_NOT_IMPLEMENTED:
+ return "I40E_ERR_NOT_IMPLEMENTED";
+ case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+ case I40E_ERR_DIAG_TEST_FAILED:
+ return "I40E_ERR_DIAG_TEST_FAILED";
+ case I40E_ERR_NOT_READY:
+ return "I40E_ERR_NOT_READY";
+ case I40E_NOT_SUPPORTED:
+ return "I40E_NOT_SUPPORTED";
+ case I40E_ERR_FIRMWARE_API_VERSION:
+ return "I40E_ERR_FIRMWARE_API_VERSION";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
+#endif /* I40E_NDIS_SUPPORT */
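
Taken together, i40e_aq_str() and i40e_stat_str() let callers log symbolic error names, falling back to the raw numeric code in hw->err_str for values outside the switch. A hedged usage sketch — "status" and the DEBUGOUT2 macro are assumed to exist in the caller's context:

	/* Hedged sketch: log both the driver status and the last AQ return
	 * code after a failed admin queue command. */
	if (status != I40E_SUCCESS)
		DEBUGOUT2("AQ command failed: %s, aq_err %s\n",
			  i40e_stat_str(hw, status),
			  i40e_aq_str(hw, hw->aq.asq_last_status));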
/**
* i40e_debug_aq
 * @hw: pointer to the hw struct
@@ -98,47 +326,55 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u16 len = LE16_TO_CPU(aq_desc->datalen);
- u8 *aq_buffer = (u8 *)buffer;
- u32 data[4];
- u32 i = 0;
+ u8 *buf = (u8 *)buffer;
+ u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
i40e_debug(hw, mask,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
- aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
- aq_desc->retval);
+ LE16_TO_CPU(aq_desc->opcode),
+ LE16_TO_CPU(aq_desc->flags),
+ LE16_TO_CPU(aq_desc->datalen),
+ LE16_TO_CPU(aq_desc->retval));
i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
- aq_desc->cookie_high, aq_desc->cookie_low);
+ LE32_TO_CPU(aq_desc->cookie_high),
+ LE32_TO_CPU(aq_desc->cookie_low));
i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
- aq_desc->params.internal.param0,
- aq_desc->params.internal.param1);
+ LE32_TO_CPU(aq_desc->params.internal.param0),
+ LE32_TO_CPU(aq_desc->params.internal.param1));
i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
- aq_desc->params.external.addr_high,
- aq_desc->params.external.addr_low);
+ LE32_TO_CPU(aq_desc->params.external.addr_high),
+ LE32_TO_CPU(aq_desc->params.external.addr_low));
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
- i40e_memset(data, 0, sizeof(data), I40E_NONDMA_MEM);
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
if (buf_len < len)
len = buf_len;
- for (i = 0; i < len; i++) {
- data[((i % 16) / 4)] |=
- ((u32)aq_buffer[i]) << (8 * (i % 4));
- if ((i % 16) == 15) {
- i40e_debug(hw, mask,
- "\t0x%04X %08X %08X %08X %08X\n",
- i - 15, data[0], data[1], data[2],
- data[3]);
- i40e_memset(data, 0, sizeof(data),
- I40E_NONDMA_MEM);
- }
+ /* write the full 16-byte chunks */
+ for (i = 0; i < (len - 16); i += 16)
+ i40e_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i, buf[i], buf[i+1], buf[i+2], buf[i+3],
+ buf[i+4], buf[i+5], buf[i+6], buf[i+7],
+ buf[i+8], buf[i+9], buf[i+10], buf[i+11],
+ buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
+ /* the most we could have left is 16 bytes, pad with zeros */
+ if (i < len) {
+ char d_buf[16];
+ int j;
+
+ memset(d_buf, 0, sizeof(d_buf));
+ for (j = 0; i < len; j++, i++)
+ d_buf[j] = buf[i];
+ i40e_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
+ d_buf[4], d_buf[5], d_buf[6], d_buf[7],
+ d_buf[8], d_buf[9], d_buf[10], d_buf[11],
+ d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
}
- if ((i % 16) != 0)
- i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
- i - (i % 16), data[0], data[1], data[2],
- data[3]);
}
}
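
The rewritten dump prints each byte individually instead of packing four bytes into u32 words, and pads the trailing partial chunk with zeros. Illustrative (hypothetical) output for a 20-byte buffer — note that the remainder line prints the end offset (0x0014) rather than the chunk start, because i has already advanced to len by the time the padded line is emitted:

	0x0000 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
	0x0014 10 11 12 13 00 00 00 00 00 00 00 00 00 00 00 00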
@@ -151,9 +387,27 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
if (hw->aq.asq.len)
- return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
- else
- return false;
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+ if (!i40e_is_vf(hw))
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+#else
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+#endif /* INTEGRATED_VF */
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
+#ifdef INTEGRATED_VF
+ if (i40e_is_vf(hw))
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_VF_ATQLEN1_ATQENABLE_MASK);
+#else
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_VF_ATQLEN1_ATQENABLE_MASK);
+#endif /* INTEGRATED_VF */
+#endif /* VF_DRIVER */
+ return false;
}
/**
@@ -181,6 +435,166 @@ enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
+#ifdef X722_SUPPORT
+
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+STATIC enum i40e_status_code i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_lut);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
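
The two thin wrappers above differ only in the final boolean passed to i40e_aq_get_set_rss_lut(). A hedged usage sketch — the 128-entry LUT size and 4-queue spread are illustrative assumptions, and hw, vsi_id, and status come from the caller's context:

	/* Hedged sketch: spread traffic across 4 queues with a VSI LUT. */
	u8 lut[128];
	u16 n;

	for (n = 0; n < sizeof(lut); n++)
		lut[n] = n % 4;		/* queue index for this LUT slot */
	status = i40e_aq_set_rss_lut(hw, vsi_id, false /* VSI table */,
				     lut, sizeof(lut));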
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * get the RSS key per VSI
+ **/
+STATIC enum i40e_status_code i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_key *cmd_resp =
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_key);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ **/
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+#endif /* X722_SUPPORT */
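
A hedged read-modify-write sketch for the key wrappers; the key struct contents are deliberately left opaque, and hw, vsi_id, and status are assumed from the caller's context:

	/* Hedged sketch: read back the RSS key for a VSI, modify it,
	 * and write it again. */
	struct i40e_aqc_get_set_rss_key_data key;

	status = i40e_aq_get_rss_key(hw, vsi_id, &key);
	if (status == I40E_SUCCESS) {
		/* ... adjust key material here ... */
		status = i40e_aq_set_rss_key(hw, vsi_id, &key);
	}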
/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
@@ -547,7 +961,31 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
I40E_PTT_UNUSED_ENTRY(255)
};
-#ifndef VF_DRIVER
+
+/**
+ * i40e_validate_mac_addr - Validate unicast MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ DEBUGFUNC("i40e_validate_mac_addr");
+
+ /* Broadcast addresses ARE multicast addresses
+ * Make sure it is not a multicast address
+ * Reject the zero address
+ */
+ if (I40E_IS_MULTICAST(mac_addr) ||
+ (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
+ status = I40E_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
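
The multicast test keys off the I/G bit (the least significant bit of the first octet), which is also set for broadcast. Illustrative (hypothetical) inputs:

	u8 mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }; /* I/G bit set -> rejected */
	u8 zeros[6] = { 0 };                                  /* all zero    -> rejected */
	u8 ucast[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }; /* unicast     -> I40E_SUCCESS */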
+#ifdef PF_DRIVER
/**
* i40e_init_shared_code - Initialize the shared code
@@ -564,7 +1002,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
{
enum i40e_status_code status = I40E_SUCCESS;
- u32 reg;
+ u32 port, ari, func_rid;
DEBUGFUNC("i40e_init_shared_code");
@@ -572,6 +1010,9 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
switch (hw->mac.type) {
case I40E_MAC_XL710:
+#ifdef X722_SUPPORT
+ case I40E_MAC_X722:
+#endif
break;
default:
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -579,18 +1020,17 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
hw->phy.get_link_info = true;
- /* Determine port number */
- reg = rd32(hw, I40E_PFGEN_PORTNUM);
- reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
- I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
- hw->port = (u8)reg;
-
- /* Determine the PF number based on the PCI fn */
- reg = rd32(hw, I40E_GLPCI_CAPSUP);
- if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK)
- hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func);
+ /* Determine port number and PF number */
+ port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
+ >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
+ hw->port = (u8)port;
+ ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
+ I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
+ func_rid = rd32(hw, I40E_PF_FUNC_RID);
+ if (ari)
+ hw->pf_id = (u8)(func_rid & 0xff);
else
- hw->pf_id = (u8)hw->bus.func;
+ hw->pf_id = (u8)(func_rid & 0x7);
status = i40e_init_nvm(hw);
return status;
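
The reworked PF-id derivation reads the PCI routing ID register directly instead of recombining hw->bus.device and hw->bus.func. As a worked example (value hypothetical): with func_rid = 0x2A, an ARI-enabled device yields pf_id = 0x2A (the full 8-bit function number), while a non-ARI device yields pf_id = 0x2A & 0x7 = 2 (the classic 3-bit function number).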
@@ -732,25 +1172,60 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
}
/**
- * i40e_validate_mac_addr - Validate unicast MAC address
- * @mac_addr: pointer to MAC address
+ * i40e_read_pba_string - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
*
- * Tests a MAC address to ensure it is a valid Individual Address
+ * Reads the part number string from the EEPROM.
**/
-enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr)
+enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
{
enum i40e_status_code status = I40E_SUCCESS;
+ u16 pba_word = 0;
+ u16 pba_size = 0;
+ u16 pba_ptr = 0;
+ u16 i = 0;
+
+ status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
+ if ((status != I40E_SUCCESS) || (pba_word != 0xFAFA)) {
+ DEBUGOUT("Failed to read PBA flags or flag is invalid.\n");
+ return status;
+ }
- DEBUGFUNC("i40e_validate_mac_addr");
+ status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
+ if (status != I40E_SUCCESS) {
+ DEBUGOUT("Failed to read PBA Block pointer.\n");
+ return status;
+ }
- /* Broadcast addresses ARE multicast addresses
- * Make sure it is not a multicast address
- * Reject the zero address
+ status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
+ if (status != I40E_SUCCESS) {
+ DEBUGOUT("Failed to read PBA Block size.\n");
+ return status;
+ }
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size)
*/
- if (I40E_IS_MULTICAST(mac_addr) ||
- (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
- mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
- status = I40E_ERR_INVALID_MAC_ADDR;
+ pba_size--;
+ if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+ DEBUGOUT("Buffer to small for PBA data.\n");
+ return I40E_ERR_PARAM;
+ }
+
+ for (i = 0; i < pba_size; i++) {
+ status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
+ if (status != I40E_SUCCESS) {
+ DEBUGOUT1("Failed to read PBA Block word %d.\n", i);
+ return status;
+ }
+
+ pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+ pba_num[(pba_size * 2)] = '\0';
return status;
}
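
Each NVM word yields two part-number characters, so the caller's buffer must hold 2 * pba_size characters plus the NUL terminator. A hedged sizing sketch — the 10-word block size is an assumption for illustration:

	/* Hedged sketch: a 10-word PBA block needs 2 * 10 + 1 = 21 bytes. */
	u8 pba[21];

	status = i40e_read_pba_string(hw, pba, sizeof(pba));
	if (status == I40E_SUCCESS)
		DEBUGOUT1("PBA: %s\n", pba);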
@@ -782,12 +1257,15 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_CR1:
case I40E_PHY_TYPE_40GBASE_CR4:
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ case I40E_PHY_TYPE_40GBASE_AOC:
+ case I40E_PHY_TYPE_10GBASE_AOC:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
case I40E_PHY_TYPE_10GBASE_KX4:
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_40GBASE_KR4:
+ case I40E_PHY_TYPE_20GBASE_KR2:
media = I40E_MEDIA_TYPE_BACKPLANE;
break;
case I40E_PHY_TYPE_SGMII:
@@ -803,7 +1281,7 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
return media;
}
-#define I40E_PF_RESET_WAIT_COUNT 100
+#define I40E_PF_RESET_WAIT_COUNT 200
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@@ -822,9 +1300,14 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
* The grst delay value is in 100ms units, and we'll wait a
* couple counts longer to be sure we don't just miss the end.
*/
- grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
- >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- for (cnt = 0; cnt < grst_del + 2; cnt++) {
+ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+ I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+ I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+#ifdef I40E_ESS_SUPPORT
+ /* It can take up to 15 secs for GRST steady state */
+ grst_del = grst_del * 20; /* bump it to 16 secs max to be safe */
+#endif
+ for (cnt = 0; cnt < grst_del + 10; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
break;
@@ -1014,8 +1497,11 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
return gpio_val;
}
-#define I40E_LED0 22
+#define I40E_COMBINED_ACTIVITY 0xA
+#define I40E_FILTER_ACTIVITY 0xE
#define I40E_LINK_ACTIVITY 0xC
+#define I40E_MAC_ACTIVITY 0xD
+#define I40E_LED0 22
/**
* i40e_led_get - return current on/off mode
@@ -1028,6 +1514,7 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
**/
u32 i40e_led_get(struct i40e_hw *hw)
{
+ u32 current_mode = 0;
u32 mode = 0;
int i;
@@ -1040,6 +1527,18 @@ u32 i40e_led_get(struct i40e_hw *hw)
if (!gpio_val)
continue;
+ /* ignore gpio LED src mode entries related to the activity LEDs */
+ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+ switch (current_mode) {
+ case I40E_COMBINED_ACTIVITY:
+ case I40E_FILTER_ACTIVITY:
+ case I40E_MAC_ACTIVITY:
+ continue;
+ default:
+ break;
+ }
+
mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
break;
@@ -1059,6 +1558,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
**/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
+ u32 current_mode = 0;
int i;
if (mode & 0xfffffff0)
@@ -1073,6 +1573,18 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
if (!gpio_val)
continue;
+ /* ignore gpio LED src mode entries related to the activity LEDs */
+ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+ switch (current_mode) {
+ case I40E_COMBINED_ACTIVITY:
+ case I40E_FILTER_ACTIVITY:
+ case I40E_MAC_ACTIVITY:
+ continue;
+ default:
+ break;
+ }
+
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
/* this & is a bit of paranoia, but serves as a range check */
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
@@ -1081,8 +1593,10 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
if (mode == I40E_LINK_ACTIVITY)
blink = false;
- gpio_val |= (blink ? 1 : 0) <<
- I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT;
+ if (blink)
+ gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+ else
+ gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
break;
@@ -1134,6 +1648,9 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
status = I40E_ERR_UNKNOWN_PHY;
+ if (report_init)
+ hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+
return status;
}
@@ -1211,7 +1728,7 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
return status;
}
- memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+ memset(&config, 0, sizeof(config));
/* clear the old pause settings */
config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
~(I40E_AQ_PHY_FLAG_PAUSE_RX);
@@ -1234,14 +1751,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
}
/* Update the link info */
- status = i40e_update_link_info(hw, true);
+ status = i40e_update_link_info(hw);
if (status) {
/* Wait a little bit (on 40G cards it sometimes takes a really
* long time for link to come back from the atomic reset)
* and try once more
*/
i40e_msec_delay(1000);
- status = i40e_update_link_info(hw, true);
+ status = i40e_update_link_info(hw);
}
if (status)
*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
@@ -1379,7 +1896,7 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
/* save off old link status information */
i40e_memcpy(&hw->phy.link_info_old, hw_link_info,
- sizeof(struct i40e_link_status), I40E_NONDMA_TO_NONDMA);
+ sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA);
/* update link status */
hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
@@ -1414,9 +1931,13 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
else
hw_link_info->lse_enable = false;
+ if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+ hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+ hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+
/* save link status information */
if (link)
- i40e_memcpy(link, hw_link_info, sizeof(struct i40e_link_status),
+ i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
I40E_NONDMA_TO_NONDMA);
/* flag cleared so helper functions don't call AQ again */
@@ -1427,36 +1948,6 @@ aq_get_link_info_exit:
}
/**
- * i40e_update_link_info
- * @hw: pointer to the hw struct
- * @enable_lse: enable/disable LinkStatusEvent reporting
- *
- * Returns the link status of the adapter
- **/
-enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw,
- bool enable_lse)
-{
- struct i40e_aq_get_phy_abilities_resp abilities;
- enum i40e_status_code status;
-
- status = i40e_aq_get_link_info(hw, enable_lse, NULL, NULL);
- if (status)
- return status;
-
- status = i40e_aq_get_phy_capabilities(hw, false, false,
- &abilities, NULL);
- if (status)
- return status;
-
- if (abilities.abilities & I40E_AQ_PHY_AN_ENABLED)
- hw->phy.link_info.an_enabled = true;
- else
- hw->phy.link_info.an_enabled = false;
-
- return status;
-}
-
-/**
* i40e_aq_set_phy_int_mask
* @hw: pointer to the hw struct
* @mask: interrupt mask to be set
@@ -1767,6 +2258,74 @@ enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
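
A hedged usage sketch for the per-VLAN promiscuous helpers; the seid and VLAN values are illustrative, and hw and status come from the caller's context:

	/* Hedged sketch: capture all unicast traffic tagged with VLAN 100
	 * on this VSI. */
	status = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi_seid,
						    true /* enable */,
						    100 /* vid */, NULL);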
+
+/**
* i40e_aq_set_vsi_broadcast
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -1909,6 +2468,7 @@ enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @fw_major_version: firmware major version
* @fw_minor_version: firmware minor version
+ * @fw_build: firmware build number
* @api_major_version: major queue version
* @api_minor_version: minor queue version
* @cmd_details: pointer to command details structure or NULL
@@ -1917,6 +2477,7 @@ enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
**/
enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -1934,6 +2495,8 @@ enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
*fw_major_version = LE16_TO_CPU(resp->fw_major);
if (fw_minor_version != NULL)
*fw_minor_version = LE16_TO_CPU(resp->fw_minor);
+ if (fw_build != NULL)
+ *fw_build = LE32_TO_CPU(resp->fw_build);
if (api_major_version != NULL)
*api_major_version = LE16_TO_CPU(resp->api_major);
if (api_minor_version != NULL)
@@ -1974,7 +2537,7 @@ enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
- desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_SI);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
cmd->driver_major_ver = dv->major_version;
cmd->driver_minor_ver = dv->minor_version;
cmd->driver_build_ver = dv->build_version;
@@ -1994,27 +2557,52 @@ enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
/**
* i40e_get_link_status - get status of the HW network link
* @hw: pointer to the hw struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
*
- * Returns true if link is up, false if link is down.
+ * Sets link_up to true if the link is up, false if it is down.
+ * The value of link_up is invalid if the returned status != I40E_SUCCESS.
*
* Side effect: LinkStatusEvent reporting becomes enabled
**/
-bool i40e_get_link_status(struct i40e_hw *hw)
+enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
{
enum i40e_status_code status = I40E_SUCCESS;
- bool link_status = false;
if (hw->phy.get_link_info) {
- status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ status = i40e_update_link_info(hw);
if (status != I40E_SUCCESS)
- goto i40e_get_link_status_exit;
+ i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
+ status);
}
- link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+ *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
-i40e_get_link_status_exit:
- return link_status;
+ return status;
+}
+
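A hedged caller sketch reflecting the new contract — the bool return value was replaced by a status code, so callers must check the status before trusting link_up:

	bool link_up = false;

	if (i40e_get_link_status(hw, &link_up) == I40E_SUCCESS && link_up)
		; /* link is confirmed up */
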
+/**
+ * i40e_update_link_info - update status of the HW network link
+ * @hw: pointer to the hw struct
+ **/
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ if (status)
+ return status;
+
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status)
+ return status;
+
+ memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type));
+
+ return status;
}
/**
@@ -2178,7 +2766,7 @@ enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
if (count == 0 || !mv_list || !hw)
return I40E_ERR_PARAM;
- buf_size = count * sizeof(struct i40e_aqc_add_macvlan_element_data);
+ buf_size = count * sizeof(*mv_list);
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
@@ -2220,7 +2808,7 @@ enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
if (count == 0 || !mv_list || !hw)
return I40E_ERR_PARAM;
- buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data);
+ buf_size = count * sizeof(*mv_list);
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
@@ -2260,7 +2848,7 @@ enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
if (count == 0 || !v_list || !hw)
return I40E_ERR_PARAM;
- buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
+ buf_size = count * sizeof(*v_list);
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
@@ -2300,7 +2888,7 @@ enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
if (count == 0 || !v_list || !hw)
return I40E_ERR_PARAM;
- buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
+ buf_size = count * sizeof(*v_list);
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
@@ -2358,6 +2946,41 @@ enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
}
/**
+ * i40e_aq_debug_read_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the register using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_reg_read_write *cmd_resp =
+ (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reg_val == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
+
+ cmd_resp->address = CPU_TO_LE32(reg_addr);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS) {
+ *reg_val = ((u64)LE32_TO_CPU(cmd_resp->value_high) << 32) |
+ (u64)LE32_TO_CPU(cmd_resp->value_low);
+ }
+
+ return status;
+}
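
A hedged usage sketch; I40E_PRTGEN_CNF is used the same way in the capabilities-parsing code below, and hw and status are assumed from the caller's context:

	/* Hedged sketch: read a register through the admin queue, which
	 * returns the physical (not port-relative) 64-bit value. */
	u64 val = 0;

	status = i40e_aq_debug_read_register(hw, I40E_PRTGEN_CNF, &val, NULL);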
+
+/**
* i40e_aq_debug_write_register
* @hw: pointer to the hw struct
* @reg_addr: register address
@@ -2571,6 +3194,99 @@ i40e_aq_read_nvm_exit:
}
/**
+ * i40e_aq_read_nvm_config - read an nvm config block
+ * @hw: pointer to the hw struct
+ * @cmd_flags: NVM access admin command bits
+ * @field_id: field or feature id
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @element_count: pointer to count of elements read by FW
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, u32 field_id, void *data,
+ u16 buf_size, u16 *element_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_config_read *cmd =
+ (struct i40e_aqc_nvm_config_read *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+ cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id));
+ if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK)
+ cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16));
+ else
+ cmd->element_id_msw = 0;
+
+ status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
+
+ if (!status && element_count)
+ *element_count = LE16_TO_CPU(cmd->element_count);
+
+ return status;
+}
+
+/**
+ * i40e_aq_write_nvm_config - write an nvm config block
+ * @hw: pointer to the hw struct
+ * @cmd_flags: NVM access admin command bits
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @element_count: count of elements to be written
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, void *data, u16 buf_size,
+ u16 element_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_config_write *cmd =
+ (struct i40e_aqc_nvm_config_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->element_count = CPU_TO_LE16(element_count);
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+ status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_oem_post_update - triggers an OEM specific flow after update
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ UNREFERENCED_2PARAMETER(buff, buff_size);
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH)
+ status = I40E_ERR_NOT_IMPLEMENTED;
+
+ return status;
+}
+
+/**
* i40e_aq_erase_nvm
* @hw: pointer to the hw struct
* @module_pointer: module pointer location in words from the NVM beginning
@@ -2618,6 +3334,9 @@ i40e_aq_erase_nvm_exit:
#define I40E_DEV_FUNC_CAP_NPAR 0x03
#define I40E_DEV_FUNC_CAP_OS2BMC 0x04
#define I40E_DEV_FUNC_CAP_VALID_FUNC 0x05
+#ifdef X722_SUPPORT
+#define I40E_DEV_FUNC_CAP_WOL_PROXY 0x08
+#endif
#define I40E_DEV_FUNC_CAP_SRIOV_1_1 0x12
#define I40E_DEV_FUNC_CAP_VF 0x13
#define I40E_DEV_FUNC_CAP_VMDQ 0x14
@@ -2626,6 +3345,7 @@ i40e_aq_erase_nvm_exit:
#define I40E_DEV_FUNC_CAP_VSI 0x17
#define I40E_DEV_FUNC_CAP_DCB 0x18
#define I40E_DEV_FUNC_CAP_FCOE 0x21
+#define I40E_DEV_FUNC_CAP_ISCSI 0x22
#define I40E_DEV_FUNC_CAP_RSS 0x40
#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41
#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42
@@ -2633,12 +3353,13 @@ i40e_aq_erase_nvm_exit:
#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44
#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45
#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46
-#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1
+#define I40E_DEV_FUNC_CAP_FLEX10 0xF1
#define I40E_DEV_FUNC_CAP_CEM 0xF2
#define I40E_DEV_FUNC_CAP_IWARP 0x51
#define I40E_DEV_FUNC_CAP_LED 0x61
#define I40E_DEV_FUNC_CAP_SDP 0x62
#define I40E_DEV_FUNC_CAP_MDIO 0x63
+#define I40E_DEV_FUNC_CAP_WR_CSR_PROT 0x64
/**
* i40e_parse_discover_capabilities
@@ -2654,8 +3375,10 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
enum i40e_admin_queue_opc list_type_opc)
{
struct i40e_aqc_list_capabilities_element_resp *cap;
+ u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
+ u8 major_rev;
u32 i = 0;
u16 id;
@@ -2673,6 +3396,7 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
number = LE32_TO_CPU(cap->number);
logical_id = LE32_TO_CPU(cap->logical_id);
phys_id = LE32_TO_CPU(cap->phys_id);
+ major_rev = cap->major_rev;
switch (id) {
case I40E_DEV_FUNC_CAP_SWITCH_MODE:
@@ -2724,6 +3448,10 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
if (number == 1)
p->fcoe = true;
break;
+ case I40E_DEV_FUNC_CAP_ISCSI:
+ if (number == 1)
+ p->iscsi = true;
+ break;
case I40E_DEV_FUNC_CAP_RSS:
p->rss = true;
p->rss_table_size = number;
@@ -2743,9 +3471,21 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
case I40E_DEV_FUNC_CAP_MSIX_VF:
p->num_msix_vectors_vf = number;
break;
- case I40E_DEV_FUNC_CAP_MFP_MODE_1:
- if (number == 1)
- p->mfp_mode_1 = true;
+ case I40E_DEV_FUNC_CAP_FLEX10:
+ if (major_rev == 1) {
+ if (number == 1) {
+ p->flex10_enable = true;
+ p->flex10_capable = true;
+ }
+ } else {
+ /* Capability revision >= 2 */
+ if (number & 1)
+ p->flex10_enable = true;
+ if (number & 2)
+ p->flex10_capable = true;
+ }
+ p->flex10_mode = logical_id;
+ p->flex10_status = phys_id;
break;
case I40E_DEV_FUNC_CAP_CEM:
if (number == 1)
@@ -2778,16 +3518,69 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->fd_filters_guaranteed = number;
p->fd_filters_best_effort = logical_id;
break;
+ case I40E_DEV_FUNC_CAP_WR_CSR_PROT:
+ p->wr_csr_prot = (u64)number;
+ p->wr_csr_prot |= (u64)logical_id << 32;
+ break;
+#ifdef X722_SUPPORT
+ case I40E_DEV_FUNC_CAP_WOL_PROXY:
+ hw->num_wol_proxy_filters = (u16)number;
+ hw->wol_proxy_vsi_seid = (u16)logical_id;
+ p->apm_wol_support = phys_id & I40E_WOL_SUPPORT_MASK;
+ if (phys_id & I40E_ACPI_PROGRAMMING_METHOD_MASK)
+ p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK;
+ else
+ p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
+ p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
+ break;
+#endif
default:
break;
}
}
+ if (p->fcoe)
+ i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
+
+#ifdef I40E_FCOE_ENA
/* Software override ensuring FCoE is disabled if npar or mfp
* mode because it is not supported in these modes.
*/
- if (p->npar_enable || p->mfp_mode_1)
+ if (p->npar_enable || p->flex10_enable)
p->fcoe = false;
+#else
+ /* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */
+ p->fcoe = false;
+#endif
+
+ /* count the enabled ports (aka the "not disabled" ports) */
+ hw->num_ports = 0;
+ for (i = 0; i < 4; i++) {
+ u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
+ u64 port_cfg = 0;
+
+ /* use AQ read to get the physical register offset instead
+ * of the port relative offset
+ */
+ i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
+ if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
+ hw->num_ports++;
+ }
+
+ valid_functions = p->valid_functions;
+ num_functions = 0;
+ while (valid_functions) {
+ if (valid_functions & 1)
+ num_functions++;
+ valid_functions >>= 1;
+ }
+
+ /* partition id is 1-based, and functions are evenly spread
+ * across the ports as partitions
+ */
+ hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+ hw->num_partitions = num_functions / hw->num_ports;
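+
+ /* Worked example (hypothetical values): valid_functions = 0x00FF gives
+ * num_functions = 8; with num_ports = 4 and pf_id = 5:
+ *   partition_id   = 5 / 4 + 1 = 2
+ *   num_partitions = 8 / 4     = 2
+ */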
/* additional HW specific goodies that might
* someday be HW version specific
@@ -2944,6 +3737,45 @@ enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
return status;
}
+/**
+ * i40e_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the hw struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB.
+ **/
+enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ u8 mib_type, void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_set_local_mib *cmd =
+ (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_lldp_set_local_mib);
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->type = mib_type;
+ cmd->length = CPU_TO_LE16(buff_size);
+ cmd->address_high = CPU_TO_LE32(I40E_HI_WORD((u64)buff));
+ cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buff));
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ return status;
+}
+
/**
* i40e_aq_cfg_lldp_mib_change_event
* @hw: pointer to the hw struct
@@ -3173,6 +4005,64 @@ enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
}
/**
+ * i40e_aq_get_cee_dcb_config
+ * @hw: pointer to the hw struct
+ * @buff: response buffer that stores CEE operational configuration
+ * @buff_size: size of the buffer passed
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get CEE DCBX mode operational configuration from firmware
+ **/
+enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW
+ * @hw: pointer to the hw struct
+ * @start_agent: True if DCBx Agent needs to be Started
+ * False if DCBx Agent needs to be Stopped
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start/Stop the embedded dcbx Agent
+ **/
+enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
+ bool start_agent,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_stop_start_specific_agent *cmd =
+ (struct i40e_aqc_lldp_stop_start_specific_agent *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_lldp_stop_start_spec_agent);
+
+ if (start_agent)
+ cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
* @udp_port: the UDP port to add
@@ -3200,7 +4090,7 @@ enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
- if (!status)
+ if (!status && filter_index)
*filter_index = resp->index;
return status;
@@ -3251,8 +4141,7 @@ enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
struct i40e_aqc_get_switch_resource_alloc *cmd_resp =
(struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw;
enum i40e_status_code status;
- u16 length = count
- * sizeof(struct i40e_aqc_switch_resource_alloc_element_resp);
+ u16 length = count * sizeof(*buf);
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_switch_resource_alloc);
@@ -3263,7 +4152,7 @@ enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
- if (!status)
+ if (!status && num_entries)
*num_entries = cmd_resp->num_entries;
return status;
@@ -3678,7 +4567,7 @@ enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
- if (!status)
+ if (!status && stat_index)
*stat_index = LE16_TO_CPU(cmd_resp->stat_index);
return status;
@@ -3912,6 +4801,40 @@ enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
}
/**
+ * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component connected to Physical Port
+ * @ets_data: Buffer holding ETS parameters
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
+ sizeof(*ets_data), opcode, cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_switching_comp_bw_config,
+ cmd_details);
+}
+
+/**
* i40e_aq_config_switch_comp_ets_bw_limit - Config Switch comp BW Limit per TC
* @hw: pointer to the hw struct
* @seid: seid of the switching component
@@ -4240,6 +5163,28 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
/**
+ * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop Tx flow control frames
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid to add ethertype filter from
+ **/
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 seid)
+{
+ u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+ u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
+ enum i40e_status_code status;
+
+ status = i40e_aq_add_rem_control_packet_filter(hw, 0, ethtype, flag,
+ seid, 0, true, NULL,
+ NULL);
+ if (status)
+ DEBUGOUT("Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
+}
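
Ethertype 0x8808 is the IEEE 802.3 MAC control ethertype carried by PAUSE frames; the filter drops such frames on the Tx side of the given VSI. A hedged one-line usage sketch (the seid value is illustrative):

	/* Hedged sketch: install the drop filter on a VSI. */
	i40e_add_filter_to_drop_tx_flow_control_frames(hw, vsi_seid);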
+
+/**
* i40e_aq_add_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to add cloud filters from
@@ -4265,8 +5210,7 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
- buff_len = sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data) *
- filter_count;
+ buff_len = filter_count * sizeof(*filters);
desc.datalen = CPU_TO_LE16(buff_len);
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
cmd->num_filters = filter_count;
@@ -4303,8 +5247,7 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
- buff_len = sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data) *
- filter_count;
+ buff_len = filter_count * sizeof(*filters);
desc.datalen = CPU_TO_LE16(buff_len);
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
cmd->num_filters = filter_count;
@@ -4380,8 +5323,6 @@ enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
cmd_resp->address = CPU_TO_LE32(addr);
cmd_resp->length = CPU_TO_LE32(dw_count);
- cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_WORD((u64)buffer));
- cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buffer));
status = i40e_asq_send_command(hw, &desc, buffer,
I40E_LO_DWORD(4*dw_count), NULL);
@@ -4463,8 +5404,6 @@ enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
cmd_resp->address = CPU_TO_LE32(addr);
cmd_resp->length = CPU_TO_LE32(dw_count);
- cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_DWORD((u64)buffer));
- cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buffer));
status = i40e_asq_send_command(hw, &desc, buffer,
I40E_LO_DWORD(4*dw_count), NULL);
@@ -4519,7 +5458,7 @@ enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
cmd->cmd_flags = CPU_TO_LE16(bios_mode);
status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
- if (!status)
+ if (!status && reset_needed)
*reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) &
I40E_AQ_ALTERNATE_RESET_NEEDED) != 0);
@@ -4619,6 +5558,63 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
}
/**
+ * i40e_aq_debug_dump
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table id within cluster
+ * @start_index: index of line in the block to read
+ * @buff_size: dump buffer size
+ * @buff: dump buffer
+ * @ret_buff_size: actual buffer size returned
+ * @ret_next_table: next block to read
+ * @ret_next_index: next index to read
+ *
+ * Dump internal FW/HW data for debug purposes.
+ *
+ **/
+enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_dump_internals *cmd =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ struct i40e_aqc_debug_dump_internals *resp =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_debug_dump_internals);
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->cluster_id = cluster_id;
+ cmd->table_id = table_id;
+ cmd->idx = CPU_TO_LE32(start_index);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (ret_buff_size != NULL)
+ *ret_buff_size = LE16_TO_CPU(desc.datalen);
+ if (ret_next_table != NULL)
+ *ret_next_table = resp->table_id;
+ if (ret_next_index != NULL)
+ *ret_next_index = LE32_TO_CPU(resp->idx);
+ }
+
+ return status;
+}
+
+/**
* i40e_read_bw_from_alt_ram
* @hw: pointer to the hardware structure
* @max_bw: pointer for max_bw read
@@ -4637,11 +5633,11 @@ enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
/* Calculate the address of the min/max bw registers */
max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
- I40E_ALT_STRUCT_MAX_BW_OFFSET +
- (I40E_ALT_STRUCT_DWORDS_PER_PF*hw->pf_id);
+ I40E_ALT_STRUCT_MAX_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
- I40E_ALT_STRUCT_MIN_BW_OFFSET +
- (I40E_ALT_STRUCT_DWORDS_PER_PF*hw->pf_id);
+ I40E_ALT_STRUCT_MIN_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
/* Read the bandwidths from alt ram */
status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
@@ -4674,7 +5670,7 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
{
enum i40e_status_code status;
struct i40e_aq_desc desc;
- u16 bwd_size = sizeof(struct i40e_aqc_configure_partition_bw_data);
+ u16 bwd_size = sizeof(*bw_data);
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_partition_bw);
@@ -4692,6 +5688,8 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
return status;
}
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
/**
* i40e_aq_send_msg_to_pf
@@ -4791,3 +5789,162 @@ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
I40E_SUCCESS, NULL, 0, NULL);
}
#endif /* VF_DRIVER */
+#ifdef X722_SUPPORT
+
+/**
+ * i40e_aq_set_arp_proxy_config
+ * @hw: pointer to the HW structure
+ * @proxy_config: pointer to proxy config command table struct
+ * @cmd_details: pointer to command details
+ *
+ * Set ARP offload parameters from pre-populated
+ * i40e_aqc_arp_proxy_data struct
+ **/
+enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
+ struct i40e_aqc_arp_proxy_data *proxy_config,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_proxy_config *cmd =
+ (struct i40e_aqc_set_proxy_config *) &desc.params.raw;
+ enum i40e_status_code status;
+
+ if (!proxy_config)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
+
+ cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config));
+ cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config));
+
+ status = i40e_asq_send_command(hw, &desc, proxy_config,
+ sizeof(struct i40e_aqc_arp_proxy_data),
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_ns_proxy_table_entry
+ * @hw: pointer to the HW structure
+ * @ns_proxy_table_entry: pointer to NS table entry command struct
+ * @cmd_details: pointer to command details
+ *
+ * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters
+ * from pre-populated i40e_aqc_ns_proxy_data struct
+ **/
+enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
+ struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_ns_proxy_table_entry *cmd =
+ (struct i40e_aqc_set_ns_proxy_table_entry *) &desc.params.raw;
+ enum i40e_status_code status;
+
+ if (!ns_proxy_table_entry)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_ns_proxy_table_entry);
+
+ cmd->address_high =
+ CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry));
+ cmd->address_low =
+ CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry));
+
+ status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
+ sizeof(struct i40e_aqc_ns_proxy_data),
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_clear_wol_filter
+ * @hw: pointer to the hw struct
+ * @filter_index: index of filter to modify (0-7)
+ * @filter: buffer containing filter to be set
+ * @set_filter: true to set filter, false to clear filter
+ * @no_wol_tco: if true, pass-through packets cannot cause wake-up;
+ * if false, pass-through packets may cause wake-up
+ * @filter_valid: true if filter action is valid
+ * @no_wol_tco_valid: true if no WoL in TCO traffic action valid
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear WoL filter for port attached to the PF
+ **/
+enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
+ u8 filter_index,
+ struct i40e_aqc_set_wol_filter_data *filter,
+ bool set_filter, bool no_wol_tco,
+ bool filter_valid, bool no_wol_tco_valid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_wol_filter *cmd =
+ (struct i40e_aqc_set_wol_filter *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 cmd_flags = 0;
+ u16 valid_flags = 0;
+ u16 buff_len = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_wol_filter);
+
+ if (filter_index >= I40E_AQC_MAX_NUM_WOL_FILTERS)
+ return I40E_ERR_PARAM;
+ cmd->filter_index = CPU_TO_LE16(filter_index);
+
+ if (set_filter) {
+ if (!filter)
+ return I40E_ERR_PARAM;
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER;
+ buff_len = sizeof(*filter);
+ }
+ if (no_wol_tco)
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+
+ if (filter_valid)
+ valid_flags |= I40E_AQC_SET_WOL_FILTER_ACTION_VALID;
+ if (no_wol_tco_valid)
+ valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
+ cmd->valid_flags = CPU_TO_LE16(valid_flags);
+
+ cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter));
+ cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter));
+
+ status = i40e_asq_send_command(hw, &desc, filter,
+ buff_len, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_wake_event_reason
+ * @hw: pointer to the hw struct
+ * @wake_reason: return value, index of matching filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get information for the reason of a Wake Up event
+ **/
+enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
+ u16 *wake_reason,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_wake_reason_completion *resp =
+ (struct i40e_aqc_get_wake_reason_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_wake_reason);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS)
+ *wake_reason = LE16_TO_CPU(resp->wake_reason);
+
+ return status;
+}
+
+#endif /* X722_SUPPORT */
diff --git a/src/dpdk22/drivers/net/i40e/base/i40e_dcb.c b/src/dpdk22/drivers/net/i40e/base/i40e_dcb.c
new file mode 100644
index 00000000..d71387ff
--- /dev/null
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_dcb.c
@@ -0,0 +1,1297 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_dcb.h"
+
+/**
+ * i40e_get_dcbx_status
+ * @hw: pointer to the hw struct
+ * @status: Embedded DCBX Engine Status
+ *
+ * Get the DCBX status from the Firmware
+ **/
+enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+{
+ u32 reg;
+
+ if (!status)
+ return I40E_ERR_PARAM;
+
+ reg = rd32(hw, I40E_PRTDCB_GENS);
+ *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
+ I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
+ I40E_IEEE_ETS_WILLING_SHIFT);
+ etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
+ I40E_IEEE_ETS_CBS_SHIFT);
+ etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
+ I40E_IEEE_ETS_MAXTC_SHIFT);
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tsatable[i] = buf[offset++];
+}
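
The decode above packs two 3-bit priorities per octet, high nibble
first. A minimal standalone sketch, with the masks restated from the
I40E_IEEE_ETS_PRIO_0/1 defines in i40e_dcb.h rather than included:

    #include <stdint.h>
    #include <stdio.h>

    #define PRIO_0_MASK (0x7 << 0)  /* mirrors I40E_IEEE_ETS_PRIO_0_MASK */
    #define PRIO_1_MASK (0x7 << 4)  /* mirrors I40E_IEEE_ETS_PRIO_1_MASK */

    int main(void)
    {
        uint8_t table[4] = { 0x10, 0x32, 0x54, 0x76 }; /* sample TLV octets */
        uint8_t prio[8];
        int i;

        for (i = 0; i < 4; i++) {
            prio[i * 2]     = (table[i] & PRIO_1_MASK) >> 4; /* pri 0,2,4,6 */
            prio[i * 2 + 1] = (table[i] & PRIO_0_MASK) >> 0; /* pri 1,3,5,7 */
        }
        for (i = 0; i < 8; i++)
            printf("user priority %d -> TC %u\n", i, (unsigned)prio[i]);
        return 0;
    }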
+
+/**
+ * i40e_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* Move offset to priority table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
+ I40E_IEEE_PFC_WILLING_SHIFT);
+ dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
+ I40E_IEEE_PFC_MBC_SHIFT);
+ dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
+ I40E_IEEE_PFC_CAP_SHIFT);
+ dcbcfg->pfc.pfcenable = buf[1];
+}
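
The PFC CFG body is two octets: flags and capability in the first, the
per-TC enable bitmap in the second. A standalone sketch of the same
field extraction, shift values restated from i40e_dcb.h:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* willing=1, MBC=0, cap=8; enable bitmap 0x18 = TC3 and TC4 */
        uint8_t body[2] = { (1 << 7) | 8, 0x18 };

        printf("willing=%u mbc=%u cap=%u enable=0x%02x\n",
               (unsigned)((body[0] >> 7) & 1), /* I40E_IEEE_PFC_WILLING_SHIFT */
               (unsigned)((body[0] >> 6) & 1), /* I40E_IEEE_PFC_MBC_SHIFT */
               (unsigned)(body[0] & 0xF),      /* I40E_IEEE_PFC_CAP_MASK */
               (unsigned)body[1]);
        return 0;
    }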
+
+/**
+ * i40e_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ **/
+static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength;
+ u16 offset = 0;
+ u16 length;
+ int i = 0;
+ u8 *buf;
+
+ typelength = I40E_NTOHS(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ buf = tlv->tlvinfo;
+
+ /* The App priority table starts 5 octets after TLV header */
+ length -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (offset < length) {
+ dcbcfg->app[i].priority = (u8)((buf[offset] &
+ I40E_IEEE_APP_PRIO_MASK) >>
+ I40E_IEEE_APP_PRIO_SHIFT);
+ dcbcfg->app[i].selector = (u8)((buf[offset] &
+ I40E_IEEE_APP_SEL_MASK) >>
+ I40E_IEEE_APP_SEL_SHIFT);
+ dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
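
Each APP entry is three octets: priority in bits 23..21, selector in
bits 18..16, then a big-endian protocol ID. A standalone sketch of
decoding one entry; masks restated, 0x8906 is the FCoE EtherType:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* priority 3, selector 1 (EtherType), protocol 0x8906 */
        uint8_t app[3] = { (3 << 5) | 1, 0x89, 0x06 };
        uint8_t priority = (app[0] >> 5) & 0x7; /* I40E_IEEE_APP_PRIO_SHIFT */
        uint8_t selector = app[0] & 0x7;        /* I40E_IEEE_APP_SEL_MASK */
        uint16_t protocolid = (uint16_t)((app[1] << 8) | app[2]);

        printf("prio=%u sel=%u proto=0x%04x\n", (unsigned)priority,
               (unsigned)selector, (unsigned)protocolid);
        return 0;
    }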
+
+/**
+ * i40e_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and dispatch to the matching parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ switch (subtype) {
+ case I40E_IEEE_SUBTYPE_ETS_CFG:
+ i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_ETS_REC:
+ i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_PFC_CFG:
+ i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_APP_PRI:
+ i40e_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ **/
+static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ etscfg = &dcbcfg->etscfg;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ etscfg->willing = 1;
+
+ etscfg->cbs = 0;
+ /* Priority Group Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* PG Percentage Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* Number of TCs supported (1 octet) */
+ etscfg->maxtcs = buf[offset];
+}
+
+/**
+ * i40e_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ **/
+static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ dcbcfg->pfc.willing = 1;
+
+ /* ------------------------
+ * | PFC Enable | PFC TCs |
+ * ------------------------
+ * | 1 octet | 1 octet |
+ */
+ dcbcfg->pfc.pfcenable = buf[0];
+ dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * i40e_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ **/
+static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, typelength, offset = 0;
+ struct i40e_cee_app_prio *app;
+ u8 i, up, selector;
+
+ typelength = I40E_NTOHS(tlv->hdr.typelen);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+
+ dcbcfg->numapps = length/sizeof(*app);
+ if (!dcbcfg->numapps)
+ return;
+
+ for (i = 0; i < dcbcfg->numapps; i++) {
+ app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
+ for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
+ if (app->prio_map & BIT(up))
+ break;
+ }
+ dcbcfg->app[i].priority = up;
+ /* Get Selector from lower 2 bits, and convert to IEEE */
+ selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK);
+ if (selector == I40E_CEE_APP_SEL_ETHTYPE)
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ else if (selector == I40E_CEE_APP_SEL_TCPIP)
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ else
+ /* Keep selector as it is for unknown types */
+ dcbcfg->app[i].selector = selector;
+ dcbcfg->app[i].protocolid = I40E_NTOHS(app->protocol);
+ /* Move to next app */
+ offset += sizeof(*app);
+ }
+}
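
The CEE APP entry carries user priorities as a bitmap and the loop
above picks the lowest set bit. A standalone restatement, with BIT()
spelled out (the real code takes it from the OS-dependent headers):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))
    #define MAX_USER_PRIORITY 8 /* mirrors I40E_MAX_USER_PRIORITY */

    static uint8_t lowest_prio(uint8_t prio_map)
    {
        uint8_t up;

        for (up = 0; up < MAX_USER_PRIORITY; up++)
            if (prio_map & BIT(up))
                break;
        return up; /* MAX_USER_PRIORITY if the map is empty */
    }

    int main(void)
    {
        printf("map 0x28 -> priority %u\n", (unsigned)lowest_prio(0x28));
        return 0; /* bits 3 and 5 are set, so the lowest is 3 */
    }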
+
+/**
+ * i40e_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and dispatch to the matching parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 len, tlvlen, sublen, typelength;
+ struct i40e_cee_feat_tlv *sub_tlv;
+ u8 subtype, feat_tlv_count = 0;
+ u32 ouisubtype;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ /* Return if not CEE DCBX */
+ if (subtype != I40E_CEE_DCBX_TYPE)
+ return;
+
+ typelength = I40E_NTOHS(tlv->typelength);
+ tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
+ sizeof(struct i40e_cee_ctrl_tlv);
+ /* Return if no CEE DCBX Feature TLVs */
+ if (tlvlen <= len)
+ return;
+
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
+ while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
+ typelength = I40E_NTOHS(sub_tlv->hdr.typelen);
+ sublen = (u16)((typelength &
+ I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ switch (subtype) {
+ case I40E_CEE_SUBTYPE_PG_CFG:
+ i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_PFC_CFG:
+ i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_APP_PRI:
+ i40e_parse_cee_app_tlv(sub_tlv, dcbcfg);
+ break;
+ default:
+ return; /* Invalid Sub-type return */
+ }
+ feat_tlv_count++;
+ /* Move to next sub TLV */
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv +
+ sizeof(sub_tlv->hdr.typelen) +
+ sublen);
+ }
+}
+
+/**
+ * i40e_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Currently IEEE 802.1Qaz and CEE DCBX TLVs are supported;
+ * all other organizationally specific TLVs are ignored
+ **/
+static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
+ I40E_LLDP_TLV_OUI_SHIFT);
+ switch (oui) {
+ case I40E_IEEE_8021QAZ_OUI:
+ i40e_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ case I40E_CEE_DCBX_OUI:
+ i40e_parse_cee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_lldp_to_dcb_config
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ **/
+enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_org_tlv *tlv;
+ u16 type;
+ u16 length;
+ u16 typelength;
+ u16 offset = 0;
+
+ if (!lldpmib || !dcbcfg)
+ return I40E_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += I40E_LLDP_MIB_HLEN;
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ while (1) {
+ typelength = I40E_NTOHS(tlv->typelength);
+ type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ offset += sizeof(typelength) + length;
+
+ /* END TLV or beyond LLDPDU size */
+ if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
+ break;
+
+ switch (type) {
+ case I40E_TLV_TYPE_ORG:
+ i40e_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) +
+ length);
+ }
+
+ return ret;
+}
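
Every TLV walk in this file splits the 16-bit typelength word into a
7-bit type and a 9-bit length. A standalone sketch with the masks
restated (the 0x1FF length mask also appears verbatim where the MIB is
built further down); 127 is the IEEE 802.1AB organizationally specific
TLV type:

    #include <stdint.h>
    #include <stdio.h>

    #define TLV_TYPE_SHIFT 9
    #define TLV_TYPE_MASK (0x7F << TLV_TYPE_SHIFT) /* I40E_LLDP_TLV_TYPE_MASK */
    #define TLV_LEN_MASK  0x1FF                    /* I40E_LLDP_TLV_LEN_MASK */

    int main(void)
    {
        uint16_t typelength = (127 << TLV_TYPE_SHIFT) | 25; /* org TLV, 25 B */
        uint16_t type = (typelength & TLV_TYPE_MASK) >> TLV_TYPE_SHIFT;
        uint16_t length = typelength & TLV_LEN_MASK;

        printf("type=%u length=%u\n", (unsigned)type, (unsigned)length);
        return 0;
    }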
+
+/**
+ * i40e_aq_get_dcb_config
+ * @hw: pointer to the hw struct
+ * @mib_type: mib type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_virt_mem mem;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
+ (void *)lldpmib, I40E_LLDPDU_SIZE,
+ NULL, NULL, NULL);
+ if (ret)
+ goto free_mem;
+
+ /* Parse LLDP MIB to get dcb configuration */
+ ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);
+
+free_mem:
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_cee_to_dcb_v1_config
+ * @cee_cfg: pointer to CEE v1 response configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE v1 configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_v1_config(
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 status, tlv_status = LE16_TO_CPU(cee_cfg->tlv_status);
+ u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
+ u8 i, tc, err;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to ETS config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
+ I40E_AQC_CEE_APP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ /* Add APPs if Error is False */
+ if (!err) {
+ /* CEE operating configuration supports FCoE/iSCSI/FIP only */
+ dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
+
+ /* FCoE APP */
+ dcbcfg->app[0].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ /* iSCSI APP */
+ dcbcfg->app[1].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+
+ /* FIP APP */
+ dcbcfg->app[2].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+ }
+}
+
+/**
+ * i40e_cee_to_dcb_config
+ * @cee_cfg: pointer to CEE configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_config(
+ struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status);
+ u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
+ u8 i, tc, err, sync, oper;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to ETS config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ i = 0;
+ status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
+ I40E_AQC_CEE_FCOE_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FCoE APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* FCoE APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
+ i++;
+ }
+
+ status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
+ I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add iSCSI APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* iSCSI APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
+ i++;
+ }
+
+ status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
+ I40E_AQC_CEE_FIP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FIP APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* FIP APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
+ i++;
+ }
+ dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_get_ieee_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get IEEE mode DCB configuration from the Firmware
+ **/
+STATIC enum i40e_status_code i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+
+ /* IEEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+ /* Get Local DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = I40E_SUCCESS;
+
+out:
+ return ret;
+}
+
+/**
+ * i40e_get_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
+
+ /* If Firmware version < v4.33 IEEE only */
+ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4))
+ return i40e_get_ieee_dcb_config(hw);
+
+ /* If Firmware version == v4.33 use old CEE struct */
+ if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
+ sizeof(cee_v1_cfg), NULL);
+ if (ret == I40E_SUCCESS) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ LE16_TO_CPU(cee_v1_cfg.tlv_status);
+ i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
+ &hw->local_dcbx_config);
+ }
+ } else {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg,
+ sizeof(cee_cfg), NULL);
+ if (ret == I40E_SUCCESS) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ LE32_TO_CPU(cee_cfg.tlv_status);
+ i40e_cee_to_dcb_config(&cee_cfg,
+ &hw->local_dcbx_config);
+ }
+ }
+
+ /* CEE mode not enabled, try querying IEEE data */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ return i40e_get_ieee_dcb_config(hw);
+
+ if (ret != I40E_SUCCESS)
+ goto out;
+
+ /* Get CEE DCB Desired Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->desired_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = I40E_SUCCESS;
+
+out:
+ return ret;
+}
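
The firmware-version gate above reduces to a small pure predicate; a
standalone restatement with the thresholds as in the function:

    #include <stdio.h>

    enum dcb_path { DCB_PATH_IEEE_ONLY, DCB_PATH_CEE_V1, DCB_PATH_CEE };

    /* <4.33: IEEE only; ==4.33: old CEE struct; newer: current CEE struct */
    static enum dcb_path pick_dcb_path(unsigned maj, unsigned min)
    {
        if (maj < 4 || (maj == 4 && min < 33))
            return DCB_PATH_IEEE_ONLY;
        if (maj == 4 && min == 33)
            return DCB_PATH_CEE_V1;
        return DCB_PATH_CEE;
    }

    int main(void)
    {
        printf("%d %d %d\n", pick_dcb_path(4, 32), /* 0: IEEE only */
               pick_dcb_path(4, 33),               /* 1: CEE v1 */
               pick_dcb_path(5, 0));               /* 2: CEE */
        return 0;
    }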
+
+/**
+ * i40e_init_dcb
+ * @hw: pointer to the hw struct
+ *
+ * Update DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_variables lldp_cfg;
+ u8 adminstatus = 0;
+
+ if (!hw->func_caps.dcb)
+ return ret;
+
+ /* Read LLDP NVM area */
+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ if (ret)
+ return ret;
+
+ /* Get the LLDP AdminStatus for the current port */
+ adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
+ adminstatus &= 0xF;
+
+ /* LLDP agent disabled */
+ if (!adminstatus) {
+ hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
+ return ret;
+ }
+
+ /* Get DCBX status */
+ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
+ if (ret)
+ return ret;
+
+ /* Check the DCBX Status */
+ switch (hw->dcbx_status) {
+ case I40E_DCBX_STATUS_DONE:
+ case I40E_DCBX_STATUS_IN_PROGRESS:
+ /* Get current DCBX configuration */
+ ret = i40e_get_dcb_config(hw);
+ if (ret)
+ return ret;
+ break;
+ case I40E_DCBX_STATUS_DISABLED:
+ return ret;
+ case I40E_DCBX_STATUS_NOT_STARTED:
+ case I40E_DCBX_STATUS_MULTIPLE_PEERS:
+ default:
+ break;
+ }
+
+ /* Configure the LLDP MIB change event */
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
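
The AdminStatus word read from NVM packs one 4-bit status per port; a
standalone sketch of the extraction used above (the 0x3210 sample value
is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors the shift-and-mask in i40e_init_dcb: four bits per port */
    static uint8_t port_adminstatus(uint32_t adminstatus, unsigned port)
    {
        return (uint8_t)((adminstatus >> (port * 4)) & 0xF);
    }

    int main(void)
    {
        uint32_t word = 0x3210; /* port N gets status N */
        unsigned port;

        for (port = 0; port < 4; port++)
            printf("port %u adminstatus %u\n", port,
                   (unsigned)port_adminstatus(word, port));
        return 0;
    }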
+
+/**
+ * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 priority0, priority1, maxtcwilling = 0;
+ struct i40e_dcb_ets_config *etscfg;
+ u16 offset = 0, typelength, i;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = I40E_HTONS(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ if (etscfg->willing)
+ maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT);
+ maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK;
+ buf[offset] = maxtcwilling;
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etscfg->prioritytable[i * 2] & 0xF;
+ priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tsatable[i];
+}
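
The first ETS octet built above packs willing into bit 7 and max TCs
into bits 2..0 (CBS, bit 6, is never set on this path). A standalone
encode/decode round trip with the shift and mask restated:

    #include <stdint.h>
    #include <stdio.h>

    #define ETS_WILLING_SHIFT 7 /* mirrors I40E_IEEE_ETS_WILLING_SHIFT */
    #define ETS_MAXTC_MASK  0x7 /* mirrors I40E_IEEE_ETS_MAXTC_MASK */

    int main(void)
    {
        uint8_t willing = 1, maxtcs = 4;
        uint8_t octet = (uint8_t)((willing << ETS_WILLING_SHIFT) |
                                  (maxtcs & ETS_MAXTC_MASK));

        printf("octet=0x%02x willing=%u maxtcs=%u\n", (unsigned)octet,
               (unsigned)((octet >> ETS_WILLING_SHIFT) & 1),
               (unsigned)(octet & ETS_MAXTC_MASK));
        return 0;
    }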
+
+/**
+ * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etsrec;
+ u16 offset = 0, typelength, i;
+ u8 priority0, priority1;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = I40E_HTONS(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ etsrec = &dcbcfg->etsrec;
+ /* First Octet is reserved */
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etsrec->prioritytable[i * 2] & 0xF;
+ priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store to get PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelength;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_PFC_TLV_LENGTH);
+ tlv->typelength = I40E_HTONS(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ if (dcbcfg->pfc.willing)
+ buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT);
+
+ if (dcbcfg->pfc.mbc)
+ buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT);
+
+ buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcenable;
+}
+
+/**
+ * i40e_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store to get APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ **/
+static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength, length, offset = 0;
+ u8 priority, selector, i = 0;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ /* No APP TLVs then just return */
+ if (dcbcfg->numapps == 0)
+ return;
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_APP_PRI);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ /* Move offset to App Priority Table */
+ offset++;
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (i < dcbcfg->numapps) {
+ priority = dcbcfg->app[i].priority & 0x7;
+ selector = dcbcfg->app[i].selector & 0x7;
+ buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
+ buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+ /* length includes size of ouisubtype + 1 reserved + 3*numapps */
+ length = sizeof(tlv->ouisubtype) + 1 + (i*3);
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ (length & 0x1FF));
+ tlv->typelength = I40E_HTONS(typelength);
+}
+
+/**
+ * i40e_add_dcb_tlv - Add all IEEE TLVs
+ * @tlv: pointer to org tlv
+ * @dcbcfg: Local store which holds the DCB Config
+ * @tlvid: type of TLV to prepare
+ *
+ * add tlv information
+ **/
+static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg,
+ u16 tlvid)
+{
+ switch (tlvid) {
+ case I40E_IEEE_TLV_ID_ETS_CFG:
+ i40e_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_ETS_REC:
+ i40e_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_PFC_CFG:
+ i40e_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_APP_PRI:
+ i40e_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_set_dcb_config - Set the local LLDP MIB to FW
+ * @hw: pointer to the hw struct
+ *
+ * Set DCB configuration to the Firmware
+ **/
+enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_dcbx_config *dcbcfg;
+ struct i40e_virt_mem mem;
+ u8 mib_type, *lldpmib;
+ u16 miblen;
+
+ /* update the hw local config */
+ dcbcfg = &hw->local_dcbx_config;
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB;
+ if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) {
+ mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS <<
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT;
+ }
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg);
+ ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL);
+
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_dcb_config_to_lldp - Convert DCB configuration to MIB format
+ * @lldpmib: pointer to the buffer that receives the LLDPDU
+ * @miblen: pointer to u16 that receives the LLDPDU length
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Convert the DCB configuration to LLDP MIB format
+ **/
+enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, offset = 0, tlvid = I40E_TLV_ID_START;
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_org_tlv *tlv;
+ u16 type, typelength;
+
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ while (1) {
+ i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+ typelength = I40E_NTOHS(tlv->typelength);
+ type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ if (length)
+ offset += length + 2;
+ /* END TLV or beyond LLDPDU size */
+ if ((tlvid >= I40E_TLV_ID_END_OF_LLDPPDU) ||
+ (offset > I40E_LLDPDU_SIZE))
+ break;
+ /* Move to next TLV */
+ if (length)
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) + length);
+ }
+ *miblen = offset;
+ return ret;
+}
+
+/**
+ * i40e_read_lldp_cfg - read LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ *
+ * Reads the LLDP configuration data from NVM
+ **/
+enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR);
+
+ if (!lldp_cfg)
+ return I40E_ERR_PARAM;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset,
+ sizeof(struct i40e_lldp_variables),
+ (u8 *)lldp_cfg,
+ true, NULL);
+ i40e_release_nvm(hw);
+
+err_lldp_cfg:
+ return ret;
+}
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_dcb.h b/src/dpdk22/drivers/net/i40e/base/i40e_dcb.h
index 2261e080..3b709efd 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_dcb.h
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_dcb.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -54,6 +54,15 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_IEEE_SUBTYPE_PFC_CFG 11
#define I40E_IEEE_SUBTYPE_APP_PRI 12
+#define I40E_CEE_DCBX_OUI 0x001b21
+#define I40E_CEE_DCBX_TYPE 2
+
+#define I40E_CEE_SUBTYPE_CTRL 1
+#define I40E_CEE_SUBTYPE_PG_CFG 2
+#define I40E_CEE_SUBTYPE_PFC_CFG 3
+#define I40E_CEE_SUBTYPE_APP_PRI 4
+
+#define I40E_CEE_MAX_FEAT_TYPE 3
#define I40E_LLDP_ADMINSTATUS_DISABLED 0
#define I40E_LLDP_ADMINSTATUS_ENABLED_RX 1
#define I40E_LLDP_ADMINSTATUS_ENABLED_TX 2
@@ -74,13 +83,18 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_IEEE_ETS_MAXTC_SHIFT 0
#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
#define I40E_IEEE_ETS_CBS_SHIFT 6
-#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT)
#define I40E_IEEE_ETS_WILLING_SHIFT 7
-#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT)
#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_PRIO_0_SHIFT 0
+#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT)
+#define I40E_CEE_PGID_PRIO_1_SHIFT 4
+#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_STRICT 15
/* Defines for IEEE TSA types */
#define I40E_IEEE_TSA_STRICT 0
@@ -92,9 +106,9 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_IEEE_PFC_CAP_SHIFT 0
#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
#define I40E_IEEE_PFC_MBC_SHIFT 6
-#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT)
#define I40E_IEEE_PFC_WILLING_SHIFT 7
-#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+#define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT)
/* Defines for IEEE APP TLV */
#define I40E_IEEE_APP_SEL_SHIFT 0
@@ -102,6 +116,20 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_IEEE_APP_PRIO_SHIFT 5
#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
+/* TLV definitions for preparing MIB */
+#define I40E_TLV_ID_CHASSIS_ID 0
+#define I40E_TLV_ID_PORT_ID 1
+#define I40E_TLV_ID_TIME_TO_LIVE 2
+#define I40E_IEEE_TLV_ID_ETS_CFG 3
+#define I40E_IEEE_TLV_ID_ETS_REC 4
+#define I40E_IEEE_TLV_ID_PFC_CFG 5
+#define I40E_IEEE_TLV_ID_APP_PRI 6
+#define I40E_TLV_ID_END_OF_LLDPPDU 7
+#define I40E_TLV_ID_START I40E_IEEE_TLV_ID_ETS_CFG
+
+#define I40E_IEEE_ETS_TLV_LENGTH 25
+#define I40E_IEEE_PFC_TLV_LENGTH 6
+#define I40E_IEEE_APP_TLV_LENGTH 11
#pragma pack(1)
@@ -117,6 +145,36 @@ struct i40e_lldp_org_tlv {
__be32 ouisubtype;
u8 tlvinfo[1];
};
+
+struct i40e_cee_tlv_hdr {
+ __be16 typelen;
+ u8 operver;
+ u8 maxver;
+};
+
+struct i40e_cee_ctrl_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ __be32 seqno;
+ __be32 ackno;
+};
+
+struct i40e_cee_feat_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80
+#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
+#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20
+ u8 subtype;
+ u8 tlvinfo[1];
+};
+
+struct i40e_cee_app_prio {
+ __be16 protocol;
+ u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define I40E_CEE_APP_SELECTOR_MASK 0x03
+ __be16 lower_oui;
+ u8 prio_map;
+};
#pragma pack()
/*
@@ -158,4 +216,8 @@ enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
struct i40e_dcbx_config *dcbcfg);
enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw);
enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw);
+enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw);
+enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg);
+
#endif /* _I40E_DCB_H_ */
diff --git a/src/dpdk22/drivers/net/i40e/base/i40e_devids.h b/src/dpdk22/drivers/net/i40e/base/i40e_devids.h
new file mode 100644
index 00000000..26cfd549
--- /dev/null
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_devids.h
@@ -0,0 +1,70 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Vendor ID */
+#define I40E_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_20G_KR2 0x1587
+#define I40E_DEV_ID_20G_KR2_A 0x1588
+#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+#ifdef X722_SUPPORT
+#ifdef X722_A0_SUPPORT
+#define I40E_DEV_ID_X722_A0 0x374C
+#endif
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
+#endif /* X722_SUPPORT */
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
+
+#endif /* _I40E_DEVIDS_H_ */
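
i40e_is_40G_device() is a plain expression macro; a standalone sketch
with the QSFP device IDs restated from this header:

    #include <stdio.h>

    #define DEV_ID_QSFP_A    0x1583 /* mirrors I40E_DEV_ID_QSFP_A */
    #define DEV_ID_QSFP_B    0x1584
    #define DEV_ID_QSFP_C    0x1585
    #define DEV_ID_SFP_XL710 0x1572

    #define is_40G_device(d) ((d) == DEV_ID_QSFP_A || \
                              (d) == DEV_ID_QSFP_B || \
                              (d) == DEV_ID_QSFP_C)

    int main(void)
    {
        printf("0x1584 40G? %d\n", is_40G_device(0x1584));           /* 1 */
        printf("0x1572 40G? %d\n", is_40G_device(DEV_ID_SFP_XL710)); /* 0 */
        return 0;
    }
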
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_diag.c b/src/dpdk22/drivers/net/i40e/base/i40e_diag.c
index 167fcf8e..c3c76a0c 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_diag.c
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_diag.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -156,13 +156,10 @@ enum i40e_status_code i40e_diag_eeprom_test(struct i40e_hw *hw)
ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
if ((ret_code == I40E_SUCCESS) &&
((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
- (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
- ret_code = i40e_validate_nvm_checksum(hw, NULL);
- } else {
- ret_code = I40E_ERR_DIAG_TEST_FAILED;
- }
-
- return ret_code;
+ BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
+ return i40e_validate_nvm_checksum(hw, NULL);
+ else
+ return I40E_ERR_DIAG_TEST_FAILED;
}
/**
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_diag.h b/src/dpdk22/drivers/net/i40e/base/i40e_diag.h
index feb4d4b7..105b1191 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_diag.h
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_diag.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_hmc.c b/src/dpdk22/drivers/net/i40e/base/i40e_hmc.c
index ae6896a6..75d38412 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_hmc.c
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_hmc.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -127,6 +127,7 @@ exit:
* @hw: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
*
* This function:
* 1. Initializes the pd entry
@@ -140,12 +141,14 @@ exit:
**/
enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 pd_index)
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
struct i40e_hmc_pd_table *pd_table;
struct i40e_hmc_pd_entry *pd_entry;
struct i40e_dma_mem mem;
+ struct i40e_dma_mem *page = &mem;
u32 sd_idx, rel_pd_idx;
u64 *pd_addr;
u64 page_desc;
@@ -166,19 +169,25 @@ enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
pd_entry = &pd_table->pd_entry[rel_pd_idx];
if (!pd_entry->valid) {
- /* allocate a 4K backing page */
- ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
- I40E_HMC_PAGED_BP_SIZE,
- I40E_HMC_PD_BP_BUF_ALIGNMENT);
- if (ret_code)
- goto exit;
+ if (rsrc_pg) {
+ pd_entry->rsrc_pg = true;
+ page = rsrc_pg;
+ } else {
+ /* allocate a 4K backing page */
+ ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+ I40E_HMC_PAGED_BP_SIZE,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ pd_entry->rsrc_pg = false;
+ }
- i40e_memcpy(&pd_entry->bp.addr, &mem,
+ i40e_memcpy(&pd_entry->bp.addr, page,
sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
pd_entry->bp.sd_pd_index = pd_index;
pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
/* Set page address and valid bit */
- page_desc = mem.pa | 0x1;
+ page_desc = page->pa | 0x1;
pd_addr = (u64 *)pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
@@ -253,7 +262,8 @@ enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
/* free memory here */
- ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+ if (!pd_entry->rsrc_pg)
+ ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
if (I40E_SUCCESS != ret_code)
goto exit;
if (!pd_table->ref_cnt)
@@ -300,21 +310,15 @@ enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
u32 idx, bool is_pf)
{
struct i40e_hmc_sd_entry *sd_entry;
- enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
/* get the entry and decrease its ref counter */
sd_entry = &hmc_info->sd_table.sd_entry[idx];
- if (is_pf) {
- I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
- } else {
- ret_code = I40E_NOT_SUPPORTED;
- goto exit;
- }
- ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
- if (I40E_SUCCESS != ret_code)
- goto exit;
-exit:
- return ret_code;
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+ return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
}
/**
@@ -354,20 +358,13 @@ enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf)
{
- enum i40e_status_code ret_code = I40E_SUCCESS;
struct i40e_hmc_sd_entry *sd_entry;
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
+
sd_entry = &hmc_info->sd_table.sd_entry[idx];
- if (is_pf) {
- I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
- } else {
- ret_code = I40E_NOT_SUPPORTED;
- goto exit;
- }
- /* free memory here */
- ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
- if (I40E_SUCCESS != ret_code)
- goto exit;
-exit:
- return ret_code;
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+ return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
}
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_hmc.h b/src/dpdk22/drivers/net/i40e/base/i40e_hmc.h
index eb629fc6..343b251f 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_hmc.h
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_hmc.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -69,6 +69,7 @@ struct i40e_hmc_bp {
struct i40e_hmc_pd_entry {
struct i40e_hmc_bp bp;
u32 sd_index;
+ bool rsrc_pg;
bool valid;
};
@@ -133,8 +134,8 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
- (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -153,7 +154,7 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -225,7 +226,8 @@ enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 pd_index);
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx);
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_lan_hmc.c b/src/dpdk22/drivers/net/i40e/base/i40e_lan_hmc.c
index b08534bf..65117671 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_lan_hmc.c
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_lan_hmc.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -136,7 +136,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
obj->cnt = txq_num;
obj->base = 0;
size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (txq_num > obj->max_cnt) {
@@ -159,7 +159,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (rxq_num > obj->max_cnt) {
@@ -182,7 +182,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (fcoe_cntx_num > obj->max_cnt) {
@@ -205,7 +205,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (fcoe_filt_num > obj->max_cnt) {
@@ -394,7 +394,7 @@ enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
/* update the pd table entry */
ret_code = i40e_add_pd_table_entry(hw,
info->hmc_info,
- i);
+ i, NULL);
if (I40E_SUCCESS != ret_code) {
pd_error = true;
break;
@@ -438,9 +438,8 @@ exit_sd_error:
pd_idx1 = max(pd_idx,
((j - 1) * I40E_HMC_MAX_BP_COUNT));
pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
- for (i = pd_idx1; i < pd_lmt1; i++) {
+ for (i = pd_idx1; i < pd_lmt1; i++)
i40e_remove_pd_bp(hw, info->hmc_info, i);
- }
i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
break;
case I40E_SD_TYPE_DIRECT:
@@ -770,7 +769,7 @@ static void i40e_write_byte(u8 *hmc_bits,
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = ((u8)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
src_byte = *from;
src_byte &= mask;
@@ -811,7 +810,7 @@ static void i40e_write_word(u8 *hmc_bits,
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = ((u16)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
@@ -861,9 +860,9 @@ static void i40e_write_dword(u8 *hmc_bits,
* to 5 bits so the shift will do nothing
*/
if (ce_info->width < 32)
- mask = ((u32)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
else
- mask = 0xFFFFFFFF;
+ mask = ~(u32)0;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
@@ -913,9 +912,9 @@ static void i40e_write_qword(u8 *hmc_bits,
* to 6 bits so the shift will do nothing
*/
if (ce_info->width < 64)
- mask = ((u64)1 << ce_info->width) - 1;
+ mask = BIT_ULL(ce_info->width) - 1;
else
- mask = 0xFFFFFFFFFFFFFFFFUL;
+ mask = ~(u64)0;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
@@ -955,7 +954,7 @@ static void i40e_read_byte(u8 *hmc_bits,
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = ((u8)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
/* shift to correct alignment */
mask <<= shift_width;
@@ -993,7 +992,7 @@ static void i40e_read_word(u8 *hmc_bits,
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = ((u16)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
/* shift to correct alignment */
mask <<= shift_width;
@@ -1043,9 +1042,9 @@ static void i40e_read_dword(u8 *hmc_bits,
* to 5 bits so the shift will do nothing
*/
if (ce_info->width < 32)
- mask = ((u32)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
else
- mask = 0xFFFFFFFF;
+ mask = ~(u32)0;
/* shift to correct alignment */
mask <<= shift_width;
@@ -1096,9 +1095,9 @@ static void i40e_read_qword(u8 *hmc_bits,
* to 6 bits so the shift will do nothing
*/
if (ce_info->width < 64)
- mask = ((u64)1 << ce_info->width) - 1;
+ mask = BIT_ULL(ce_info->width) - 1;
else
- mask = 0xFFFFFFFFFFFFFFFFUL;
+ mask = ~(u64)0;
/* shift to correct alignment */
mask <<= shift_width;
@@ -1217,7 +1216,7 @@ static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
/**
* i40e_hmc_get_object_va - retrieves an object's virtual address
- * @hmc_info: pointer to i40e_hmc_info struct
+ * @hw: pointer to the hw structure
* @object_base: pointer to u64 to get the va
* @rsrc_type: the hmc resource type
* @obj_idx: hmc object index
@@ -1226,12 +1225,13 @@ static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
* base pointer. This function is used for LAN Queue contexts.
**/
STATIC
-enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
+enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
u8 **object_base,
enum i40e_hmc_lan_rsrc_type rsrc_type,
u32 obj_idx)
{
u32 obj_offset_in_sd, obj_offset_in_pd;
+ struct i40e_hmc_info *hmc_info = &hw->hmc;
struct i40e_hmc_sd_entry *sd_entry;
struct i40e_hmc_pd_entry *pd_entry;
u32 pd_idx, pd_lmt, rel_pd_idx;
@@ -1303,8 +1303,7 @@ enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
enum i40e_status_code err;
u8 *context_bytes;
- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
- I40E_HMC_LAN_TX, queue);
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
if (err < 0)
return err;
@@ -1323,8 +1322,7 @@ enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
enum i40e_status_code err;
u8 *context_bytes;
- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
- I40E_HMC_LAN_TX, queue);
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
if (err < 0)
return err;
@@ -1344,8 +1342,7 @@ enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
enum i40e_status_code err;
u8 *context_bytes;
- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
- I40E_HMC_LAN_TX, queue);
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
if (err < 0)
return err;
@@ -1366,8 +1363,7 @@ enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
enum i40e_status_code err;
u8 *context_bytes;
- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
- I40E_HMC_LAN_RX, queue);
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
if (err < 0)
return err;
@@ -1386,8 +1382,7 @@ enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
enum i40e_status_code err;
u8 *context_bytes;
- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
- I40E_HMC_LAN_RX, queue);
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
if (err < 0)
return err;
@@ -1407,8 +1402,7 @@ enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
enum i40e_status_code err;
u8 *context_bytes;
- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
- I40E_HMC_LAN_RX, queue);
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
if (err < 0)
return err;
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_lan_hmc.h b/src/dpdk22/drivers/net/i40e/base/i40e_lan_hmc.h
index 70ef65cb..b2a43104 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_lan_hmc.h
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_lan_hmc.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_nvm.c b/src/dpdk22/drivers/net/i40e/base/i40e_nvm.c
index c62f5eb7..60f2bb9e 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_nvm.c
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_nvm.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,6 +33,18 @@ POSSIBILITY OF SUCH DAMAGE.
#include "i40e_prototype.h"
+enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command);
+
/**
* i40e_init_nvm_ops - Initialize NVM function pointers
* @hw: pointer to the HW structure
@@ -59,7 +71,7 @@ enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
I40E_GLNVM_GENS_SR_SIZE_SHIFT);
/* Switching to words (sr_size contains power of 2KB) */
- nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+ nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
/* Check if we are in the normal or blank NVM programming mode */
fla = rd32(hw, I40E_GLNVM_FLA);
@@ -70,7 +82,7 @@ enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
} else { /* Blank programming mode */
nvm->blank_nvm_mode = true;
ret_code = I40E_ERR_NVM_BLANK_MODE;
- DEBUGOUT("NVM init error: unsupported blank mode.\n");
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
}
return ret_code;
@@ -89,7 +101,7 @@ enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
u64 gtime, timeout;
- u64 time = 0;
+ u64 time_left = 0;
DEBUGFUNC("i40e_acquire_nvm");
@@ -97,40 +109,39 @@ enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
goto i40e_i40e_acquire_nvm_exit;
ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
- 0, &time, NULL);
+ 0, &time_left, NULL);
/* Reading the Global Device Timer */
gtime = rd32(hw, I40E_GLVFGEN_TIMER);
/* Store the timeout */
- hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
+ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
- if (ret_code != I40E_SUCCESS) {
- /* Set the polling timeout */
- if (time > I40E_MAX_NVM_TIMEOUT)
- timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
- + gtime;
- else
- timeout = hw->nvm.hw_semaphore_timeout;
+ if (ret_code)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
+ access, time_left, ret_code, hw->aq.asq_last_status);
+
+ if (ret_code && time_left) {
/* Poll until the current NVM owner timeouts */
- while (gtime < timeout) {
+ timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
+ while ((gtime < timeout) && time_left) {
i40e_msec_delay(10);
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
ret_code = i40e_aq_request_resource(hw,
I40E_NVM_RESOURCE_ID,
- access, 0, &time,
+ access, 0, &time_left,
NULL);
if (ret_code == I40E_SUCCESS) {
hw->nvm.hw_semaphore_timeout =
- I40E_MS_TO_GTIME(time) + gtime;
+ I40E_MS_TO_GTIME(time_left) + gtime;
break;
}
- gtime = rd32(hw, I40E_GLVFGEN_TIMER);
}
if (ret_code != I40E_SUCCESS) {
hw->nvm.hw_semaphore_timeout = 0;
- hw->nvm.hw_semaphore_wait =
- I40E_MS_TO_GTIME(time) + gtime;
- DEBUGOUT1("NVM acquire timed out, wait %llu ms before trying again.\n",
- time);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
+ time_left, ret_code, hw->aq.asq_last_status);
}
}
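The reworked acquire path uses the time_left value that i40e_aq_request_resource() reports back: when another function currently owns the NVM semaphore, firmware returns how long that ownership can still last, and the new loop polls only while both that budget and the global I40E_MAX_NVM_TIMEOUT window remain. A rough sketch of the polling contract, with the i40e-specific calls stubbed out (the names, the 10 ms step, and the millisecond units are simplifications, not the driver's API):

    #include <stdint.h>

    /* Hypothetical stand-ins for rd32(hw, I40E_GLVFGEN_TIMER), the AQ
     * resource request, and the millisecond delay used by the driver. */
    extern uint64_t read_gtime(void);
    extern int request_resource(uint64_t *time_left); /* 0 on success */
    extern void msec_delay(unsigned ms);

    #define MAX_NVM_TIMEOUT_MS 180000ULL

    int acquire_with_poll(void)
    {
        uint64_t time_left = 0;
        int ret = request_resource(&time_left);
        uint64_t gtime = read_gtime();
        uint64_t timeout = MAX_NVM_TIMEOUT_MS + gtime; /* upper bound on polling */

        /* time_left == 0 means there is no owner to wait out, so fail fast. */
        while (ret != 0 && time_left && gtime < timeout) {
            msec_delay(10);
            gtime = read_gtime();
            ret = request_resource(&time_left);
        }
        return ret;
    }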
@@ -146,10 +157,26 @@ i40e_i40e_acquire_nvm_exit:
**/
void i40e_release_nvm(struct i40e_hw *hw)
{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 total_delay = 0;
+
DEBUGFUNC("i40e_release_nvm");
- if (!hw->nvm.blank_nvm_mode)
- i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ if (hw->nvm.blank_nvm_mode)
+ return;
+
+ ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+
+ /* there are some rare cases when trying to release the resource
+ * results in an admin Q timeout, so handle them correctly
+ */
+ while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
+ (total_delay < hw->aq.asq_cmd_timeout)) {
+ i40e_msec_delay(1);
+ ret_code = i40e_aq_release_resource(hw,
+ I40E_NVM_RESOURCE_ID, 0, NULL);
+ total_delay++;
+ }
}
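i40e_release_nvm() now tolerates the rare case where the release AQ command itself times out: it retries once per millisecond, bounded by the admin queue's own command timeout, instead of silently leaving the semaphore held. Stripped of driver types, the shape is a plain bounded retry (the -2 below merely stands in for I40E_ERR_ADMIN_QUEUE_TIMEOUT):

    /* Bounded retry sketch: call retry_fn() once per tick until it stops
     * timing out or the millisecond budget runs out. */
    int release_bounded(int (*retry_fn)(void), void (*delay_1ms)(void),
                        unsigned budget_ms)
    {
        int rc = retry_fn();
        unsigned waited = 0;

        while (rc == -2 /* AQ timeout */ && waited < budget_ms) {
            delay_1ms();
            rc = retry_fn();
            waited++;
        }
        return rc;
    }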
/**
@@ -175,7 +202,7 @@ static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
i40e_usec_delay(5);
}
if (ret_code == I40E_ERR_TIMEOUT)
- DEBUGOUT("Done bit in GLNVM_SRCTL not set");
+ i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
return ret_code;
}
@@ -190,13 +217,33 @@ static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
+#ifdef X722_SUPPORT
+ if (hw->mac.type == I40E_MAC_X722)
+ return i40e_read_nvm_word_aq(hw, offset, data);
+#endif
+ return i40e_read_nvm_word_srctl(hw, offset, data);
+}
+
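With this hunk i40e_read_nvm_word() becomes a pure dispatcher: on X722 parts the Shadow RAM is read through the AdminQ, everywhere else through the legacy GLNVM_SRCTL register path. The pattern keeps one public entry point while the backend varies by MAC type; a stripped-down sketch with simplified names and types:

    enum mac_type { MAC_XL710, MAC_X722 };

    struct hw { enum mac_type mac; };

    int read_word_srctl(struct hw *hw, unsigned offset, unsigned short *data);
    int read_word_aq(struct hw *hw, unsigned offset, unsigned short *data);

    int read_nvm_word(struct hw *hw, unsigned offset, unsigned short *data)
    {
    #ifdef X722_SUPPORT
        if (hw->mac == MAC_X722)
            return read_word_aq(hw, offset, data);
    #endif
        return read_word_srctl(hw, offset, data);
    }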
+/**
+ * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
u32 sr_reg;
- DEBUGFUNC("i40e_read_nvm_srctl");
+ DEBUGFUNC("i40e_read_nvm_word_srctl");
if (offset >= hw->nvm.sr_size) {
- DEBUGOUT("NVM read error: Offset beyond Shadow RAM limit.\n");
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
+ offset, hw->nvm.sr_size);
ret_code = I40E_ERR_PARAM;
goto read_nvm_exit;
}
@@ -205,8 +252,8 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (ret_code == I40E_SUCCESS) {
/* Write the address and start reading */
- sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
- (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+ sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ BIT(I40E_GLNVM_SRCTL_START_SHIFT);
wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
/* Poll I40E_GLNVM_SRCTL until the done bit is set */
@@ -219,14 +266,36 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
}
}
if (ret_code != I40E_SUCCESS)
- DEBUGOUT1("NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
- offset);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
+ offset);
read_nvm_exit:
return ret_code;
}
/**
+ * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the AdminQ.
+ **/
+enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+
+ DEBUGFUNC("i40e_read_nvm_word_aq");
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
+ *data = LE16_TO_CPU(*(__le16 *)data);
+
+ return ret_code;
+}
+
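Data returned by i40e_aq_read_nvm() arrives little-endian, so the AQ-backed readers convert each word with LE16_TO_CPU before handing it up, and the write paths further down (i40e_write_nvm_word()/i40e_write_nvm_buffer()) apply the inverse CPU_TO_LE16 swizzle in place before issuing the AQ command. On a little-endian host both are no-ops; the point is correctness on big-endian hosts. A self-contained illustration of the conversions, assuming nothing beyond standard C:

    #include <stdint.h>

    /* Portable le16-to-cpu: reassemble from bytes, so it is correct on
     * either host endianness (LE16_TO_CPU is the driver's equivalent). */
    static inline uint16_t le16_to_cpu(const uint8_t *p)
    {
        return (uint16_t)(p[0] | ((uint16_t)p[1] << 8));
    }

    static inline void cpu_to_le16(uint16_t v, uint8_t *p)
    {
        p[0] = (uint8_t)(v & 0xff);
        p[1] = (uint8_t)(v >> 8);
    }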
+/**
* i40e_read_nvm_buffer - Reads Shadow RAM buffer
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
@@ -240,15 +309,36 @@ read_nvm_exit:
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
+#ifdef X722_SUPPORT
+ if (hw->mac.type == I40E_MAC_X722)
+ return i40e_read_nvm_buffer_aq(hw, offset, words, data);
+#endif
+ return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+}
+
+/**
+ * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_word_srctl()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
enum i40e_status_code ret_code = I40E_SUCCESS;
u16 index, word;
- DEBUGFUNC("i40e_read_nvm_buffer");
+ DEBUGFUNC("i40e_read_nvm_buffer_srctl");
/* Loop thru the selected region */
for (word = 0; word < *words; word++) {
index = offset + word;
- ret_code = i40e_read_nvm_word(hw, index, &data[word]);
+ ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
if (ret_code != I40E_SUCCESS)
break;
}
@@ -258,6 +348,118 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
return ret_code;
}
+
+/**
+ * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ enum i40e_status_code ret_code;
+ u16 read_size = *words;
+ bool last_cmd = false;
+ u16 words_read = 0;
+ u16 i = 0;
+
+ DEBUGFUNC("i40e_read_nvm_buffer_aq");
+
+ do {
+ /* Calculate the number of words we should read in this step.
+ * The FVL AQ does not allow reading more than one 4KB sector at a
+ * time or crossing sector boundaries.
+ */
+ if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
+ read_size = min(*words,
+ (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
+ (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
+ else
+ read_size = min((*words - words_read),
+ I40E_SR_SECTOR_SIZE_IN_WORDS);
+
+ /* Check if this is last command, if so set proper flag */
+ if ((words_read + read_size) >= *words)
+ last_cmd = true;
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
+ data + words_read, last_cmd);
+ if (ret_code != I40E_SUCCESS)
+ goto read_nvm_buffer_aq_exit;
+
+ /* Increment counter for words already read and move offset to
+ * new read location
+ */
+ words_read += read_size;
+ offset += read_size;
+ } while (words_read < *words);
+
+ for (i = 0; i < *words; i++)
+ data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
+
+read_nvm_buffer_aq_exit:
+ *words = words_read;
+ return ret_code;
+}
+
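i40e_read_nvm_buffer_aq() has to split an arbitrary word range into AQ reads that neither exceed one sector (I40E_SR_SECTOR_SIZE_IN_WORDS words) nor cross a sector boundary. The first chunk therefore only runs up to the next boundary; every later chunk starts aligned and may take a full sector. The chunking arithmetic in isolation (2048 is an assumed value for the sector size, i.e. 4KB of 16-bit words):

    #include <stdio.h>

    #define SECTOR_WORDS 2048u /* assumed I40E_SR_SECTOR_SIZE_IN_WORDS */

    static unsigned next_chunk(unsigned offset, unsigned words_left)
    {
        unsigned in_sector = offset % SECTOR_WORDS;

        if (in_sector)  /* unaligned start: stop at the sector boundary */
            return words_left < SECTOR_WORDS - in_sector ?
                   words_left : SECTOR_WORDS - in_sector;
        return words_left < SECTOR_WORDS ? words_left : SECTOR_WORDS;
    }

    int main(void)
    {
        unsigned offset = 2000, left = 5000;

        while (left) {                 /* prints 48, 2048, 2048, 856 words */
            unsigned n = next_chunk(offset, left);
            printf("read %u words at offset %u\n", n, offset);
            offset += n;
            left -= n;
        }
        return 0;
    }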
+/**
+ * i40e_read_nvm_aq - Read Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer for words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
+ **/
+enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
+{
+ enum i40e_status_code ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
+
+ DEBUGFUNC("i40e_read_nvm_aq");
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can read only up to 4KB (one sector) in one AQ read */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read fail error: tried to read %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single read cannot spread over two sectors */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_read_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, &cmd_details);
+
+ return ret_code;
+}
+
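The guard clauses in i40e_read_nvm_aq() reject three cases up front: running past the flat Shadow RAM, asking for more than one sector per command, and a range that straddles a sector boundary. The last check compares the sector index of the first and last word: dividing by the sector size gives the sector number, so unequal quotients mean the range crosses. The predicate on its own, with an assumed 2048-word sector:

    #define SECTOR 2048u

    /* Last word is offset + words - 1; equal quotients mean same sector. */
    static int crosses_sector(unsigned offset, unsigned words)
    {
        return (offset + words - 1) / SECTOR != offset / SECTOR;
    }

    /* crosses_sector(2040, 8) -> 0 (words 2040..2047, still sector 0)
     * crosses_sector(2040, 9) -> 1 (word 2048 lands in sector 1)     */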
/**
* i40e_write_nvm_aq - Writes Shadow RAM.
* @hw: pointer to the HW structure.
@@ -274,9 +476,13 @@ enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
bool last_command)
{
enum i40e_status_code ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
DEBUGFUNC("i40e_write_nvm_aq");
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
/* Here we are checking the SR limit only for the flat memory model.
* We cannot do it for the module-based model, as we did not acquire
* the NVM resource yet (we cannot get the module pointer value).
@@ -295,7 +501,7 @@ enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
- data, last_command, NULL);
+ data, last_command, &cmd_details);
return ret_code;
}
@@ -316,6 +522,8 @@ enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
{
DEBUGFUNC("i40e_write_nvm_word");
+ *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
+
/* Value 0x00 below means that we treat SR as a flat mem */
return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
}
@@ -337,8 +545,15 @@ enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw,
u8 module_pointer, u32 offset,
u16 words, void *data)
{
+ __le16 *le_word_ptr = (__le16 *)data;
+ u16 *word_ptr = (u16 *)data;
+ u32 i = 0;
+
DEBUGFUNC("i40e_write_nvm_buffer");
+ for (i = 0; i < words; i++)
+ le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
+
/* Here we will only write one buffer as the size of the modules
* mirrored in the Shadow RAM is always less than 4K.
*/
@@ -359,14 +574,21 @@ enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw,
enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_virt_mem vmem;
u16 pcie_alt_module = 0;
u16 checksum_local = 0;
u16 vpd_module = 0;
- u16 word = 0;
- u32 i = 0;
+ u16 *data;
+ u16 i = 0;
DEBUGFUNC("i40e_calc_nvm_checksum");
+ ret_code = i40e_allocate_virt_mem(hw, &vmem,
+ I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
+ if (ret_code)
+ goto i40e_calc_nvm_checksum_exit;
+ data = (u16 *)vmem.va;
+
/* read pointer to VPD area */
ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
if (ret_code != I40E_SUCCESS) {
@@ -376,7 +598,7 @@ enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
/* read pointer to PCIe Alt Auto-load module */
ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
- &pcie_alt_module);
+ &pcie_alt_module);
if (ret_code != I40E_SUCCESS) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
@@ -386,33 +608,40 @@ enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
* except the VPD and PCIe ALT Auto-load modules
*/
for (i = 0; i < hw->nvm.sr_size; i++) {
+ /* Read SR page */
+ if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+ u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
+
+ ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+ }
+
/* Skip Checksum word */
if (i == I40E_SR_SW_CHECKSUM_WORD)
- i++;
+ continue;
/* Skip VPD module (convert byte size to word count) */
- if (i == (u32)vpd_module) {
- i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
- if (i >= hw->nvm.sr_size)
- break;
+ if ((i >= (u32)vpd_module) &&
+ (i < ((u32)vpd_module +
+ (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
+ continue;
}
/* Skip PCIe ALT module (convert byte size to word count) */
- if (i == (u32)pcie_alt_module) {
- i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
- if (i >= hw->nvm.sr_size)
- break;
+ if ((i >= (u32)pcie_alt_module) &&
+ (i < ((u32)pcie_alt_module +
+ (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
+ continue;
}
- ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
- if (ret_code != I40E_SUCCESS) {
- ret_code = I40E_ERR_NVM_CHECKSUM;
- goto i40e_calc_nvm_checksum_exit;
- }
- checksum_local += word;
+ checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
}
*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
i40e_calc_nvm_checksum_exit:
+ i40e_free_virt_mem(hw, &vmem);
return ret_code;
}
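The checksum rework trades one register/AQ round trip per word for one buffered read per sector: whenever i crosses a sector boundary, i40e_calc_nvm_checksum() pulls the whole sector into a scratch buffer and sums data[i % SECTOR]. Skipping the checksum word and the VPD and PCIe-ALT module ranges is now done with continue over explicit ranges rather than by jumping i forward, which would desynchronize the loop from the buffered reads. A compact model of the loop (read_sector() is a placeholder for i40e_read_nvm_buffer(); the skip parameters model I40E_SR_SW_CHECKSUM_WORD and the two module ranges):

    #include <stdint.h>

    #define SECTOR 2048u /* assumed I40E_SR_SECTOR_SIZE_IN_WORDS */

    extern int read_sector(unsigned base, uint16_t *buf); /* 0 on success */

    uint16_t sum_shadow_ram(unsigned sr_size, unsigned csum_word,
                            unsigned vpd, unsigned vpd_len,
                            unsigned alt, unsigned alt_len)
    {
        uint16_t buf[SECTOR], sum = 0; /* u16 wraparound is intentional */

        for (unsigned i = 0; i < sr_size; i++) {
            if (i % SECTOR == 0 && read_sector(i, buf))
                return 0;                       /* read failure */
            if (i == csum_word)
                continue;                       /* checksum word itself */
            if (i >= vpd && i < vpd + vpd_len)
                continue;                       /* VPD module */
            if (i >= alt && i < alt + alt_len)
                continue;                       /* PCIe ALT module */
            sum += buf[i % SECTOR];
        }
        return sum;
    }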
@@ -428,13 +657,15 @@ enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
u16 checksum;
+ __le16 le_sum;
DEBUGFUNC("i40e_update_nvm_checksum");
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+ le_sum = CPU_TO_LE16(checksum);
if (ret_code == I40E_SUCCESS)
ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
- 1, &checksum, true);
+ 1, &le_sum, true);
return ret_code;
}
@@ -481,71 +712,128 @@ i40e_validate_nvm_checksum_exit:
STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *err);
+ u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *err);
+ u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *err);
+ u8 *bytes, int *perrno);
STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- int *err);
+ int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- int *err);
+ int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *err);
+ u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *err);
-STATIC inline u8 i40e_nvmupd_get_module(u32 val)
+ u8 *bytes, int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}
-STATIC inline u8 i40e_nvmupd_get_transaction(u32 val)
+STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
{
return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
+STATIC const char *i40e_nvm_update_state_str[] = {
+ "I40E_NVMUPD_INVALID",
+ "I40E_NVMUPD_READ_CON",
+ "I40E_NVMUPD_READ_SNT",
+ "I40E_NVMUPD_READ_LCB",
+ "I40E_NVMUPD_READ_SA",
+ "I40E_NVMUPD_WRITE_ERA",
+ "I40E_NVMUPD_WRITE_CON",
+ "I40E_NVMUPD_WRITE_SNT",
+ "I40E_NVMUPD_WRITE_LCB",
+ "I40E_NVMUPD_WRITE_SA",
+ "I40E_NVMUPD_CSUM_CON",
+ "I40E_NVMUPD_CSUM_SA",
+ "I40E_NVMUPD_CSUM_LCB",
+ "I40E_NVMUPD_STATUS",
+ "I40E_NVMUPD_EXEC_AQ",
+ "I40E_NVMUPD_GET_AQ_RESULT",
+};
+
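i40e_nvm_update_state_str[] is indexed directly by enum i40e_nvmupd_cmd (see the i40e_nvm_update_state_str[upd_cmd] uses below), so its order must track the enum exactly, including the two new EXEC_AQ/GET_AQ_RESULT entries at the tail. When maintaining a table like this it is cheap to let the compiler enforce the pairing; a sketch of the idiom, with illustrative enum values rather than the driver's:

    enum nvmupd_cmd { NVMUPD_INVALID, NVMUPD_READ_CON, NVMUPD_LAST };

    static const char *nvmupd_str[] = {
        "NVMUPD_INVALID",
        "NVMUPD_READ_CON",
    };

    /* C11: fails to compile if the table and the enum drift apart. */
    _Static_assert(sizeof(nvmupd_str) / sizeof(nvmupd_str[0]) == NVMUPD_LAST,
                   "nvmupd_str out of sync with enum nvmupd_cmd");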
/**
* i40e_nvmupd_command - Process an NVM update command
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command
* @bytes: pointer to the data buffer
- * @err: pointer to return error code
+ * @perrno: pointer to return error code
*
* Dispatches command depending on what update state is current
**/
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *err)
+ u8 *bytes, int *perrno)
{
enum i40e_status_code status;
+ enum i40e_nvmupd_cmd upd_cmd;
DEBUGFUNC("i40e_nvmupd_command");
/* assume success */
- *err = 0;
+ *perrno = 0;
+
+ /* early check for status command and debug msgs */
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
+ i40e_nvm_update_state_str[upd_cmd],
+ hw->nvmupd_state,
+ hw->aq.nvm_release_on_done);
+
+ if (upd_cmd == I40E_NVMUPD_INVALID) {
+ *perrno = -EFAULT;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_validate_command returns %d errno %d\n",
+ upd_cmd, *perrno);
+ }
+
+ /* a status request returns immediately rather than
+ * going into the state machine
+ */
+ if (upd_cmd == I40E_NVMUPD_STATUS) {
+ bytes[0] = hw->nvmupd_state;
+ return I40E_SUCCESS;
+ }
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
- status = i40e_nvmupd_state_init(hw, cmd, bytes, err);
+ status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
break;
case I40E_NVMUPD_STATE_READING:
- status = i40e_nvmupd_state_reading(hw, cmd, bytes, err);
+ status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
break;
case I40E_NVMUPD_STATE_WRITING:
- status = i40e_nvmupd_state_writing(hw, cmd, bytes, err);
+ status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ status = I40E_ERR_NOT_READY;
+ *perrno = -EBUSY;
break;
default:
/* invalid state, should never happen */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: no such state %d\n", hw->nvmupd_state);
status = I40E_NOT_SUPPORTED;
- *err = -ESRCH;
+ *perrno = -ESRCH;
break;
}
return status;
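With this hunk i40e_nvmupd_command() grows from three states to five: INIT, READING and WRITING keep their handlers, while the new INIT_WAIT/WRITE_WAIT states mean an AQ completion is still outstanding and any new command is bounced with -EBUSY rather than queued. Status queries short-circuit before the state machine entirely. The dispatch skeleton reduced to its control flow (literal errno values used for self-containment):

    enum state { ST_INIT, ST_READING, ST_WRITING, ST_INIT_WAIT, ST_WRITE_WAIT };

    int dispatch(enum state st, int *perrno)
    {
        switch (st) {
        case ST_INIT:       /* acquire NVM, run command, maybe enter a state */
        case ST_READING:    /* NVM held for read; CON/LCB commands only      */
        case ST_WRITING:    /* NVM held for write; CON/LCB/CSUM commands     */
            return 0;
        case ST_INIT_WAIT:
        case ST_WRITE_WAIT: /* previous AQ write not completed yet */
            *perrno = -16;  /* -EBUSY */
            return -1;
        default:            /* unreachable unless state is corrupted */
            *perrno = -3;   /* -ESRCH */
            return -1;
        }
    }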
@@ -556,29 +844,30 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
- * @err: pointer to return error code
+ * @perrno: pointer to return error code
*
* Process legitimate commands of the Init state and conditionally set next
* state. Reject all other commands.
**/
STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *err)
+ u8 *bytes, int *perrno)
{
enum i40e_status_code status = I40E_SUCCESS;
enum i40e_nvmupd_cmd upd_cmd;
DEBUGFUNC("i40e_nvmupd_state_init");
- upd_cmd = i40e_nvmupd_validate_command(hw, cmd, err);
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
switch (upd_cmd) {
case I40E_NVMUPD_READ_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
- *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
- status = i40e_nvmupd_nvm_read(hw, cmd, bytes, err);
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
i40e_release_nvm(hw);
}
break;
@@ -586,69 +875,97 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
case I40E_NVMUPD_READ_SNT:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
- *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
- status = i40e_nvmupd_nvm_read(hw, cmd, bytes, err);
- hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+ if (status)
+ i40e_release_nvm(hw);
+ else
+ hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
}
break;
case I40E_NVMUPD_WRITE_ERA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
- status = i40e_nvmupd_nvm_erase(hw, cmd, err);
- if (status)
+ status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
+ if (status) {
i40e_release_nvm(hw);
- else
+ } else {
hw->aq.nvm_release_on_done = true;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
}
break;
case I40E_NVMUPD_WRITE_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
- status = i40e_nvmupd_nvm_write(hw, cmd, bytes, err);
- if (status)
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status) {
i40e_release_nvm(hw);
- else
+ } else {
hw->aq.nvm_release_on_done = true;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
}
break;
case I40E_NVMUPD_WRITE_SNT:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
- status = i40e_nvmupd_nvm_write(hw, cmd, bytes, err);
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status)
+ i40e_release_nvm(hw);
+ else
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
}
break;
case I40E_NVMUPD_CSUM_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
status = i40e_update_nvm_checksum(hw);
if (status) {
- *err = hw->aq.asq_last_status ?
- i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
-EIO;
i40e_release_nvm(hw);
} else {
hw->aq.nvm_release_on_done = true;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
break;
+ case I40E_NVMUPD_EXEC_AQ:
+ status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_GET_AQ_RESULT:
+ status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
+ break;
+
default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in init state\n",
+ i40e_nvm_update_state_str[upd_cmd]);
status = I40E_ERR_NVM;
- *err = -ESRCH;
+ *perrno = -ESRCH;
break;
}
return status;
@@ -659,37 +976,40 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
- * @err: pointer to return error code
+ * @perrno: pointer to return error code
*
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands.
**/
STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *err)
+ u8 *bytes, int *perrno)
{
- enum i40e_status_code status;
+ enum i40e_status_code status = I40E_SUCCESS;
enum i40e_nvmupd_cmd upd_cmd;
DEBUGFUNC("i40e_nvmupd_state_reading");
- upd_cmd = i40e_nvmupd_validate_command(hw, cmd, err);
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
switch (upd_cmd) {
case I40E_NVMUPD_READ_SA:
case I40E_NVMUPD_READ_CON:
- status = i40e_nvmupd_nvm_read(hw, cmd, bytes, err);
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
break;
case I40E_NVMUPD_READ_LCB:
- status = i40e_nvmupd_nvm_read(hw, cmd, bytes, err);
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
i40e_release_nvm(hw);
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
break;
default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in reading state.\n",
+ i40e_nvm_update_state_str[upd_cmd]);
status = I40E_NOT_SUPPORTED;
- *err = -ESRCH;
+ *perrno = -ESRCH;
break;
}
return status;
@@ -700,60 +1020,113 @@ STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
- * @err: pointer to return error code
+ * @perrno: pointer to return error code
*
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands
**/
STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *err)
+ u8 *bytes, int *perrno)
{
- enum i40e_status_code status;
+ enum i40e_status_code status = I40E_SUCCESS;
enum i40e_nvmupd_cmd upd_cmd;
+ bool retry_attempt = false;
DEBUGFUNC("i40e_nvmupd_state_writing");
- upd_cmd = i40e_nvmupd_validate_command(hw, cmd, err);
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
- status = i40e_nvmupd_nvm_write(hw, cmd, bytes, err);
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (!status)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
break;
case I40E_NVMUPD_WRITE_LCB:
- status = i40e_nvmupd_nvm_write(hw, cmd, bytes, err);
- if (!status) {
- hw->aq.nvm_release_on_done = true;
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
+ -EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
+ hw->aq.nvm_release_on_done = true;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
case I40E_NVMUPD_CSUM_CON:
status = i40e_update_nvm_checksum(hw);
- if (status)
- *err = hw->aq.asq_last_status ?
- i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
-EIO;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
break;
case I40E_NVMUPD_CSUM_LCB:
status = i40e_update_nvm_checksum(hw);
if (status) {
- *err = hw->aq.asq_last_status ?
- i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
-EIO;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
hw->aq.nvm_release_on_done = true;
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in writing state.\n",
+ i40e_nvm_update_state_str[upd_cmd]);
status = I40E_NOT_SUPPORTED;
- *err = -ESRCH;
+ *perrno = -ESRCH;
break;
}
+
+ /* In some circumstances, a multi-write transaction takes longer
+ * than the default 3 minute timeout on the write semaphore. If
+ * the write failed with an EBUSY status, this is likely the problem,
+ * so here we try to reacquire the semaphore then retry the write.
+ * We only do one retry, then give up.
+ */
+ if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ !retry_attempt) {
+ enum i40e_status_code old_status = status;
+ u32 old_asq_status = hw->aq.asq_last_status;
+ u32 gtime;
+
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ if (gtime >= hw->nvm.hw_semaphore_timeout) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+ gtime, hw->nvm.hw_semaphore_timeout);
+ i40e_release_nvm(hw);
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+ hw->aq.asq_last_status);
+ status = old_status;
+ hw->aq.asq_last_status = old_asq_status;
+ } else {
+ retry_attempt = true;
+ goto retry;
+ }
+ }
+ }
+
return status;
}
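The retry block at the bottom of i40e_nvmupd_state_writing() handles one specific failure: a long multi-write transaction outliving the roughly three-minute write semaphore, which surfaces as I40E_AQ_RC_EBUSY. The code releases and reacquires the semaphore and retries exactly once (retry_attempt guards the goto); if reacquisition fails, it restores the original status and AQ status so the caller sees the first error, not the recovery attempt's. The save-and-restore pattern in isolation (do_op()/reacquire() are placeholders for the NVM write and semaphore reacquisition):

    extern int do_op(void);      /* nonzero stands in for the EBUSY failure */
    extern int reacquire(void);  /* 0 on success */

    int op_with_one_retry(void)
    {
        int status = do_op();

        if (status) {
            int old_status = status;

            if (reacquire() != 0)
                return old_status; /* report the first error, not this one */
            status = do_op();      /* single retry */
        }
        return status;
    }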
@@ -761,16 +1134,16 @@ STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
* i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
- * @err: pointer to return error code
+ * @perrno: pointer to return error code
*
* Return one of the valid command types or I40E_NVMUPD_INVALID
**/
STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- int *err)
+ int *perrno)
{
enum i40e_nvmupd_cmd upd_cmd;
- u8 transaction, module;
+ u8 module, transaction;
DEBUGFUNC("i40e_nvmupd_validate_command\n");
@@ -783,9 +1156,10 @@ STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
/* limits on data size */
if ((cmd->data_size < 1) ||
(cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
- DEBUGOUT1("i40e_nvmupd_validate_command data_size %d\n",
- cmd->data_size);
- *err = -EFAULT;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_validate_command data_size %d\n",
+ cmd->data_size);
+ *perrno = -EFAULT;
return I40E_NVMUPD_INVALID;
}
@@ -804,6 +1178,12 @@ STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
case I40E_NVM_SA:
upd_cmd = I40E_NVMUPD_READ_SA;
break;
+ case I40E_NVM_EXEC:
+ if (module == 0xf)
+ upd_cmd = I40E_NVMUPD_STATUS;
+ else if (module == 0)
+ upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+ break;
}
break;
@@ -833,32 +1213,171 @@ STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
case (I40E_NVM_CSUM|I40E_NVM_LCB):
upd_cmd = I40E_NVMUPD_CSUM_LCB;
break;
+ case I40E_NVM_EXEC:
+ if (module == 0)
+ upd_cmd = I40E_NVMUPD_EXEC_AQ;
+ break;
}
break;
}
- if (upd_cmd == I40E_NVMUPD_INVALID) {
- *err = -EFAULT;
- DEBUGOUT2(
- "i40e_nvmupd_validate_command returns %d err: %d\n",
- upd_cmd, *err);
- }
return upd_cmd;
}
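Throughout the nvmupd path the userspace command word cmd->config packs two fields: the low bits select the module pointer and a higher field carries the transaction type, extracted by the i40e_nvmupd_get_module()/get_transaction() helpers shown earlier. Validation then switches on the transaction, and sometimes on the module as well, as in the new I40E_NVM_EXEC cases where module 0xf means a status query and module 0 an AQ pass-through. The decode, with illustrative mask values (the real I40E_NVM_* constants may differ):

    #include <stdint.h>

    #define MOD_PNT_MASK 0xffu       /* assumed layout: module in bits 0-7 */
    #define TRANS_MASK   0x0f00u     /* transaction in bits 8-11           */
    #define TRANS_SHIFT  8

    static uint8_t get_module(uint32_t config)
    {
        return (uint8_t)(config & MOD_PNT_MASK);
    }

    static uint8_t get_transaction(uint32_t config)
    {
        return (uint8_t)((config & TRANS_MASK) >> TRANS_SHIFT);
    }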
/**
+ * i40e_nvmupd_exec_aq - Run an AQ command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ enum i40e_status_code status;
+ struct i40e_aq_desc *aq_desc;
+ u32 buff_size = 0;
+ u8 *buff = NULL;
+ u32 aq_desc_len;
+ u32 aq_data_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+
+ /* get the aq descriptor */
+ if (cmd->data_size < aq_desc_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
+ cmd->data_size, aq_desc_len);
+ *perrno = -EINVAL;
+ return I40E_ERR_PARAM;
+ }
+ aq_desc = (struct i40e_aq_desc *)bytes;
+
+ /* if data buffer needed, make sure it's ready */
+ aq_data_len = cmd->data_size - aq_desc_len;
+ buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
+ if (buff_size) {
+ if (!hw->nvm_buff.va) {
+ status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
+ hw->aq.asq_buf_size);
+ if (status)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
+ status);
+ }
+
+ if (hw->nvm_buff.va) {
+ buff = hw->nvm_buff.va;
+ memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ }
+ }
+
+ /* and away we go! */
+ status = i40e_asq_send_command(hw, aq_desc, buff,
+ buff_size, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_exec_aq err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+ int remainder;
+ u8 *buff;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
+
+ /* check offset range */
+ if (cmd->offset > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
+ __func__, cmd->offset, aq_total_len);
+ *perrno = -EINVAL;
+ return I40E_ERR_PARAM;
+ }
+
+ /* check copylength range */
+ if (cmd->data_size > (aq_total_len - cmd->offset)) {
+ int new_len = aq_total_len - cmd->offset;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, new_len);
+ cmd->data_size = new_len;
+ }
+
+ remainder = cmd->data_size;
+ if (cmd->offset < aq_desc_len) {
+ u32 len = aq_desc_len - cmd->offset;
+
+ len = min(len, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
+ __func__, cmd->offset, cmd->offset + len);
+
+ buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
+ memcpy(bytes, buff, len);
+
+ bytes += len;
+ remainder -= len;
+ buff = hw->nvm_buff.va;
+ } else {
+ buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+ }
+
+ if (remainder > 0) {
+ int start_byte = buff - (u8 *)hw->nvm_buff.va;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
+ __func__, start_byte, start_byte + remainder);
+ memcpy(bytes, buff, remainder);
+ }
+
+ return I40E_SUCCESS;
+}
+
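i40e_nvmupd_get_aq_result() exposes the saved write-back descriptor and its data buffer to userspace as one logical byte stream: offsets 0..sizeof(desc)-1 come out of hw->nvm_wb_desc and everything after that out of hw->nvm_buff. A request may start in either region and span the seam, hence the len/remainder bookkeeping. The same copy logic over two plain buffers:

    #include <stddef.h>
    #include <string.h>

    /* Copy 'size' bytes starting at 'off' from the concatenation of
     * region a (a_len bytes) and region b into out; mirrors the
     * descriptor+buffer split in i40e_nvmupd_get_aq_result(). */
    void copy_concat(const unsigned char *a, size_t a_len,
                     const unsigned char *b,
                     size_t off, size_t size, unsigned char *out)
    {
        size_t remainder = size;
        const unsigned char *src;

        if (off < a_len) {
            size_t len = a_len - off;

            if (len > size)
                len = size;
            memcpy(out, a + off, len);   /* first part from region a */
            out += len;
            remainder -= len;
            src = b;                     /* continue at the start of b */
        } else {
            src = b + (off - a_len);     /* request begins inside b */
        }
        if (remainder)
            memcpy(out, src, remainder);
    }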
+/**
* i40e_nvmupd_nvm_read - Read NVM
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
- * @err: pointer to return error code
+ * @perrno: pointer to return error code
*
* cmd structure contains identifiers and data buffer
**/
STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *err)
+ u8 *bytes, int *perrno)
{
+ struct i40e_asq_cmd_details cmd_details;
enum i40e_status_code status;
u8 module, transaction;
bool last;
@@ -866,14 +1385,21 @@ STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
- DEBUGOUT3("i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
- bytes, last, NULL);
- DEBUGOUT1("i40e_nvmupd_nvm_read status %d\n", status);
- if (status != I40E_SUCCESS)
- *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ bytes, last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_read status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
return status;
}
@@ -882,28 +1408,37 @@ STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
* i40e_nvmupd_nvm_erase - Erase an NVM module
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
- * @err: pointer to return error code
+ * @perrno: pointer to return error code
*
* module, offset, data_size and data are in cmd structure
**/
STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- int *err)
+ int *perrno)
{
enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
- DEBUGOUT3("i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
- last, NULL);
- DEBUGOUT1("i40e_nvmupd_nvm_erase status %d\n", status);
- if (status != I40E_SUCCESS)
- *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_erase status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
return status;
}
@@ -913,28 +1448,38 @@ STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
- * @err: pointer to return error code
+ * @perrno: pointer to return error code
*
* module, offset, data_size and data are in cmd structure
**/
STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *err)
+ u8 *bytes, int *perrno)
{
enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
- DEBUGOUT3("i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
status = i40e_aq_update_nvm(hw, module, cmd->offset,
- (u16)cmd->data_size, bytes, last, NULL);
- DEBUGOUT1("i40e_nvmupd_nvm_write status %d\n", status);
- if (status != I40E_SUCCESS)
- *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ (u16)cmd->data_size, bytes, last,
+ &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_write status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
return status;
}
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_osdep.h b/src/dpdk22/drivers/net/i40e/base/i40e_osdep.h
index de71b0d8..71077f0b 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_osdep.h
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_osdep.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -122,10 +122,10 @@ do { \
((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
static inline uint32_t i40e_read_addr(volatile void *addr)
{
- return I40E_PCI_REG(addr);
+ return rte_le_to_cpu_32(I40E_PCI_REG(addr));
}
#define I40E_PCI_REG_WRITE(reg, value) \
- do {I40E_PCI_REG((reg)) = (value);} while(0)
+ do { I40E_PCI_REG((reg)) = rte_cpu_to_le_32(value); } while (0)
#define I40E_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_GLGEN_STAT)
#define I40EVF_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_VFGEN_RSTAT)
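The osdep change makes MMIO accesses endian-correct: i40e hardware registers are little-endian, so reads are wrapped in rte_le_to_cpu_32() and writes in rte_cpu_to_le_32(). On x86 these compile away; on a big-endian host they are the difference between correct and byte-swapped register values. Equivalent wrappers without the DPDK helpers, using the GCC/Clang byte-order macros:

    #include <stdint.h>

    /* Byte-order helpers equivalent to rte_le_to_cpu_32()/rte_cpu_to_le_32():
     * byte swaps on a big-endian host, no-ops on a little-endian one. */
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    #define le32_to_cpu(x) __builtin_bswap32(x)
    #define cpu_to_le32(x) __builtin_bswap32(x)
    #else
    #define le32_to_cpu(x) (x)
    #define cpu_to_le32(x) (x)
    #endif

    static inline uint32_t reg_read(volatile void *addr)
    {
        return le32_to_cpu(*(volatile uint32_t *)addr);
    }

    static inline void reg_write(volatile void *addr, uint32_t value)
    {
        *(volatile uint32_t *)addr = cpu_to_le32(value);
    }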
@@ -146,7 +146,7 @@ struct i40e_dma_mem {
void *va;
u64 pa;
u32 size;
- u64 id;
+ const void *zone;
} __attribute__((packed));
#define i40e_allocate_dma_mem(h, m, unused, s, a) \
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_prototype.h b/src/dpdk22/drivers/net/i40e/base/i40e_prototype.h
index f3215cf8..76dc5b6c 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_prototype.h
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_prototype.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -57,6 +57,7 @@ enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw);
u16 i40e_clean_asq(struct i40e_hw *hw);
void i40e_free_adminq_asq(struct i40e_hw *hw);
void i40e_free_adminq_arq(struct i40e_hw *hw);
+enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
@@ -76,8 +77,25 @@ void i40e_idle_aq(struct i40e_hw *hw);
void i40e_resume_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+#ifdef X722_SUPPORT
+
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+#endif
+#ifndef I40E_NDIS_SUPPORT
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
+#endif /* I40E_NDIS_SUPPORT */
-#ifndef VF_DRIVER
+#ifdef PF_DRIVER
u32 i40e_led_get(struct i40e_hw *hw);
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
@@ -86,11 +104,15 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
u32 reg_addr, u64 reg_val,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
@@ -124,8 +146,6 @@ enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
bool enable_lse, struct i40e_link_status *link,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw,
- bool enable_lse);
enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
u64 advt_reg,
struct i40e_asq_cmd_details *cmd_details);
@@ -142,6 +162,12 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
@@ -193,6 +219,17 @@ enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, bool last_command,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, u32 field_id, void *data,
+ u16 buf_size, u16 *element_count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, void *data, u16 buf_size,
+ u16 element_count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
void *buff, u16 buff_size, u16 *data_size,
enum i40e_admin_queue_opc list_type_opc,
@@ -205,6 +242,9 @@ enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
u16 *local_len, u16 *remote_len,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ u8 mib_type, void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
@@ -225,6 +265,12 @@ enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
+ bool start_agent,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u16 udp_port, u8 protocol_index,
u8 *filter_index,
@@ -302,6 +348,15 @@ enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
@@ -324,6 +379,8 @@ enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg);
enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
u16 vsi,
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
@@ -355,7 +412,8 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw);
enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw);
void i40e_clear_hw(struct i40e_hw *hw);
void i40e_clear_pxe_mode(struct i40e_hw *hw);
-bool i40e_get_link_status(struct i40e_hw *hw);
+enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw);
enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid);
@@ -363,16 +421,15 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
struct i40e_aqc_configure_partition_bw_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
-enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr);
enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw);
/* prototype for functions used for NVM access */
enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw);
enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
enum i40e_aq_resource_access_type access);
void i40e_release_nvm(struct i40e_hw *hw);
-enum i40e_status_code i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset,
- u16 *data);
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
@@ -392,7 +449,7 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
-#endif /* VF_DRIVER */
+#endif /* PF_DRIVER */
#if defined(I40E_QV) || defined(VF_DRIVER)
enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
@@ -427,4 +484,28 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
u16 vsi_seid, u16 queue, bool is_add,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 vsi_seid);
+#ifdef X722_SUPPORT
+enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
+ struct i40e_aqc_arp_proxy_data *proxy_config,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
+ struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
+ u8 filter_index,
+ struct i40e_aqc_set_wol_filter_data *filter,
+ bool set_filter, bool no_wol_tco,
+ bool filter_valid, bool no_wol_tco_valid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
+ u16 *wake_reason,
+ struct i40e_asq_cmd_details *cmd_details);
+#endif
#endif /* _I40E_PROTOTYPE_H_ */
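
The hunks above change the link-status API: i40e_get_link_status() now returns an admin-queue status code and reports the link state through an out-parameter instead of returning a bool directly, with an i40e_update_link_info() helper added alongside it. A caller written against the dpdk 1.8 signature would be adapted roughly as follows; this is an illustrative sketch only — the helper name, its return convention, and the error handling are assumptions, not part of this patch.

    /* Sketch: adapting a dpdk 1.8 caller to the dpdk 2.2 signature above.
     * example_check_link() is a hypothetical helper, not driver code. */
    static int example_check_link(struct i40e_hw *hw)
    {
            bool link_up = false;
            enum i40e_status_code status;

            /* dpdk 1.8: link_up = i40e_get_link_status(hw); */
            status = i40e_get_link_status(hw, &link_up);    /* dpdk 2.2 */
            if (status != I40E_SUCCESS)
                    return -1;      /* the admin-queue query itself failed */
            return link_up ? 0 : -1;
    }
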
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_register.h b/src/dpdk22/drivers/net/i40e/base/i40e_register.h
index f236c395..c6bac1ee 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_register.h
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_register.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -318,6 +318,10 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
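
Every register in this file follows the same field-access idiom: the _SHIFT constant gives the bit offset, and the _MASK constant, built with I40E_MASK(value, shift), already includes that shift, so a field is extracted as (val & MASK) >> SHIFT. For example, reading the RX queue number out of the newly added I40E_PRTDCB_RUPTQ register would look roughly like the sketch below; rd32() is assumed to be the driver's 32-bit register-read helper and is not defined in this patch.

    /* Sketch: field extraction with the SHIFT/MASK pairs added above.
     * example_ruptq_rxqnum() is a hypothetical helper, not driver code. */
    static u32 example_ruptq_rxqnum(struct i40e_hw *hw, u32 up)
    {
            u32 val = rd32(hw, I40E_PRTDCB_RUPTQ(up));      /* up = 0...7 */
            return (val & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
                   I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;          /* 14-bit RXQNUM */
    }
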
@@ -429,6 +433,8 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
@@ -492,7 +498,9 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
@@ -556,9 +564,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
@@ -876,6 +881,13 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT)
#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
@@ -1074,7 +1086,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
@@ -1179,7 +1191,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VFINT_ITRN_MAX_INDEX 2
#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
@@ -1604,6 +1616,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_MSIX_TVCTRL_MAX_INDEX 128
#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
+#endif /* PF_DRIVER */
#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
#define I40E_VFMSIX_PBA1_MAX_INDEX 19
#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
@@ -1626,6 +1639,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#ifdef PF_DRIVER
#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
@@ -1811,9 +1825,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
@@ -1910,6 +1921,11 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
+#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
+#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
+#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
+#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
@@ -2382,20 +2398,20 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCH_MAX_INDEX 3
-#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCL_MAX_INDEX 3
-#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCH_MAX_INDEX 3
-#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCL_MAX_INDEX 3
-#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
@@ -2628,10 +2644,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLPRT_TDOLD_MAX_INDEX 3
#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_TDPC_MAX_INDEX 3
-#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
-#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT)
#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_UPRCH_MAX_INDEX 3
#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
@@ -2998,9 +3010,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
-#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
@@ -3150,6 +3159,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PRTPM_SAL_MAX_INDEX 3
#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#endif /* PF_DRIVER */
#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
@@ -3266,7 +3276,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VFINT_ITRN1_MAX_INDEX 2
#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
@@ -3374,4 +3384,1937 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#endif
+#ifdef X722_SUPPORT
+
+#ifdef PF_DRIVER
+#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
+#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
+#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
+#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
+#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
+#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
+#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
+#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
+#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
+#define I40E_MNGSB_FDS_START_BC_SHIFT 0
+#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
+#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
+#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
+
+#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
+#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
+
+#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+
+#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
+#define I40E_GL_FWSTS_FWROWD_SHIFT 8
+#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
+#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
+#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
+#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
+#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
+#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
+#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
+#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
+#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
+#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QBASE_MAX_INDEX 127
+#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
+#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
+#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
+#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
+#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
+#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
+#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
+#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
+#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
+#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
+#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
+#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
+#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
+#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
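+/*
+ * Field access pattern for the definitions in this file: each register field
+ * is described by a _SHIFT/_MASK pair built with I40E_MASK(mask, shift).
+ * A minimal read sketch, assuming a 32-bit value msgctl has already been
+ * read from I40E_MNGSB_MSGCTL (the variable name is illustrative only):
+ *
+ *   u32 msg_mode = (msgctl & I40E_MNGSB_MSGCTL_MSG_MODE_MASK) >>
+ *                  I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT;
+ */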
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
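+/*
+ * Most of the GLPCI_CUR_* and GLPCI_WATMK_* credit registers above share one
+ * layout: DATA_LINES in bits 15:0 and OSR in bits 31:16. A sketch of
+ * unpacking both fields from an illustrative value cur read from one of them:
+ *
+ *   u16 data_lines = (cur & I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK) >>
+ *                    I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT;
+ *   u16 osr = (cur & I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK) >>
+ *             I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT;
+ */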
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
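+/*
+ * Doorbell-style registers such as I40E_PFPE_WQEALLOC take composed values;
+ * the same _SHIFT constants build the outgoing word. A write sketch, with
+ * illustrative qpid (18-bit) and wqe_idx (12-bit) values assumed to fit
+ * their fields:
+ *
+ *   u32 db = (qpid << I40E_PFPE_WQEALLOC_PEQPID_SHIFT) |
+ *            (wqe_idx << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT);
+ */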
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
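+/*
+ * The VFPE registers above are per-VF arrays: the macro argument selects the
+ * VF instance at a 4-byte stride, valid for _VF = 0...127. For example,
+ * I40E_VFPE_CQARM(3) expands to (0x00130400 + ((3) * 4)), i.e. 0x0013040C.
+ */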
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
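+/*
+ * Many GLPES statistics are 48-bit counters split across a ..LO/..HI register
+ * pair (32 low bits, 16 high bits). A reassembly sketch from illustrative
+ * values lo and hi read from such a pair:
+ *
+ *   u64 ip4_rx_octets =
+ *       ((u64)(hi & I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK) << 32) | lo;
+ */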
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX 12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX 15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+
+#endif /* X722_SUPPORT */
+#endif /* _I40E_REGISTER_H_ */
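For context (not part of the patch): every register field in the header above is described by a SHIFT/MASK pair built with I40E_MASK, so reading a field is a mask-and-shift on the 32-bit register value, and indexed registers take the function/VSI index as a macro argument. A minimal sketch, assuming a hypothetical rd32() MMIO read helper in place of the driver's real register accessor:

    #include <stdint.h>

    /* rd32() is a hypothetical 32-bit MMIO read helper (an assumption,
     * standing in for the driver's real register accessor). */
    extern uint32_t rd32(uint32_t reg);

    /* Read the 3-bit HMC profile field (bits 6:4) of VFPE_CCQPSTATUS1. */
    static uint32_t read_hmc_profile(void)
    {
        uint32_t val = rd32(I40E_VFPE_CCQPSTATUS1);
        return (val & I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK) >>
               I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT;
    }

    /* Indexed registers expand to a per-index offset, e.g. the low 32 bits
     * of PF 0's UDP rx packet count, or RSS hash key word 0 of VSI 5: */
    /* uint32_t udp_rx_lo = rd32(I40E_GLPES_PFUDPRXPKTSLO(0)); */
    /* uint32_t key0 = rd32(I40E_VSIQF_HKEY(0, 5)); */
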
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_status.h b/src/dpdk22/drivers/net/i40e/base/i40e_status.h
index 2e693a39..5632ff2b 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_status.h
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_status.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_type.h b/src/dpdk22/drivers/net/i40e/base/i40e_type.h
index bb876402..94838842 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_type.h
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_type.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -40,6 +40,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "i40e_adminq.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
+#include "i40e_devids.h"
#define UNREFERENCED_XPARAMETER
#define UNREFERENCED_1PARAMETER(_p) (_p);
@@ -48,25 +49,14 @@ POSSIBILITY OF SUCH DAMAGE.
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) (_p); (_q); (_r); (_s);
#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) (_p); (_q); (_r); (_s); (_t);
-/* Vendor ID */
-#define I40E_INTEL_VENDOR_ID 0x8086
-
-/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710 0x1572
-#define I40E_DEV_ID_QEMU 0x1574
-#define I40E_DEV_ID_KX_A 0x157F
-#define I40E_DEV_ID_KX_B 0x1580
-#define I40E_DEV_ID_KX_C 0x1581
-#define I40E_DEV_ID_QSFP_A 0x1583
-#define I40E_DEV_ID_QSFP_B 0x1584
-#define I40E_DEV_ID_QSFP_C 0x1585
-#define I40E_DEV_ID_10G_BASE_T 0x1586
-#define I40E_DEV_ID_VF 0x154C
-#define I40E_DEV_ID_VF_HV 0x1571
-
-#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
- (d) == I40E_DEV_ID_QSFP_B || \
- (d) == I40E_DEV_ID_QSFP_C)
+#ifndef LINUX_MACROS
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* BIT_ULL */
+#endif /* LINUX_MACROS */
#ifndef I40E_MASK
/* I40E_MASK is a macro used on 32 bit registers */
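For context (not part of the patch): the BIT()/BIT_ULL() helpers added above exist because an open-coded (1 << n) shifts a plain int and is undefined for n >= 31, which is why hunks later in this file replace expressions like (0x1ULL << shift) with BIT_ULL(shift) for high bit positions. A minimal illustration:

    /* Bit 63 (I40E_RXD_QW1_LENGTH_SPH_SHIFT, defined further down) needs
     * the 64-bit form: (1 << 63) is undefined behavior on a 32-bit int,
     * while BIT_ULL(63) == (1ULL << 63) is well defined. */
    uint64_t sph = BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT);
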
@@ -181,6 +171,13 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
+
+#ifdef X722_SUPPORT
+#define I40E_FW_API_VERSION_MINOR_X722 0x0003
+#endif
+#define I40E_FW_API_VERSION_MINOR_X710 0x0004
+
+
/* These are structs for managing the hardware information and the operations.
* The structures of function pointers are filled out at init time when we
* know for sure exactly which hardware we're working with. This gives us the
@@ -194,6 +191,10 @@ enum i40e_mac_type {
I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
+#ifdef X722_SUPPORT
+ I40E_MAC_X722,
+ I40E_MAC_X722_VF,
+#endif
I40E_MAC_GENERIC,
};
@@ -225,14 +226,14 @@ enum i40e_set_fc_aq_failures {
};
enum i40e_vsi_type {
- I40E_VSI_MAIN = 0,
- I40E_VSI_VMDQ1,
- I40E_VSI_VMDQ2,
- I40E_VSI_CTRL,
- I40E_VSI_FCOE,
- I40E_VSI_MIRROR,
- I40E_VSI_SRIOV,
- I40E_VSI_FDIR,
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1 = 1,
+ I40E_VSI_VMDQ2 = 2,
+ I40E_VSI_CTRL = 3,
+ I40E_VSI_FCOE = 4,
+ I40E_VSI_MIRROR = 5,
+ I40E_VSI_SRIOV = 6,
+ I40E_VSI_FDIR = 7,
I40E_VSI_TYPE_UNKNOWN
};
@@ -250,28 +251,87 @@ struct i40e_link_status {
u8 an_info;
u8 ext_info;
u8 loopback;
- bool an_enabled;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
u16 max_frame_size;
bool crc_enable;
u8 pacing;
+ u8 requested_speeds;
+ u8 module_type[3];
+ /* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP 0x03
+#define I40E_MODULE_TYPE_QSFP 0x0D
+ /* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE 0x01
+#define I40E_MODULE_TYPE_40G_LR4 0x02
+#define I40E_MODULE_TYPE_40G_SR4 0x04
+#define I40E_MODULE_TYPE_40G_CR4 0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR 0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR 0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER 0x80
+ /* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX 0x01
+#define I40E_MODULE_TYPE_1000BASE_LX 0x02
+#define I40E_MODULE_TYPE_1000BASE_CX 0x04
+#define I40E_MODULE_TYPE_1000BASE_T 0x08
+};
+
+enum i40e_aq_capabilities_phy_type {
+ I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
+ I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
+ I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
+ I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
+ I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
+ I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
+ I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
+ I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
+ I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
+ I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
+ I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
+ I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
+ I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
+ I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
+ I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
+ I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
+ I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
+ I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
+ I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
+ I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
+ I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
};
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
- u32 autoneg_advertised;
- u32 phy_id;
- u32 module_type;
bool get_link_info;
enum i40e_media_type media_type;
+ /* all the phy types the NVM is capable of */
+ u32 phy_types;
};
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
+#ifdef X722_SUPPORT
+enum i40e_acpi_programming_method {
+ I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
+ I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
+};
+
+#define I40E_WOL_SUPPORT_MASK 1
+#define I40E_ACPI_PROGRAMMING_METHOD_MASK (1 << 1)
+#define I40E_PROXY_SUPPORT_MASK (1 << 2)
+
+#endif
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
u32 switch_mode;
@@ -289,7 +349,18 @@ struct i40e_hw_capabilities {
bool evb_802_1_qbh; /* Bridge Port Extension */
bool dcb;
bool fcoe;
- bool mfp_mode_1;
+ bool iscsi; /* Indicates iSCSI enabled */
+ bool flex10_enable;
+ bool flex10_capable;
+ u32 flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN 0x0
+#define I40E_FLEX10_MODE_DCC 0x1
+#define I40E_FLEX10_MODE_DCI 0x2
+
+ u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
+#define I40E_FLEX10_STATUS_VC_MODE 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -318,6 +389,12 @@ struct i40e_hw_capabilities {
u8 rx_buf_chain_len;
u32 enabled_tcmap;
u32 maxtc;
+ u64 wr_csr_prot;
+#ifdef X722_SUPPORT
+ bool apm_wol_support;
+ enum i40e_acpi_programming_method acpi_prog_method;
+ bool proxy_support;
+#endif
};
struct i40e_mac_info {
@@ -339,13 +416,13 @@ enum i40e_aq_resource_access_type {
};
struct i40e_nvm_info {
- u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
- u64 hw_semaphore_wait; /* - || - */
+ u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
u32 timeout; /* [ms] */
u16 sr_size; /* Shadow RAM size in words */
bool blank_nvm_mode; /* is NVM empty (no FW present)*/
u16 version; /* NVM package version */
u32 eetrack; /* NVM data version */
+ u32 oem_ver; /* OEM version info */
};
/* definitions used in NVM update support */
@@ -364,12 +441,17 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_CSUM_CON,
I40E_NVMUPD_CSUM_SA,
I40E_NVMUPD_CSUM_LCB,
+ I40E_NVMUPD_STATUS,
+ I40E_NVMUPD_EXEC_AQ,
+ I40E_NVMUPD_GET_AQ_RESULT,
};
enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_INIT,
I40E_NVMUPD_STATE_READING,
- I40E_NVMUPD_STATE_WRITING
+ I40E_NVMUPD_STATE_WRITING,
+ I40E_NVMUPD_STATE_INIT_WAIT,
+ I40E_NVMUPD_STATE_WRITE_WAIT,
};
/* nvm_access definition and its masks/shifts need to be accessible to
@@ -388,6 +470,7 @@ enum i40e_nvmupd_state {
#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
#define I40E_NVM_ERA 0x4
#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT)
@@ -459,9 +542,20 @@ struct i40e_fc_info {
#define I40E_MAX_USER_PRIORITY 8
#define I40E_DCBX_MAX_APPS 32
#define I40E_LLDPDU_SIZE 1500
-
-/* IEEE 802.1Qaz ETS Configuration data */
-struct i40e_ieee_ets_config {
+#define I40E_TLV_STATUS_OPER 0x1
+#define I40E_TLV_STATUS_SYNC 0x2
+#define I40E_TLV_STATUS_ERR 0x4
+#define I40E_CEE_OPER_MAX_APPS 3
+#define I40E_APP_PROTOID_FCOE 0x8906
+#define I40E_APP_PROTOID_ISCSI 0x0cbc
+#define I40E_APP_PROTOID_FIP 0x8914
+#define I40E_APP_SEL_ETHTYPE 0x1
+#define I40E_APP_SEL_TCPIP 0x2
+#define I40E_CEE_APP_SEL_ETHTYPE 0x0
+#define I40E_CEE_APP_SEL_TCPIP 0x1
+
+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct i40e_dcb_ets_config {
u8 willing;
u8 cbs;
u8 maxtcs;
@@ -470,34 +564,33 @@ struct i40e_ieee_ets_config {
u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
};
-/* IEEE 802.1Qaz ETS Recommendation data */
-struct i40e_ieee_ets_recommend {
- u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
- u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
- u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
-};
-
-/* IEEE 802.1Qaz PFC Configuration data */
-struct i40e_ieee_pfc_config {
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct i40e_dcb_pfc_config {
u8 willing;
u8 mbc;
u8 pfccap;
u8 pfcenable;
};
-/* IEEE 802.1Qaz Application Priority data */
-struct i40e_ieee_app_priority_table {
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct i40e_dcb_app_priority_table {
u8 priority;
u8 selector;
u16 protocolid;
};
struct i40e_dcbx_config {
+ u8 dcbx_mode;
+#define I40E_DCBX_MODE_CEE 0x1
+#define I40E_DCBX_MODE_IEEE 0x2
+ u8 app_mode;
+#define I40E_DCBX_APPS_NON_WILLING 0x1
u32 numapps;
- struct i40e_ieee_ets_config etscfg;
- struct i40e_ieee_ets_recommend etsrec;
- struct i40e_ieee_pfc_config pfc;
- struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
+ u32 tlv_status; /* CEE mode TLV status */
+ struct i40e_dcb_ets_config etscfg;
+ struct i40e_dcb_ets_config etsrec;
+ struct i40e_dcb_pfc_config pfc;
+ struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS];
};
/* Port hardware description */
@@ -505,7 +598,7 @@ struct i40e_hw {
u8 *hw_addr;
void *back;
- /* function pointer structs */
+ /* subsystem structs */
struct i40e_phy_info phy;
struct i40e_mac_info mac;
struct i40e_bus_info bus;
@@ -532,6 +625,11 @@ struct i40e_hw {
u8 pf_id;
u16 main_vsi_seid;
+ /* for multi-function MACs */
+ u16 partition_id;
+ u16 num_partitions;
+ u16 num_ports;
+
/* Closest numa node to the device */
u16 numa_node;
@@ -540,6 +638,8 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
+ struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_virt_mem nvm_buff;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -548,12 +648,32 @@ struct i40e_hw {
u16 dcbx_status;
/* DCBX info */
- struct i40e_dcbx_config local_dcbx_config;
- struct i40e_dcbx_config remote_dcbx_config;
+ struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+ struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+
+#ifdef X722_SUPPORT
+ /* WoL and proxy support */
+ u16 num_wol_proxy_filters;
+ u16 wol_proxy_vsi_seid;
+#endif
/* debug mask */
u32 debug_mask;
-};
+#ifndef I40E_NDIS_SUPPORT
+ char err_str[16];
+#endif /* I40E_NDIS_SUPPORT */
+};
+
+STATIC INLINE bool i40e_is_vf(struct i40e_hw *hw)
+{
+#ifdef X722_SUPPORT
+ return (hw->mac.type == I40E_MAC_VF ||
+ hw->mac.type == I40E_MAC_X722_VF);
+#else
+ return hw->mac.type == I40E_MAC_VF;
+#endif
+}
struct i40e_driver_version {
u8 major_version;
@@ -656,19 +776,28 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+#ifdef X722_SUPPORT
+ I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
+#else
+ I40E_RX_DESC_STATUS_RESERVED1_SHIFT = 8,
+#endif
+
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
- I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
+#ifdef X722_SUPPORT
+ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
+#else
I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+#endif
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) << \
+#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \
I40E_RXD_QW1_STATUS_SHIFT)
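The rewritten masks use BIT()/BIT_ULL() helpers instead of open-coded shifts; a minimal sketch of what these macros are assumed to expand to:

    /* assumed helpers, conventionally provided by the base code */
    #define BIT(a)     (1UL << (a))
    #define BIT_ULL(a) (1ULL << (a))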
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -676,8 +805,7 @@ enum i40e_rx_desc_status_bits {
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
- I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
#define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT I40E_RX_DESC_STATUS_UMBCAST
#define I40E_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \
@@ -823,8 +951,7 @@ enum i40e_rx_ptype_payload_layer {
I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
- I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
#define I40E_RXD_QW1_NEXTP_SHIFT 38
#define I40E_RXD_QW1_NEXTP_MASK (0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT)
@@ -1027,12 +1154,11 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
-#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
- I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
@@ -1044,6 +1170,10 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+#ifdef X722_SUPPORT
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
+#endif
struct i40e_nop_desc {
__le64 rsvd;
__le64 dtype_cmd;
@@ -1080,15 +1210,38 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
+#ifdef X722_SUPPORT
+ /* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30 and 32 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+#else
/* Note: Values 0-30 are reserved for future use */
+#endif
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+#ifdef X722_SUPPORT
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+#else
/* Note: Value 32 is reserved for future use */
+#endif
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+#ifdef X722_SUPPORT
+ /* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40 and 42 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+#else
/* Note: Values 37-40 are reserved for future use */
+#endif
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+#ifdef X722_SUPPORT
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+#endif
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
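Each packet-classifier type is consumed as a bit position; for example, an RSS configuration path would build its hash-enable bitmap by OR-ing PCTYPE bits together (a sketch, with hena standing in for the register value):

    /* sketch: enable RSS hashing for non-fragmented IPv4/IPv6 TCP */
    uint64_t hena = 0;
    hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
    hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
    /* the two 32-bit halves are then written to the hash-enable registers */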
@@ -1137,13 +1290,18 @@ enum i40e_filter_program_desc_pcmd {
#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
- I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+#ifdef X722_SUPPORT
+
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+#endif
#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
@@ -1197,6 +1355,14 @@ struct i40e_eth_stats {
u64 tx_errors; /* tepc */
};
+/* Statistics collected per VEB per TC */
+struct i40e_veb_tc_stats {
+ u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
+};
+
/* Statistics collected by the MAC */
struct i40e_hw_port_stats {
/* eth stats collected by the port */
@@ -1242,6 +1408,9 @@ struct i40e_hw_port_stats {
/* flow director stats */
u64 fd_atr_match;
u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
+ u32 fd_atr_status;
+ u32 fd_sb_status;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
@@ -1264,17 +1433,24 @@ struct i40e_hw_port_stats {
#define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D
#define I40E_SR_MNG_CONFIG_PTR 0x0E
#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_SR_PBA_FLAGS 0x15
#define I40E_SR_PBA_BLOCK_PTR 0x16
#define I40E_SR_BOOT_CONFIG_PTR 0x17
-#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_NVM_OEM_VER_OFF 0x83
+#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
#define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28
+#define I40E_SR_NVM_MAP_VERSION 0x29
+#define I40E_SR_NVM_IMAGE_VERSION 0x2A
+#define I40E_SR_NVM_STRUCTURE_VERSION 0x2B
#define I40E_SR_NVM_EETRACK_LO 0x2D
#define I40E_SR_NVM_EETRACK_HI 0x2E
#define I40E_SR_VPD_PTR 0x2F
#define I40E_SR_PXE_SETUP_PTR 0x30
#define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31
+#define I40E_SR_NVM_ORIGINAL_EETRACK_LO 0x34
+#define I40E_SR_NVM_ORIGINAL_EETRACK_HI 0x35
#define I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37
#define I40E_SR_POR_REGS_AUTO_LOAD_PTR 0x38
#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
@@ -1287,6 +1463,9 @@ struct i40e_hw_port_stats {
#define I40E_SR_3RD_FREE_PROVISION_AREA_PTR 0x44
#define I40E_SR_2ND_FREE_PROVISION_AREA_PTR 0x46
#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48
+#define I40E_SR_FEATURE_CONFIGURATION_PTR 0x49
+#define I40E_SR_CONFIGURATION_METADATA_PTR 0x4D
+#define I40E_SR_IMMEDIATE_VALUES_PTR 0x4E
/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
@@ -1407,6 +1586,18 @@ enum i40e_reset_type {
I40E_RESET_EMPR = 3,
};
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define I40E_NVM_LLDP_CFG_PTR 0xD
+struct i40e_lldp_variables {
+ u16 length;
+ u16 adminstatus;
+ u16 msgfasttx;
+ u16 msgtxinterval;
+ u16 txparams;
+ u16 timers;
+ u16 crc8;
+};
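The struct mirrors the on-NVM layout word for word, so fetching it amounts to following I40E_NVM_LLDP_CFG_PTR and copying half-words; a sketch assuming a hypothetical nvm_read_word() helper standing in for the driver's shadow-RAM read path:

    /* sketch: fetch the LLDP agent block from NVM, word by word */
    struct i40e_lldp_variables lldp_cfg;
    u16 *words = (u16 *)&lldp_cfg;
    u16 lldp_ptr, i;

    nvm_read_word(hw, I40E_NVM_LLDP_CFG_PTR, &lldp_ptr);
    for (i = 0; i < sizeof(lldp_cfg) / 2; i++)
        nvm_read_word(hw, lldp_ptr + i, &words[i]);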
+
/* Offsets into Alternate Ram */
#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_virtchnl.h b/src/dpdk22/drivers/net/i40e/base/i40e_virtchnl.h
index 5257b5de..81065820 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_virtchnl.h
+++ b/src/dpdk22/drivers/net/i40e/base/i40e_virtchnl.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2013 - 2014, Intel Corporation
+Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -66,30 +66,31 @@ POSSIBILITY OF SUCH DAMAGE.
* of the virtchnl_msg structure.
*/
enum i40e_virtchnl_ops {
-/* VF sends req. to pf for the following
- * ops.
+/* The PF sends status change events to VFs using
+ * the I40E_VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
*/
I40E_VIRTCHNL_OP_UNKNOWN = 0,
I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
- I40E_VIRTCHNL_OP_RESET_VF,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
- I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
- I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_VIRTCHNL_OP_ENABLE_QUEUES,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
- I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_VIRTCHNL_OP_DEL_VLAN,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
- I40E_VIRTCHNL_OP_GET_STATS,
- I40E_VIRTCHNL_OP_FCOE,
-/* PF sends status change events to vfs using
- * the following op.
- */
- I40E_VIRTCHNL_OP_EVENT,
+ I40E_VIRTCHNL_OP_RESET_VF = 2,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
+ I40E_VIRTCHNL_OP_ADD_VLAN = 12,
+ I40E_VIRTCHNL_OP_DEL_VLAN = 13,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ I40E_VIRTCHNL_OP_GET_STATS = 15,
+ I40E_VIRTCHNL_OP_FCOE = 16,
+ I40E_VIRTCHNL_OP_EVENT = 17,
+#ifdef I40E_SOL_VF_SUPPORT
+ I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19,
+#endif
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -118,7 +119,9 @@ struct i40e_virtchnl_msg {
* error regardless of version mismatch.
*/
#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 0
+#define I40E_VIRTCHNL_VERSION_MINOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
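With two minor versions defined, the PF can gate capability-bitmap handling on the negotiated version carried in i40e_virtchnl_version_info (declared just below); a minimal sketch of that check (helper name illustrative):

    /* sketch: does this VF speak the 1.1 capability-bitmap protocol? */
    static bool vf_has_caps(const struct i40e_virtchnl_version_info *ver)
    {
        return ver->major == I40E_VIRTCHNL_VERSION_MAJOR &&
               ver->minor > I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
    }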
struct i40e_virtchnl_version_info {
u32 major;
u32 minor;
@@ -137,7 +140,8 @@ struct i40e_virtchnl_version_info {
*/
/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
* PF responds with an indirect message containing
* i40e_virtchnl_vf_resource and one or more
* i40e_virtchnl_vsi_resource structures.
@@ -151,10 +155,14 @@ struct i40e_virtchnl_vsi_resource {
u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
};
/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
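Under version 1.1 the VF passes a u32 of these offload flags as the payload of I40E_VIRTCHNL_OP_GET_VF_RESOURCES; for example (flag selection illustrative):

    /* sketch: a 1.1 VF advertising L2, register-programmed RSS and VLAN */
    u32 caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
               I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
               I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
    /* sent as the 4-byte message body of the request */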
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
@@ -282,6 +290,23 @@ struct i40e_virtchnl_ether_addr_list {
struct i40e_virtchnl_ether_addr list[1];
};
+#ifdef I40E_SOL_VF_SUPPORT
+/* I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG
+ * VF sends this message to get the default MTU and list of additional ethernet
+ * addresses it is allowed to use.
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_addnl_solaris_config with zero or more
+ * i40e_virtchnl_ether_addr structures.
+ *
+ * It is expected that this operation will only ever be needed for Solaris VFs
+ * running under a Solaris PF.
+ */
+struct i40e_virtchnl_addnl_solaris_config {
+ u16 default_mtu;
+ struct i40e_virtchnl_ether_addr_list al;
+};
+
+#endif
/* I40E_VIRTCHNL_OP_ADD_VLAN
* VF sends this message to add one or more VLAN tag filters for receives.
* PF adds the filters and returns status.
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev.c b/src/dpdk22/drivers/net/i40e/i40e_ethdev.c
index b0e00464..57de71d5 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev.c
+++ b/src/dpdk22/drivers/net/i40e/i40e_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,6 +39,7 @@
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
+#include <assert.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
@@ -52,9 +53,11 @@
#include <rte_eth_ctrl.h>
#include "i40e_logs.h"
-#include "i40e/i40e_prototype.h"
-#include "i40e/i40e_adminq_cmd.h"
-#include "i40e/i40e_type.h"
+#include "base/i40e_prototype.h"
+#include "base/i40e_adminq_cmd.h"
+#include "base/i40e_type.h"
+#include "base/i40e_register.h"
+#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
@@ -73,13 +76,29 @@
/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS (384UL)
-/* Default queue interrupt throttling time in microseconds*/
-#define I40E_ITR_INDEX_DEFAULT 0
-#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
-#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
-
#define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */
+/* Flow control default timer */
+#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
+
+/* Flow control default high water */
+#define I40E_DEFAULT_HIGH_WATER (0x1C40/1024)
+
+/* Flow control default low water */
+#define I40E_DEFAULT_LOW_WATER (0x1A40/1024)
+
+/* Flow control enable fwd bit */
+#define I40E_PRTMAC_FWD_CTRL 0x00000001
+
+/* Receive Packet Buffer size */
+#define I40E_RXPBSIZE (968 * 1024)
+
+/* Kilobytes shift */
+#define I40E_KILOSHIFT 10
+
+/* Receive Average Packet Size in Bytes */
+#define I40E_PACKET_AVERAGE_SIZE 128
+
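The water marks are stored in kilobyte units, which is what I40E_KILOSHIFT (a shift by 10, i.e. divide by 1024) converts to: 0x1C40 = 7232 bytes, and 7232 / 1024 = 7 KB for the default high water mark; likewise 0x1A40 = 6720 bytes yields 6 KB for the low water mark under integer division.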
/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
@@ -93,9 +112,169 @@
I40E_PFINT_ICR0_ENA_VFLR_MASK | \
I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
-static int eth_i40e_dev_init(\
- __attribute__((unused)) struct eth_driver *eth_drv,
- struct rte_eth_dev *eth_dev);
+#define I40E_FLOW_TYPES ( \
+ (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+ (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+ (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
+ (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
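I40E_FLOW_TYPES is a membership bitmap over the rte_eth flow-type IDs, so support for a given type is a single bit test, e.g.:

    /* sketch: is this flow type handled by the i40e filter paths? */
    if (I40E_FLOW_TYPES & (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP))
        ; /* supported */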
+
+/* Additional timesync values. */
+#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
+#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
+#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
+#define I40E_PRTTSYN_TSYNENA 0x80000000
+#define I40E_PRTTSYN_TSYNTYPE 0x0e000000
+#define I40E_CYCLECOUNTER_MASK 0xffffffffffffffffULL
+
+#define I40E_MAX_PERCENT 100
+#define I40E_DEFAULT_DCB_APP_NUM 1
+#define I40E_DEFAULT_DCB_APP_PRIO 3
+
+#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32))
+#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8))
+#define I40E_GLQF_FD_MSK_FIELD 0x0000FFFF
+#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8))
+#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8))
+#define I40E_GLQF_HASH_MSK_FIELD 0x0000FFFF
+
+#define I40E_INSET_NONE            0x0000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define I40E_INSET_DMAC 0x0000000000000001ULL
+#define I40E_INSET_SMAC 0x0000000000000002ULL
+#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
+#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
+#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
+
+/* bit 8 ~ bit 15 */
+#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
+#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
+#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
+#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
+#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
+#define I40E_INSET_DST_PORT 0x0000000000002000ULL
+#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
+#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
+#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
+#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
+#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
+#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
+#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
+#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
+#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
+#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
+#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
+#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
+#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+/* bit 56 ~ bit 63, Flex Payload */
+#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD \
+ (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
+ I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
+ I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
+ I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+
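Each I40E_INSET_* bit names one abstract input-set field; callers OR them together to describe which packet fields feed a filter or hash, e.g.:

    /* sketch: classic 4-tuple input set for an IPv4/TCP flow */
    uint64_t input_set = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT;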
+/**
+ * The values below are used to program un-exposed registers, as
+ * suggested by silicon experts.
+ */
+/* Destination MAC address */
+#define I40E_REG_INSET_L2_DMAC 0xE000000000000000ULL
+/* Source MAC address */
+#define I40E_REG_INSET_L2_SMAC 0x1C00000000000000ULL
+/* VLAN tag in the outer L2 header */
+#define I40E_REG_INSET_L2_OUTER_VLAN 0x0000000000800000ULL
+/* VLAN tag in the inner L2 header */
+#define I40E_REG_INSET_L2_INNER_VLAN 0x0000000001000000ULL
+/* Source IPv4 address */
+#define I40E_REG_INSET_L3_SRC_IP4 0x0001800000000000ULL
+/* Destination IPv4 address */
+#define I40E_REG_INSET_L3_DST_IP4 0x0000001800000000ULL
+/* IPv4 Type of Service (TOS) */
+#define I40E_REG_INSET_L3_IP4_TOS 0x0040000000000000ULL
+/* IPv4 Protocol */
+#define I40E_REG_INSET_L3_IP4_PROTO 0x0004000000000000ULL
+/* Source IPv6 address */
+#define I40E_REG_INSET_L3_SRC_IP6 0x0007F80000000000ULL
+/* Destination IPv6 address */
+#define I40E_REG_INSET_L3_DST_IP6 0x000007F800000000ULL
+/* IPv6 Traffic Class (TC) */
+#define I40E_REG_INSET_L3_IP6_TC 0x0040000000000000ULL
+/* IPv6 Next Header */
+#define I40E_REG_INSET_L3_IP6_NEXT_HDR 0x0008000000000000ULL
+/* Source L4 port */
+#define I40E_REG_INSET_L4_SRC_PORT 0x0000000400000000ULL
+/* Destination L4 port */
+#define I40E_REG_INSET_L4_DST_PORT 0x0000000200000000ULL
+/* SCTP verification tag */
+#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG 0x0000000180000000ULL
+/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
+#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC 0x0000000001C00000ULL
+/* Source port of tunneling UDP */
+#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT 0x0000000000200000ULL
+/* Destination port of tunneling UDP */
+#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT 0x0000000000100000ULL
+/* UDP Tunneling ID, NVGRE/GRE key */
+#define I40E_REG_INSET_TUNNEL_ID 0x00000000000C0000ULL
+/* Last ether type */
+#define I40E_REG_INSET_LAST_ETHER_TYPE 0x0000000000004000ULL
+/* Tunneling outer destination IPv4 address */
+#define I40E_REG_INSET_TUNNEL_L3_DST_IP4 0x00000000000000C0ULL
+/* Tunneling outer destination IPv6 address */
+#define I40E_REG_INSET_TUNNEL_L3_DST_IP6 0x0000000000003FC0ULL
+/* 1st word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1 0x0000000000002000ULL
+/* 2nd word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2 0x0000000000001000ULL
+/* 3rd word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3 0x0000000000000800ULL
+/* 4th word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4 0x0000000000000400ULL
+/* 5th word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5 0x0000000000000200ULL
+/* 6th word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6 0x0000000000000100ULL
+/* 7th word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7 0x0000000000000080ULL
+/* 8th word of flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8 0x0000000000000040ULL
+
+#define I40E_REG_INSET_MASK_DEFAULT 0x0000000000000000ULL
+
+#define I40E_TRANSLATE_INSET 0
+#define I40E_TRANSLATE_REG 1
+
+#define I40E_INSET_IPV4_TOS_MASK 0x0009FF00UL
+#define I40E_INSET_IPV4_PROTO_MASK 0x000DFF00UL
+#define I40E_INSET_IPV6_TC_MASK 0x0009F00FUL
+#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL
+
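I40E_TRANSLATE_INSET and I40E_TRANSLATE_REG select the direction of a lookup between the abstract I40E_INSET_* bits and the register encodings above; a hedged sketch of the forward direction (table contents abbreviated):

    /* sketch: translate abstract input-set bits into register bits */
    struct inset_map { uint64_t inset; uint64_t reg; };
    static const struct inset_map maps[] = {
        { I40E_INSET_DMAC,     I40E_REG_INSET_L2_DMAC },
        { I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4 },
        { I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT },
        /* ... one row per supported field ... */
    };
    uint64_t reg_val = 0;
    size_t i;

    for (i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
        if (input_set & maps[i].inset)
            reg_val |= maps[i].reg;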
+static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
@@ -108,6 +287,8 @@ static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static void i40e_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
+static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstats *xstats, unsigned n);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
uint16_t queue_id,
@@ -126,6 +307,8 @@ static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
+static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
@@ -147,6 +330,8 @@ static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
+static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
+static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
@@ -155,7 +340,7 @@ static void i40e_stat_update_48(struct i40e_hw *hw,
bool offset_loaded,
uint64_t *offset,
uint64_t *stat);
-static void i40e_pf_config_irq0(struct i40e_hw *hw);
+static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(
__rte_unused struct rte_intr_handle *handle, void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
@@ -198,18 +383,45 @@ static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
+static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info);
static void i40e_configure_registers(struct i40e_hw *hw);
-
-/* Default hash key buffer for RSS */
-static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];
-
-static struct rte_pci_id pci_id_i40e_map[] = {
+static void i40e_hw_init(struct i40e_hw *hw);
+static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
+static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t sw_id, uint8_t on);
+static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
+
+static int i40e_timesync_enable(struct rte_eth_dev *dev);
+static int i40e_timesync_disable(struct rte_eth_dev *dev);
+static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags);
+static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
+
+static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+
+static int i40e_timesync_read_time(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int i40e_timesync_write_time(struct rte_eth_dev *dev,
+ const struct timespec *timestamp);
+
+static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+
+
+static const struct rte_pci_id pci_id_i40e_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
};
-static struct eth_dev_ops i40e_eth_dev_ops = {
+static const struct eth_dev_ops i40e_eth_dev_ops = {
.dev_configure = i40e_dev_configure,
.dev_start = i40e_dev_start,
.dev_stop = i40e_dev_stop,
@@ -222,7 +434,9 @@ static struct eth_dev_ops i40e_eth_dev_ops = {
.dev_set_link_down = i40e_dev_set_link_down,
.link_update = i40e_dev_link_update,
.stats_get = i40e_dev_stats_get,
+ .xstats_get = i40e_dev_xstats_get,
.stats_reset = i40e_dev_stats_reset,
+ .xstats_reset = i40e_dev_stats_reset,
.queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
.dev_infos_get = i40e_dev_info_get,
.vlan_filter_set = i40e_vlan_filter_set,
@@ -235,6 +449,8 @@ static struct eth_dev_ops i40e_eth_dev_ops = {
.tx_queue_start = i40e_dev_tx_queue_start,
.tx_queue_stop = i40e_dev_tx_queue_stop,
.rx_queue_setup = i40e_dev_rx_queue_setup,
+ .rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable,
.rx_queue_release = i40e_dev_rx_queue_release,
.rx_queue_count = i40e_dev_rx_queue_count,
.rx_descriptor_done = i40e_dev_rx_descriptor_done,
@@ -242,6 +458,7 @@ static struct eth_dev_ops i40e_eth_dev_ops = {
.tx_queue_release = i40e_dev_tx_queue_release,
.dev_led_on = i40e_dev_led_on,
.dev_led_off = i40e_dev_led_off,
+ .flow_ctrl_get = i40e_flow_ctrl_get,
.flow_ctrl_set = i40e_flow_ctrl_set,
.priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
.mac_addr_add = i40e_macaddr_add,
@@ -253,27 +470,146 @@ static struct eth_dev_ops i40e_eth_dev_ops = {
.udp_tunnel_add = i40e_dev_udp_tunnel_add,
.udp_tunnel_del = i40e_dev_udp_tunnel_del,
.filter_ctrl = i40e_dev_filter_ctrl,
+ .rxq_info_get = i40e_rxq_info_get,
+ .txq_info_get = i40e_txq_info_get,
+ .mirror_rule_set = i40e_mirror_rule_set,
+ .mirror_rule_reset = i40e_mirror_rule_reset,
+ .timesync_enable = i40e_timesync_enable,
+ .timesync_disable = i40e_timesync_disable,
+ .timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = i40e_timesync_read_tx_timestamp,
+ .get_dcb_info = i40e_dev_get_dcb_info,
+ .timesync_adjust_time = i40e_timesync_adjust_time,
+ .timesync_read_time = i40e_timesync_read_time,
+ .timesync_write_time = i40e_timesync_write_time,
+};
+
+/* store statistics names and its offset in stats structure */
+struct rte_i40e_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
+ {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
+ {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
+ {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
+ {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
+ {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
+ rx_unknown_protocol)},
+ {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
+ {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
+ {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
+ {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
+};
+
+#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
+ sizeof(rte_i40e_stats_strings[0]))
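These offsetof() tables drive a generic extraction loop: each named statistic is read by adding its recorded offset to the base of the stats struct. A sketch of the pattern the new xstats path presumably follows (eth_stats is an assumed pointer to a populated struct i40e_eth_stats):

    /* sketch: pull counters out of the stats struct by recorded offset */
    unsigned i;
    char *base = (char *)eth_stats;

    for (i = 0; i < I40E_NB_ETH_XSTATS; i++)
        xstats[i].value =
            *(uint64_t *)(base + rte_i40e_stats_strings[i].offset);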
+
+static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
+ {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
+ tx_dropped_link_down)},
+ {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
+ {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
+ illegal_bytes)},
+ {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
+ {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
+ mac_local_faults)},
+ {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
+ mac_remote_faults)},
+ {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
+ rx_length_errors)},
+ {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
+ {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
+ {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
+ {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
+ {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
+ {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_127)},
+ {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_255)},
+ {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_511)},
+ {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_1023)},
+ {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_1522)},
+ {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
+ rx_size_big)},
+ {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
+ rx_undersize)},
+ {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
+ rx_oversize)},
+ {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
+ mac_short_packet_dropped)},
+ {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
+ rx_fragments)},
+ {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
+ {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
+ {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_127)},
+ {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_255)},
+ {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_511)},
+ {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_1023)},
+ {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_1522)},
+ {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
+ tx_size_big)},
+ {"rx_flow_director_atr_match_packets",
+ offsetof(struct i40e_hw_port_stats, fd_atr_match)},
+ {"rx_flow_director_sb_match_packets",
+ offsetof(struct i40e_hw_port_stats, fd_sb_match)},
+ {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
+ tx_lpi_status)},
+ {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
+ rx_lpi_status)},
+ {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
+ tx_lpi_count)},
+ {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
+ rx_lpi_count)},
+};
+
+#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
+ sizeof(rte_i40e_hw_port_strings[0]))
+
+static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
+ {"xon_packets", offsetof(struct i40e_hw_port_stats,
+ priority_xon_rx)},
+ {"xoff_packets", offsetof(struct i40e_hw_port_stats,
+ priority_xoff_rx)},
+};
+
+#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
+ sizeof(rte_i40e_rxq_prio_strings[0]))
+
+static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
+ {"xon_packets", offsetof(struct i40e_hw_port_stats,
+ priority_xon_tx)},
+ {"xoff_packets", offsetof(struct i40e_hw_port_stats,
+ priority_xoff_tx)},
+ {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
+ priority_xon_2_xoff)},
};
+#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
+ sizeof(rte_i40e_txq_prio_strings[0]))
+
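Since the per-priority tables are replicated once per user priority, the total extended-statistics count presumably works out to I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS + 8 * (I40E_NB_RXQ_PRIO_XSTATS + I40E_NB_TXQ_PRIO_XSTATS), assuming the usual eight 802.1p priority levels.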
static struct eth_driver rte_i40e_pmd = {
- {
+ .pci_drv = {
.name = "rte_i40e_pmd",
.id_table = pci_id_i40e_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_DETACHABLE,
},
.eth_dev_init = eth_i40e_dev_init,
+ .eth_dev_uninit = eth_i40e_dev_uninit,
.dev_private_size = sizeof(struct i40e_adapter),
};
static inline int
-i40e_align_floor(int n)
-{
- if (n == 0)
- return 0;
- return (1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n)));
-}
-
-static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
struct rte_eth_link *link)
{
@@ -352,95 +688,32 @@ static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
}
-#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32))
-#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8))
-
-void dump_regs(struct i40e_hw *hw)
-{
- int reg_nums[] = {31, 33, 34, 35, 41, 43};
- int i;
- uint32_t reg;
-
- for (i =0; i < sizeof (reg_nums)/sizeof(int); i++) {
- reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(reg_nums[i], 0));
- printf("I40E_PRTQF_FD_INSET(%d, 0): 0x%08x\n", reg_nums[i], reg);
- reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(reg_nums[i], 1));
- printf("I40E_PRTQF_FD_INSET(%d, 1): 0x%08x\n", reg_nums[i], reg);
- }
-}
+#define I40E_FLOW_CONTROL_ETHERTYPE 0x8808
-static inline void i40e_fillter_fields_reg_init(struct i40e_hw *hw)
+/*
+ * Add an ethertype filter to drop all flow control frames transmitted
+ * from VSIs.
+ */
+static void
+i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
- uint32_t reg;
-
-
- reg = I40E_READ_REG(hw,I40E_GLQF_ORT(12));
- //printf("GLQF_ORT(12) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(12), 0x00000062);
-
- reg = I40E_READ_REG(hw,I40E_GLQF_PIT(2));
- //printf("I40E_GLQF_PIT(2) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_GLQF_PIT(2), 0x000024A0);
-
- reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(31, 0));
- //printf("I40E_PRTQF_FD_INSET(31, 0) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(31, 0), 0);
- reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(31, 1));
- //printf("I40E_PRTQF_FD_INSET(31, 1) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(31, 1), 0x00040000);
-
- reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(33, 0));
- //printf("I40E_PRTQF_FD_INSET(33, 0) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(33, 0), 0);
- reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(33, 1));
- //printf("I40E_PRTQF_FD_INSET(33, 1) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(33, 1), 0x00040000);
-
- reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(41, 0));
- //printf("I40E_PRTQF_FD_INSET(41, 0) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(41, 0), 0);
- reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(41, 1));
- //printf("I40E_PRTQF_FD_INSET(41, 1) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(41, 1), 0x00080000);
-
- reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(43, 0));
- //printf("I40E_PRTQF_FD_INSET(43, 0) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(43, 0), 0);
- reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(43, 1));
- //printf("I40E_PRTQF_FD_INSET(43, 1) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(43, 1), 0x00080000);
-
- reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(34, 0));
- //printf("I40E_PRTQF_FD_INSET(34, 0) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(34, 0), 0);
- reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(34, 1));
- //printf("I40E_PRTQF_FD_INSET(34, 1) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(34, 1), 0x00040000);
-
- // filter IP according to ttl and L4 protocol
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(35, 0), 0);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(35, 1), 0x00040000);
-
- reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(44, 0));
- //printf("I40E_PRTQF_FD_INSET(44, 0) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(44, 0), 0);
- reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(44, 1));
- //printf("I40E_PRTQF_FD_INSET(44, 1) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(44, 1), 0x00080000);
-
- reg = I40E_READ_REG(hw,I40E_GLQF_FD_MSK(0, 34));
- //printf("I40E_GLQF_FD_MSK(0, 34) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_GLQF_FD_MSK(0, 34), 0x000DFF00);
- reg = I40E_READ_REG(hw,I40E_GLQF_FD_MSK(0, 44));
- //printf("I40E_GLQF_FD_MSK(0, 44) = 0x%08x\n", reg);
- I40E_WRITE_REG(hw, I40E_GLQF_FD_MSK(0,44), 0x000C00FF);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+ int ret;
- I40E_WRITE_FLUSH(hw);
+ ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
+ I40E_FLOW_CONTROL_ETHERTYPE, flags,
+ pf->main_vsi_seid, 0,
+ TRUE, NULL, NULL);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control "
+ " frames from VSIs.");
}
static int
-eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
- struct rte_eth_dev *dev)
+eth_i40e_dev_init(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev;
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -460,11 +733,14 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
* has already done this work. Only check we don't need a different
* RX function */
if (rte_eal_process_type() != RTE_PROC_PRIMARY){
- if (dev->data->scattered_rx)
- dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ i40e_set_rx_function(dev);
+ i40e_set_tx_function(dev);
return 0;
}
pci_dev = dev->pci_dev;
+
+ rte_eth_copy_pci_info(dev, pci_dev);
+
pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
pf->adapter->eth_dev = dev;
pf->dev_data = dev->data;
@@ -483,10 +759,14 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
hw->bus.device = pci_dev->addr.devid;
hw->bus.func = pci_dev->addr.function;
+ hw->adapter_stopped = 0;
/* Make sure all is clean before doing PF reset */
i40e_clear_hw(hw);
+ /* Initialize the hardware */
+ i40e_hw_init(hw);
+
/* Reset here to make sure all is clean for each PF */
ret = i40e_pf_reset(hw);
if (ret) {
@@ -506,8 +786,7 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
* for flexible payload by software.
* It should be removed once issues are fixed in NVM.
*/
- //i40e_flex_payload_reg_init(hw);
- i40e_fillter_fields_reg_init(hw);
+ i40e_flex_payload_reg_init(hw);
/* Initialize the parameters for adminq */
i40e_init_adminq_parameter(hw);
@@ -523,11 +802,6 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
((hw->nvm.version >> 4) & 0xff),
(hw->nvm.version & 0xf), hw->nvm.eetrack);
- /* Disable LLDP */
- ret = i40e_aq_stop_lldp(hw, true, NULL);
- if (ret != I40E_SUCCESS) /* Its failure can be ignored */
- PMD_INIT_LOG(INFO, "Failed to stop lldp");
-
/* Clear PXE mode */
i40e_clear_pxe_mode(hw);
@@ -633,11 +907,32 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
i40e_dev_interrupt_handler, (void *)dev);
/* configure and enable device interrupt */
- i40e_pf_config_irq0(hw);
+ i40e_pf_config_irq0(hw, TRUE);
i40e_pf_enable_irq0(hw);
/* enable uio intr after callback register */
rte_intr_enable(&(pci_dev->intr_handle));
+ /*
+ * Add an ethertype filter to drop all flow control frames transmitted
+	 * from VSIs. By doing so, we stop VFs from sending out PAUSE or PFC
+	 * frames to the wire.
+ */
+ i40e_add_tx_flow_control_drop_filter(pf);
+
+ /* Set the max frame size to 0x2600 by default,
+ * in case other drivers changed the default value.
+ */
+ i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
+
+ /* initialize mirror rule list */
+ TAILQ_INIT(&pf->mirror_list);
+
+ /* Init dcb to sw mode by default */
+ ret = i40e_dcb_init_configure(dev, TRUE);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(INFO, "Failed to init dcb.");
+ pf->flags &= ~I40E_FLAG_DCB;
+ }
return 0;
@@ -660,11 +955,80 @@ err_get_capabilities:
}
static int
+eth_i40e_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct i40e_hw *hw;
+ struct i40e_filter_control_settings settings;
+ int ret;
+ uint8_t aq_fail = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = dev->pci_dev;
+
+ if (hw->adapter_stopped == 0)
+ i40e_dev_close(dev);
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ /* Disable LLDP */
+ ret = i40e_aq_stop_lldp(hw, true, NULL);
+ if (ret != I40E_SUCCESS) /* Its failure can be ignored */
+ PMD_INIT_LOG(INFO, "Failed to stop lldp");
+
+ /* Clear PXE mode */
+ i40e_clear_pxe_mode(hw);
+
+ /* Unconfigure filter control */
+ memset(&settings, 0, sizeof(settings));
+ ret = i40e_set_filter_control(hw, &settings);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
+ ret);
+
+ /* Disable flow control */
+ hw->fc.requested_mode = I40E_FC_NONE;
+ i40e_set_fc(hw, &aq_fail, TRUE);
+
+ /* uninitialize pf host driver */
+ i40e_pf_host_uninit(dev);
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(&(pci_dev->intr_handle));
+
+ /* register callback func to eal lib */
+ rte_intr_callback_unregister(&(pci_dev->intr_handle),
+ i40e_dev_interrupt_handler, (void *)dev);
+
+ return 0;
+}
+
+static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
- int ret;
+ int i, ret;
+
+	/* Initialize to TRUE. If any Rx queue doesn't meet the
+	 * bulk allocation or vector Rx preconditions, we will reset it.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ ad->tx_simple_allowed = true;
+ ad->tx_vec_allowed = true;
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
ret = i40e_fdir_setup(pf);
@@ -701,8 +1065,27 @@ i40e_dev_configure(struct rte_eth_dev *dev)
if (ret)
goto err;
}
+
+ if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ ret = i40e_dcb_setup(dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to configure DCB.");
+ goto err_dcb;
+ }
+ }
+
return 0;
+
+err_dcb:
+ /* need to release vmdq resource if exists */
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ i40e_vsi_release(pf->vmdq[i].vsi);
+ pf->vmdq[i].vsi = NULL;
+ }
+ rte_free(pf->vmdq);
+ pf->vmdq = NULL;
err:
+ /* need to release fdir resource if exists */
i40e_fdir_teardown(pf);
return ret;
}
@@ -710,6 +1093,8 @@ err:
void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
uint16_t i;
@@ -721,52 +1106,50 @@ i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
}
if (vsi->type != I40E_VSI_SRIOV) {
- I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
- I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
- msix_vect - 1), 0);
+ if (!rte_intr_allow_others(intr_handle)) {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+ I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
+ 0);
+ } else {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
+ I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
+ msix_vect - 1), 0);
+ }
} else {
uint32_t reg;
reg = (hw->func_caps.num_msix_vectors_vf - 1) *
vsi->user_param + (msix_vect - 1);
- I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
+ I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
}
I40E_WRITE_FLUSH(hw);
}
-static inline uint16_t
-i40e_calc_itr_interval(int16_t interval)
-{
- if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
- interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
-
- /* Convert to hardware count, as writing each 1 represents 2 us */
- return (interval/2);
-}
-
-void
-i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
+static void
+__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
+ int base_queue, int nb_queue)
{
+ int i;
uint32_t val;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
- uint16_t msix_vect = vsi->msix_intr;
- int i;
-
- for (i = 0; i < vsi->nb_qps; i++)
- I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
/* Bind all RX queues to allocated MSIX interrupt */
- for (i = 0; i < vsi->nb_qps; i++) {
+ for (i = 0; i < nb_queue; i++) {
val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
I40E_QINT_RQCTL_ITR_INDX_MASK |
- ((vsi->base_queue + i + 1) <<
- I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ ((base_queue + i + 1) <<
+ I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
I40E_QINT_RQCTL_CAUSE_ENA_MASK;
- if (i == vsi->nb_qps - 1)
+ if (i == nb_queue - 1)
val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
- I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
}
/* Write first RX queue to Link list register as the head element */
@@ -774,56 +1157,172 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
uint16_t interval =
i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
- I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
- (vsi->base_queue <<
- I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
- (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
-
- I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
- msix_vect - 1), interval);
-
-#ifndef I40E_GLINT_CTL
-#define I40E_GLINT_CTL 0x0003F800
-#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK 0x4
-#endif
- /* Disable auto-mask on enabling of all none-zero interrupt */
- I40E_WRITE_REG(hw, I40E_GLINT_CTL,
- I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
+ if (msix_vect == I40E_MISC_VEC_ID) {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+ (base_queue <<
+ I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+ (0x0 <<
+ I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
+ interval);
+ } else {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
+ (base_queue <<
+ I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
+ (0x0 <<
+ I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
+ msix_vect - 1),
+ interval);
+ }
} else {
uint32_t reg;
- /* num_msix_vectors_vf needs to minus irq0 */
- reg = (hw->func_caps.num_msix_vectors_vf - 1) *
- vsi->user_param + (msix_vect - 1);
+ if (msix_vect == I40E_MISC_VEC_ID) {
+ I40E_WRITE_REG(hw,
+ I40E_VPINT_LNKLST0(vsi->user_param),
+ (base_queue <<
+ I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+ (0x0 <<
+ I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+ } else {
+ /* num_msix_vectors_vf needs to minus irq0 */
+ reg = (hw->func_caps.num_msix_vectors_vf - 1) *
+ vsi->user_param + (msix_vect - 1);
- I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue <<
+ I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
+ (base_queue <<
I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
- (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+ (0x0 <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+ }
}
I40E_WRITE_FLUSH(hw);
}
+void
+i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t msix_vect = vsi->msix_intr;
+ uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
+ uint16_t queue_idx = 0;
+ int record = 0;
+ uint32_t val;
+ int i;
+
+ for (i = 0; i < vsi->nb_qps; i++) {
+ I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
+ }
+
+ /* INTENA flag is not auto-cleared for interrupt */
+ val = I40E_READ_REG(hw, I40E_GLINT_CTL);
+ val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
+ I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
+ I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+ I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
+
+ /* VF bind interrupt */
+ if (vsi->type == I40E_VSI_SRIOV) {
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue, vsi->nb_qps);
+ return;
+ }
+
+ /* PF & VMDq bind interrupt */
+ if (rte_intr_dp_is_en(intr_handle)) {
+ if (vsi->type == I40E_VSI_MAIN) {
+ queue_idx = 0;
+ record = 1;
+ } else if (vsi->type == I40E_VSI_VMDQ2) {
+ struct i40e_vsi *main_vsi =
+ I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
+ queue_idx = vsi->base_queue - main_vsi->nb_qps;
+ record = 1;
+ }
+ }
+
+ for (i = 0; i < vsi->nb_used_qps; i++) {
+ if (nb_msix <= 1) {
+ if (!rte_intr_allow_others(intr_handle))
+ /* allow to share MISC_VEC_ID */
+ msix_vect = I40E_MISC_VEC_ID;
+
+			/* not enough MSI-X vectors, map all remaining queues to one */
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue + i,
+ vsi->nb_used_qps - i);
+ for (; !!record && i < vsi->nb_used_qps; i++)
+ intr_handle->intr_vec[queue_idx + i] =
+ msix_vect;
+ break;
+ }
+ /* 1:1 queue/msix_vect mapping */
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue + i, 1);
+ if (!!record)
+ intr_handle->intr_vec[queue_idx + i] = msix_vect;
+
+ msix_vect++;
+ nb_msix--;
+ }
+}
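As a worked example of the loop above: with nb_used_qps = 4 and only two data-path vectors, the first iteration binds queue 0 to its own vector 1:1 and decrements nb_msix to 1; the next iteration then falls into the nb_msix <= 1 branch, chains queues 1-3 to the one remaining vector in a single __vsi_queues_bind_intr() call, records that shared vector in intr_vec[] for each of them, and breaks out.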
+
static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t interval = i40e_calc_itr_interval(\
- RTE_LIBRTE_I40E_ITR_INTERVAL);
-
- I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
- I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ RTE_LIBRTE_I40E_ITR_INTERVAL);
+ uint16_t msix_intr, i;
+
+ if (rte_intr_allow_others(intr_handle))
+ for (i = 0; i < vsi->nb_msix; i++) {
+ msix_intr = vsi->msix_intr + i;
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
+ I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ (interval <<
+ I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ }
+ else
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
+
+ I40E_WRITE_FLUSH(hw);
}
static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t msix_intr, i;
+
+ if (rte_intr_allow_others(intr_handle))
+ for (i = 0; i < vsi->nb_msix; i++) {
+ msix_intr = vsi->msix_intr + i;
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
+ 0);
+ }
+ else
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
- I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
+ I40E_WRITE_FLUSH(hw);
}
static inline uint8_t
@@ -853,54 +1352,15 @@ i40e_parse_link_speed(uint16_t eth_link_speed)
}
static int
-i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
-{
- enum i40e_status_code status;
- struct i40e_aq_get_phy_abilities_resp phy_ab;
- struct i40e_aq_set_phy_config phy_conf;
- const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
- I40E_AQ_PHY_FLAG_PAUSE_RX |
- I40E_AQ_PHY_FLAG_LOW_POWER;
- const uint8_t advt = I40E_LINK_SPEED_40GB |
- I40E_LINK_SPEED_10GB |
- I40E_LINK_SPEED_1GB |
- I40E_LINK_SPEED_100MB;
- int ret = -ENOTSUP;
-
- status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
- NULL);
- if (status)
- return ret;
-
- memset(&phy_conf, 0, sizeof(phy_conf));
-
- /* bits 0-2 use the values from get_phy_abilities_resp */
- abilities &= ~mask;
- abilities |= phy_ab.abilities & mask;
-
- /* update ablities and speed */
- if (abilities & I40E_AQ_PHY_AN_ENABLED)
- phy_conf.link_speed = advt;
- else
- phy_conf.link_speed = force_speed;
-
- phy_conf.abilities = abilities;
-
- /* use get_phy_abilities_resp value for the rest */
- phy_conf.phy_type = phy_ab.phy_type;
- phy_conf.eee_capability = phy_ab.eee_capability;
- phy_conf.eeer = phy_ab.eeer_val;
- phy_conf.low_power_ctrl = phy_ab.d3_lpan;
-
- PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
- phy_ab.abilities, phy_ab.link_speed);
- PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x",
- phy_conf.abilities, phy_conf.link_speed);
-
- status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
- if (status)
- return ret;
-
+i40e_phy_conf_link(__rte_unused struct i40e_hw *hw,
+ __rte_unused uint8_t abilities,
+ __rte_unused uint8_t force_speed)
+{
+ /* Skip any phy config on both 10G and 40G interfaces, as a workaround
+	 * for the firmware limitation that all link control must be handled
+	 * by firmware. Revisit this if future firmware versions expose link
+	 * control to the software driver.
+ */
return I40E_SUCCESS;
}
@@ -929,6 +1389,10 @@ i40e_dev_start(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
int ret, i;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ uint32_t intr_vector = 0;
+
+ hw->adapter_stopped = 0;
if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
@@ -938,6 +1402,28 @@ i40e_dev_start(struct rte_eth_dev *dev)
return -EINVAL;
}
+ rte_intr_disable(intr_handle);
+
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int),
+ 0);
+ if (!intr_handle->intr_vec) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec\n", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
/* Initialize VSI */
ret = i40e_dev_rxtx_init(pf);
if (ret != I40E_SUCCESS) {
@@ -946,11 +1432,14 @@ i40e_dev_start(struct rte_eth_dev *dev)
}
/* Map queues with MSIX interrupt */
+ main_vsi->nb_used_qps = dev->data->nb_rx_queues -
+ pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
i40e_vsi_queues_bind_intr(main_vsi);
i40e_vsi_enable_queues_intr(main_vsi);
/* Map VMDQ VSI queues with MSIX interrupt */
for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
}
@@ -987,6 +1476,22 @@ i40e_dev_start(struct rte_eth_dev *dev)
goto err_up;
}
+ if (!rte_intr_allow_others(intr_handle)) {
+ rte_intr_callback_unregister(intr_handle,
+ i40e_dev_interrupt_handler,
+ (void *)dev);
+ /* configure and enable device interrupt */
+ i40e_pf_config_irq0(hw, FALSE);
+ i40e_pf_enable_irq0(hw);
+
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ PMD_INIT_LOG(INFO, "lsc won't enable because of"
+ " no intr multiplex\n");
+ }
+
+ /* enable uio intr after callback register */
+ rte_intr_enable(intr_handle);
+
return I40E_SUCCESS;
err_up:
@@ -1001,6 +1506,8 @@ i40e_dev_stop(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
+ struct i40e_mirror_rule *p_mirror;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
int i;
/* Disable all queues */
@@ -1016,8 +1523,8 @@ i40e_dev_stop(struct rte_eth_dev *dev)
}
if (pf->fdir.fdir_vsi) {
- i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
- i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+ i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
+ i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
}
/* Clear all queues and release memory */
i40e_dev_clear_queues(dev);
@@ -1025,6 +1532,25 @@ i40e_dev_stop(struct rte_eth_dev *dev)
/* Set link down */
i40e_dev_set_link_down(dev);
+ /* Remove all mirror rules */
+ while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
+ TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
+ rte_free(p_mirror);
+ }
+ pf->nb_mirror_rule = 0;
+
+ if (!rte_intr_allow_others(intr_handle))
+ /* resume to the default handler */
+ rte_intr_callback_register(intr_handle,
+ i40e_dev_interrupt_handler,
+ (void *)dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
}
static void
@@ -1033,10 +1559,13 @@ i40e_dev_close(struct rte_eth_dev *dev)
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t reg;
+ int i;
PMD_INIT_FUNC_TRACE();
i40e_dev_stop(dev);
+ hw->adapter_stopped = 1;
+ i40e_dev_free_queues(dev);
/* Disable interrupt */
i40e_pf_disable_irq0(hw);
@@ -1049,6 +1578,14 @@ i40e_dev_close(struct rte_eth_dev *dev)
i40e_fdir_teardown(pf);
i40e_vsi_release(pf->main_vsi);
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ i40e_vsi_release(pf->vmdq[i].vsi);
+ pf->vmdq[i].vsi = NULL;
+ }
+
+ rte_free(pf->vmdq);
+ pf->vmdq = NULL;
+
/* shutdown the adminq */
i40e_aq_queue_shutdown(hw, true);
i40e_shutdown_adminq(hw);
@@ -1157,28 +1694,37 @@ i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev)
int
i40e_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete)
+ int wait_to_complete)
{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_link_status link_status;
struct rte_eth_link link, old;
int status;
+ unsigned rep_cnt = MAX_REPEAT_TIME;
memset(&link, 0, sizeof(link));
memset(&old, 0, sizeof(old));
memset(&link_status, 0, sizeof(link_status));
rte_i40e_dev_atomic_read_link_status(dev, &old);
- /* Get link status information from hardware */
- status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
- if (status != I40E_SUCCESS) {
- link.link_speed = ETH_LINK_SPEED_100;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- PMD_DRV_LOG(ERR, "Failed to get link info");
- goto out;
- }
+ do {
+ /* Get link status information from hardware */
+ status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
+ if (status != I40E_SUCCESS) {
+ link.link_speed = ETH_LINK_SPEED_100;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ PMD_DRV_LOG(ERR, "Failed to get link info");
+ goto out;
+ }
+
+ link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
+ if (!wait_to_complete)
+ break;
- link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
+ rte_delay_ms(CHECK_INTERVAL);
+ } while (!link.link_status && rep_cnt--);
if (!link.link_status)
goto out;
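The retry loop above makes a blocking query poll the admin queue every CHECK_INTERVAL ms, up to MAX_REPEAT_TIME times, so a down link costs the caller at most about one second. The two paths map onto the standard ethdev calls; a caller sketch (hypothetical port id, needs <rte_ethdev.h> and <stdio.h>):

static void show_link(uint8_t port_id)
{
	struct rte_eth_link link;

	rte_eth_link_get(port_id, &link);  /* wait_to_complete = 1, may block ~1s */
	/* rte_eth_link_get_nowait() takes the wait_to_complete = 0 path */
	if (link.link_status)
		printf("port %u: link up at %u Mbps\n", port_id, link.link_speed);
}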
@@ -1263,30 +1809,27 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
vsi->vsi_id);
- PMD_DRV_LOG(DEBUG, "rx_bytes: %lu", nes->rx_bytes);
- PMD_DRV_LOG(DEBUG, "rx_unicast: %lu", nes->rx_unicast);
- PMD_DRV_LOG(DEBUG, "rx_multicast: %lu", nes->rx_multicast);
- PMD_DRV_LOG(DEBUG, "rx_broadcast: %lu", nes->rx_broadcast);
- PMD_DRV_LOG(DEBUG, "rx_discards: %lu", nes->rx_discards);
- PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
+ PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
+ PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
+ PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
+ PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
+ PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
+ PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
nes->rx_unknown_protocol);
- PMD_DRV_LOG(DEBUG, "tx_bytes: %lu", nes->tx_bytes);
- PMD_DRV_LOG(DEBUG, "tx_unicast: %lu", nes->tx_unicast);
- PMD_DRV_LOG(DEBUG, "tx_multicast: %lu", nes->tx_multicast);
- PMD_DRV_LOG(DEBUG, "tx_broadcast: %lu", nes->tx_broadcast);
- PMD_DRV_LOG(DEBUG, "tx_discards: %lu", nes->tx_discards);
- PMD_DRV_LOG(DEBUG, "tx_errors: %lu", nes->tx_errors);
+ PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
+ PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
+ PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
+ PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
+ PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
+ PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
vsi->vsi_id);
}
-/* Get all statistics of a port */
static void
-i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
{
- uint32_t i;
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ unsigned int i;
struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
@@ -1307,6 +1850,12 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
I40E_GLPRT_BPRCL(hw->port),
pf->offset_loaded, &os->eth.rx_broadcast,
&ns->eth.rx_broadcast);
+ /* Workaround: CRC size should not be included in byte statistics,
+ * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
+ */
+ ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
+ ns->eth.rx_broadcast) * ETHER_CRC_LEN;
+
i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
pf->offset_loaded, &os->eth.rx_discards,
&ns->eth.rx_discards);
@@ -1332,9 +1881,8 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
I40E_GLPRT_BPTCL(hw->port),
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
- i40e_stat_update_32(hw, I40E_GLPRT_TDPC(hw->port),
- pf->offset_loaded, &os->eth.tx_discards,
- &ns->eth.tx_discards);
+ ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
+ ns->eth.tx_broadcast) * ETHER_CRC_LEN;
/* GLPRT_TEPC not supported */
/* additional port specific stats */
@@ -1471,92 +2019,109 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (pf->main_vsi)
i40e_update_vsi_stats(pf->main_vsi);
+}
- stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
- ns->eth.rx_broadcast;
- stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
- ns->eth.tx_broadcast;
+/* Get all statistics of a port */
+static void
+i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
+ unsigned i;
+
+ /* read the stats registers to refresh the counters, then fill the struct */
+ i40e_read_stats_registers(pf, hw);
+
+ stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
+ pf->main_vsi->eth_stats.rx_multicast +
+ pf->main_vsi->eth_stats.rx_broadcast -
+ pf->main_vsi->eth_stats.rx_discards;
+ stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
+ pf->main_vsi->eth_stats.tx_multicast +
+ pf->main_vsi->eth_stats.tx_broadcast;
stats->ibytes = ns->eth.rx_bytes;
stats->obytes = ns->eth.tx_bytes;
- stats->oerrors = ns->eth.tx_errors;
- stats->imcasts = ns->eth.rx_multicast;
- stats->fdirmatch = ns->fd_sb_match;
+ stats->oerrors = ns->eth.tx_errors +
+ pf->main_vsi->eth_stats.tx_errors;
+ stats->imcasts = pf->main_vsi->eth_stats.rx_multicast;
/* Rx Errors */
- stats->ibadcrc = ns->crc_errors;
- stats->ibadlen = ns->rx_length_errors + ns->rx_undersize +
- ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
- stats->imissed = ns->eth.rx_discards;
- stats->ierrors = stats->ibadcrc + stats->ibadlen + stats->imissed;
+ stats->imissed = ns->eth.rx_discards +
+ pf->main_vsi->eth_stats.rx_discards;
+ stats->ierrors = ns->crc_errors +
+ ns->rx_length_errors + ns->rx_undersize +
+ ns->rx_oversize + ns->rx_fragments + ns->rx_jabber +
+ stats->imissed;
PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
- PMD_DRV_LOG(DEBUG, "rx_bytes: %lu", ns->eth.rx_bytes);
- PMD_DRV_LOG(DEBUG, "rx_unicast: %lu", ns->eth.rx_unicast);
- PMD_DRV_LOG(DEBUG, "rx_multicast: %lu", ns->eth.rx_multicast);
- PMD_DRV_LOG(DEBUG, "rx_broadcast: %lu", ns->eth.rx_broadcast);
- PMD_DRV_LOG(DEBUG, "rx_discards: %lu", ns->eth.rx_discards);
- PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
+ PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
+ PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
+ PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", ns->eth.rx_multicast);
+ PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", ns->eth.rx_broadcast);
+ PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", ns->eth.rx_discards);
+ PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
ns->eth.rx_unknown_protocol);
- PMD_DRV_LOG(DEBUG, "tx_bytes: %lu", ns->eth.tx_bytes);
- PMD_DRV_LOG(DEBUG, "tx_unicast: %lu", ns->eth.tx_unicast);
- PMD_DRV_LOG(DEBUG, "tx_multicast: %lu", ns->eth.tx_multicast);
- PMD_DRV_LOG(DEBUG, "tx_broadcast: %lu", ns->eth.tx_broadcast);
- PMD_DRV_LOG(DEBUG, "tx_discards: %lu", ns->eth.tx_discards);
- PMD_DRV_LOG(DEBUG, "tx_errors: %lu", ns->eth.tx_errors);
-
- PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %lu",
+ PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
+ PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
+ PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", ns->eth.tx_multicast);
+ PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", ns->eth.tx_broadcast);
+ PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", ns->eth.tx_discards);
+ PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);
+
+ PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
ns->tx_dropped_link_down);
- PMD_DRV_LOG(DEBUG, "crc_errors: %lu", ns->crc_errors);
- PMD_DRV_LOG(DEBUG, "illegal_bytes: %lu",
+ PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
+ PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
ns->illegal_bytes);
- PMD_DRV_LOG(DEBUG, "error_bytes: %lu", ns->error_bytes);
- PMD_DRV_LOG(DEBUG, "mac_local_faults: %lu",
+ PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
+ PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
ns->mac_local_faults);
- PMD_DRV_LOG(DEBUG, "mac_remote_faults: %lu",
+ PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
ns->mac_remote_faults);
- PMD_DRV_LOG(DEBUG, "rx_length_errors: %lu",
+ PMD_DRV_LOG(DEBUG, "rx_length_errors: %"PRIu64"",
ns->rx_length_errors);
- PMD_DRV_LOG(DEBUG, "link_xon_rx: %lu", ns->link_xon_rx);
- PMD_DRV_LOG(DEBUG, "link_xoff_rx: %lu", ns->link_xoff_rx);
+ PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
+ PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
for (i = 0; i < 8; i++) {
- PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %lu",
+ PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %"PRIu64"",
i, ns->priority_xon_rx[i]);
- PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %lu",
+ PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %"PRIu64"",
i, ns->priority_xoff_rx[i]);
}
- PMD_DRV_LOG(DEBUG, "link_xon_tx: %lu", ns->link_xon_tx);
- PMD_DRV_LOG(DEBUG, "link_xoff_tx: %lu", ns->link_xoff_tx);
+ PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
+ PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
for (i = 0; i < 8; i++) {
- PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %lu",
+ PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %"PRIu64"",
i, ns->priority_xon_tx[i]);
- PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %lu",
+ PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %"PRIu64"",
i, ns->priority_xoff_tx[i]);
- PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %lu",
+ PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %"PRIu64"",
i, ns->priority_xon_2_xoff[i]);
}
- PMD_DRV_LOG(DEBUG, "rx_size_64: %lu", ns->rx_size_64);
- PMD_DRV_LOG(DEBUG, "rx_size_127: %lu", ns->rx_size_127);
- PMD_DRV_LOG(DEBUG, "rx_size_255: %lu", ns->rx_size_255);
- PMD_DRV_LOG(DEBUG, "rx_size_511: %lu", ns->rx_size_511);
- PMD_DRV_LOG(DEBUG, "rx_size_1023: %lu", ns->rx_size_1023);
- PMD_DRV_LOG(DEBUG, "rx_size_1522: %lu", ns->rx_size_1522);
- PMD_DRV_LOG(DEBUG, "rx_size_big: %lu", ns->rx_size_big);
- PMD_DRV_LOG(DEBUG, "rx_undersize: %lu", ns->rx_undersize);
- PMD_DRV_LOG(DEBUG, "rx_fragments: %lu", ns->rx_fragments);
- PMD_DRV_LOG(DEBUG, "rx_oversize: %lu", ns->rx_oversize);
- PMD_DRV_LOG(DEBUG, "rx_jabber: %lu", ns->rx_jabber);
- PMD_DRV_LOG(DEBUG, "tx_size_64: %lu", ns->tx_size_64);
- PMD_DRV_LOG(DEBUG, "tx_size_127: %lu", ns->tx_size_127);
- PMD_DRV_LOG(DEBUG, "tx_size_255: %lu", ns->tx_size_255);
- PMD_DRV_LOG(DEBUG, "tx_size_511: %lu", ns->tx_size_511);
- PMD_DRV_LOG(DEBUG, "tx_size_1023: %lu", ns->tx_size_1023);
- PMD_DRV_LOG(DEBUG, "tx_size_1522: %lu", ns->tx_size_1522);
- PMD_DRV_LOG(DEBUG, "tx_size_big: %lu", ns->tx_size_big);
- PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %lu",
+ PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
+ PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
+ PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
+ PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
+ PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
+ PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
+ PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
+ PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
+ PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
+ PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
+ PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
+ PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
+ PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
+ PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
+ PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
+ PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
+ PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
+ PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
+ PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
ns->mac_short_packet_dropped);
- PMD_DRV_LOG(DEBUG, "checksum_error: %lu",
+ PMD_DRV_LOG(DEBUG, "checksum_error: %"PRIu64"",
ns->checksum_error);
- PMD_DRV_LOG(DEBUG, "fdir_match: %lu", ns->fd_sb_match);
+ PMD_DRV_LOG(DEBUG, "fdir_match: %"PRIu64"", ns->fd_sb_match);
PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
}
@@ -1565,9 +2130,92 @@ static void
i40e_dev_stats_reset(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- /* It results in reloading the start point of each counter */
+ /* Mark PF and VSI stats to update the offset, aka "reset" */
pf->offset_loaded = false;
+ if (pf->main_vsi)
+ pf->main_vsi->offset_loaded = false;
+
+ /* read the stats; the current register values become the new offsets */
+ i40e_read_stats_registers(pf, hw);
+}
+
+static uint32_t
+i40e_xstats_calc_num(void)
+{
+ return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
+ (I40E_NB_RXQ_PRIO_XSTATS * 8) +
+ (I40E_NB_TXQ_PRIO_XSTATS * 8);
+}
+
+static int
+i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ unsigned n)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ unsigned i, count, prio;
+ struct i40e_hw_port_stats *hw_stats = &pf->stats;
+
+ count = i40e_xstats_calc_num();
+ if (n < count)
+ return count;
+
+ i40e_read_stats_registers(pf, hw);
+
+ if (xstats == NULL)
+ return 0;
+
+ count = 0;
+
+ /* Get stats from i40e_eth_stats struct */
+ for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "%s", rte_i40e_stats_strings[i].name);
+ xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
+ rte_i40e_stats_strings[i].offset);
+ count++;
+ }
+
+ /* Get individual stats from the i40e_hw_port struct */
+ for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "%s", rte_i40e_hw_port_strings[i].name);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_i40e_hw_port_strings[i].offset);
+ count++;
+ }
+
+ for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
+ for (prio = 0; prio < 8; prio++) {
+ snprintf(xstats[count].name,
+ sizeof(xstats[count].name),
+ "rx_priority%u_%s", prio,
+ rte_i40e_rxq_prio_strings[i].name);
+ xstats[count].value =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_i40e_rxq_prio_strings[i].offset +
+ (sizeof(uint64_t) * prio));
+ count++;
+ }
+ }
+
+ for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
+ for (prio = 0; prio < 8; prio++) {
+ snprintf(xstats[count].name,
+ sizeof(xstats[count].name),
+ "tx_priority%u_%s", prio,
+ rte_i40e_txq_prio_strings[i].name);
+ xstats[count].value =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_i40e_txq_prio_strings[i].offset +
+ (sizeof(uint64_t) * prio));
+ count++;
+ }
+ }
+
+ return count;
}
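i40e_dev_xstats_get() above follows the usual ethdev xstats contract: when the supplied array is smaller than i40e_xstats_calc_num() it returns the required count without filling anything, so callers size the array in two passes. A minimal sketch (hypothetical port id, error handling elided; needs <rte_ethdev.h>, <inttypes.h>, <stdio.h>, <stdlib.h>):

static void dump_xstats(uint8_t port_id)
{
	int i, n = rte_eth_xstats_get(port_id, NULL, 0);  /* query count */
	struct rte_eth_xstats *xs = calloc(n, sizeof(*xs));

	n = rte_eth_xstats_get(port_id, xs, n);           /* fill names/values */
	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n", xs[i].name, xs[i].value);
	free(xs);
}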
static int
@@ -1595,16 +2243,23 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_vfs = dev->pci_dev->max_vfs;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM;
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
dev_info->reta_size = pf->hash_lut_size;
+ dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -1628,7 +2283,19 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
ETH_TXQ_FLAGS_NOOFFLOADS,
};
- if (pf->flags | I40E_FLAG_VMDQ) {
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ if (pf->flags & I40E_FLAG_VMDQ) {
dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
dev_info->vmdq_queue_base = dev_info->max_rx_queues;
dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
@@ -1736,12 +2403,148 @@ i40e_dev_led_off(struct rte_eth_dev *dev)
}
static int
-i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
- __rte_unused struct rte_eth_fc_conf *fc_conf)
+i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ fc_conf->pause_time = pf->fc_conf.pause_time;
+ fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
+ fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
+
+ /* Return current mode according to actual setting */
+ switch (hw->fc.current_mode) {
+ case I40E_FC_FULL:
+ fc_conf->mode = RTE_FC_FULL;
+ break;
+ case I40E_FC_TX_PAUSE:
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ break;
+ case I40E_FC_RX_PAUSE:
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ break;
+ case I40E_FC_NONE:
+ default:
+ fc_conf->mode = RTE_FC_NONE;
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ uint32_t mflcn_reg, fctrl_reg, reg;
+ uint32_t max_high_water;
+ uint8_t i, aq_failure;
+ int err;
+ struct i40e_hw *hw;
+ struct i40e_pf *pf;
+ enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
+ [RTE_FC_NONE] = I40E_FC_NONE,
+ [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
+ [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
+ [RTE_FC_FULL] = I40E_FC_FULL
+ };
+
+ /* The high_water field in rte_eth_fc_conf uses the kilobyte unit */
+
+ max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
+ if ((fc_conf->high_water > max_high_water) ||
+ (fc_conf->high_water < fc_conf->low_water)) {
+ PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB, "
+ "High_water must <= %d.", max_high_water);
+ return -EINVAL;
+ }
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
+
+ pf->fc_conf.pause_time = fc_conf->pause_time;
+ pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
+ pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
+
PMD_INIT_FUNC_TRACE();
- return -ENOSYS;
+ /* All link flow control related enable/disable register
+ * configuration is handled by the firmware
+ */
+ err = i40e_set_fc(hw, &aq_failure, true);
+ if (err < 0)
+ return -ENOSYS;
+
+ if (i40e_is_40G_device(hw->device_id)) {
+ /* Configure flow control refresh threshold,
+ * the value for stat_tx_pause_refresh_timer[8]
+ * is used for global pause operation.
+ */
+
+ I40E_WRITE_REG(hw,
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
+ pf->fc_conf.pause_time);
+
+ /* Configure the timer value included in transmitted pause
+ * frames; the value of stat_tx_pause_quanta[8] is used for
+ * global pause operation.
+ */
+ I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
+ pf->fc_conf.pause_time);
+
+ fctrl_reg = I40E_READ_REG(hw,
+ I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
+
+ if (fc_conf->mac_ctrl_frame_fwd != 0)
+ fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
+ else
+ fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
+
+ I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
+ fctrl_reg);
+ } else {
+ /* Configure pause time (2 TCs per register) */
+ reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
+ I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
+ pf->fc_conf.pause_time / 2);
+
+ mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
+
+ /* set or clear MFLCN.PMCF & MFLCN.DPF bits
+ * depending on configuration
+ */
+ if (fc_conf->mac_ctrl_frame_fwd != 0) {
+ mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
+ mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
+ } else {
+ mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
+ mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
+ }
+
+ I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
+ }
+
+ /* Configure the watermarks, based on both packets and bytes */
+ I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
+ (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+ I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
+ (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+ I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
+ pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT);
+ I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
+ pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT);
+
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
}
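The watermark writes at the end of i40e_flow_ctrl_set() convert the kilobyte values carried in rte_eth_fc_conf into hardware units: high_water in KB is shifted left by I40E_KILOSHIFT for the byte-based global registers, and the same byte count is divided by I40E_PACKET_AVERAGE_SIZE for the packet-based ones. A caller sketch with hypothetical values (port_id is hypothetical; needs <rte_ethdev.h>):

	struct rte_eth_fc_conf fc = {
		.mode = RTE_FC_FULL,
		.pause_time = 0xffff, /* pause quanta, hypothetical */
		.high_water = 8,      /* KB */
		.low_water = 4,       /* KB */
	};

	rte_eth_dev_flow_ctrl_set(port_id, &fc);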
static int
@@ -1948,16 +2751,72 @@ i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
}
static int
+i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret;
+
+ if (!lut)
+ return -EINVAL;
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
+ lut, lut_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
+ return ret;
+ }
+ } else {
+ uint32_t *lut_dw = (uint32_t *)lut;
+ uint16_t i, lut_size_dw = lut_size / 4;
+
+ for (i = 0; i < lut_size_dw; i++)
+ lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
+ }
+
+ return 0;
+}
+
+static int
+i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret;
+
+ if (!vsi || !lut)
+ return -EINVAL;
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
+ lut, lut_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
+ return ret;
+ }
+ } else {
+ uint32_t *lut_dw = (uint32_t *)lut;
+ uint16_t i, lut_size_dw = lut_size / 4;
+
+ for (i = 0; i < lut_size_dw; i++)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+ I40E_WRITE_FLUSH(hw);
+ }
+
+ return 0;
+}
+
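On parts without the RSS AQ capability both helpers fall back to the I40E_PFQF_HLUT register file, where each 32-bit register packs four one-byte queue indices; that is why the loops above step through lut_size / 4 double words. A quick layout check, assuming a little-endian host as the casts above do (needs <assert.h> and <stdint.h>):

	uint8_t lut[512] = {0};  /* 512 entries == 128 HLUT registers */
	uint32_t *lut_dw = (uint32_t *)lut;

	lut[5] = 3;              /* bucket 5 -> queue 3 */
	/* entry i lives in double word i / 4, at byte offset i % 4 */
	assert(((lut_dw[1] >> 8) & 0xff) == 3);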
+static int
i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t lut, l;
- uint16_t i, j, lut_size = pf->hash_lut_size;
+ uint16_t i, lut_size = pf->hash_lut_size;
uint16_t idx, shift;
- uint8_t mask;
+ uint8_t *lut;
+ int ret;
if (reta_size != lut_size ||
reta_size > ETH_RSS_RETA_SIZE_512) {
@@ -1967,28 +2826,26 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
return -EINVAL;
}
- for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+ lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+ ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
+ if (ret)
+ goto out;
+ for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
- mask = (uint8_t)((reta_conf[idx].mask >> shift) &
- I40E_4_BIT_MASK);
- if (!mask)
- continue;
- if (mask == I40E_4_BIT_MASK)
- l = 0;
- else
- l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
- for (j = 0, lut = 0; j < I40E_4_BIT_WIDTH; j++) {
- if (mask & (0x1 << j))
- lut |= reta_conf[idx].reta[shift + j] <<
- (CHAR_BIT * j);
- else
- lut |= l & (I40E_8_BIT_MASK << (CHAR_BIT * j));
- }
- I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ if (reta_conf[idx].mask & (1ULL << shift))
+ lut[i] = reta_conf[idx].reta[shift];
}
+ ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
- return 0;
+out:
+ rte_free(lut);
+
+ return ret;
}
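The rewritten update path leaves masking to the caller's reta_conf bitmaps: entry i is written only when bit i % RTE_RETA_GROUP_SIZE is set in group i / RTE_RETA_GROUP_SIZE. A caller sketch redirecting bucket 0 to queue 2, assuming the PF's 512-entry table (hypothetical port id; needs <rte_ethdev.h> and <string.h>):

	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];

	memset(reta_conf, 0, sizeof(reta_conf));
	reta_conf[0].mask = 1ULL << 0;  /* touch only entry 0 */
	reta_conf[0].reta[0] = 2;       /* bucket 0 -> queue 2 */
	rte_eth_dev_rss_reta_update(port_id, reta_conf, 512);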
static int
@@ -1997,11 +2854,10 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
uint16_t reta_size)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t lut;
- uint16_t i, j, lut_size = pf->hash_lut_size;
+ uint16_t i, lut_size = pf->hash_lut_size;
uint16_t idx, shift;
- uint8_t mask;
+ uint8_t *lut;
+ int ret;
if (reta_size != lut_size ||
reta_size > ETH_RSS_RETA_SIZE_512) {
@@ -2011,23 +2867,26 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
return -EINVAL;
}
- for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+ lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+
+ ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
+ if (ret)
+ goto out;
+ for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
- mask = (uint8_t)((reta_conf[idx].mask >> shift) &
- I40E_4_BIT_MASK);
- if (!mask)
- continue;
-
- lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
- for (j = 0; j < I40E_4_BIT_WIDTH; j++) {
- if (mask & (0x1 << j))
- reta_conf[idx].reta[shift + j] = ((lut >>
- (CHAR_BIT * j)) & I40E_8_BIT_MASK);
- }
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = lut[i];
}
- return 0;
+out:
+ rte_free(lut);
+
+ return ret;
}
/**
@@ -2043,32 +2902,24 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
u64 size,
u32 alignment)
{
- static uint64_t id = 0;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
if (!mem)
return I40E_ERR_PARAM;
- id++;
- snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
-#ifdef RTE_LIBRTE_XEN_DOM0
- mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
- RTE_PGSIZE_2M);
-#else
- mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
-#endif
+ snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
+ mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
+ alignment, RTE_PGSIZE_2M);
if (!mz)
return I40E_ERR_NO_MEMORY;
- mem->id = id;
mem->size = size;
mem->va = mz->addr;
-#ifdef RTE_LIBRTE_XEN_DOM0
mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
-#else
- mem->pa = mz->phys_addr;
-#endif
+ mem->zone = (const void *)mz;
+ PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
+ "%"PRIu64, mz->name, mem->pa);
return I40E_SUCCESS;
}
@@ -2082,9 +2933,14 @@ enum i40e_status_code
i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
struct i40e_dma_mem *mem)
{
- if (!mem || !mem->va)
+ if (!mem)
return I40E_ERR_PARAM;
+ PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
+ "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
+ mem->pa);
+ rte_memzone_free((const struct rte_memzone *)mem->zone);
+ mem->zone = NULL;
mem->va = NULL;
mem->pa = (u64)0;
@@ -2193,113 +3049,116 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint16_t sum_queues = 0, sum_vsis, left_queues;
+ uint16_t qp_count = 0, vsi_count = 0;
- /* First check if FW support SRIOV */
if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
return -EINVAL;
}
+ /* Initialize the link flow control (LFC) parameters */
+ pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
+ pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
+ pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
- pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
- PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
- /* Allocate queues for pf */
- if (hw->func_caps.rss) {
- pf->flags |= I40E_FLAG_RSS;
- pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
- (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
- pf->lan_nb_qps = i40e_align_floor(pf->lan_nb_qps);
- } else
+ pf->max_num_vsi = hw->func_caps.num_vsis;
+ pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
+ pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+ pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+
+ /* FDir queue/VSI allocation */
+ pf->fdir_qp_offset = 0;
+ if (hw->func_caps.fd) {
+ pf->flags |= I40E_FLAG_FDIR;
+ pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
+ } else {
+ pf->fdir_nb_qps = 0;
+ }
+ qp_count += pf->fdir_nb_qps;
+ vsi_count += 1;
+
+ /* LAN queue/VSI allocation */
+ pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
+ if (!hw->func_caps.rss) {
pf->lan_nb_qps = 1;
- sum_queues = pf->lan_nb_qps;
- /* Default VSI is not counted in */
- sum_vsis = 0;
- PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);
+ } else {
+ pf->flags |= I40E_FLAG_RSS;
+ if (hw->mac.type == I40E_MAC_X722)
+ pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
+ pf->lan_nb_qps = pf->lan_nb_qp_max;
+ }
+ qp_count += pf->lan_nb_qps;
+ vsi_count += 1;
+ /* VF queue/VSI allocation */
+ pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
pf->flags |= I40E_FLAG_SRIOV;
pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
- if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
- PMD_INIT_LOG(ERR, "Config VF number %u, "
- "max supported %u.",
- dev->pci_dev->max_vfs,
- hw->func_caps.num_vfs);
- return -EINVAL;
- }
- if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
- PMD_INIT_LOG(ERR, "FVL VF queue %u, "
- "max support %u queues.",
- pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
- return -EINVAL;
- }
pf->vf_num = dev->pci_dev->max_vfs;
- sum_queues += pf->vf_nb_qps * pf->vf_num;
- sum_vsis += pf->vf_num;
- PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
- pf->vf_num, pf->vf_nb_qps);
- } else
- pf->vf_num = 0;
-
- if (hw->func_caps.vmdq) {
- pf->flags |= I40E_FLAG_VMDQ;
- pf->vmdq_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
- pf->max_nb_vmdq_vsi = 1;
- /*
- * If VMDQ available, assume a single VSI can be created. Will adjust
- * later.
- */
- sum_queues += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
- sum_vsis += pf->max_nb_vmdq_vsi;
+ PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
+ "in total %u queues", pf->vf_num, pf->vf_nb_qps,
+ pf->vf_nb_qps * pf->vf_num);
} else {
- pf->vmdq_nb_qps = 0;
- pf->max_nb_vmdq_vsi = 0;
+ pf->vf_nb_qps = 0;
+ pf->vf_num = 0;
}
- pf->nb_cfg_vmdq_vsi = 0;
+ qp_count += pf->vf_nb_qps * pf->vf_num;
+ vsi_count += pf->vf_num;
- if (hw->func_caps.fd) {
- pf->flags |= I40E_FLAG_FDIR;
- pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
- /**
- * Each flow director consumes one VSI and one queue,
- * but can't calculate out predictably here.
- */
- }
+ /* VMDq queue/VSI allocation */
+ pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
+ pf->vmdq_nb_qps = 0;
+ pf->max_nb_vmdq_vsi = 0;
+ if (hw->func_caps.vmdq) {
+ if (qp_count < hw->func_caps.num_tx_qp &&
+ vsi_count < hw->func_caps.num_vsis) {
+ pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
+ qp_count) / pf->vmdq_nb_qp_max;
- if (sum_vsis > pf->max_num_vsi ||
- sum_queues > hw->func_caps.num_rx_qp) {
- PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
- PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
- pf->max_num_vsi, sum_vsis);
- PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
- hw->func_caps.num_rx_qp, sum_queues);
- return -EINVAL;
+ /* Limit the maximum number of VMDq VSIs to the maximum
+ * that ethdev can support
+ */
+ pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
+ hw->func_caps.num_vsis - vsi_count);
+ pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
+ ETH_64_POOLS);
+ if (pf->max_nb_vmdq_vsi) {
+ pf->flags |= I40E_FLAG_VMDQ;
+ pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
+ PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues "
+ "per VMDQ VSI, in total %u queues",
+ pf->max_nb_vmdq_vsi,
+ pf->vmdq_nb_qps, pf->vmdq_nb_qps *
+ pf->max_nb_vmdq_vsi);
+ } else {
+ PMD_DRV_LOG(INFO, "No enough queues left for "
+ "VMDq");
+ }
+ } else {
+ PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
+ }
}
+ qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
+ vsi_count += pf->max_nb_vmdq_vsi;
- /* Adjust VMDQ setting to support as many VMs as possible */
- if (pf->flags & I40E_FLAG_VMDQ) {
- left_queues = hw->func_caps.num_rx_qp - sum_queues;
-
- pf->max_nb_vmdq_vsi += RTE_MIN(left_queues / pf->vmdq_nb_qps,
- pf->max_num_vsi - sum_vsis);
-
- /* Limit the max VMDQ number that rte_ether that can support */
- pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
- ETH_64_POOLS - 1);
+ if (hw->func_caps.dcb)
+ pf->flags |= I40E_FLAG_DCB;
- PMD_INIT_LOG(INFO, "Max VMDQ VSI num:%u",
- pf->max_nb_vmdq_vsi);
- PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
+ if (qp_count > hw->func_caps.num_tx_qp) {
+ PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds "
+ "the hardware maximum %u", qp_count,
+ hw->func_caps.num_tx_qp);
+ return -EINVAL;
}
-
- /* Each VSI occupy 1 MSIX interrupt at least, plus IRQ0 for misc intr
- * cause */
- if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
- PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
- sum_vsis, hw->func_caps.num_msix_vectors);
+ if (vsi_count > hw->func_caps.num_vsis) {
+ PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds "
+ "the hardware maximum %u", vsi_count,
+ hw->func_caps.num_vsis);
return -EINVAL;
}
- return I40E_SUCCESS;
+
+ return 0;
}
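The rewritten init lays queue pairs out contiguously: FDIR first, then LAN, then the per-VF blocks, then VMDq pools carved from whatever remains. With hypothetical capabilities the offset arithmetic above works out like this:

	/* Hypothetical example of the layout computed above:
	 *   fdir_qp_offset = 0            (1 FDIR queue)
	 *   lan_qp_offset  = 0 + 1  = 1   (64 LAN queues)
	 *   vf_qp_offset   = 1 + 64 = 65  (2 VFs * 4 queues = 8)
	 *   vmdq_qp_offset = 65 + 8 = 73  (VMDq pools take the rest)
	 * qp_count and vsi_count must stay within hw->func_caps.num_tx_qp and
	 * hw->func_caps.num_vsis, or the function returns -EINVAL.
	 */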
static int
@@ -2566,7 +3425,7 @@ bitmap_is_subset(uint8_t src1, uint8_t src2)
return !((src1 ^ src2) & src2);
}
-static int
+static enum i40e_status_code
validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
{
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
@@ -2574,14 +3433,14 @@ validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
/* If DCB is not supported, only default TC is supported */
if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
- return -EINVAL;
+ return I40E_NOT_SUPPORTED;
}
if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
"HW support 0x%x", hw->func_caps.enabled_tcmap,
enabled_tcmap);
- return -EINVAL;
+ return I40E_NOT_SUPPORTED;
}
return I40E_SUCCESS;
}
@@ -2666,12 +3525,13 @@ i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
return I40E_SUCCESS;
}
-static int
+static enum i40e_status_code
i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
struct i40e_aqc_vsi_properties_data *info,
uint8_t enabled_tcmap)
{
- int ret, total_tc = 0, i;
+ enum i40e_status_code ret;
+ int i, total_tc = 0;
uint16_t qpnum_per_tc, bsf, qp_idx;
ret = validate_tcmap_parameter(vsi, enabled_tcmap);
@@ -2689,7 +3549,8 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
bsf = rte_bsf32(qpnum_per_tc);
/* Adjust the queue number to actual queues that can be applied */
- vsi->nb_qps = qpnum_per_tc * total_tc;
+ if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
+ vsi->nb_qps = qpnum_per_tc * total_tc;
/**
* Configure TC and queue mapping parameters, for enabled TC,
@@ -2719,7 +3580,7 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
}
- info->valid_sections =
+ info->valid_sections |=
rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
return I40E_SUCCESS;
@@ -2899,14 +3760,22 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
return i40e_vsi_add_mac(vsi, &filter);
}
-static int
-i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
+#define I40E_3_BIT_MASK 0x7
+/*
+ * i40e_vsi_get_bw_config - Query VSI BW Information
+ * @vsi: the VSI to be queried
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static enum i40e_status_code
+i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
{
struct i40e_aqc_query_vsi_bw_config_resp bw_config;
struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
struct i40e_hw *hw = &vsi->adapter->hw;
i40e_status ret;
int i;
+ uint32_t bw_max;
memset(&bw_config, 0, sizeof(bw_config));
ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
@@ -2925,20 +3794,32 @@ i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
return ret;
}
- /* Not store the info yet, just print out */
- PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
- PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
+ /* store and print out BW info */
+ vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
+ vsi->bw_info.bw_max = bw_config.max_bw;
+ PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
+ PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
+ bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
+ (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
+ I40E_16_BIT_WIDTH);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
- ets_sla_config.share_credits[i]);
- PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
- rte_le_to_cpu_16(ets_sla_config.credits[i]));
- PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
- rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
- (i * 4));
+ vsi->bw_info.bw_ets_share_credits[i] =
+ ets_sla_config.share_credits[i];
+ vsi->bw_info.bw_ets_credits[i] =
+ rte_le_to_cpu_16(ets_sla_config.credits[i]);
+ /* 4 bits per TC, 4th bit is reserved */
+ vsi->bw_info.bw_ets_max[i] =
+ (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
+ I40E_3_BIT_MASK);
+ PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
+ vsi->bw_info.bw_ets_share_credits[i]);
+ PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
+ vsi->bw_info.bw_ets_credits[i]);
+ PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
+ vsi->bw_info.bw_ets_max[i]);
}
- return 0;
+ return I40E_SUCCESS;
}
/* Setup a VSI */
@@ -3026,15 +3907,30 @@ i40e_vsi_setup(struct i40e_pf *pf,
vsi->base_queue = I40E_FDIR_QUEUE_ID;
/* VF has MSIX interrupt in VF range, don't allocate here */
- if (type != I40E_VSI_SRIOV) {
+ if (type == I40E_VSI_MAIN) {
+ ret = i40e_res_pool_alloc(&pf->msix_pool,
+ RTE_MIN(vsi->nb_qps,
+ RTE_MAX_RXTX_INTR_VEC_ID));
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
+ vsi->seid, ret);
+ goto fail_queue_alloc;
+ }
+ vsi->msix_intr = ret;
+ vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
+ } else if (type != I40E_VSI_SRIOV) {
ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
if (ret < 0) {
PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
goto fail_queue_alloc;
}
vsi->msix_intr = ret;
- } else
+ vsi->nb_msix = 1;
+ } else {
vsi->msix_intr = 0;
+ vsi->nb_msix = 0;
+ }
+
/* Add VSI */
if (type == I40E_VSI_MAIN) {
/* For main VSI, no need to add since it's default one */
@@ -3119,6 +4015,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
* macvlan filter which is expected and cannot be removed.
*/
i40e_update_default_filter_setting(vsi);
+ i40e_config_qinq(hw, vsi);
} else if (type == I40E_VSI_SRIOV) {
memset(&ctxt, 0, sizeof(ctxt));
/**
@@ -3132,11 +4029,15 @@ i40e_vsi_setup(struct i40e_pf *pf,
ctxt.connection_type = 0x1;
ctxt.flags = I40E_AQ_VSI_TYPE_VF;
- /* Configure switch ID */
- ctxt.info.valid_sections |=
- rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
- ctxt.info.switch_id =
- rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ /**
+ * Do not configure the switch ID to enable the VEB switch via
+ * I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB, because on Fortville, if the
+ * source MAC address of a packet sent from a VF is not listed in
+ * the VEB's MAC table, the VEB switches the packet back to the VF.
+ * Enable it once the HW issue is fixed.
+ */
+
/* Configure port/vlan */
ctxt.info.valid_sections |=
rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
@@ -3155,6 +4056,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
* Since VSI is not created yet, only configure parameter,
* will add vsi below.
*/
+
+ i40e_config_qinq(hw, vsi);
} else if (type == I40E_VSI_VMDQ2) {
memset(&ctxt, 0, sizeof(ctxt));
/*
@@ -3193,7 +4096,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
ctxt.info.valid_sections |=
rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
} else if (type == I40E_VSI_FDIR) {
- memset(&ctxt, 0, sizeof(ctxt));
+ memset(&ctxt, 0, sizeof(ctxt));
vsi->uplink_seid = uplink_vsi->uplink_seid;
ctxt.pf_num = hw->pf_id;
ctxt.vf_num = 0;
@@ -3218,7 +4121,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
if (vsi->type != I40E_VSI_MAIN) {
ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
+ PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
hw->aq.asq_last_status);
goto fail_msix_alloc;
}
@@ -3242,7 +4145,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
}
/* Get VSI BW information */
- i40e_vsi_dump_bw_config(vsi);
+ i40e_vsi_get_bw_config(vsi);
return vsi;
fail_msix_alloc:
i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
@@ -3653,6 +4556,9 @@ i40e_dev_tx_init(struct i40e_pf *pf)
if (ret != I40E_SUCCESS)
break;
}
+ if (ret == I40E_SUCCESS)
+ i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
+ ->eth_dev);
return ret;
}
@@ -3679,6 +4585,9 @@ i40e_dev_rx_init(struct i40e_pf *pf)
break;
}
}
+ if (ret == I40E_SUCCESS)
+ i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
+ ->eth_dev);
return ret;
}
@@ -3865,7 +4774,7 @@ i40e_pf_enable_irq0(struct i40e_hw *hw)
}
static void
-i40e_pf_config_irq0(struct i40e_hw *hw)
+i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
{
/* read pending request and disable first */
i40e_pf_disable_irq0(hw);
@@ -3873,9 +4782,10 @@ i40e_pf_config_irq0(struct i40e_hw *hw)
I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
- /* Link no queues with irq0 */
- I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
- I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
+ if (no_queue)
+ /* Link no queues with irq0 */
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+ I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
}
static void
@@ -4662,26 +5572,26 @@ i40e_config_hena(uint64_t flags)
if (!flags)
return hena;
- if (flags & ETH_RSS_NONF_IPV4_UDP)
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
- if (flags & ETH_RSS_NONF_IPV4_TCP)
+ if (flags & ETH_RSS_FRAG_IPV4)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
+ if (flags & ETH_RSS_NONFRAG_IPV4_TCP)
hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
- if (flags & ETH_RSS_NONF_IPV4_SCTP)
+ if (flags & ETH_RSS_NONFRAG_IPV4_UDP)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
- if (flags & ETH_RSS_NONF_IPV4_OTHER)
+ if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
- if (flags & ETH_RSS_FRAG_IPV4)
- hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
- if (flags & ETH_RSS_NONF_IPV6_UDP)
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
- if (flags & ETH_RSS_NONF_IPV6_TCP)
+ if (flags & ETH_RSS_FRAG_IPV6)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
+ if (flags & ETH_RSS_NONFRAG_IPV6_TCP)
hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
- if (flags & ETH_RSS_NONF_IPV6_SCTP)
+ if (flags & ETH_RSS_NONFRAG_IPV6_UDP)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
- if (flags & ETH_RSS_NONF_IPV6_OTHER)
+ if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
- if (flags & ETH_RSS_FRAG_IPV6)
- hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
if (flags & ETH_RSS_L2_PAYLOAD)
hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
@@ -4696,27 +5606,26 @@ i40e_parse_hena(uint64_t flags)
if (!flags)
return rss_hf;
-
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
- rss_hf |= ETH_RSS_NONF_IPV4_UDP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
+ rss_hf |= ETH_RSS_FRAG_IPV4;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
- rss_hf |= ETH_RSS_NONF_IPV4_TCP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
- rss_hf |= ETH_RSS_NONF_IPV4_SCTP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
- rss_hf |= ETH_RSS_NONF_IPV4_OTHER;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
- rss_hf |= ETH_RSS_FRAG_IPV4;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
- rss_hf |= ETH_RSS_NONF_IPV6_UDP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
+ rss_hf |= ETH_RSS_FRAG_IPV6;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
- rss_hf |= ETH_RSS_NONF_IPV6_TCP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
- rss_hf |= ETH_RSS_NONF_IPV6_SCTP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
- rss_hf |= ETH_RSS_NONF_IPV6_OTHER;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
- rss_hf |= ETH_RSS_FRAG_IPV6;
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
rss_hf |= ETH_RSS_L2_PAYLOAD;
@@ -4739,23 +5648,83 @@ i40e_pf_disable_rss(struct i40e_pf *pf)
}
static int
-i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
+i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
{
- uint32_t *hash_key;
- uint8_t hash_key_len;
- uint64_t rss_hf;
- uint16_t i;
- uint64_t hena;
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret = 0;
+
+ if (!key || key_len == 0) {
+ PMD_DRV_LOG(DEBUG, "No key to be configured");
+ return 0;
+ } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
+ return -EINVAL;
+ }
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ struct i40e_aqc_get_set_rss_key_data *key_dw =
+ (struct i40e_aqc_get_set_rss_key_data *)key;
+
+ ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to configure RSS key "
+ "via AQ");
+ } else {
+ uint32_t *hash_key = (uint32_t *)key;
+ uint16_t i;
- hash_key = (uint32_t *)(rss_conf->rss_key);
- hash_key_len = rss_conf->rss_key_len;
- if (hash_key != NULL && hash_key_len >=
- (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
- /* Fill in RSS hash key */
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
+ I40E_WRITE_FLUSH(hw);
}
+ return ret;
+}
+
+static int
+i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
+{
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret;
+
+ if (!key || !key_len)
+ return -EINVAL;
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
+ (struct i40e_aqc_get_set_rss_key_data *)key);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
+ return ret;
+ }
+ } else {
+ uint32_t *key_dw = (uint32_t *)key;
+ uint16_t i;
+
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ key_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
+ }
+ *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
+
+ return 0;
+}
+
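Both key helpers insist on the exact size of the HKEY register file, (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t) bytes, one 32-bit word per register. A caller sketch through the standard RSS hash API (hypothetical port id and hash types; needs <rte_ethdev.h>):

	uint8_t key[(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)];
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = key,
		.rss_key_len = sizeof(key),
		.rss_hf = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP,
	};

	/* ... fill key[] with the desired hash key, then: */
	rte_eth_dev_rss_hash_update(port_id, &rss_conf);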
+static int
+i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint64_t rss_hf;
+ uint64_t hena;
+ int ret;
+
+ ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+ if (ret)
+ return ret;
+
rss_hf = rss_conf->rss_hf;
hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
@@ -4772,6 +5741,7 @@ static int
i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
uint64_t hena;
@@ -4787,23 +5757,20 @@ i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
if (rss_hf == 0) /* Disable RSS */
return -EINVAL;
- return i40e_hw_rss_hash_set(hw, rss_conf);
+ return i40e_hw_rss_hash_set(pf, rss_conf);
}
static int
i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
uint64_t hena;
- uint16_t i;
- if (hash_key != NULL) {
- for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
- rss_conf->rss_key_len = i * sizeof(uint32_t);
- }
+ i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
+ &rss_conf->rss_key_len);
+
hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
rss_conf->rss_hf = i40e_parse_hena(hena);
@@ -4884,6 +5851,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
case RTE_TUNNEL_TYPE_VXLAN:
tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
break;
+ case RTE_TUNNEL_TYPE_NVGRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
+ break;
default:
/* Other tunnel types is not supported. */
PMD_DRV_LOG(ERR, "tunnel type is not supported.");
@@ -5097,12 +6067,12 @@ i40e_pf_config_rss(struct i40e_pf *pf)
* If both VMDQ and RSS are enabled, not all PF queues are configured.
* It's necessary to calculate the actual number of PF queues that are
* configured.
*/
- if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
num = i40e_pf_calc_configured_queues_num(pf);
- num = i40e_align_floor(num);
- } else
- num = i40e_align_floor(pf->dev_data->nb_rx_queues);
+ else
+ num = pf->dev_data->nb_rx_queues;
+ num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
num);
@@ -5127,20 +6097,23 @@ i40e_pf_config_rss(struct i40e_pf *pf)
}
if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
- /* Calculate the default hash key */
- for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- rss_key_default[i] = (uint32_t)rte_rand();
+ /* Random default keys */
+ static uint32_t rss_key_default[] = {0x6b793944,
+ 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+ 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+ 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
rss_conf.rss_key = (uint8_t *)rss_key_default;
rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
}
- return i40e_hw_rss_hash_set(hw, &rss_conf);
+ return i40e_hw_rss_hash_set(pf, &rss_conf);
}
static int
i40e_tunnel_filter_param_check(struct i40e_pf *pf,
- struct rte_eth_tunnel_filter_conf *filter)
+ struct rte_eth_tunnel_filter_conf *filter)
{
if (pf == NULL || filter == NULL) {
PMD_DRV_LOG(ERR, "Invalid parameter");
@@ -5172,9 +6145,85 @@ i40e_tunnel_filter_param_check(struct i40e_pf *pf,
return 0;
}
+#define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
+#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4))
+static int
+i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
+{
+ uint32_t val, reg;
+ int ret = -EINVAL;
+
+ val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
+ PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val);
+
+ if (len == 3) {
+ reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
+ } else if (len == 4) {
+ reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
+ } else {
+ PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
+ return ret;
+ }
+
+ if (reg != val) {
+ ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
+ reg, NULL);
+ if (ret != 0)
+ return ret;
+ } else {
+ ret = 0;
+ }
+ PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x\n",
+ I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
+
+ return ret;
+}
+
+static int
+i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
+{
+ int ret = -EINVAL;
+
+ if (!hw || !cfg)
+ return -EINVAL;
+
+ switch (cfg->cfg_type) {
+ case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
+ ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret = I40E_ERR_PARAM;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_SET:
+ ret = i40e_dev_global_config_set(hw,
+ (struct rte_eth_global_cfg *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
+ break;
+ }
+
+ return ret;
+}
+
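i40e_filter_ctrl_global_config() is the entry point for NIC-global knobs such as the GRE key length above. A hedged caller sketch; it assumes the driver's filter_ctrl dispatch routes RTE_ETH_FILTER_NONE here, which this hunk does not show (needs <rte_ethdev.h>):

	struct rte_eth_global_cfg cfg = {
		.cfg_type = RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN,
		.cfg.gre_key_len = 3,  /* match 3-byte GRE keys */
	};

	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NONE,
				RTE_ETH_FILTER_SET, &cfg);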
static int
-i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
- void *arg)
+i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
{
struct rte_eth_tunnel_filter_conf *filter;
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -5189,6 +6238,7 @@ i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
case RTE_ETH_FILTER_NOP:
if (!(pf->flags & I40E_FLAG_VXLAN))
ret = I40E_NOT_SUPPORTED;
+ break;
case RTE_ETH_FILTER_ADD:
ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
break;
@@ -5210,11 +6260,6 @@ i40e_pf_config_mq_rx(struct i40e_pf *pf)
int ret = 0;
enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
- if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
- PMD_INIT_LOG(ERR, "i40e doesn't support DCB yet");
- return -ENOTSUP;
- }
-
/* RSS setup */
if (mq_mode & ETH_MQ_RX_RSS_FLAG)
ret = i40e_pf_config_rss(pf);
@@ -5224,6 +6269,867 @@ i40e_pf_config_mq_rx(struct i40e_pf *pf)
return ret;
}
+/* Get the symmetric hash enable configurations per port */
+static void
+i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
+{
+ uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
+
+ *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
+}
+
+/* Set the symmetric hash enable configurations per port */
+static void
+i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
+{
+ uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
+
+ if (enable > 0) {
+ if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
+ PMD_DRV_LOG(INFO, "Symmetric hash has already "
+ "been enabled");
+ return;
+ }
+ reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
+ } else {
+ if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
+ PMD_DRV_LOG(INFO, "Symmetric hash has already "
+ "been disabled");
+ return;
+ }
+ reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
+ }
+ I40E_WRITE_REG(hw, I40E_PRTQF_CTL_0, reg);
+ I40E_WRITE_FLUSH(hw);
+}
+
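The per-port pair above is reached through the generic filter API rather than a dedicated ethdev call. A sketch enabling symmetric hashing on the whole port (hypothetical port id; assumes RTE_ETH_FILTER_HASH is dispatched to these helpers elsewhere in the driver):

	struct rte_eth_hash_filter_info info = {
		.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT,
		.info.enable = 1,
	};

	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
				RTE_ETH_FILTER_SET, &info);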
+/*
+ * Get global configurations of hash function type and symmetric hash enable
+ * per flow type (pctype). Note that global configuration means it affects all
+ * the ports on the same NIC.
+ */
+static int
+i40e_get_hash_filter_global_config(struct i40e_hw *hw,
+ struct rte_eth_hash_global_conf *g_cfg)
+{
+ uint32_t reg, mask = I40E_FLOW_TYPES;
+ uint16_t i;
+ enum i40e_filter_pctype pctype;
+
+ memset(g_cfg, 0, sizeof(*g_cfg));
+ reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
+ if (reg & I40E_GLQF_CTL_HTOEP_MASK)
+ g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
+ else
+ g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+ PMD_DRV_LOG(DEBUG, "Hash function is %s",
+ (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
+
+ for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
+ if (!(mask & (1UL << i)))
+ continue;
+ mask &= ~(1UL << i);
+ /* Bit set indicates the corresponding flow type is supported */
+ g_cfg->valid_bit_mask[0] |= (1UL << i);
+ pctype = i40e_flowtype_to_pctype(i);
+ reg = I40E_READ_REG(hw, I40E_GLQF_HSYM(pctype));
+ if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
+ g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
+ }
+
+ return 0;
+}
+
+static int
+i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
+{
+ uint32_t i;
+ uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
+
+ if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
+ g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
+ g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
+ PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
+ g_cfg->hash_func);
+ return -EINVAL;
+ }
+
+ /*
+ * As i40e supports fewer than 32 flow types, only the first 32 bits need
+ * to be checked.
+ */
+ mask0 = g_cfg->valid_bit_mask[0];
+ for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
+ if (i == 0) {
+ /* Check if any unsupported flow type configured */
+ if ((mask0 | i40e_mask) ^ i40e_mask)
+ goto mask_err;
+ } else {
+ if (g_cfg->valid_bit_mask[i])
+ goto mask_err;
+ }
+ }
+
+ return 0;
+
+mask_err:
+ PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
+
+ return -EINVAL;
+}
+
+/*
+ * Set global configurations of hash function type and symmetric hash enable
+ * per flow type (pctype). Note that modifying this global configuration affects
+ * all the ports on the same NIC.
+ */
+static int
+i40e_set_hash_filter_global_config(struct i40e_hw *hw,
+ struct rte_eth_hash_global_conf *g_cfg)
+{
+ int ret;
+ uint16_t i;
+ uint32_t reg;
+ uint32_t mask0 = g_cfg->valid_bit_mask[0];
+ enum i40e_filter_pctype pctype;
+
+ /* Check the input parameters */
+ ret = i40e_hash_global_config_check(g_cfg);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; mask0 && i < UINT32_BIT; i++) {
+ if (!(mask0 & (1UL << i)))
+ continue;
+ mask0 &= ~(1UL << i);
+ pctype = i40e_flowtype_to_pctype(i);
+ reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
+ I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
+ I40E_WRITE_REG(hw, I40E_GLQF_HSYM(pctype), reg);
+ }
+
+ reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
+ if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
+ /* Toeplitz */
+ if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
+ PMD_DRV_LOG(DEBUG, "Hash function already set to "
+ "Toeplitz");
+ goto out;
+ }
+ reg |= I40E_GLQF_CTL_HTOEP_MASK;
+ } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
+ /* Simple XOR */
+ if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
+ PMD_DRV_LOG(DEBUG, "Hash function already set to "
+ "Simple XOR");
+ goto out;
+ }
+ reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
+ } else
+ /* Use the default, and keep it as it is */
+ goto out;
+
+ I40E_WRITE_REG(hw, I40E_GLQF_CTL, reg);
+
+out:
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+}
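+
+/*
+ * Illustrative sketch, not part of this patch: selecting Toeplitz and
+ * enabling symmetric hash for one flow type. As noted above, this is a
+ * NIC-wide setting shared by all ports. The helper name is hypothetical.
+ */
+#if 0
+static int
+example_sym_hash_global(uint8_t port_id)
+{
+ struct rte_eth_hash_filter_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
+ info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
+ info.info.global_conf.valid_bit_mask[0] =
+ 1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ info.info.global_conf.sym_hash_enable_mask[0] =
+ 1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_SET, &info);
+}
+#endif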
+
+/**
+ * Valid input sets for hash and flow director filters per PCTYPE
+ */
+static uint64_t
+i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
+ enum rte_filter_type filter)
+{
+ uint64_t valid;
+
+ static const uint64_t valid_hash_inset_table[] = {
+ [I40E_FILTER_PCTYPE_FRAG_IPV4] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
+ I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_FRAG_IPV6] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
+ I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
+ I40E_INSET_FLEX_PAYLOAD,
+ };
+
+ /**
+ * Flow director supports only fields defined in
+ * union rte_eth_fdir_flow.
+ */
+ static const uint64_t valid_fdir_inset_table[] = {
+ [I40E_FILTER_PCTYPE_FRAG_IPV4] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_FRAG_IPV6] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
+ I40E_INSET_LAST_ETHER_TYPE | I40E_INSET_FLEX_PAYLOAD,
+ };
+
+ if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
+ return 0;
+ if (filter == RTE_ETH_FILTER_HASH)
+ valid = valid_hash_inset_table[pctype];
+ else
+ valid = valid_fdir_inset_table[pctype];
+
+ return valid;
+}
+
+/**
+ * Validate if the input set is allowed for a specific PCTYPE
+ */
+static int
+i40e_validate_input_set(enum i40e_filter_pctype pctype,
+ enum rte_filter_type filter, uint64_t inset)
+{
+ uint64_t valid;
+
+ valid = i40e_get_valid_input_set(pctype, filter);
+ if (inset & (~valid))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* default input set fields combination per pctype */
+static uint64_t
+i40e_get_default_input_set(uint16_t pctype)
+{
+ static const uint64_t default_inset_table[] = {
+ [I40E_FILTER_PCTYPE_FRAG_IPV4] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_SCTP_VT,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
+ [I40E_FILTER_PCTYPE_FRAG_IPV6] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_SCTP_VT,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
+ [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
+ I40E_INSET_LAST_ETHER_TYPE,
+ };
+
+ if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
+ return 0;
+
+ return default_inset_table[pctype];
+}
+
+/**
+ * Parse the input set from index to logical bit masks
+ */
+static int
+i40e_parse_input_set(uint64_t *inset,
+ enum i40e_filter_pctype pctype,
+ enum rte_eth_input_set_field *field,
+ uint16_t size)
+{
+ uint16_t i, j;
+ int ret = -EINVAL;
+
+ static const struct {
+ enum rte_eth_input_set_field field;
+ uint64_t inset;
+ } inset_convert_table[] = {
+ {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
+ {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
+ {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
+ {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
+ {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
+ {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
+ {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
+ {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
+ {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
+ {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
+ {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
+ {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
+ {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
+ {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
+ I40E_INSET_IPV6_NEXT_HDR},
+ {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
+ {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
+ {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
+ {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
+ {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
+ {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
+ {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
+ I40E_INSET_SCTP_VT},
+ {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
+ I40E_INSET_TUNNEL_DMAC},
+ {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
+ I40E_INSET_VLAN_TUNNEL},
+ {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
+ I40E_INSET_TUNNEL_ID},
+ {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W1},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W2},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W3},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W4},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W5},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W6},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W7},
+ {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
+ I40E_INSET_FLEX_PAYLOAD_W8},
+ };
+
+ if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
+ return ret;
+
+ /* Only one item allowed for default or all */
+ if (size == 1) {
+ if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
+ *inset = i40e_get_default_input_set(pctype);
+ return 0;
+ } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
+ *inset = I40E_INSET_NONE;
+ return 0;
+ }
+ }
+
+ for (i = 0, *inset = 0; i < size; i++) {
+ for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
+ if (field[i] == inset_convert_table[j].field) {
+ *inset |= inset_convert_table[j].inset;
+ break;
+ }
+ }
+
+ /* It contains unsupported input set, return immediately */
+ if (j == RTE_DIM(inset_convert_table))
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * Translate the input set from bit masks to register aware bit masks
+ * and vice versa
+ */
+static uint64_t
+i40e_translate_input_set_reg(uint64_t input)
+{
+ uint64_t val = 0;
+ uint16_t i;
+
+ static const struct {
+ uint64_t inset;
+ uint64_t inset_reg;
+ } inset_map[] = {
+ {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
+ {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
+ {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
+ {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
+ {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
+ {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
+ {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
+ {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
+ {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
+ {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
+ {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
+ {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
+ {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
+ {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
+ {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
+ {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
+ {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
+ {I40E_INSET_TUNNEL_DMAC,
+ I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
+ {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
+ {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
+ {I40E_INSET_TUNNEL_SRC_PORT,
+ I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
+ {I40E_INSET_TUNNEL_DST_PORT,
+ I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
+ {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
+ {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
+ {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
+ {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
+ {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
+ {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
+ {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
+ {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
+ };
+
+ if (input == 0)
+ return val;
+
+ /* Translate input set to register aware inset */
+ for (i = 0; i < RTE_DIM(inset_map); i++) {
+ if (input & inset_map[i].inset)
+ val |= inset_map[i].inset_reg;
+ }
+
+ return val;
+}
+
+static uint8_t
+i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
+{
+ uint8_t i, idx = 0;
+
+ static const struct {
+ uint64_t inset;
+ uint32_t mask;
+ } inset_mask_map[] = {
+ {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
+ {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
+ {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
+ {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
+ };
+
+ if (!mask || !nb_elem)
+ return 0;
+
+ if (!inset && nb_elem >= I40E_INSET_MASK_NUM_REG) {
+ for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++)
+ mask[i] = 0;
+ return I40E_INSET_MASK_NUM_REG;
+ }
+
+ for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
+ if (idx >= nb_elem)
+ break;
+ if (inset & inset_mask_map[i].inset) {
+ mask[idx] = inset_mask_map[i].mask;
+ idx++;
+ }
+ }
+
+ return idx;
+}
+
+static uint64_t
+i40e_get_reg_inset(struct i40e_hw *hw, enum rte_filter_type filter,
+ enum i40e_filter_pctype pctype)
+{
+ uint64_t reg = 0;
+
+ if (filter == RTE_ETH_FILTER_HASH) {
+ reg = I40E_READ_REG(hw, I40E_GLQF_HASH_INSET(1, pctype));
+ reg <<= I40E_32_BIT_WIDTH;
+ reg |= I40E_READ_REG(hw, I40E_GLQF_HASH_INSET(0, pctype));
+ } else if (filter == RTE_ETH_FILTER_FDIR) {
+ reg = I40E_READ_REG(hw, I40E_PRTQF_FD_INSET(pctype, 1));
+ reg <<= I40E_32_BIT_WIDTH;
+ reg |= I40E_READ_REG(hw, I40E_PRTQF_FD_INSET(pctype, 0));
+ }
+
+ return reg;
+}
+
+static void
+i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
+{
+ uint32_t reg = I40E_READ_REG(hw, addr);
+
+ PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg);
+ if (reg != val)
+ I40E_WRITE_REG(hw, addr, val);
+ PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr,
+ (uint32_t)I40E_READ_REG(hw, addr));
+}
+
+static int
+i40e_set_hash_inset_mask(struct i40e_hw *hw,
+ enum i40e_filter_pctype pctype,
+ enum rte_filter_input_set_op op,
+ uint32_t *mask_reg,
+ uint8_t num)
+{
+ uint32_t reg;
+ uint8_t i;
+
+ if (!mask_reg || num > I40E_INSET_MASK_NUM_REG)
+ return -EINVAL;
+
+ if (op == RTE_ETH_INPUT_SET_SELECT) {
+ for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ 0);
+ if (i >= num)
+ continue;
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
+ }
+ } else if (op == RTE_ETH_INPUT_SET_ADD) {
+ uint8_t j, count = 0;
+
+ for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
+ reg = I40E_READ_REG(hw, I40E_GLQF_HASH_MSK(i, pctype));
+ if (reg & I40E_GLQF_HASH_MSK_FIELD)
+ count++;
+ }
+ if (count + num > I40E_INSET_MASK_NUM_REG)
+ return -EINVAL;
+
+ for (i = count, j = 0; i < I40E_INSET_MASK_NUM_REG; i++, j++)
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[j]);
+ }
+
+ return 0;
+}
+
+static int
+i40e_set_fd_inset_mask(struct i40e_hw *hw,
+ enum i40e_filter_pctype pctype,
+ enum rte_filter_input_set_op op,
+ uint32_t *mask_reg,
+ uint8_t num)
+{
+ uint32_t reg;
+ uint8_t i;
+
+ if (!mask_reg || num > I40E_INSET_MASK_NUM_REG)
+ return -EINVAL;
+
+ if (op == RTE_ETH_INPUT_SET_SELECT) {
+ for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
+ 0);
+ if (i >= num)
+ continue;
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+ }
+ } else if (op == RTE_ETH_INPUT_SET_ADD) {
+ uint8_t j, count = 0;
+
+ for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
+ reg = I40E_READ_REG(hw, I40E_GLQF_FD_MSK(i, pctype));
+ if (reg & I40E_GLQF_FD_MSK_FIELD)
+ count++;
+ }
+ if (count + num > I40E_INSET_MASK_NUM_REG)
+ return -EINVAL;
+
+ for (i = count, j = 0; i < I40E_INSET_MASK_NUM_REG; i++, j++)
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[j]);
+ }
+
+ return 0;
+}
+
+int
+i40e_filter_inset_select(struct i40e_hw *hw,
+ struct rte_eth_input_set_conf *conf,
+ enum rte_filter_type filter)
+{
+ enum i40e_filter_pctype pctype;
+ uint64_t inset_reg = 0, input_set;
+ uint32_t mask_reg[I40E_INSET_MASK_NUM_REG];
+ uint8_t num;
+ int ret;
+
+ if (!hw || !conf) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+
+ pctype = i40e_flowtype_to_pctype(conf->flow_type);
+ if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+ PMD_DRV_LOG(ERR, "Not supported flow type (%u)",
+ conf->flow_type);
+ return -EINVAL;
+ }
+ if (filter != RTE_ETH_FILTER_HASH && filter != RTE_ETH_FILTER_FDIR) {
+ PMD_DRV_LOG(ERR, "Not supported filter type (%u)", filter);
+ return -EINVAL;
+ }
+
+ ret = i40e_parse_input_set(&input_set, pctype, conf->field,
+ conf->inset_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to parse input set");
+ return -EINVAL;
+ }
+ if (i40e_validate_input_set(pctype, filter, input_set) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid input set");
+ return -EINVAL;
+ }
+
+ if (conf->op == RTE_ETH_INPUT_SET_ADD) {
+ inset_reg |= i40e_get_reg_inset(hw, filter, pctype);
+ } else if (conf->op != RTE_ETH_INPUT_SET_SELECT) {
+ PMD_DRV_LOG(ERR, "Unsupported input set operation");
+ return -EINVAL;
+ }
+ num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+ I40E_INSET_MASK_NUM_REG);
+ inset_reg |= i40e_translate_input_set_reg(input_set);
+
+ if (filter == RTE_ETH_FILTER_HASH) {
+ ret = i40e_set_hash_inset_mask(hw, pctype, conf->op, mask_reg,
+ num);
+ if (ret)
+ return -EINVAL;
+
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+ } else if (filter == RTE_ETH_FILTER_FDIR) {
+ ret = i40e_set_fd_inset_mask(hw, pctype, conf->op, mask_reg,
+ num);
+ if (ret)
+ return -EINVAL;
+
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+ } else {
+ PMD_DRV_LOG(ERR, "Not supported filter type (%u)", filter);
+ return -EINVAL;
+ }
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+}
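+
+/*
+ * Illustrative sketch, not part of this patch: restricting the RSS input
+ * set of non-fragmented IPv4/UDP to the IP addresses only, which exercises
+ * i40e_filter_inset_select() above. The helper name is hypothetical.
+ */
+#if 0
+static int
+example_hash_input_set(uint8_t port_id)
+{
+ struct rte_eth_hash_filter_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
+ info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ info.info.input_set_conf.inset_size = 2;
+ info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
+ info.info.input_set_conf.field[1] = RTE_ETH_INPUT_SET_L3_DST_IP4;
+ info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
+ return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_SET, &info);
+}
+#endif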
+
+static int
+i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
+{
+ int ret = 0;
+
+ if (!hw || !info) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+
+ switch (info->info_type) {
+ case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+ i40e_get_symmetric_hash_enable_per_port(hw,
+ &(info->info.enable));
+ break;
+ case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+ ret = i40e_get_hash_filter_global_config(hw,
+ &(info->info.global_conf));
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
+ info->info_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
+{
+ int ret = 0;
+
+ if (!hw || !info) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+
+ switch (info->info_type) {
+ case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+ i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
+ break;
+ case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+ ret = i40e_set_hash_filter_global_config(hw,
+ &(info->info.global_conf));
+ break;
+ case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
+ ret = i40e_filter_inset_select(hw,
+ &(info->info.input_set_conf),
+ RTE_ETH_FILTER_HASH);
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
+ info->info_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Operations for hash function */
+static int
+i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret = 0;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = i40e_hash_filter_get(hw,
+ (struct rte_eth_hash_filter_info *)arg);
+ break;
+ case RTE_ETH_FILTER_SET:
+ ret = i40e_hash_filter_set(hw,
+ (struct rte_eth_hash_filter_info *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
+ filter_op);
+ ret = -ENOTSUP;
+ break;
+ }
+
+ return ret;
+}
+
/*
* Configure ethertype filter, which can director packet by filtering
* with mac address and ether_type or only ether_type
@@ -5326,6 +7232,13 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
return -EINVAL;
switch (filter_type) {
+ case RTE_ETH_FILTER_NONE:
+ /* For global configuration */
+ ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
+ break;
case RTE_ETH_FILTER_MACVLAN:
ret = i40e_mac_filter_handle(dev, filter_op, arg);
break;
@@ -5348,106 +7261,129 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
return ret;
}
+/*
+ * As some registers are not reset unless a global hardware reset is performed,
+ * hardware initialization is needed to put those registers into an
+ * expected initial state.
+ */
+static void
+i40e_hw_init(struct i40e_hw *hw)
+{
+ /* clear the PF Queue Filter control register */
+ I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, 0);
+
+ /* Disable symmetric hash per port */
+ i40e_set_symmetric_hash_enable_per_port(hw, 0);
+}
+
enum i40e_filter_pctype
-i40e_flowtype_to_pctype(enum rte_eth_flow_type flow_type)
+i40e_flowtype_to_pctype(uint16_t flow_type)
{
static const enum i40e_filter_pctype pctype_table[] = {
- [RTE_ETH_FLOW_TYPE_UDPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
- [RTE_ETH_FLOW_TYPE_TCPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
- [RTE_ETH_FLOW_TYPE_SCTPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
- [RTE_ETH_FLOW_TYPE_IPV4_OTHER] =
- I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
- [RTE_ETH_FLOW_TYPE_FRAG_IPV4] =
- I40E_FILTER_PCTYPE_FRAG_IPV4,
- [RTE_ETH_FLOW_TYPE_UDPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
- [RTE_ETH_FLOW_TYPE_TCPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
- [RTE_ETH_FLOW_TYPE_SCTPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
- [RTE_ETH_FLOW_TYPE_IPV6_OTHER] =
- I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
- [RTE_ETH_FLOW_TYPE_FRAG_IPV6] =
- I40E_FILTER_PCTYPE_FRAG_IPV6,
+ [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+ [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
+ [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
};
return pctype_table[flow_type];
}
-enum rte_eth_flow_type
+uint16_t
i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
{
- static const enum rte_eth_flow_type flowtype_table[] = {
- [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = RTE_ETH_FLOW_TYPE_UDPV4,
- [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = RTE_ETH_FLOW_TYPE_TCPV4,
- [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = RTE_ETH_FLOW_TYPE_SCTPV4,
+ static const uint16_t flowtype_table[] = {
+ [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
+ RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+ RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
- RTE_ETH_FLOW_TYPE_IPV4_OTHER,
- [I40E_FILTER_PCTYPE_FRAG_IPV4] =
- RTE_ETH_FLOW_TYPE_FRAG_IPV4,
- [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = RTE_ETH_FLOW_TYPE_UDPV6,
- [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = RTE_ETH_FLOW_TYPE_TCPV6,
- [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = RTE_ETH_FLOW_TYPE_SCTPV6,
+ RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
+ [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
+ RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+ RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
- RTE_ETH_FLOW_TYPE_IPV6_OTHER,
- [I40E_FILTER_PCTYPE_FRAG_IPV6] =
- RTE_ETH_FLOW_TYPE_FRAG_IPV6,
+ RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
+ [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
};
return flowtype_table[pctype];
}
-static int
-i40e_debug_read_register(struct i40e_hw *hw, uint32_t addr, uint64_t *val)
-{
- struct i40e_aq_desc desc;
- enum i40e_status_code status;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
- desc.params.internal.param1 = rte_cpu_to_le_32(addr);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
- if (status < 0)
- return status;
-
- *val = ((uint64_t)(rte_le_to_cpu_32(desc.params.internal.param2)) <<
- (CHAR_BIT * sizeof(uint32_t))) +
- rte_le_to_cpu_32(desc.params.internal.param3);
-
- return status;
-}
-
/*
* On X710, performance number is far from the expectation on recent firmware
- * versions. The fix for this issue may not be integrated in the following
+ * versions; on XL710, performance number is also far from the expectation on
+ * recent firmware versions, if promiscuous mode is disabled, or promiscuous
+ * mode is enabled and port MAC address is equal to the packet destination MAC
+ * address. The fix for this issue may not be integrated in the following
* firmware version. So the workaround in software driver is needed. It needs
- * to modify the initial values of 3 internal only registers. Note that the
+ * to modify the initial values of 3 internal-only registers for both X710 and
+ * XL710. Note that the values for X710 or XL710 could be different, and the
* workaround can be removed when it is fixed in firmware in the future.
*/
-static void
-i40e_configure_registers(struct i40e_hw *hw)
-{
+
+/* For both X710 and XL710 */
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
+
+#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
#define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
+
+/* For X710 */
+#define I40E_GL_SWR_PM_UP_THR_EF_VALUE 0x03030303
+/* For XL710 */
+#define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
#define I40E_GL_SWR_PM_UP_THR 0x269FBC
-#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
-#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
-#define I40E_GL_SWR_PM_UP_THR_VALUE 0x03030303
- static const struct {
+static void
+i40e_configure_registers(struct i40e_hw *hw)
+{
+ static struct {
uint32_t addr;
uint64_t val;
} reg_table[] = {
{I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
{I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
- {I40E_GL_SWR_PM_UP_THR, I40E_GL_SWR_PM_UP_THR_VALUE},
+ {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
};
uint64_t reg;
uint32_t i;
int ret;
- /* Below fix is for X710 only */
- if (i40e_is_40G_device(hw->device_id))
- return;
-
for (i = 0; i < RTE_DIM(reg_table); i++) {
- ret = i40e_debug_read_register(hw, reg_table[i].addr, &reg);
+ if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
+ if (i40e_is_40G_device(hw->device_id)) /* For XL710 */
+ reg_table[i].val =
+ I40E_GL_SWR_PM_UP_THR_SF_VALUE;
+ else /* For X710 */
+ reg_table[i].val =
+ I40E_GL_SWR_PM_UP_THR_EF_VALUE;
+ }
+
+ ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
+ &reg, NULL);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
reg_table[i].addr);
@@ -5470,3 +7406,1129 @@ i40e_configure_registers(struct i40e_hw *hw)
"0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
}
}
+
+#define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4))
+#define I40E_VSI_TSR_QINQ_CONFIG 0xc030
+#define I40E_VSI_L2TAGSTXVALID(_i) (0x00042800 + ((_i) * 4))
+#define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
+static int
+i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
+{
+ uint32_t reg;
+ int ret;
+
+ if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
+ PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
+ return -EINVAL;
+ }
+
+ /* Configure for double VLAN RX stripping */
+ reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
+ if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
+ reg |= I40E_VSI_TSR_QINQ_CONFIG;
+ ret = i40e_aq_debug_write_register(hw,
+ I40E_VSI_TSR(vsi->vsi_id),
+ reg, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
+ vsi->vsi_id);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ /* Configure for double VLAN TX insertion */
+ reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
+ if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
+ reg = I40E_VSI_L2TAGSTXVALID_QINQ;
+ ret = i40e_aq_debug_write_register(hw,
+ I40E_VSI_L2TAGSTXVALID(
+ vsi->vsi_id), reg, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to update "
+ "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ return 0;
+}
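+
+/*
+ * Illustrative sketch, not part of this patch: reading back the two
+ * registers configured above to verify the QinQ setup. The helper name is
+ * hypothetical; it returns 0 when both registers hold the expected values.
+ */
+#if 0
+static int
+example_check_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
+{
+ uint32_t tsr = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
+ uint32_t txvalid = I40E_READ_REG(hw,
+ I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
+
+ if ((tsr & I40E_VSI_TSR_QINQ_CONFIG) == I40E_VSI_TSR_QINQ_CONFIG &&
+ (txvalid & 0xff) == I40E_VSI_L2TAGSTXVALID_QINQ)
+ return 0;
+ return -1;
+}
+#endif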
+
+/**
+ * i40e_aq_add_mirror_rule
+ * @hw: pointer to the hardware structure
+ * @seid: VEB seid to add mirror rule to
+ * @dst_id: destination vsi seid
+ * @entries: Buffer which contains the entities to be mirrored
+ * @count: number of entities contained in the buffer
+ * @rule_id: the rule_id of the rule to be added
+ *
+ * Add a mirror rule for a given veb.
+ *
+ **/
+static enum i40e_status_code
+i40e_aq_add_mirror_rule(struct i40e_hw *hw,
+ uint16_t seid, uint16_t dst_id,
+ uint16_t rule_type, uint16_t *entries,
+ uint16_t count, uint16_t *rule_id)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_delete_mirror_rule cmd;
+ struct i40e_aqc_add_delete_mirror_rule_completion *resp =
+ (struct i40e_aqc_add_delete_mirror_rule_completion *)
+ &desc.params.raw;
+ uint16_t buff_len;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_mirror_rule);
+ memset(&cmd, 0, sizeof(cmd));
+
+ buff_len = sizeof(uint16_t) * count;
+ desc.datalen = rte_cpu_to_le_16(buff_len);
+ if (buff_len > 0)
+ desc.flags |= rte_cpu_to_le_16(
+ (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd.rule_type = rte_cpu_to_le_16(rule_type <<
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
+ cmd.num_entries = rte_cpu_to_le_16(count);
+ cmd.seid = rte_cpu_to_le_16(seid);
+ cmd.destination = rte_cpu_to_le_16(dst_id);
+
+ rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
+ status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
+ PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d, "
+ "rule_id = %u, mirror_rules_used = %u, "
+ "mirror_rules_free = %u",
+ hw->aq.asq_last_status, resp->rule_id,
+ resp->mirror_rules_used, resp->mirror_rules_free);
+ *rule_id = rte_le_to_cpu_16(resp->rule_id);
+
+ return status;
+}
+
+/**
+ * i40e_aq_del_mirror_rule
+ * @hw: pointer to the hardware structure
+ * @seid: VEB seid to add mirror rule to
+ * @entries: Buffer which contains the entities to be mirrored
+ * @count: number of entities contained in the buffer
+ * @rule_id: the rule_id of the rule to be deleted
+ *
+ * Delete a mirror rule for a given veb.
+ *
+ **/
+static enum i40e_status_code
+i40e_aq_del_mirror_rule(struct i40e_hw *hw,
+ uint16_t seid, uint16_t rule_type, uint16_t *entries,
+ uint16_t count, uint16_t rule_id)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_delete_mirror_rule cmd;
+ uint16_t buff_len = 0;
+ enum i40e_status_code status;
+ void *buff = NULL;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_delete_mirror_rule);
+ memset(&cmd, 0, sizeof(cmd));
+ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
+ desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ cmd.num_entries = count;
+ buff_len = sizeof(uint16_t) * count;
+ desc.datalen = rte_cpu_to_le_16(buff_len);
+ buff = (void *)entries;
+ } else
+ /* rule id is filled in the destination field when deleting a mirror rule */
+ cmd.destination = rte_cpu_to_le_16(rule_id);
+
+ cmd.rule_type = rte_cpu_to_le_16(rule_type <<
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
+ cmd.seid = rte_cpu_to_le_16(seid);
+
+ rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
+ status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_mirror_rule_set
+ * @dev: pointer to the hardware structure
+ * @mirror_conf: mirror rule info
+ * @sw_id: mirror rule's sw_id
+ * @on: enable/disable
+ *
+ * set a mirror rule.
+ *
+ **/
+static int
+i40e_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t sw_id, uint8_t on)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_mirror_rule *it, *mirr_rule = NULL;
+ struct i40e_mirror_rule *parent = NULL;
+ uint16_t seid, dst_seid, rule_id;
+ uint16_t i, j = 0;
+ int ret;
+
+ PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
+
+ if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
+ PMD_DRV_LOG(ERR, "mirror rule can not be configured"
+ " without veb or vfs.");
+ return -ENOSYS;
+ }
+ if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
+ PMD_DRV_LOG(ERR, "mirror table is full.");
+ return -ENOSPC;
+ }
+ if (mirror_conf->dst_pool > pf->vf_num) {
+ PMD_DRV_LOG(ERR, "invalid destination pool %u.",
+ mirror_conf->dst_pool);
+ return -EINVAL;
+ }
+
+ seid = pf->main_vsi->veb->seid;
+
+ TAILQ_FOREACH(it, &pf->mirror_list, rules) {
+ if (sw_id <= it->index) {
+ mirr_rule = it;
+ break;
+ }
+ parent = it;
+ }
+ if (mirr_rule && sw_id == mirr_rule->index) {
+ if (on) {
+ PMD_DRV_LOG(ERR, "mirror rule exists.");
+ return -EEXIST;
+ } else {
+ ret = i40e_aq_del_mirror_rule(hw, seid,
+ mirr_rule->rule_type,
+ mirr_rule->entries,
+ mirr_rule->num_entries, mirr_rule->id);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
+ " ret = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
+ return -ENOSYS;
+ }
+ TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
+ rte_free(mirr_rule);
+ pf->nb_mirror_rule--;
+ return 0;
+ }
+ } else if (!on) {
+ PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
+ return -ENOENT;
+ }
+
+ mirr_rule = rte_zmalloc("i40e_mirror_rule",
+ sizeof(struct i40e_mirror_rule), 0);
+ if (!mirr_rule) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+ switch (mirror_conf->rule_type) {
+ case ETH_MIRROR_VLAN:
+ for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
+ if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+ mirr_rule->entries[j] =
+ mirror_conf->vlan.vlan_id[i];
+ j++;
+ }
+ }
+ if (j == 0) {
+ PMD_DRV_LOG(ERR, "vlan is not specified.");
+ rte_free(mirr_rule);
+ return -EINVAL;
+ }
+ mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
+ break;
+ case ETH_MIRROR_VIRTUAL_POOL_UP:
+ case ETH_MIRROR_VIRTUAL_POOL_DOWN:
+ /* check if the specified pool bit is out of range */
+ if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
+ PMD_DRV_LOG(ERR, "pool mask is out of range.");
+ rte_free(mirr_rule);
+ return -EINVAL;
+ }
+ for (i = 0, j = 0; i < pf->vf_num; i++) {
+ if (mirror_conf->pool_mask & (1ULL << i)) {
+ mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
+ j++;
+ }
+ }
+ if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
+ /* add pf vsi to entries */
+ mirr_rule->entries[j] = pf->main_vsi_seid;
+ j++;
+ }
+ if (j == 0) {
+ PMD_DRV_LOG(ERR, "pool is not specified.");
+ rte_free(mirr_rule);
+ return -EINVAL;
+ }
+ /* egress and ingress in aq commands mean from the switch, not the port */
+ mirr_rule->rule_type =
+ (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
+ I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
+ I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
+ break;
+ case ETH_MIRROR_UPLINK_PORT:
+ /* egress and ingress in aq commands mean from the switch, not the port */
+ mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
+ break;
+ case ETH_MIRROR_DOWNLINK_PORT:
+ mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
+ mirror_conf->rule_type);
+ rte_free(mirr_rule);
+ return -EINVAL;
+ }
+
+ /* If the dst_pool is equal to vf_num, consider it as PF */
+ if (mirror_conf->dst_pool == pf->vf_num)
+ dst_seid = pf->main_vsi_seid;
+ else
+ dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
+
+ ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
+ mirr_rule->rule_type, mirr_rule->entries,
+ j, &rule_id);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "failed to add mirror rule:"
+ " ret = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
+ rte_free(mirr_rule);
+ return -ENOSYS;
+ }
+
+ mirr_rule->index = sw_id;
+ mirr_rule->num_entries = j;
+ mirr_rule->id = rule_id;
+ mirr_rule->dst_vsi_seid = dst_seid;
+
+ if (parent)
+ TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
+ else
+ TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
+
+ pf->nb_mirror_rule++;
+ return 0;
+}
+
+/**
+ * i40e_mirror_rule_reset
+ * @dev: pointer to the device
+ * @sw_id: mirror rule's sw_id
+ *
+ * reset a mirror rule.
+ *
+ **/
+static int
+i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_mirror_rule *it, *mirr_rule = NULL;
+ uint16_t seid;
+ int ret;
+
+ PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
+
+ seid = pf->main_vsi->veb->seid;
+
+ TAILQ_FOREACH(it, &pf->mirror_list, rules) {
+ if (sw_id == it->index) {
+ mirr_rule = it;
+ break;
+ }
+ }
+ if (mirr_rule) {
+ ret = i40e_aq_del_mirror_rule(hw, seid,
+ mirr_rule->rule_type,
+ mirr_rule->entries,
+ mirr_rule->num_entries, mirr_rule->id);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
+ " status = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
+ return -ENOSYS;
+ }
+ TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
+ rte_free(mirr_rule);
+ pf->nb_mirror_rule--;
+ } else {
+ PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
+ return -ENOENT;
+ }
+ return 0;
+}
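+
+/*
+ * Illustrative sketch, not part of this patch: mirroring one VLAN to VF
+ * pool 0 through the generic ethdev mirror API, which dispatches to
+ * i40e_mirror_rule_set()/i40e_mirror_rule_reset() above. The helper name,
+ * VLAN 100 and sw rule id 0 are example values.
+ */
+#if 0
+static int
+example_mirror_vlan(uint8_t port_id)
+{
+ struct rte_eth_mirror_conf conf;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.rule_type = ETH_MIRROR_VLAN;
+ conf.dst_pool = 0;
+ conf.vlan.vlan_mask = 1ULL << 0;
+ conf.vlan.vlan_id[0] = 100;
+ return rte_eth_mirror_rule_set(port_id, &conf, 0 /* sw_id */, 1 /* on */);
+}
+#endif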
+
+static uint64_t
+i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t systim_cycles;
+
+ systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
+ systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
+ << 32;
+
+ return systim_cycles;
+}
+
+static uint64_t
+i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rx_tstamp;
+
+ rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
+ rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
+ << 32;
+
+ return rx_tstamp;
+}
+
+static uint64_t
+i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t tx_tstamp;
+
+ tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
+ tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
+ << 32;
+
+ return tx_tstamp;
+}
+
+static void
+i40e_start_timecounters(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+ struct rte_eth_link link;
+ uint32_t tsync_inc_l;
+ uint32_t tsync_inc_h;
+
+ /* Get current link speed. */
+ memset(&link, 0, sizeof(link));
+ i40e_dev_link_update(dev, 1);
+ rte_i40e_dev_atomic_read_link_status(dev, &link);
+
+ switch (link.link_speed) {
+ case ETH_LINK_SPEED_40G:
+ tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
+ tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
+ break;
+ case ETH_LINK_SPEED_10G:
+ tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
+ tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
+ break;
+ case ETH_LINK_SPEED_1000:
+ tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
+ tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
+ break;
+ default:
+ tsync_inc_l = 0x0;
+ tsync_inc_h = 0x0;
+ }
+
+ /* Set the timesync increment value. */
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
+
+ memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
+ adapter->systime_tc.cc_shift = 0;
+ adapter->systime_tc.nsec_mask = 0;
+
+ adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
+ adapter->rx_tstamp_tc.cc_shift = 0;
+ adapter->rx_tstamp_tc.nsec_mask = 0;
+
+ adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
+ adapter->tx_tstamp_tc.cc_shift = 0;
+ adapter->tx_tstamp_tc.nsec_mask = 0;
+}
+
+static int
+i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+
+ adapter->systime_tc.nsec += delta;
+ adapter->rx_tstamp_tc.nsec += delta;
+ adapter->tx_tstamp_tc.nsec += delta;
+
+ return 0;
+}
+
+static int
+i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ uint64_t ns;
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+
+ ns = rte_timespec_to_ns(ts);
+
+ /* Set the timecounters to a new value. */
+ adapter->systime_tc.nsec = ns;
+ adapter->rx_tstamp_tc.nsec = ns;
+ adapter->tx_tstamp_tc.nsec = ns;
+
+ return 0;
+}
+
+static int
+i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ uint64_t ns, systime_cycles;
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+
+ systime_cycles = i40e_read_systime_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+i40e_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl_l;
+ uint32_t tsync_ctl_h;
+
+ /* Stop the timesync system time. */
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
+ /* Reset the timesync system time value. */
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
+
+ i40e_start_timecounters(dev);
+
+ /* Clear timesync registers. */
+ I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
+ I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
+ I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
+ I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
+ I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
+ I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
+
+ /* Enable timestamping of PTP packets. */
+ tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
+ tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
+
+ tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
+ tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
+ tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
+
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
+
+ return 0;
+}
+
+static int
+i40e_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl_l;
+ uint32_t tsync_ctl_h;
+
+ /* Disable timestamping of transmitted PTP packets. */
+ tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
+ tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
+
+ tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
+ tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
+
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
+
+ /* Reset the timesync increment value. */
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
+ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
+
+ return 0;
+}
+
+static int
+i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp, uint32_t flags)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+
+ uint32_t sync_status;
+ uint32_t index = flags & 0x03;
+ uint64_t rx_tstamp_cycles;
+ uint64_t ns;
+
+ sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
+ if ((sync_status & (1 << index)) == 0)
+ return -EINVAL;
+
+ rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
+ ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *adapter =
+ (struct i40e_adapter *)dev->data->dev_private;
+
+ uint32_t sync_status;
+ uint64_t tx_tstamp_cycles;
+ uint64_t ns;
+
+ sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
+ if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
+ return -EINVAL;
+
+ tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
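+
+/*
+ * Illustrative sketch, not part of this patch: the IEEE 1588 flow an
+ * application would drive on top of the timesync ops above. The RX
+ * timestamp index (0-3) is taken from the timesync flag of the matched
+ * mbuf; the helper name and the 1000 ns delta are example values.
+ */
+#if 0
+static void
+example_ptp_rx(uint8_t port_id, uint32_t tsyn_index)
+{
+ struct timespec ts;
+
+ rte_eth_timesync_enable(port_id);
+ if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, tsyn_index) == 0)
+ printf("RX timestamp: %ld.%09ld\n",
+ (long)ts.tv_sec, (long)ts.tv_nsec);
+ /* a slave clock would then correct its local time */
+ rte_eth_timesync_adjust_time(port_id, 1000);
+}
+#endif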
+
+/*
+ * i40e_parse_dcb_configure - parse dcb configure from user
+ * @dev: the device being configured
+ * @dcb_cfg: pointer of the result of parse
+ * @*tc_map: bit map of enabled traffic classes
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int
+i40e_parse_dcb_configure(struct rte_eth_dev *dev,
+ struct i40e_dcbx_config *dcb_cfg,
+ uint8_t *tc_map)
+{
+ struct rte_eth_dcb_rx_conf *dcb_rx_conf;
+ uint8_t i, tc_bw, bw_lf;
+
+ memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
+
+ dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
+ PMD_INIT_LOG(ERR, "number of tc exceeds max.");
+ return -EINVAL;
+ }
+
+ /* assume each tc has the same bw */
+ tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
+ for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
+ dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
+ /* to ensure the sum of tcbw is equal to 100 */
+ bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
+ for (i = 0; i < bw_lf; i++)
+ dcb_cfg->etscfg.tcbwtable[i]++;
+
+ /* assume each tc has the same Transmission Selection Algorithm */
+ for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
+ dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ dcb_cfg->etscfg.prioritytable[i] =
+ dcb_rx_conf->dcb_tc[i];
+
+ /* FW needs one App to configure HW */
+ dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
+ dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
+ dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ if (dcb_rx_conf->nb_tcs == 0)
+ *tc_map = 1; /* tc0 only */
+ else
+ *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
+
+ if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ dcb_cfg->pfc.willing = 0;
+ dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ dcb_cfg->pfc.pfcenable = *tc_map;
+ }
+ return 0;
+}
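+
+/*
+ * Illustrative sketch, not part of this patch: the application-side port
+ * configuration that i40e_parse_dcb_configure() consumes, 4 TCs with user
+ * priorities mapped round-robin onto TC0-TC3 and PFC enabled. The helper
+ * name is hypothetical.
+ */
+#if 0
+static void
+example_dcb_port_conf(struct rte_eth_conf *port_conf)
+{
+ struct rte_eth_dcb_rx_conf *dcb = &port_conf->rx_adv_conf.dcb_rx_conf;
+ uint8_t i;
+
+ port_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
+ port_conf->dcb_capability_en = ETH_DCB_PFC_SUPPORT;
+ dcb->nb_tcs = ETH_4_TCS;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ dcb->dcb_tc[i] = i % ETH_4_TCS;
+}
+#endif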
+
+
+static enum i40e_status_code
+i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
+ struct i40e_aqc_vsi_properties_data *info,
+ uint8_t enabled_tcmap)
+{
+ enum i40e_status_code ret;
+ int i, total_tc = 0;
+ uint16_t qpnum_per_tc, bsf, qp_idx;
+ struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
+
+ ret = validate_tcmap_parameter(vsi, enabled_tcmap);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tcmap & (1 << i))
+ total_tc++;
+ }
+ if (total_tc == 0)
+ total_tc = 1;
+ vsi->enabled_tc = enabled_tcmap;
+
+ /* Number of queues per enabled TC */
+ qpnum_per_tc = dev_data->nb_rx_queues / total_tc;
+ if (qpnum_per_tc == 0) {
+ PMD_INIT_LOG(ERR, " number of queues is less that tcs.");
+ return I40E_ERR_INVALID_QP_ID;
+ }
+ qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
+ I40E_MAX_Q_PER_TC);
+ bsf = rte_bsf32(qpnum_per_tc);
+
+ /**
+ * Configure TC and queue mapping parameters. For each enabled TC,
+ * allocate qpnum_per_tc queues to that traffic class; disabled TCs
+ * are served by the default queue.
+ */
+ qp_idx = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & (1 << i)) {
+ info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+ qp_idx += qpnum_per_tc;
+ } else
+ info->tc_mapping[i] = 0;
+ }
+
+ /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
+ if (vsi->type == I40E_VSI_SRIOV) {
+ info->mapping_flags |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+ for (i = 0; i < vsi->nb_qps; i++)
+ info->queue_mapping[i] =
+ rte_cpu_to_le_16(vsi->base_queue + i);
+ } else {
+ info->mapping_flags |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
+ }
+ info->valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
+
+ return I40E_SUCCESS;
+}
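+
+/*
+ * Worked example for the mapping above (illustrative, not part of this
+ * patch): with 16 RX queues and 4 enabled TCs, qpnum_per_tc = 16 / 4 = 4
+ * and bsf = rte_bsf32(4) = 2, so TC n is assigned queues [4n, 4n + 3] and
+ * tc_mapping[n] encodes queue offset 4n with queue-number exponent 2
+ * (2^2 = 4 queues).
+ */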
+
+/*
+ * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
+ * @vsi: VSI to be configured
+ * @tc_map: enabled TC bitmap
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static enum i40e_status_code
+i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 tc_map)
+{
+ struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+ struct i40e_vsi_context ctxt;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ enum i40e_status_code ret = I40E_SUCCESS;
+ int i;
+
+ /* Check if enabled_tc is same as existing or new TCs */
+ if (vsi->enabled_tc == tc_map)
+ return ret;
+
+ /* configure tc bandwidth */
+ memset(&bw_data, 0, sizeof(bw_data));
+ bw_data.tc_valid_bits = tc_map;
+ /* Enable ETS TCs with equal BW Share for now across all VSIs */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (tc_map & BIT_ULL(i))
+ bw_data.tc_bw_credits[i] = 1;
+ }
+ ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "AQ command Config VSI BW allocation"
+ " per TC failed = %d",
+ hw->aq.asq_last_status);
+ goto out;
+ }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ vsi->info.qs_handle[i] = bw_data.qs_handles[i];
+
+ /* Update Queue Pairs Mapping for currently enabled UPs */
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.info = vsi->info;
+ i40e_get_cap(hw);
+ ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
+ if (ret)
+ goto out;
+
+ /* Update the VSI after updating the VSI queue-mapping information */
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure "
+ "TC queue mapping = %d",
+ hw->aq.asq_last_status);
+ goto out;
+ }
+ /* update the local VSI info with updated queue map */
+ (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+ (void)rte_memcpy(&vsi->info.queue_mapping,
+ &ctxt.info.queue_mapping,
+ sizeof(vsi->info.queue_mapping));
+ vsi->info.mapping_flags = ctxt.info.mapping_flags;
+ vsi->info.valid_sections = 0;
+
+ /* query and update current VSI BW information */
+ ret = i40e_vsi_get_bw_config(vsi);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "Failed updating vsi bw info, err %s aq_err %s",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ goto out;
+ }
+
+ vsi->enabled_tc = tc_map;
+
+out:
+ return ret;
+}
+
+/*
+ * i40e_dcb_hw_configure - program the dcb setting to hw
+ * @pf: pf the configuration is taken on
+ * @new_cfg: new configuration
+ * @tc_map: enabled TC bitmap
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static enum i40e_status_code
+i40e_dcb_hw_configure(struct i40e_pf *pf,
+ struct i40e_dcbx_config *new_cfg,
+ uint8_t tc_map)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ struct i40e_vsi_list *vsi_list;
+ enum i40e_status_code ret;
+ int i;
+ uint32_t val;
+
+ /* Use the FW API only if FW >= v4.4 */
+ if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
+ (hw->aq.fw_maj_ver >= 5))) {
+ PMD_INIT_LOG(ERR, "FW < v4.4, can not use FW LLDP API"
+ " to configure DCB");
+ return I40E_ERR_FIRMWARE_API_VERSION;
+ }
+
+ /* Check if need reconfiguration */
+ if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
+ PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
+ return I40E_SUCCESS;
+ }
+
+ /* Copy the new config to the current config */
+ *old_cfg = *new_cfg;
+ old_cfg->etsrec = old_cfg->etscfg;
+ ret = i40e_set_dcb_config(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "Set DCB Config failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+ /* set receive Arbiter to RR mode and ETS scheme by default */
+ for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
+ val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
+ val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
+ I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
+			 I40E_PRTDCB_RETSTCC_ETSTC_MASK);
+ val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
+ I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
+ I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
+ val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
+ I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
+ val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
+ I40E_PRTDCB_RETSTCC_ETSTC_MASK;
+ I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
+ }
+ /* get local mib to check whether it is configured correctly */
+ /* IEEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+ /* Get Local DCB Config */
+ i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+
+ /* Update each VSI */
+ i40e_vsi_config_tc(main_vsi, tc_map);
+ if (main_vsi->veb) {
+ TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
+			/* Besides the main VSI, only enable the
+			 * default TC for other VSIs
+			 */
+ ret = i40e_vsi_config_tc(vsi_list->vsi,
+ I40E_DEFAULT_TCMAP);
+ if (ret)
+ PMD_INIT_LOG(WARNING,
+ "Failed configuring TC for VSI seid=%d\n",
+ vsi_list->vsi->seid);
+ /* continue */
+ }
+ }
+ return I40E_SUCCESS;
+}
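The firmware gate at the top of this function reduces to a major/minor version comparison that accepts FW >= 4.4; a self-contained sketch:

#include <stdio.h>

/* Accept FW >= 4.4: (major == 4 && minor >= 4) || major >= 5 */
static int fw_supports_lldp_api(unsigned maj, unsigned min)
{
	return (maj == 4 && min >= 4) || maj >= 5;
}

int main(void)
{
	printf("4.3 -> %d\n", fw_supports_lldp_api(4, 3)); /* 0 */
	printf("4.4 -> %d\n", fw_supports_lldp_api(4, 4)); /* 1 */
	printf("5.0 -> %d\n", fw_supports_lldp_api(5, 0)); /* 1 */
	return 0;
}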
+
+/*
+ * i40e_dcb_init_configure - initial dcb config
+ * @dev: device being configured
+ * @sw_dcb: indicate whether dcb is sw configured or hw offload
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int
+i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret = 0;
+
+ if ((pf->flags & I40E_FLAG_DCB) == 0) {
+ PMD_INIT_LOG(ERR, "HW doesn't support DCB");
+ return -ENOTSUP;
+ }
+
+ /* DCB initialization:
+ * Update DCB configuration from the Firmware and configure
+ * LLDP MIB change event.
+ */
+ if (sw_dcb == TRUE) {
+ ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
+
+ ret = i40e_init_dcb(hw);
+		/* With sw_dcb the LLDP agent has been stopped, so the
+		 * expected return from i40e_init_dcb is a failure with
+		 * I40E_AQ_RC_EPERM adminq status.
+		 */
+ if (ret != I40E_SUCCESS &&
+ hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ memset(&hw->local_dcbx_config, 0,
+ sizeof(struct i40e_dcbx_config));
+ /* set dcb default configuration */
+ hw->local_dcbx_config.etscfg.willing = 0;
+ hw->local_dcbx_config.etscfg.maxtcs = 0;
+ hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
+ hw->local_dcbx_config.etscfg.tsatable[0] =
+ I40E_IEEE_TSA_ETS;
+ hw->local_dcbx_config.etsrec =
+ hw->local_dcbx_config.etscfg;
+ hw->local_dcbx_config.pfc.willing = 0;
+ hw->local_dcbx_config.pfc.pfccap =
+ I40E_MAX_TRAFFIC_CLASS;
+ /* FW needs one App to configure HW */
+ hw->local_dcbx_config.numapps = 1;
+ hw->local_dcbx_config.app[0].selector =
+ I40E_APP_SEL_ETHTYPE;
+ hw->local_dcbx_config.app[0].priority = 3;
+ hw->local_dcbx_config.app[0].protocolid =
+ I40E_APP_PROTOID_FCOE;
+ ret = i40e_set_dcb_config(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "default dcb config fails."
+ " err = %d, aq_err = %d.", ret,
+ hw->aq.asq_last_status);
+ return -ENOSYS;
+ }
+ } else {
+ PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
+ " aq_err = %d.", ret,
+ hw->aq.asq_last_status);
+ return -ENOTSUP;
+ }
+ } else {
+ ret = i40e_aq_start_lldp(hw, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_INIT_LOG(DEBUG, "Failed to start lldp");
+
+ ret = i40e_init_dcb(hw);
+ if (!ret) {
+ if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
+ PMD_INIT_LOG(ERR, "HW doesn't support"
+ " DCBX offload.");
+ return -ENOTSUP;
+ }
+ } else {
+ PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
+ " aq_err = %d.", ret,
+ hw->aq.asq_last_status);
+ return -ENOTSUP;
+ }
+ }
+ return 0;
+}
+
+/*
+ * i40e_dcb_setup - setup dcb related config
+ * @dev: device being configured
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int
+i40e_dcb_setup(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_dcbx_config dcb_cfg;
+ uint8_t tc_map = 0;
+ int ret = 0;
+
+ if ((pf->flags & I40E_FLAG_DCB) == 0) {
+ PMD_INIT_LOG(ERR, "HW doesn't support DCB");
+ return -ENOTSUP;
+ }
+
+ if (pf->vf_num != 0 ||
+ (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
+ PMD_INIT_LOG(DEBUG, " DCB only works on main vsi.");
+
+ ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "invalid dcb config");
+ return -EINVAL;
+ }
+ ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "dcb sw configure fails");
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+static int
+i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
+ uint16_t bsf, tc_mapping;
+ int i;
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
+ else
+ dcb_info->nb_tcs = 1;
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
+ for (i = 0; i < dcb_info->nb_tcs; i++)
+ dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & (1 << i)) {
+ tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+			/* only the main VSI supports multiple TCs */
+ dcb_info->tc_queue.tc_rxq[0][i].base =
+ (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+ dcb_info->tc_queue.tc_txq[0][i].base =
+ dcb_info->tc_queue.tc_rxq[0][i].base;
+ bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 1 << bsf;
+ dcb_info->tc_queue.tc_txq[0][i].nb_queue =
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t interval =
+ i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+ uint16_t msix_intr;
+
+ msix_intr = intr_handle->intr_vec[queue_id];
+ if (msix_intr == I40E_MISC_VEC_ID)
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ else
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_DYN_CTLN(msix_intr -
+ I40E_RX_VEC_START),
+ I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+
+ I40E_WRITE_FLUSH(hw);
+ rte_intr_enable(&dev->pci_dev->intr_handle);
+
+ return 0;
+}
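For reference, the DYN_CTLN value written above is just ORed bit fields. A standalone sketch of its composition; the shift/mask constants below are assumptions based on the usual i40e register layout (enable at bit 0, clear-PBA at bit 1, ITR index at bit 3, interval at bit 5), not values taken from this patch:

#include <stdio.h>
#include <stdint.h>

/* Assumed register layout, illustrative only */
#define INTENA_MASK     0x1u
#define CLEARPBA_MASK   0x2u
#define ITR_INDX_SHIFT  3
#define INTERVAL_SHIFT  5

int main(void)
{
	uint16_t interval = 16;  /* 32 us / 2, per i40e_calc_itr_interval */
	uint32_t val = INTENA_MASK | CLEARPBA_MASK |
		       (0u << ITR_INDX_SHIFT) |
		       ((uint32_t)interval << INTERVAL_SHIFT);

	printf("DYN_CTLN = 0x%08x\n", val);  /* 0x00000203 */
	return 0;
}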
+
+static int
+i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t msix_intr;
+
+ msix_intr = intr_handle->intr_vec[queue_id];
+ if (msix_intr == I40E_MISC_VEC_ID)
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+ else
+ I40E_WRITE_REG(hw,
+ I40E_PFINT_DYN_CTLN(msix_intr -
+ I40E_RX_VEC_START),
+ 0);
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev.h b/src/dpdk22/drivers/net/i40e/i40e_ethdev.h
index f913ea96..1f9792b3 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev.h
+++ b/src/dpdk22/drivers/net/i40e/i40e_ethdev.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,9 @@
#define _I40E_ETHDEV_H_
#include <rte_eth_ctrl.h>
+#include <rte_time.h>
+
+#define I40E_VLAN_TAG_SIZE 4
#define I40E_AQ_LEN 32
#define I40E_AQ_BUF_SZ 4096
@@ -89,6 +92,11 @@
#define I40E_48_BIT_WIDTH (CHAR_BIT * 6)
#define I40E_48_BIT_MASK RTE_LEN2MASK(I40E_48_BIT_WIDTH, uint64_t)
+/* Linux PF host with virtchnl version 1.1 */
+#define PF_IS_V11(vf) \
+ (((vf)->version_major == I40E_VIRTCHNL_VERSION_MAJOR) && \
+ ((vf)->version_minor == 1))
+
/* index flex payload per layer */
enum i40e_flxpld_layer_idx {
I40E_FLXPLD_L2_IDX = 0,
@@ -100,6 +108,7 @@ enum i40e_flxpld_layer_idx {
#define I40E_FDIR_BITMASK_NUM_WORD 2 /* max number of bitmask words */
#define I40E_FDIR_MAX_FLEXWORD_NUM 8 /* max number of flexpayload words */
#define I40E_FDIR_MAX_FLEX_LEN 16 /* len in bytes of flex payload */
+#define I40E_INSET_MASK_NUM_REG 2 /* number of input set mask registers */
/* i40e flags */
#define I40E_FLAG_RSS (1ULL << 0)
@@ -110,6 +119,7 @@ enum i40e_flxpld_layer_idx {
#define I40E_FLAG_HEADER_SPLIT_ENABLED (1ULL << 5)
#define I40E_FLAG_FDIR (1ULL << 6)
#define I40E_FLAG_VXLAN (1ULL << 7)
+#define I40E_FLAG_RSS_AQ_CAPABLE (1ULL << 8)
#define I40E_FLAG_ALL (I40E_FLAG_RSS | \
I40E_FLAG_DCB | \
I40E_FLAG_VMDQ | \
@@ -117,19 +127,20 @@ enum i40e_flxpld_layer_idx {
I40E_FLAG_HEADER_SPLIT_DISABLED | \
I40E_FLAG_HEADER_SPLIT_ENABLED | \
I40E_FLAG_FDIR | \
- I40E_FLAG_VXLAN)
+ I40E_FLAG_VXLAN | \
+ I40E_FLAG_RSS_AQ_CAPABLE)
#define I40E_RSS_OFFLOAD_ALL ( \
- ETH_RSS_NONF_IPV4_UDP | \
- ETH_RSS_NONF_IPV4_TCP | \
- ETH_RSS_NONF_IPV4_SCTP | \
- ETH_RSS_NONF_IPV4_OTHER | \
ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONF_IPV6_UDP | \
- ETH_RSS_NONF_IPV6_TCP | \
- ETH_RSS_NONF_IPV6_SCTP | \
- ETH_RSS_NONF_IPV6_OTHER | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_NONFRAG_IPV4_SCTP | \
+ ETH_RSS_NONFRAG_IPV4_OTHER | \
ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_NONFRAG_IPV6_SCTP | \
+ ETH_RSS_NONFRAG_IPV6_OTHER | \
ETH_RSS_L2_PAYLOAD)
/* All bits of RSS hash enable */
@@ -149,6 +160,14 @@ enum i40e_flxpld_layer_idx {
(1ULL << I40E_FILTER_PCTYPE_FCOE_OTHER) | \
(1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+#define I40E_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define I40E_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
+/* Default queue interrupt throttling time in microseconds */
+#define I40E_ITR_INDEX_DEFAULT 0
+#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
+#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
+
struct i40e_adapter;
/**
@@ -197,6 +216,19 @@ struct i40e_macvlan_filter {
uint16_t vlan_id;
};
+/* Bandwidth limit information */
+struct i40e_bw_info {
+ uint16_t bw_limit; /* BW Limit (0 = disabled) */
+ uint8_t bw_max; /* Max BW limit if enabled */
+
+ /* Relative VSI credits within same TC with respect to other VSIs */
+ uint8_t bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS];
+ /* Bandwidth limit per TC */
+ uint8_t bw_ets_credits[I40E_MAX_TRAFFIC_CLASS];
+ /* Max bandwidth limit per TC */
+ uint8_t bw_ets_max[I40E_MAX_TRAFFIC_CLASS];
+};
+
/*
* Structure that defines a VSI, associated with a adapter.
*/
@@ -233,6 +265,7 @@ struct i40e_vsi {
uint16_t seid; /* The seid of VSI itself */
uint16_t uplink_seid; /* The uplink seid of this VSI */
uint16_t nb_qps; /* Number of queue pairs VSI can occupy */
+ uint16_t nb_used_qps; /* Number of queue pairs VSI uses */
uint16_t max_macaddrs; /* Maximum number of MAC addresses */
uint16_t base_queue; /* The first queue index of this VSI */
/*
@@ -241,7 +274,9 @@ struct i40e_vsi {
*/
uint16_t vsi_id;
uint16_t msix_intr; /* The MSIX interrupt binds to VSI */
+ uint16_t nb_msix; /* The max number of msix vector */
uint8_t enabled_tc; /* The traffic class enabled */
+ struct i40e_bw_info bw_info; /* VSI bandwidth information */
};
struct pool_entry {
@@ -280,6 +315,17 @@ struct i40e_pf_vf {
};
/*
+ * Structure to store private data for flow control.
+ */
+struct i40e_fc_conf {
+ uint16_t pause_time; /* Flow control pause timer */
+	/* FC high water, 0-7 for PFC and 8 for LFC, unit: kilobytes */
+ uint32_t high_water[I40E_MAX_TRAFFIC_CLASS + 1];
+	/* FC low water, 0-7 for PFC and 8 for LFC, unit: kilobytes */
+ uint32_t low_water[I40E_MAX_TRAFFIC_CLASS + 1];
+};
+
+/*
* Structure to store private data for VMDQ instance
*/
struct i40e_vmdq_info {
@@ -323,6 +369,27 @@ struct i40e_fdir_info {
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
};
+#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
+#define I40E_MAX_MIRROR_RULES 64
+/*
+ * Mirror rule structure
+ */
+struct i40e_mirror_rule {
+ TAILQ_ENTRY(i40e_mirror_rule) rules;
+ uint8_t rule_type;
+ uint16_t index; /* the sw index of mirror rule */
+ uint16_t id; /* the rule id assigned by firmware */
+ uint16_t dst_vsi_seid; /* destination vsi for this mirror rule. */
+ uint16_t num_entries;
+	/* The stored info depends on the rule type:
+	   if type is I40E_MIRROR_TYPE_VLAN, VLAN IDs are stored here;
+	   if type is I40E_MIRROR_TYPE_VPORT_*, VSI SEIDs are stored.
+	 */
+ uint16_t entries[I40E_MIRROR_MAX_ENTRIES_PER_RULE];
+};
+
+TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
+
/*
* Structure to store private data specific for PF instance.
*/
@@ -347,10 +414,18 @@ struct i40e_pf {
uint16_t vf_num;
/* Each of below queue pairs should be power of 2 since it's the
precondition after TC configuration applied */
+ uint16_t lan_nb_qp_max;
uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+ uint16_t lan_qp_offset;
+ uint16_t vmdq_nb_qp_max;
uint16_t vmdq_nb_qps; /* The number of queue pairs of VMDq */
+ uint16_t vmdq_qp_offset;
+ uint16_t vf_nb_qp_max;
uint16_t vf_nb_qps; /* The number of queue pairs of VF */
+ uint16_t vf_qp_offset;
uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
+ uint16_t fdir_qp_offset;
+
uint16_t hash_lut_size; /* The size of hash lookup table */
/* store VXLAN UDP ports */
uint16_t vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
@@ -362,6 +437,9 @@ struct i40e_pf {
struct i40e_vmdq_info *vmdq;
struct i40e_fdir_info fdir; /* flow director info */
+ struct i40e_fc_conf fc_conf; /* Flow control conf */
+ struct i40e_mirror_rule_list mirror_list;
+ uint16_t nb_mirror_rule; /* The number of mirror rules */
};
enum pending_msg {
@@ -422,6 +500,7 @@ struct i40e_vf {
struct i40e_virtchnl_vf_resource *vf_res; /* All VSIs */
struct i40e_virtchnl_vsi_resource *vsi_res; /* LAN VSI */
struct i40e_vsi vsi;
+ uint64_t flags;
};
/*
@@ -437,6 +516,17 @@ struct i40e_adapter {
struct i40e_pf pf;
struct i40e_vf vf;
};
+
+ /* For vector PMD */
+ bool rx_bulk_alloc_allowed;
+ bool rx_vec_allowed;
+ bool tx_simple_allowed;
+ bool tx_vec_allowed;
+
+ /* For PTP */
+ struct rte_timecounter systime_tc;
+ struct rte_timecounter rx_tstamp_tc;
+ struct rte_timecounter tx_tstamp_tc;
};
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
@@ -471,13 +561,22 @@ const struct rte_memzone *i40e_memzone_reserve(const char *name,
int socket_id);
int i40e_fdir_configure(struct rte_eth_dev *dev);
void i40e_fdir_teardown(struct i40e_pf *pf);
-enum i40e_filter_pctype i40e_flowtype_to_pctype(
- enum rte_eth_flow_type flow_type);
-enum rte_eth_flow_type i40e_pctype_to_flowtype(
- enum i40e_filter_pctype pctype);
+enum i40e_filter_pctype i40e_flowtype_to_pctype(uint16_t flow_type);
+uint16_t i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype);
int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
+int i40e_select_filter_input_set(struct i40e_hw *hw,
+ struct rte_eth_input_set_conf *conf,
+ enum rte_filter_type filter);
+int i40e_filter_inset_select(struct i40e_hw *hw,
+ struct rte_eth_input_set_conf *conf,
+ enum rte_filter_type filter);
+
+void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
@@ -516,6 +615,8 @@ i40e_get_vsi_from_adapter(struct i40e_adapter *adapter)
(&(((struct i40e_vsi *)vsi)->adapter->hw))
#define I40E_VSI_TO_PF(vsi) \
(&(((struct i40e_vsi *)vsi)->adapter->pf))
+#define I40E_VSI_TO_VF(vsi) \
+ (&(((struct i40e_vsi *)vsi)->adapter->vf))
#define I40E_VSI_TO_DEV_DATA(vsi) \
(((struct i40e_vsi *)vsi)->adapter->pf.dev_data)
#define I40E_VSI_TO_ETH_DEV(vsi) \
@@ -540,28 +641,48 @@ i40e_init_adminq_parameter(struct i40e_hw *hw)
hw->aq.asq_buf_size = I40E_AQ_BUF_SZ;
}
-#define I40E_VALID_FLOW_TYPE(flow_type) \
- ((flow_type) == RTE_ETH_FLOW_TYPE_UDPV4 || \
- (flow_type) == RTE_ETH_FLOW_TYPE_TCPV4 || \
- (flow_type) == RTE_ETH_FLOW_TYPE_SCTPV4 || \
- (flow_type) == RTE_ETH_FLOW_TYPE_IPV4_OTHER || \
- (flow_type) == RTE_ETH_FLOW_TYPE_FRAG_IPV4 || \
- (flow_type) == RTE_ETH_FLOW_TYPE_UDPV6 || \
- (flow_type) == RTE_ETH_FLOW_TYPE_TCPV6 || \
- (flow_type) == RTE_ETH_FLOW_TYPE_SCTPV6 || \
- (flow_type) == RTE_ETH_FLOW_TYPE_IPV6_OTHER || \
- (flow_type) == RTE_ETH_FLOW_TYPE_FRAG_IPV6)
+static inline int
+i40e_align_floor(int n)
+{
+ if (n == 0)
+ return 0;
+ return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n));
+}
+
+static inline uint16_t
+i40e_calc_itr_interval(int16_t interval)
+{
+ if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
+ interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+
+ /* Convert to hardware count, as writing each 1 represents 2 us */
+ return (interval / 2);
+}
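Both helpers above are pure arithmetic and easy to sanity-check; a standalone sketch reproducing their bodies (with the driver constants inlined) so it compiles on its own:

#include <stdio.h>
#include <limits.h>
#include <stdint.h>

#define ITR_INTERVAL_DEFAULT 32
#define ITR_INTERVAL_MAX     8160

static int align_floor(int n)        /* round down to a power of two */
{
	if (n == 0)
		return 0;
	return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n));
}

static uint16_t calc_itr(int16_t us) /* microseconds -> HW count */
{
	if (us < 0 || us > ITR_INTERVAL_MAX)
		us = ITR_INTERVAL_DEFAULT;
	return us / 2;                   /* each HW unit is 2 us */
}

int main(void)
{
	printf("align_floor(6) = %d\n", align_floor(6));   /* 4 */
	printf("calc_itr(32)   = %u\n", calc_itr(32));     /* 16 */
	printf("calc_itr(9000) = %u\n", calc_itr(9000));   /* clamped: 16 */
	return 0;
}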
+
+#define I40E_VALID_FLOW(flow_type) \
+ ((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER || \
+ (flow_type) == RTE_ETH_FLOW_FRAG_IPV6 || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER || \
+ (flow_type) == RTE_ETH_FLOW_L2_PAYLOAD)
#define I40E_VALID_PCTYPE(pctype) \
- ((pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_UDP || \
+ ((pctype) == I40E_FILTER_PCTYPE_FRAG_IPV4 || \
(pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_UDP || \
(pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP || \
(pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || \
- (pctype) == I40E_FILTER_PCTYPE_FRAG_IPV4 || \
+ (pctype) == I40E_FILTER_PCTYPE_FRAG_IPV6 || \
(pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_UDP || \
(pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP || \
(pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP || \
(pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || \
- (pctype) == I40E_FILTER_PCTYPE_FRAG_IPV6)
+ (pctype) == I40E_FILTER_PCTYPE_L2_PAYLOAD)
#endif /* _I40E_ETHDEV_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev_vf.c b/src/dpdk22/drivers/net/i40e/i40e_ethdev_vf.c
index fe46cf12..14d2a50f 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev_vf.c
+++ b/src/dpdk22/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,7 +51,6 @@
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
@@ -61,19 +60,22 @@
#include <rte_dev.h>
#include "i40e_logs.h"
-#include "i40e/i40e_prototype.h"
-#include "i40e/i40e_adminq_cmd.h"
-#include "i40e/i40e_type.h"
+#include "base/i40e_prototype.h"
+#include "base/i40e_adminq_cmd.h"
+#include "base/i40e_type.h"
#include "i40e_rxtx.h"
#include "i40e_ethdev.h"
#include "i40e_pf.h"
-#define I40EVF_VSI_DEFAULT_MSIX_INTR 1
+#define I40EVF_VSI_DEFAULT_MSIX_INTR 1
+#define I40EVF_VSI_DEFAULT_MSIX_INTR_LNX 0
/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
#define I40EVF_BUSY_WAIT_COUNT 50
#define MAX_RESET_WAIT_CNT 20
+/* ITR index for NOITR */
+#define I40E_QINT_RQCTL_MSIX_INDX_NOITR 3
struct i40evf_arq_msg_info {
enum i40e_virtchnl_ops ops;
@@ -113,6 +115,9 @@ static int i40evf_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete);
static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
+static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstats *xstats, unsigned n);
+static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
@@ -145,11 +150,39 @@ static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
+static int
+i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+static int
+i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
/* Default hash key buffer for RSS */
static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
-static struct eth_dev_ops i40evf_eth_dev_ops = {
+struct rte_i40evf_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
+ {"rx_bytes", offsetof(struct i40e_eth_stats, rx_bytes)},
+ {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
+ {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
+ {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
+ {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
+ {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
+ rx_unknown_protocol)},
+ {"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
+ {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
+ {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
+ {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
+ {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
+ {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
+};
+
+#define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
+ sizeof(rte_i40evf_stats_strings[0]))
+
+static const struct eth_dev_ops i40evf_eth_dev_ops = {
.dev_configure = i40evf_dev_configure,
.dev_start = i40evf_dev_start,
.dev_stop = i40evf_dev_stop,
@@ -159,6 +192,8 @@ static struct eth_dev_ops i40evf_eth_dev_ops = {
.allmulticast_disable = i40evf_dev_allmulticast_disable,
.link_update = i40evf_dev_link_update,
.stats_get = i40evf_dev_stats_get,
+ .xstats_get = i40evf_dev_xstats_get,
+ .xstats_reset = i40evf_dev_xstats_reset,
.dev_close = i40evf_dev_close,
.dev_infos_get = i40evf_dev_info_get,
.vlan_filter_set = i40evf_vlan_filter_set,
@@ -170,6 +205,9 @@ static struct eth_dev_ops i40evf_eth_dev_ops = {
.tx_queue_stop = i40evf_dev_tx_queue_stop,
.rx_queue_setup = i40e_dev_rx_queue_setup,
.rx_queue_release = i40e_dev_rx_queue_release,
+ .rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
+ .rx_descriptor_done = i40e_dev_rx_descriptor_done,
.tx_queue_setup = i40e_dev_tx_queue_setup,
.tx_queue_release = i40e_dev_tx_queue_release,
.reta_update = i40evf_dev_rss_reta_update,
@@ -299,8 +337,8 @@ i40evf_wait_cmd_done(struct rte_eth_dev *dev,
int i = 0;
enum i40evf_aq_result ret;
-#define MAX_TRY_TIMES 10
-#define ASQ_DELAY_MS 50
+#define MAX_TRY_TIMES 20
+#define ASQ_DELAY_MS 100
do {
/* Delay some time first */
rte_delay_ms(ASQ_DELAY_MS);
@@ -362,6 +400,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
args->in_args, args->in_args_size, NULL);
if (err) {
PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
+ _clear_cmd(vf);
return err;
}
@@ -369,8 +408,10 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
/* read message and it's expected one */
if (!err && args->ops == info.ops)
_clear_cmd(vf);
- else if (err)
+ else if (err) {
PMD_DRV_LOG(ERR, "Failed to read message from AdminQ");
+ _clear_cmd(vf);
+ }
else if (args->ops != info.ops)
PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u",
args->ops, info.ops);
@@ -410,7 +451,7 @@ i40evf_check_api_version(struct rte_eth_dev *dev)
if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
- (vf->version_minor == I40E_VIRTCHNL_VERSION_MINOR))
+ (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR))
PMD_DRV_LOG(INFO, "Peer is Linux PF host");
else {
PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
@@ -430,14 +471,23 @@ i40evf_get_vf_resource(struct rte_eth_dev *dev)
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
int err;
struct vf_cmd_info args;
- uint32_t len;
+ uint32_t caps, len;
args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
- args.in_args = NULL;
- args.in_args_size = 0;
args.out_buffer = cmd_result_buffer;
args.out_size = I40E_AQ_BUF_SZ;
-
+ if (PF_IS_V11(vf)) {
+ caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
+ I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ args.in_args = (uint8_t *)&caps;
+ args.in_args_size = sizeof(caps);
+ } else {
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ }
err = i40evf_execute_vf_cmd(dev, &args);
if (err) {
@@ -572,13 +622,11 @@ i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info,
rxq_info->queue_id = queue_id;
rxq_info->max_pkt_size = max_pkt_size;
if (queue_id < nb_rxq) {
- struct rte_pktmbuf_pool_private *mbp_priv;
-
rxq_info->ring_len = rxq->nb_rx_desc;
rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
- mbp_priv = rte_mempool_get_priv(rxq->mp);
rxq_info->databuffer_size =
- mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+ (rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
}
}
@@ -700,19 +748,33 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
sizeof(struct i40e_virtchnl_vector_map)];
struct i40e_virtchnl_irq_map_info *map_info;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ uint32_t vector_id;
int i, err;
+
+ if (rte_intr_allow_others(intr_handle)) {
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
+ vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
+ else
+ vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX;
+ } else {
+ vector_id = I40E_MISC_VEC_ID;
+ }
+
map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
map_info->num_vectors = 1;
- map_info->vecmap[0].rxitr_idx = RTE_LIBRTE_I40E_ITR_INTERVAL / 2;
- map_info->vecmap[0].txitr_idx = RTE_LIBRTE_I40E_ITR_INTERVAL / 2;
+ map_info->vecmap[0].rxitr_idx = I40E_QINT_RQCTL_MSIX_INDX_NOITR;
map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
 	/* Always use default dynamic MSIX interrupt */
- map_info->vecmap[0].vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
+ map_info->vecmap[0].vector_id = vector_id;
/* Don't map any tx queue */
map_info->vecmap[0].txq_map = 0;
map_info->vecmap[0].rxq_map = 0;
- for (i = 0; i < dev->data->nb_rx_queues; i++)
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
map_info->vecmap[0].rxq_map |= 1 << i;
+ if (rte_intr_dp_is_en(intr_handle))
+ intr_handle->intr_vec[i] = vector_id;
+ }
args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
args.in_args = (u8 *)cmd_buffer;
@@ -797,7 +859,7 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
/* Stop TX queues first */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
- PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+ PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
return -1;
}
}
@@ -805,7 +867,7 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
/* Then stop RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
- PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+ PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
return -1;
}
}
@@ -888,11 +950,10 @@ i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
}
static int
-i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_virtchnl_queue_select q_stats;
- struct i40e_eth_stats *pstats;
int err;
struct vf_cmd_info args;
@@ -907,9 +968,23 @@ i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
err = i40evf_execute_vf_cmd(dev, &args);
if (err) {
PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
+ *pstats = NULL;
return err;
}
- pstats = (struct i40e_eth_stats *)args.out_buffer;
+ *pstats = (struct i40e_eth_stats *)args.out_buffer;
+ return 0;
+}
+
+static int
+i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ int ret;
+ struct i40e_eth_stats *pstats = NULL;
+
+ ret = i40evf_update_stats(dev, &pstats);
+ if (ret != 0)
+ return 0;
+
stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
pstats->rx_broadcast;
stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
@@ -922,6 +997,47 @@ i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
return 0;
}
+static void
+i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_eth_stats *pstats = NULL;
+
+ /* read stat values to clear hardware registers */
+ i40evf_update_stats(dev, &pstats);
+
+	/* set stats offset based on current values */
+ vf->vsi.eth_stats_offset = vf->vsi.eth_stats;
+}
+
+static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstats *xstats, unsigned n)
+{
+ int ret;
+ unsigned i;
+ struct i40e_eth_stats *pstats = NULL;
+
+ if (n < I40EVF_NB_XSTATS)
+ return I40EVF_NB_XSTATS;
+
+ ret = i40evf_update_stats(dev, &pstats);
+ if (ret != 0)
+ return 0;
+
+ if (!xstats)
+ return 0;
+
+	/* loop over the xstats array and copy values from pstats */
+ for (i = 0; i < I40EVF_NB_XSTATS; i++) {
+ snprintf(xstats[i].name, sizeof(xstats[i].name),
+ "%s", rte_i40evf_stats_strings[i].name);
+ xstats[i].value = *(uint64_t *)(((char *)pstats) +
+ rte_i40evf_stats_strings[i].offset);
+ }
+
+ return I40EVF_NB_XSTATS;
+}
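The name/offset table walked above is a generic technique: one static table of names and offsetof() values, one loop that reads each counter at base + offset. A minimal self-contained sketch with a stand-in stats struct (not the driver's):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct eth_stats { uint64_t rx_bytes, tx_bytes; };  /* stand-in struct */

struct name_off { const char *name; size_t offset; };

static const struct name_off strings[] = {
	{"rx_bytes", offsetof(struct eth_stats, rx_bytes)},
	{"tx_bytes", offsetof(struct eth_stats, tx_bytes)},
};

int main(void)
{
	struct eth_stats st = { .rx_bytes = 1500, .tx_bytes = 900 };

	/* same walk as the driver: each value lives at (base + offset) */
	for (size_t i = 0; i < sizeof(strings) / sizeof(strings[0]); i++)
		printf("%s = %llu\n", strings[i].name,
		       (unsigned long long)*(const uint64_t *)
		       ((const char *)&st + strings[i].offset));
	return 0;
}

The n < I40EVF_NB_XSTATS early return above follows the usual ethdev convention: a first call with a too-small array just reports the required count.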
+
static int
i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
{
@@ -1000,7 +1116,7 @@ i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
return 0;
}
-static struct rte_pci_id pci_id_i40evf_map[] = {
+static const struct rte_pci_id pci_id_i40evf_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
@@ -1126,9 +1242,12 @@ i40evf_init_vf(struct rte_eth_dev *dev)
goto err_alloc;
}
+ if (hw->mac.type == I40E_MAC_X722_VF)
+ vf->flags = I40E_FLAG_RSS_AQ_CAPABLE;
vf->vsi.vsi_id = vf->vsi_res->vsi_id;
vf->vsi.type = vf->vsi_res->vsi_type;
vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
+ vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	/* check mac addr, if it's not valid, generate one */
if (I40E_SUCCESS != i40e_validate_mac_addr(\
@@ -1149,8 +1268,23 @@ err:
}
static int
-i40evf_dev_init(__rte_unused struct eth_driver *eth_drv,
- struct rte_eth_dev *eth_dev)
+i40evf_uninit_vf(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->adapter_stopped == 0)
+ i40evf_dev_close(dev);
+ rte_free(vf->vf_res);
+ vf->vf_res = NULL;
+
+ return 0;
+}
+
+static int
+i40evf_dev_init(struct rte_eth_dev *eth_dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
eth_dev->data->dev_private);
@@ -1167,11 +1301,13 @@ i40evf_dev_init(__rte_unused struct eth_driver *eth_drv,
* has already done this work.
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY){
- if (eth_dev->data->scattered_rx)
- eth_dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ i40e_set_rx_function(eth_dev);
+ i40e_set_tx_function(eth_dev);
return 0;
}
+ rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
+
hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
hw->device_id = eth_dev->pci_dev->id.device_id;
hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
@@ -1179,6 +1315,7 @@ i40evf_dev_init(__rte_unused struct eth_driver *eth_drv,
hw->bus.device = eth_dev->pci_dev->addr.devid;
hw->bus.func = eth_dev->pci_dev->addr.function;
hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
+ hw->adapter_stopped = 0;
if(i40evf_init_vf(eth_dev) != 0) {
PMD_INIT_LOG(ERR, "Init vf failed");
@@ -1199,17 +1336,40 @@ i40evf_dev_init(__rte_unused struct eth_driver *eth_drv,
return 0;
}
+static int
+i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ if (i40evf_uninit_vf(eth_dev) != 0) {
+ PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed");
+ return -1;
+ }
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
/*
* virtual function driver struct
*/
static struct eth_driver rte_i40evf_pmd = {
- {
+ .pci_drv = {
.name = "rte_i40evf_pmd",
.id_table = pci_id_i40evf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
},
.eth_dev_init = i40evf_dev_init,
- .dev_private_size = sizeof(struct i40e_vf),
+ .eth_dev_uninit = i40evf_dev_uninit,
+ .dev_private_size = sizeof(struct i40e_adapter),
};
/*
@@ -1238,6 +1398,17 @@ PMD_REGISTER_DRIVER(rte_i40evf_driver);
static int
i40evf_dev_configure(struct rte_eth_dev *dev)
{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	/* Initialize to TRUE. If any of the Rx queues doesn't meet the
+	 * bulk allocation or vector Rx preconditions, we will reset it.
+	 */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ ad->tx_simple_allowed = true;
+ ad->tx_vec_allowed = true;
+
return i40evf_init_vlan(dev);
}
@@ -1334,6 +1505,8 @@ i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (err)
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
rx_queue_id);
+ else
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
}
return err;
@@ -1358,6 +1531,7 @@ i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
i40e_rx_queue_release_mbufs(rxq);
i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
return 0;
@@ -1378,6 +1552,8 @@ i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (err)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
+ else
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
}
return err;
@@ -1395,13 +1571,14 @@ i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
tx_queue_id);
return err;
}
i40e_tx_queue_release_mbufs(txq);
i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
return 0;
@@ -1421,24 +1598,80 @@ i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
}
static int
+i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = dev->data;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint16_t buf_size, len;
+
+ rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(rxq->queue_id);
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ I40EVF_WRITE_FLUSH(hw);
+
+ /* Calculate the maximum packet length allowed */
+ mbp_priv = rte_mempool_get_priv(rxq->mp);
+ buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
+ rxq->hs_mode = i40e_header_split_none;
+ rxq->rx_hdr_len = 0;
+ rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+ len = rxq->rx_buf_len * I40E_MAX_CHAINED_RX_BUFFERS;
+ rxq->max_pkt_len = RTE_MIN(len,
+ dev_data->dev_conf.rxmode.max_rx_pkt_len);
+
+ /**
+ * Check if the jumbo frame and maximum packet length are set correctly
+ */
+ if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+ rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, as jumbo "
+ "frame is enabled", (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)I40E_FRAME_SIZE_MAX);
+ return I40E_ERR_CONFIG;
+ }
+ } else {
+ if (rxq->max_pkt_len < ETHER_MIN_LEN ||
+ rxq->max_pkt_len > ETHER_MAX_LEN) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, as jumbo "
+ "frame is disabled", (uint32_t)ETHER_MIN_LEN,
+ (uint32_t)ETHER_MAX_LEN);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (dev_data->dev_conf.rxmode.enable_scatter ||
+ (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+ dev_data->scattered_rx = 1;
+ }
+
+ return 0;
+}
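The buffer-size math above is worth seeing in isolation. A standalone sketch; the 128-byte alignment (1 << I40E_RXQ_CTX_DBUFF_SHIFT) and 128-byte headroom are assumptions matching common DPDK defaults, not values read from this patch:

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(v, a) (((v) + (a) - 1) & ~((a) - 1))
#define DBUFF_ALIGN    128   /* assumed: 1 << I40E_RXQ_CTX_DBUFF_SHIFT */
#define VLAN_TAG_SIZE  4

int main(void)
{
	uint16_t mbuf_room = 2176, headroom = 128;  /* assumed pool config */
	uint16_t buf_size = mbuf_room - headroom;   /* 2048 */
	uint16_t rx_buf_len = ALIGN_UP(buf_size, DBUFF_ALIGN);
	uint32_t max_pkt_len = 9000;                /* jumbo-frame example */

	/* scattered Rx is needed once a frame can exceed one buffer */
	int scattered = (max_pkt_len + 2 * VLAN_TAG_SIZE) > buf_size;

	printf("rx_buf_len=%u scattered=%d\n", rx_buf_len, scattered);
	return 0;
}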
+
+static int
i40evf_rx_init(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
uint16_t i;
+ int ret = I40E_SUCCESS;
struct i40e_rx_queue **rxq =
(struct i40e_rx_queue **)dev->data->rx_queues;
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
i40evf_config_rss(vf);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq[i]->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(i);
- I40E_PCI_REG_WRITE(rxq[i]->qrx_tail, rxq[i]->nb_rx_desc - 1);
+ if (!rxq[i] || !rxq[i]->q_set)
+ continue;
+ ret = i40evf_rxq_init(dev, rxq[i]);
+ if (ret != I40E_SUCCESS)
+ break;
}
+ if (ret == I40E_SUCCESS)
+ i40e_set_rx_function(dev);
- /* Flush the operation to write registers */
- I40EVF_WRITE_FLUSH(hw);
-
- return 0;
+ return ret;
}
static void
@@ -1451,21 +1684,118 @@ i40evf_tx_init(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++)
txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+
+ i40e_set_tx_function(dev);
}
static inline void
-i40evf_enable_queues_intr(struct i40e_hw *hw)
+i40evf_enable_queues_intr(struct rte_eth_dev *dev)
{
- I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
+ if (!rte_intr_allow_others(intr_handle)) {
+ I40E_WRITE_REG(hw,
+ I40E_VFINT_DYN_CTL01,
+ I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ I40E_VFINT_DYN_CTL01_CLEARPBA_MASK);
+ I40EVF_WRITE_FLUSH(hw);
+ return;
+ }
+
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
+ /* To support DPDK PF host */
+ I40E_WRITE_REG(hw,
+ I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ else
+ /* To support Linux PF host */
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
+ I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ I40E_VFINT_DYN_CTL01_CLEARPBA_MASK);
+
+ I40EVF_WRITE_FLUSH(hw);
}
static inline void
-i40evf_disable_queues_intr(struct i40e_hw *hw)
+i40evf_disable_queues_intr(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
+ if (!rte_intr_allow_others(intr_handle)) {
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
+ I40EVF_WRITE_FLUSH(hw);
+ return;
+ }
+
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
+ I40E_WRITE_REG(hw,
+ I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR
+ - 1),
+ 0);
+ else
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
+
+ I40EVF_WRITE_FLUSH(hw);
+}
+
+static int
+i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t interval =
+ i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+ uint16_t msix_intr;
+
+ msix_intr = intr_handle->intr_vec[queue_id];
+ if (msix_intr == I40E_MISC_VEC_ID)
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
+ I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
+ (0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT));
+ else
+ I40E_WRITE_REG(hw,
+ I40E_VFINT_DYN_CTLN1(msix_intr -
+ I40E_RX_VEC_START),
+ I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+ (0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
+
+ I40EVF_WRITE_FLUSH(hw);
+
+ rte_intr_enable(&dev->pci_dev->intr_handle);
+
+ return 0;
+}
+
+static int
+i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
- 0);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t msix_intr;
+
+ msix_intr = intr_handle->intr_vec[queue_id];
+ if (msix_intr == I40E_MISC_VEC_ID)
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
+ else
+ I40E_WRITE_REG(hw,
+ I40E_VFINT_DYN_CTLN1(msix_intr -
+ I40E_RX_VEC_START),
+ 0);
+
+ I40EVF_WRITE_FLUSH(hw);
+
+ return 0;
}
static int
@@ -1473,36 +1803,36 @@ i40evf_dev_start(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
struct ether_addr mac_addr;
+ uint32_t intr_vector = 0;
PMD_INIT_FUNC_TRACE();
- vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
- if (vf->max_pkt_len <= ETHER_MAX_LEN ||
- vf->max_pkt_len > I40E_FRAME_SIZE_MAX) {
- PMD_DRV_LOG(ERR, "maximum packet length must "
- "be larger than %u and smaller than %u,"
- "as jumbo frame is enabled",
- (uint32_t)ETHER_MAX_LEN,
- (uint32_t)I40E_FRAME_SIZE_MAX);
- return I40E_ERR_CONFIG;
- }
- } else {
- if (vf->max_pkt_len < ETHER_MIN_LEN ||
- vf->max_pkt_len > ETHER_MAX_LEN) {
- PMD_DRV_LOG(ERR, "maximum packet length must be "
- "larger than %u and smaller than %u, "
- "as jumbo frame is disabled",
- (uint32_t)ETHER_MIN_LEN,
- (uint32_t)ETHER_MAX_LEN);
- return I40E_ERR_CONFIG;
- }
- }
+ hw->adapter_stopped = 0;
+ vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
dev->data->nb_tx_queues);
+ /* check and configure queue intr-vector mapping */
+ if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (!intr_handle->intr_vec) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec\n", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
if (i40evf_rx_init(dev) != 0){
PMD_DRV_LOG(ERR, "failed to do RX init");
return -1;
@@ -1532,7 +1862,11 @@ i40evf_dev_start(struct rte_eth_dev *dev)
goto err_mac;
}
- i40evf_enable_queues_intr(hw);
+	/* the VF doesn't allow interrupts other than Rx queue interrupts */
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ rte_intr_enable(intr_handle);
+
+ i40evf_enable_queues_intr(dev);
return 0;
err_mac:
@@ -1545,11 +1879,26 @@ static void
i40evf_dev_stop(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct ether_addr mac_addr;
PMD_INIT_FUNC_TRACE();
- i40evf_disable_queues_intr(hw);
i40evf_stop_queues(dev);
+ i40evf_disable_queues_intr(dev);
+ i40e_dev_clear_queues(dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+ /* Set mac addr */
+ (void)rte_memcpy(mac_addr.addr_bytes, hw->mac.addr,
+ sizeof(mac_addr.addr_bytes));
+ /* Delete mac addr of this vf */
+ i40evf_del_mac_addr(dev, &mac_addr);
}
static int
@@ -1645,7 +1994,22 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
+ dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+ dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -1668,12 +2032,23 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
}
static void
i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- memset(stats, 0, sizeof(*stats));
if (i40evf_get_statics(dev, stats))
PMD_DRV_LOG(ERR, "Get statics failed");
}
@@ -1684,20 +2059,78 @@ i40evf_dev_close(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
i40evf_dev_stop(dev);
+ hw->adapter_stopped = 1;
+ i40e_dev_free_queues(dev);
i40evf_reset_vf(hw);
i40e_shutdown_adminq(hw);
}
static int
+i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+ struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret;
+
+ if (!lut)
+ return -EINVAL;
+
+ if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, FALSE,
+ lut, lut_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
+ return ret;
+ }
+ } else {
+ uint32_t *lut_dw = (uint32_t *)lut;
+ uint16_t i, lut_size_dw = lut_size / 4;
+
+ for (i = 0; i < lut_size_dw; i++)
+ lut_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HLUT(i));
+ }
+
+ return 0;
+}
+
+static int
+i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+ struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret;
+
+ if (!vsi || !lut)
+ return -EINVAL;
+
+ if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE,
+ lut, lut_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
+ return ret;
+ }
+ } else {
+ uint32_t *lut_dw = (uint32_t *)lut;
+ uint16_t i, lut_size_dw = lut_size / 4;
+
+ for (i = 0; i < lut_size_dw; i++)
+ I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
+ I40EVF_WRITE_FLUSH(hw);
+ }
+
+ return 0;
+}
+
+static int
i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t lut, l;
- uint16_t i, j;
- uint16_t idx, shift;
- uint8_t mask;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint8_t *lut;
+ uint16_t i, idx, shift;
+ int ret;
if (reta_size != ETH_RSS_RETA_SIZE_64) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
@@ -1706,29 +2139,26 @@ i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
return -EINVAL;
}
- for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+ lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+ ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
+ if (ret)
+ goto out;
+ for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
- mask = (uint8_t)((reta_conf[idx].mask >> shift) &
- I40E_4_BIT_MASK);
- if (!mask)
- continue;
- if (mask == I40E_4_BIT_MASK)
- l = 0;
- else
- l = I40E_READ_REG(hw, I40E_VFQF_HLUT(i >> 2));
-
- for (j = 0, lut = 0; j < I40E_4_BIT_WIDTH; j++) {
- if (mask & (0x1 << j))
- lut |= reta_conf[idx].reta[shift + j] <<
- (CHAR_BIT * j);
- else
- lut |= l & (I40E_8_BIT_MASK << (CHAR_BIT * j));
- }
- I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
+ if (reta_conf[idx].mask & (1ULL << shift))
+ lut[i] = reta_conf[idx].reta[shift];
}
+ ret = i40evf_set_rss_lut(&vf->vsi, lut, reta_size);
- return 0;
+out:
+ rte_free(lut);
+
+ return ret;
}
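The idx/shift arithmetic above maps a flat RETA index onto rte_eth_rss_reta_entry64 groups; a standalone sketch, assuming the DPDK group size of 64:

#include <stdio.h>

#define RETA_GROUP_SIZE 64   /* matches DPDK's RTE_RETA_GROUP_SIZE */

int main(void)
{
	/* flat index i -> (group, slot within group) */
	for (int i = 60; i < 68; i++)
		printf("reta[%d] -> conf[%d].reta[%d]\n",
		       i, i / RETA_GROUP_SIZE, i % RETA_GROUP_SIZE);
	return 0;
}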
static int
@@ -1736,11 +2166,10 @@ i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t lut;
- uint16_t i, j;
- uint16_t idx, shift;
- uint8_t mask;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint16_t i, idx, shift;
+ uint8_t *lut;
+ int ret;
if (reta_size != ETH_RSS_RETA_SIZE_64) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
@@ -1749,42 +2178,104 @@ i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
return -EINVAL;
}
- for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+ lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+
+ ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
+ if (ret)
+ goto out;
+ for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
- mask = (uint8_t)((reta_conf[idx].mask >> shift) &
- I40E_4_BIT_MASK);
- if (!mask)
- continue;
-
- lut = I40E_READ_REG(hw, I40E_VFQF_HLUT(i >> 2));
- for (j = 0; j < I40E_4_BIT_WIDTH; j++) {
- if (mask & (0x1 << j))
- reta_conf[idx].reta[shift + j] =
- ((lut >> (CHAR_BIT * j)) &
- I40E_8_BIT_MASK);
- }
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = lut[i];
}
- return 0;
+out:
+ rte_free(lut);
+
+ return ret;
}
static int
-i40evf_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
+i40evf_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
{
- uint32_t *hash_key;
- uint8_t hash_key_len;
- uint64_t rss_hf, hena;
+ struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret = 0;
- hash_key = (uint32_t *)(rss_conf->rss_key);
- hash_key_len = rss_conf->rss_key_len;
- if (hash_key != NULL && hash_key_len >=
- (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ if (!key || key_len == 0) {
+ PMD_DRV_LOG(DEBUG, "No key to be configured");
+ return 0;
+ } else if (key_len != (I40E_VFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
+ return -EINVAL;
+ }
+
+ if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ struct i40e_aqc_get_set_rss_key_data *key_dw =
+ (struct i40e_aqc_get_set_rss_key_data *)key;
+
+ ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to configure RSS key "
+ "via AQ");
+ } else {
+ uint32_t *hash_key = (uint32_t *)key;
uint16_t i;
for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
I40E_WRITE_REG(hw, I40E_VFQF_HKEY(i), hash_key[i]);
+ I40EVF_WRITE_FLUSH(hw);
+ }
+
+ return ret;
+}
+
+static int
+i40evf_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
+{
+ struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret;
+
+ if (!key || !key_len)
+ return -EINVAL;
+
+ if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
+ (struct i40e_aqc_get_set_rss_key_data *)key);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
+ return ret;
+ }
+ } else {
+ uint32_t *key_dw = (uint32_t *)key;
+ uint16_t i;
+
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ key_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HKEY(i));
}
+ *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
+
+ return 0;
+}
+
+static int
+i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_hw *hw = I40E_VF_TO_HW(vf);
+ uint64_t rss_hf, hena;
+ int ret;
+
+ ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+ if (ret)
+ return ret;
rss_hf = rss_conf->rss_hf;
hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
@@ -1818,6 +2309,7 @@ i40evf_config_rss(struct i40e_vf *vf)
struct i40e_hw *hw = I40E_VF_TO_HW(vf);
struct rte_eth_rss_conf rss_conf;
uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+ uint16_t num;
if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
i40evf_disable_rss(vf);
@@ -1825,9 +2317,10 @@ i40evf_config_rss(struct i40e_vf *vf)
return 0;
}
+ num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF);
 	/* Fill out the lookup table */
for (i = 0, j = 0; i < nb_q; i++, j++) {
- if (j >= vf->num_queue_pairs)
+ if (j >= num)
j = 0;
lut = (lut << 8) | j;
if ((i & 3) == 3)
@@ -1841,21 +2334,24 @@ i40evf_config_rss(struct i40e_vf *vf)
return 0;
}
- if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < nb_q) {
+ if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
+ (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
/* Calculate the default hash key */
for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
rss_key_default[i] = (uint32_t)rte_rand();
rss_conf.rss_key = (uint8_t *)rss_key_default;
- rss_conf.rss_key_len = nb_q;
+ rss_conf.rss_key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
}
- return i40evf_hw_rss_hash_set(hw, &rss_conf);
+ return i40evf_hw_rss_hash_set(vf, &rss_conf);
}
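The fill loop earlier in this function packs four one-byte LUT entries into each 32-bit HLUT register while cycling queue indices 0..num-1; a standalone sketch of the same packing (queue count assumed):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t lut = 0;
	int num = 3;                       /* assumed Rx queue count */

	/* same packing as i40evf_config_rss(): shift in one byte per
	 * entry, flush every fourth entry to the next HLUT register */
	for (int i = 0, j = 0; i < 16; i++, j++) {
		if (j >= num)
			j = 0;
		lut = (lut << 8) | j;
		if ((i & 3) == 3)
			printf("HLUT[%d] = 0x%08x\n", i >> 2, lut);
	}
	return 0;
}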
static int
i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
uint64_t hena;
@@ -1872,23 +2368,20 @@ i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
if (rss_hf == 0) /* Disable RSS */
return -EINVAL;
- return i40evf_hw_rss_hash_set(hw, rss_conf);
+ return i40evf_hw_rss_hash_set(vf, rss_conf);
}
static int
i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
uint64_t hena;
- uint16_t i;
- if (hash_key) {
- for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
- hash_key[i] = I40E_READ_REG(hw, I40E_VFQF_HKEY(i));
- rss_conf->rss_key_len = i * sizeof(uint32_t);
- }
+ i40evf_get_rss_key(&vf->vsi, rss_conf->rss_key,
+ &rss_conf->rss_key_len);
+
hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
rss_conf->rss_hf = i40e_parse_hena(hena);
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_fdir.c b/src/dpdk22/drivers/net/i40e/i40e_fdir.c
index 4b209e18..9ad6981c 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e_fdir.c
+++ b/src/dpdk22/drivers/net/i40e/i40e_fdir.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,13 +44,14 @@
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
+#include <rte_arp.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include "i40e_logs.h"
-#include "i40e/i40e_type.h"
+#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
@@ -61,7 +62,7 @@
#define I40E_FDIR_PKT_LEN 512
#define I40E_FDIR_IP_DEFAULT_LEN 420
-#define I40E_FDIR_IP_DEFAULT_TTL 0xFF
+#define I40E_FDIR_IP_DEFAULT_TTL 0x40
#define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
#define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
#define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60300000
@@ -94,17 +95,18 @@
I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
I40E_PRTQF_FLX_PIT_DEST_OFF_MASK))
-#define I40E_FDIR_FLOW_TYPES ( \
- (1 << RTE_ETH_FLOW_TYPE_UDPV4) | \
- (1 << RTE_ETH_FLOW_TYPE_TCPV4) | \
- (1 << RTE_ETH_FLOW_TYPE_SCTPV4) | \
- (1 << RTE_ETH_FLOW_TYPE_IPV4_OTHER) | \
- (1 << RTE_ETH_FLOW_TYPE_FRAG_IPV4) | \
- (1 << RTE_ETH_FLOW_TYPE_UDPV6) | \
- (1 << RTE_ETH_FLOW_TYPE_TCPV6) | \
- (1 << RTE_ETH_FLOW_TYPE_SCTPV6) | \
- (1 << RTE_ETH_FLOW_TYPE_IPV6_OTHER) | \
- (1 << RTE_ETH_FLOW_TYPE_FRAG_IPV6))
+#define I40E_FDIR_FLOWS ( \
+ (1 << RTE_ETH_FLOW_FRAG_IPV4) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+ (1 << RTE_ETH_FLOW_FRAG_IPV6) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
+ (1 << RTE_ETH_FLOW_L2_PAYLOAD))
#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
@@ -273,11 +275,8 @@ i40e_fdir_setup(struct i40e_pf *pf)
goto fail_mem;
}
pf->fdir.prg_pkt = mz->addr;
-#ifdef RTE_LIBRTE_XEN_DOM0
pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
-#else
- pf->fdir.dma_addr = (uint64_t)mz->phys_addr;
-#endif
+
pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
vsi->base_queue);
@@ -366,7 +365,9 @@ i40e_init_flx_pld(struct i40e_pf *pf)
/* initialize the masks */
for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
- pctype <= I40E_FILTER_PCTYPE_FRAG_IPV6; pctype++) {
+ pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
+ if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)pctype))
+ continue;
pf->fdir.flex_mask[pctype].word_mask = 0;
I40E_WRITE_REG(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
@@ -402,28 +403,27 @@ i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
while (j < I40E_FDIR_MAX_FLEX_LEN) {
size = 1;
- for (; j < I40E_FDIR_MAX_FLEX_LEN; j++) {
+ for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
if (src_offset[j + 1] == src_offset[j] + 1)
size++;
- else {
- src_tmp = src_offset[j] + 1 - size;
- /* the flex_pit need to be sort by scr_offset */
- for (i = 0; i < num; i++) {
- if (src_tmp < flex_pit[i].src_offset)
- break;
- }
- /* if insert required, move backward */
- for (k = num; k > i; k--)
- flex_pit[k] = flex_pit[k - 1];
- /* insert */
- flex_pit[i].dst_offset = j + 1 - size;
- flex_pit[i].src_offset = src_tmp;
- flex_pit[i].size = size;
- j++;
- num++;
+ else
+ break;
+ }
+ src_tmp = src_offset[j] + 1 - size;
+ /* the flex_pit entries need to be sorted by src_offset */
+ for (i = 0; i < num; i++) {
+ if (src_tmp < flex_pit[i].src_offset)
break;
- }
}
+ /* if insert required, move backward */
+ for (k = num; k > i; k--)
+ flex_pit[k] = flex_pit[k - 1];
+ /* insert */
+ flex_pit[i].dst_offset = j + 1 - size;
+ flex_pit[i].src_offset = src_tmp;
+ flex_pit[i].size = size;
+ j++;
+ num++;
}
return num;
}
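/*
 * Worked example for the grouping loop above (offsets are illustrative):
 * with a src_offset array beginning {2, 3, 4, 10, 11, ...}, each run of
 * consecutive offsets collapses into one flex_pit entry, and the entries
 * stay sorted by src_offset:
 *
 *	flex_pit[0] = { .src_offset = 2,  .size = 3, .dst_offset = 0 };
 *	flex_pit[1] = { .src_offset = 10, .size = 2, .dst_offset = 3 };
 *
 * so the function returns num = 2 for these two runs.
 */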
@@ -498,13 +498,13 @@ i40e_check_fdir_flex_conf(const struct rte_eth_fdir_flex_conf *conf)
}
/* check flex mask setting configuration */
- if (conf->nb_flexmasks > RTE_ETH_FLOW_TYPE_FRAG_IPV6) {
+ if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
PMD_DRV_LOG(ERR, "invalid number of flex masks.");
return -EINVAL;
}
for (i = 0; i < conf->nb_flexmasks; i++) {
flex_mask = &conf->flex_mask[i];
- if (!I40E_VALID_FLOW_TYPE(flex_mask->flow_type)) {
+ if (!I40E_VALID_FLOW(flex_mask->flow_type)) {
PMD_DRV_LOG(WARNING, "invalid flow type.");
return -EINVAL;
}
@@ -552,7 +552,7 @@ i40e_set_flx_pld_cfg(struct i40e_pf *pf,
memset(flex_pit, 0, sizeof(flex_pit));
num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit);
- for (i = 0; i < num; i++) {
+ for (i = 0; i < RTE_MIN(num, RTE_DIM(flex_pit)); i++) {
field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
/* record the info in fdir structure */
pf->fdir.flex_set[field_idx].src_offset =
@@ -677,11 +677,8 @@ i40e_fdir_configure(struct rte_eth_dev *dev)
i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
/* configure flex mask*/
for (i = 0; i < conf->nb_flexmasks; i++) {
- pctype = i40e_flowtype_to_pctype(
- conf->flex_mask[i].flow_type);
- i40e_set_flex_mask_on_pctype(pf,
- pctype,
- &conf->flex_mask[i]);
+ pctype = i40e_flowtype_to_pctype(conf->flex_mask[i].flow_type);
+ i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
}
return ret;
@@ -695,31 +692,34 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
struct ipv4_hdr *ip;
struct ipv6_hdr *ip6;
static const uint8_t next_proto[] = {
- [RTE_ETH_FLOW_TYPE_UDPV4] = IPPROTO_UDP,
- [RTE_ETH_FLOW_TYPE_TCPV4] = IPPROTO_TCP,
- [RTE_ETH_FLOW_TYPE_SCTPV4] = IPPROTO_SCTP,
- [RTE_ETH_FLOW_TYPE_IPV4_OTHER] = IPPROTO_IP,
- [RTE_ETH_FLOW_TYPE_FRAG_IPV4] = IPPROTO_IP,
- [RTE_ETH_FLOW_TYPE_UDPV6] = IPPROTO_UDP,
- [RTE_ETH_FLOW_TYPE_TCPV6] = IPPROTO_TCP,
- [RTE_ETH_FLOW_TYPE_SCTPV6] = IPPROTO_SCTP,
- [RTE_ETH_FLOW_TYPE_IPV6_OTHER] = IPPROTO_NONE,
- [RTE_ETH_FLOW_TYPE_FRAG_IPV6] = IPPROTO_NONE,
+ [RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
+ [RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
};
switch (fdir_input->flow_type) {
- case RTE_ETH_FLOW_TYPE_UDPV4:
- case RTE_ETH_FLOW_TYPE_TCPV4:
- case RTE_ETH_FLOW_TYPE_SCTPV4:
- case RTE_ETH_FLOW_TYPE_IPV4_OTHER:
- case RTE_ETH_FLOW_TYPE_FRAG_IPV4:
+ case RTE_ETH_FLOW_L2_PAYLOAD:
+ ether->ether_type = fdir_input->flow.l2_flow.ether_type;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ case RTE_ETH_FLOW_FRAG_IPV4:
ip = (struct ipv4_hdr *)(raw_pkt + sizeof(struct ether_hdr));
ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
 /* set len to the default value */
ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
- ip->time_to_live = fdir_input->ttl;
+ ip->time_to_live = I40E_FDIR_IP_DEFAULT_TTL;
/*
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
@@ -727,16 +727,13 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
*/
ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
- if (fdir_input->flow_type == RTE_ETH_FLOW_TYPE_IPV4_OTHER) {
- ip->next_proto_id = fdir_input->flow.ip4_flow.l4_proto;
- } else
- ip->next_proto_id = next_proto[fdir_input->flow_type];
+ ip->next_proto_id = next_proto[fdir_input->flow_type];
break;
- case RTE_ETH_FLOW_TYPE_UDPV6:
- case RTE_ETH_FLOW_TYPE_TCPV6:
- case RTE_ETH_FLOW_TYPE_SCTPV6:
- case RTE_ETH_FLOW_TYPE_IPV6_OTHER:
- case RTE_ETH_FLOW_TYPE_FRAG_IPV6:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ case RTE_ETH_FLOW_FRAG_IPV6:
ip6 = (struct ipv6_hdr *)(raw_pkt + sizeof(struct ether_hdr));
ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
@@ -744,7 +741,7 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW);
ip6->payload_len =
rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
- ip6->hop_limits = fdir_input->ttl;
+ ip6->hop_limits = I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
/*
* The source and destination fields in the transmitted packet
@@ -790,7 +787,7 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
/* fill the L4 head */
switch (fdir_input->flow_type) {
- case RTE_ETH_FLOW_TYPE_UDPV4:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
udp = (struct udp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
sizeof(struct ipv4_hdr));
payload = (unsigned char *)udp + sizeof(struct udp_hdr);
@@ -804,7 +801,7 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
break;
- case RTE_ETH_FLOW_TYPE_TCPV4:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
tcp = (struct tcp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
sizeof(struct ipv4_hdr));
payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
@@ -818,21 +815,28 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
break;
- case RTE_ETH_FLOW_TYPE_SCTPV4:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
sctp = (struct sctp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
sizeof(struct ipv4_hdr));
payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
+ sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
break;
- case RTE_ETH_FLOW_TYPE_IPV4_OTHER:
- case RTE_ETH_FLOW_TYPE_FRAG_IPV4:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ case RTE_ETH_FLOW_FRAG_IPV4:
payload = raw_pkt + sizeof(struct ether_hdr) +
sizeof(struct ipv4_hdr);
set_idx = I40E_FLXPLD_L3_IDX;
break;
- case RTE_ETH_FLOW_TYPE_UDPV6:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
udp = (struct udp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
sizeof(struct ipv6_hdr));
payload = (unsigned char *)udp + sizeof(struct udp_hdr);
@@ -846,7 +850,7 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
break;
- case RTE_ETH_FLOW_TYPE_TCPV6:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
tcp = (struct tcp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
sizeof(struct ipv6_hdr));
payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
@@ -860,19 +864,37 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
break;
- case RTE_ETH_FLOW_TYPE_SCTPV6:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
sctp = (struct sctp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
sizeof(struct ipv6_hdr));
payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
+ sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
break;
- case RTE_ETH_FLOW_TYPE_IPV6_OTHER:
- case RTE_ETH_FLOW_TYPE_FRAG_IPV6:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ case RTE_ETH_FLOW_FRAG_IPV6:
payload = raw_pkt + sizeof(struct ether_hdr) +
sizeof(struct ipv6_hdr);
set_idx = I40E_FLXPLD_L3_IDX;
break;
+ case RTE_ETH_FLOW_L2_PAYLOAD:
+ payload = raw_pkt + sizeof(struct ether_hdr);
+ /*
+ * ARP packets are a special case: their payload
+ * starts only after the whole ARP header.
+ */
+ if (fdir_input->flow.l2_flow.ether_type ==
+ rte_cpu_to_be_16(ETHER_TYPE_ARP))
+ payload += sizeof(struct arp_hdr);
+ set_idx = I40E_FLXPLD_L2_IDX;
+ break;
default:
PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
return -EINVAL;
@@ -989,7 +1011,7 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
return -ENOTSUP;
}
- if (!I40E_VALID_FLOW_TYPE(filter->input.flow_type)) {
+ if (!I40E_VALID_FLOW(filter->input.flow_type)) {
PMD_DRV_LOG(ERR, "invalid flow_type input.");
return -EINVAL;
}
@@ -997,6 +1019,11 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "Invalid queue ID");
return -EINVAL;
}
+ if (filter->input.flow_ext.is_vf &&
+ filter->input.flow_ext.dst_id >= pf->vf_num) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID");
+ return -EINVAL;
+ }
memset(pkt, 0, I40E_FDIR_PKT_LEN);
@@ -1036,7 +1063,7 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
volatile struct i40e_tx_desc *txdp;
volatile struct i40e_filter_program_desc *fdirdp;
uint32_t td_cmd;
- uint16_t i;
+ uint16_t vsi_id, i;
uint8_t dest;
PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
@@ -1058,9 +1085,13 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
I40E_TXD_FLTR_QW0_PCTYPE_MASK);
- /* Use LAN VSI Id by default */
+ if (filter->input.flow_ext.is_vf)
+ vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
+ else
+ /* Use LAN VSI Id by default */
+ vsi_id = pf->main_vsi->vsi_id;
fdirdp->qindex_flex_ptype_vsi |=
- rte_cpu_to_le_32((pf->main_vsi->vsi_id <<
+ rte_cpu_to_le_32(((uint32_t)vsi_id <<
I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
@@ -1078,8 +1109,16 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
- else
+ else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT)
dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+ else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU)
+ dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
+ else {
+ PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
+ " unsupported fdir behavior.");
+ return -EINVAL;
+ }
+
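/*
 * Summary of the behavior-to-destination mapping used just above:
 *   RTE_ETH_FDIR_REJECT   -> DROP_PACKET          (discard the match)
 *   RTE_ETH_FDIR_ACCEPT   -> DIRECT_PACKET_QINDEX (steer to the rx queue)
 *   RTE_ETH_FDIR_PASSTHRU -> DIRECT_PACKET_OTHER  (continue normal
 *                                                  classification)
 */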
fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
I40E_TXD_FLTR_QW1_DEST_SHIFT) &
I40E_TXD_FLTR_QW1_DEST_MASK);
@@ -1117,7 +1156,8 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
for (i = 0; i < I40E_FDIR_WAIT_COUNT; i++) {
rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US);
- if (txdp->cmd_type_offset_bsz &
+ if ((txdp->cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
break;
}
@@ -1220,12 +1260,12 @@ i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
{
struct i40e_fdir_flex_mask *mask;
struct rte_eth_fdir_flex_mask *ptr = flex_mask;
- enum rte_eth_flow_type flow_type;
+ uint16_t flow_type;
uint8_t i, j;
uint16_t off_bytes, mask_tmp;
for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
- i <= I40E_FILTER_PCTYPE_FRAG_IPV6;
+ i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
i++) {
mask = &pf->fdir.flex_mask[i];
if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)i))
@@ -1276,7 +1316,7 @@ i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
fdir->best_spc =
(uint32_t)hw->func_caps.fd_filters_best_effort;
fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
- fdir->flow_types_mask[0] = I40E_FDIR_FLOW_TYPES;
+ fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
fdir->flex_payload_unit = sizeof(uint16_t);
fdir->flex_bitmask_unit = sizeof(uint16_t);
fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
@@ -1316,6 +1356,33 @@ i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
}
+static int
+i40e_fdir_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_fdir_filter_info *info)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ int ret = 0;
+
+ if (!info) {
+ PMD_DRV_LOG(ERR, "Invalid pointer");
+ return -EFAULT;
+ }
+
+ switch (info->info_type) {
+ case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT:
+ ret = i40e_filter_inset_select(hw,
+ &(info->info.input_set_conf), RTE_ETH_FILTER_FDIR);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported",
+ info->info_type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
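/*
 * Sketch of how an application reaches the new RTE_ETH_FILTER_SET path
 * through the generic filter API, which dispatches to i40e_fdir_ctrl_func
 * (the port id and helper name are illustrative assumptions):
 */
static int fdir_select_input_set(uint8_t port_id)
{
	struct rte_eth_fdir_filter_info info;

	memset(&info, 0, sizeof(info));
	info.info_type = RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT;
	/* info.info.input_set_conf would be filled in by the caller. */

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_SET, &info);
}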
/*
* i40e_fdir_ctrl_func - deal with all operations on flow director.
* @pf: board private structure
@@ -1356,6 +1423,10 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_INFO:
i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
break;
+ case RTE_ETH_FILTER_SET:
+ ret = i40e_fdir_filter_set(dev,
+ (struct rte_eth_fdir_filter_info *)arg);
+ break;
case RTE_ETH_FILTER_STATS:
i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
break;
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_logs.h b/src/dpdk22/drivers/net/i40e/i40e_logs.h
index 63c9c991..e042e242 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e_logs.h
+++ b/src/dpdk22/drivers/net/i40e/i40e_logs.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,8 +34,8 @@
#ifndef _I40E_LOGS_H_
#define _I40E_LOGS_H_
-#define PMD_INIT_LOG(level, fmt, args...) RTE_LOG(level, PMD," " fmt "\n", ##args)
-
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
#ifdef RTE_LIBRTE_I40E_DEBUG_INIT
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_pf.c b/src/dpdk22/drivers/net/i40e/i40e_pf.c
index cbb2dcc1..cbf4e5bb 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e_pf.c
+++ b/src/dpdk22/drivers/net/i40e/i40e_pf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,9 +49,9 @@
#include <rte_memcpy.h>
#include "i40e_logs.h"
-#include "i40e/i40e_prototype.h"
-#include "i40e/i40e_adminq_cmd.h"
-#include "i40e/i40e_type.h"
+#include "base/i40e_prototype.h"
+#include "base/i40e_adminq_cmd.h"
+#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
@@ -547,13 +547,10 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
goto send_msg;
}
- if (irqmap->vecmap[0].vector_id == 0) {
- PMD_DRV_LOG(ERR, "DPDK host don't support use IRQ0");
- ret = I40E_ERR_PARAM;
- goto send_msg;
- }
/* This MSIX intr store the intr in VF range */
vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
+ vf->vsi->nb_msix = irqmap->num_vectors;
+ vf->vsi->nb_used_qps = vf->vsi->nb_qps;
/* Don't care how the TX/RX queue mapping with this vector.
* Link all VF RX queues together. Only did mapping work.
@@ -1061,3 +1058,37 @@ fail:
return ret;
}
+
+int
+i40e_pf_host_uninit(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t val;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /**
+ * Return if SR-IOV is not enabled, no VF number is configured or
+ * no queue is assigned.
+ */
+ if ((!hw->func_caps.sr_iov_1_1) ||
+ (pf->vf_num == 0) ||
+ (pf->vf_nb_qps == 0))
+ return I40E_SUCCESS;
+
+ /* free memory to store VF structure */
+ rte_free(pf->vfs);
+ pf->vfs = NULL;
+
+ /* Disable irq0 for VFR event */
+ i40e_pf_disable_irq0(hw);
+
+ /* Disable VF link status interrupt */
+ val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
+ val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
+ I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
+ I40E_WRITE_FLUSH(hw);
+
+ return I40E_SUCCESS;
+}
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_pf.h b/src/dpdk22/drivers/net/i40e/i40e_pf.h
index 8bf83c21..9c01829a 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e_pf.h
+++ b/src/dpdk22/drivers/net/i40e/i40e_pf.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -123,5 +123,6 @@ void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
__rte_unused uint32_t retval,
uint8_t *msg, uint16_t msglen);
int i40e_pf_host_init(struct rte_eth_dev *dev);
+int i40e_pf_host_uninit(struct rte_eth_dev *dev);
#endif /* _I40E_PF_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_rxtx.c b/src/dpdk22/drivers/net/i40e/i40e_rxtx.c
index 2beae3c8..39d94eca 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e_rxtx.c
+++ b/src/dpdk22/drivers/net/i40e/i40e_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -52,56 +52,80 @@
#include <rte_udp.h>
#include "i40e_logs.h"
-#include "i40e/i40e_prototype.h"
-#include "i40e/i40e_type.h"
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
-#define I40E_MIN_RING_DESC 64
-#define I40E_MAX_RING_DESC 4096
-#define I40E_ALIGN 128
#define DEFAULT_TX_RS_THRESH 32
#define DEFAULT_TX_FREE_THRESH 32
#define I40E_MAX_PKT_TYPE 256
-#define I40E_VLAN_TAG_SIZE 4
#define I40E_TX_MAX_BURST 32
#define I40E_DMA_MEM_ALIGN 4096
+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define I40E_RING_BASE_ALIGN 128
+
#define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
ETH_TXQ_FLAGS_NOOFFLOADS)
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+#define I40E_TX_CKSUM_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_OUTER_IP_CKSUM)
+
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
- const char *ring_name,
- uint16_t queue_id,
- uint32_t ring_size,
- int socket_id);
static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+static inline void
+i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
+{
+ if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
+ mb->ol_flags |= PKT_RX_VLAN_PKT;
+ mb->vlan_tci =
+ rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
+ PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
+ rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1));
+ } else {
+ mb->vlan_tci = 0;
+ }
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
+ (1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
+ mb->ol_flags |= PKT_RX_QINQ_PKT;
+ mb->vlan_tci_outer = mb->vlan_tci;
+ mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
+ PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+ rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1),
+ rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2));
+ } else {
+ mb->vlan_tci_outer = 0;
+ }
+#endif
+ PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
+ mb->vlan_tci, mb->vlan_tci_outer);
+}
+
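/*
 * Sketch: once the helper above has run in the RX path, applications can
 * tell single-tagged from double-tagged frames by the flags it sets (the
 * function name here is illustrative):
 */
static void show_vlan_tags(const struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_QINQ_PKT)
		printf("outer tci %u, inner tci %u\n",
		       m->vlan_tci_outer, m->vlan_tci);
	else if (m->ol_flags & PKT_RX_VLAN_PKT)
		printf("vlan tci %u\n", m->vlan_tci);
}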
/* Translate the rx descriptor status to pkt flags */
static inline uint64_t
i40e_rxd_status_to_pkt_flags(uint64_t qword)
{
uint64_t flags;
- /* Check if VLAN packet */
- flags = qword & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
- PKT_RX_VLAN_PKT : 0;
-
/* Check if RSS_HASH */
- flags |= (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
+ flags = (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
I40E_RX_DESC_FLTSTAT_RSS_HASH) ==
I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
@@ -146,272 +170,561 @@ i40e_rxd_error_to_pkt_flags(uint64_t qword)
return flags;
}
-/* Translate pkt types to pkt flags */
+/* Function to check and set the ieee1588 timesync index and get the
+ * appropriate flags.
+ */
+#ifdef RTE_LIBRTE_IEEE1588
static inline uint64_t
-i40e_rxd_ptype_to_pkt_flags(uint64_t qword)
-{
- uint8_t ptype = (uint8_t)((qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT);
- static const uint64_t ip_ptype_map[I40E_MAX_PKT_TYPE] = {
- 0, /* PTYPE 0 */
- 0, /* PTYPE 1 */
- 0, /* PTYPE 2 */
- 0, /* PTYPE 3 */
- 0, /* PTYPE 4 */
- 0, /* PTYPE 5 */
- 0, /* PTYPE 6 */
- 0, /* PTYPE 7 */
- 0, /* PTYPE 8 */
- 0, /* PTYPE 9 */
- 0, /* PTYPE 10 */
- 0, /* PTYPE 11 */
- 0, /* PTYPE 12 */
- 0, /* PTYPE 13 */
- 0, /* PTYPE 14 */
- 0, /* PTYPE 15 */
- 0, /* PTYPE 16 */
- 0, /* PTYPE 17 */
- 0, /* PTYPE 18 */
- 0, /* PTYPE 19 */
- 0, /* PTYPE 20 */
- 0, /* PTYPE 21 */
- PKT_RX_IPV4_HDR, /* PTYPE 22 */
- PKT_RX_IPV4_HDR, /* PTYPE 23 */
- PKT_RX_IPV4_HDR, /* PTYPE 24 */
- 0, /* PTYPE 25 */
- PKT_RX_IPV4_HDR, /* PTYPE 26 */
- PKT_RX_IPV4_HDR, /* PTYPE 27 */
- PKT_RX_IPV4_HDR, /* PTYPE 28 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 29 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 30 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 31 */
- 0, /* PTYPE 32 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 33 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 34 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 35 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 36 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 37 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 38 */
- 0, /* PTYPE 39 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 40 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 41 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 42 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 43 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 44 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 45 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 46 */
- 0, /* PTYPE 47 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 48 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 49 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 50 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 51 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 52 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 53 */
- 0, /* PTYPE 54 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 55 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 56 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 57 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 58 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 59 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 60 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 61 */
- 0, /* PTYPE 62 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 63 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 64 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 65 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 66 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 67 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 68 */
- 0, /* PTYPE 69 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 70 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 71 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 72 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 73 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 74 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 75 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 76 */
- 0, /* PTYPE 77 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 78 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 79 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 80 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 81 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 82 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 83 */
- 0, /* PTYPE 84 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 85 */
- PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 86 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 87 */
- PKT_RX_IPV6_HDR, /* PTYPE 88 */
- PKT_RX_IPV6_HDR, /* PTYPE 89 */
- PKT_RX_IPV6_HDR, /* PTYPE 90 */
- 0, /* PTYPE 91 */
- PKT_RX_IPV6_HDR, /* PTYPE 92 */
- PKT_RX_IPV6_HDR, /* PTYPE 93 */
- PKT_RX_IPV6_HDR, /* PTYPE 94 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 95 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 96 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 97 */
- 0, /* PTYPE 98 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 99 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 100 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 101 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 102 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 103 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 104 */
- 0, /* PTYPE 105 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 106 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 107 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 108 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 109 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 110 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 111 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 112 */
- 0, /* PTYPE 113 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 114 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 115 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 116 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 117 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 118 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 119 */
- 0, /* PTYPE 120 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 121 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 122 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 123 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 124 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 125 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 126 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 127 */
- 0, /* PTYPE 128 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 129 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 130 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 131 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 132 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 133 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 134 */
- 0, /* PTYPE 135 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 136 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 137 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 138 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 139 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 140 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 141 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 142 */
- 0, /* PTYPE 143 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 144 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 145 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 146 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 147 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 148 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 149 */
- 0, /* PTYPE 150 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 151 */
- PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 152 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 153 */
- 0, /* PTYPE 154 */
- 0, /* PTYPE 155 */
- 0, /* PTYPE 156 */
- 0, /* PTYPE 157 */
- 0, /* PTYPE 158 */
- 0, /* PTYPE 159 */
- 0, /* PTYPE 160 */
- 0, /* PTYPE 161 */
- 0, /* PTYPE 162 */
- 0, /* PTYPE 163 */
- 0, /* PTYPE 164 */
- 0, /* PTYPE 165 */
- 0, /* PTYPE 166 */
- 0, /* PTYPE 167 */
- 0, /* PTYPE 168 */
- 0, /* PTYPE 169 */
- 0, /* PTYPE 170 */
- 0, /* PTYPE 171 */
- 0, /* PTYPE 172 */
- 0, /* PTYPE 173 */
- 0, /* PTYPE 174 */
- 0, /* PTYPE 175 */
- 0, /* PTYPE 176 */
- 0, /* PTYPE 177 */
- 0, /* PTYPE 178 */
- 0, /* PTYPE 179 */
- 0, /* PTYPE 180 */
- 0, /* PTYPE 181 */
- 0, /* PTYPE 182 */
- 0, /* PTYPE 183 */
- 0, /* PTYPE 184 */
- 0, /* PTYPE 185 */
- 0, /* PTYPE 186 */
- 0, /* PTYPE 187 */
- 0, /* PTYPE 188 */
- 0, /* PTYPE 189 */
- 0, /* PTYPE 190 */
- 0, /* PTYPE 191 */
- 0, /* PTYPE 192 */
- 0, /* PTYPE 193 */
- 0, /* PTYPE 194 */
- 0, /* PTYPE 195 */
- 0, /* PTYPE 196 */
- 0, /* PTYPE 197 */
- 0, /* PTYPE 198 */
- 0, /* PTYPE 199 */
- 0, /* PTYPE 200 */
- 0, /* PTYPE 201 */
- 0, /* PTYPE 202 */
- 0, /* PTYPE 203 */
- 0, /* PTYPE 204 */
- 0, /* PTYPE 205 */
- 0, /* PTYPE 206 */
- 0, /* PTYPE 207 */
- 0, /* PTYPE 208 */
- 0, /* PTYPE 209 */
- 0, /* PTYPE 210 */
- 0, /* PTYPE 211 */
- 0, /* PTYPE 212 */
- 0, /* PTYPE 213 */
- 0, /* PTYPE 214 */
- 0, /* PTYPE 215 */
- 0, /* PTYPE 216 */
- 0, /* PTYPE 217 */
- 0, /* PTYPE 218 */
- 0, /* PTYPE 219 */
- 0, /* PTYPE 220 */
- 0, /* PTYPE 221 */
- 0, /* PTYPE 222 */
- 0, /* PTYPE 223 */
- 0, /* PTYPE 224 */
- 0, /* PTYPE 225 */
- 0, /* PTYPE 226 */
- 0, /* PTYPE 227 */
- 0, /* PTYPE 228 */
- 0, /* PTYPE 229 */
- 0, /* PTYPE 230 */
- 0, /* PTYPE 231 */
- 0, /* PTYPE 232 */
- 0, /* PTYPE 233 */
- 0, /* PTYPE 234 */
- 0, /* PTYPE 235 */
- 0, /* PTYPE 236 */
- 0, /* PTYPE 237 */
- 0, /* PTYPE 238 */
- 0, /* PTYPE 239 */
- 0, /* PTYPE 240 */
- 0, /* PTYPE 241 */
- 0, /* PTYPE 242 */
- 0, /* PTYPE 243 */
- 0, /* PTYPE 244 */
- 0, /* PTYPE 245 */
- 0, /* PTYPE 246 */
- 0, /* PTYPE 247 */
- 0, /* PTYPE 248 */
- 0, /* PTYPE 249 */
- 0, /* PTYPE 250 */
- 0, /* PTYPE 251 */
- 0, /* PTYPE 252 */
- 0, /* PTYPE 253 */
- 0, /* PTYPE 254 */
- 0, /* PTYPE 255 */
+i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword)
+{
+ uint64_t pkt_flags = 0;
+ uint16_t tsyn = (qword & (I40E_RXD_QW1_STATUS_TSYNVALID_MASK
+ | I40E_RXD_QW1_STATUS_TSYNINDX_MASK))
+ >> I40E_RX_DESC_STATUS_TSYNINDX_SHIFT;
+
+ if ((mb->packet_type & RTE_PTYPE_L2_MASK)
+ == RTE_PTYPE_L2_ETHER_TIMESYNC)
+ pkt_flags = PKT_RX_IEEE1588_PTP;
+ if (tsyn & 0x04) {
+ pkt_flags |= PKT_RX_IEEE1588_TMST;
+ mb->timesync = tsyn & 0x03;
+ }
+
+ return pkt_flags;
+}
+#endif
+
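/*
 * Sketch (IEEE1588 builds only): the RX paths below OR these flags into
 * ol_flags, so a PTP-aware application can recover the timestamp register
 * index the helper stored (the function name is illustrative):
 */
#ifdef RTE_LIBRTE_IEEE1588
static void check_ptp(const struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_IEEE1588_TMST)
		/* 'timesync' holds the RX timestamp register index (0-3). */
		printf("timestamped in register %u\n", m->timesync);
}
#endif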
+/* The hardware datasheet describes in detail what each ptype value means */
+static inline uint32_t
+i40e_rxd_pkt_type_mapping(uint8_t ptype)
+{
+ static const uint32_t ptype_table[UINT8_MAX + 1] __rte_cache_aligned = {
+ /* L2 types */
+ /* [0] reserved */
+ [1] = RTE_PTYPE_L2_ETHER,
+ [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
+ /* [3] - [5] reserved */
+ [6] = RTE_PTYPE_L2_ETHER_LLDP,
+ /* [7] - [10] reserved */
+ [11] = RTE_PTYPE_L2_ETHER_ARP,
+ /* [12] - [21] reserved */
+
+ /* Non tunneled IPv4 */
+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [25] reserved */
+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* IPv4 --> IPv4 */
+ [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [32] reserved */
+ [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> IPv6 */
+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [39] reserved */
+ [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN */
+ [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
+ [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [47] reserved */
+ [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
+ [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [54] reserved */
+ [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
+ [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [62] reserved */
+ [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [69] reserved */
+ [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
+ [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
+ [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [77] reserved */
+ [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
+ [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [84] reserved */
+ [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* Non tunneled IPv6 */
+ [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [91] reserved */
+ [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* IPv6 --> IPv4 */
+ [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [98] reserved */
+ [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> IPv6 */
+ [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [105] reserved */
+ [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN */
+ [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
+ [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [113] reserved */
+ [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
+ [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [120] reserved */
+ [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
+ [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [128] reserved */
+ [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [135] reserved */
+ [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
+ [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
+ [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [143] reserved */
+ [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
+ [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [150] reserved */
+ [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* All others reserved */
};
- return ip_ptype_map[ptype];
+ return ptype_table[ptype];
}
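/*
 * Sketch: the table above composes a single packet_type value from
 * RTE_PTYPE_* layer bits; consumers pick a layer back out with the
 * matching mask, e.g. (helper name is illustrative):
 */
static int is_tunneled_udp(uint32_t packet_type)
{
	return (packet_type & RTE_PTYPE_TUNNEL_MASK) != 0 &&
	       (packet_type & RTE_PTYPE_INNER_L4_MASK) ==
			RTE_PTYPE_INNER_L4_UDP;
}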
#define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03
@@ -460,26 +773,13 @@ static inline void
i40e_txd_enable_checksum(uint64_t ol_flags,
uint32_t *td_cmd,
uint32_t *td_offset,
- uint8_t l2_len,
- uint16_t l3_len,
- uint8_t outer_l2_len,
- uint16_t outer_l3_len,
+ union i40e_tx_offload tx_offload,
uint32_t *cd_tunneling)
{
- if (!l2_len) {
- PMD_DRV_LOG(DEBUG, "L2 length set to 0");
- return;
- }
-
- if (!l3_len) {
- PMD_DRV_LOG(DEBUG, "L3 length set to 0");
- return;
- }
-
/* UDP tunneling packet TX checksum offload */
- if (unlikely(ol_flags & PKT_TX_UDP_TUNNEL_PKT)) {
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
- *td_offset |= (outer_l2_len >> 1)
+ *td_offset |= (tx_offload.outer_l2_len >> 1)
<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
@@ -490,26 +790,35 @@ i40e_txd_enable_checksum(uint64_t ol_flags,
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
/* Now set the ctx descriptor fields */
- *cd_tunneling |= (outer_l3_len >> 2) <<
+ *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
- I40E_TXD_CTX_UDP_TUNNELING |
- (l2_len >> 1) <<
+ (tx_offload.l2_len >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
} else
- *td_offset |= (l2_len >> 1)
+ *td_offset |= (tx_offload.l2_len >> 1)
<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
/* Enable L3 checksum offloads */
- if (ol_flags & PKT_TX_IPV4_CSUM) {
+ if (ol_flags & PKT_TX_IP_CKSUM) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
- *td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ *td_offset |= (tx_offload.l3_len >> 2)
+ << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
} else if (ol_flags & PKT_TX_IPV4) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
- *td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ *td_offset |= (tx_offload.l3_len >> 2)
+ << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
} else if (ol_flags & PKT_TX_IPV6) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
- *td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ *td_offset |= (tx_offload.l3_len >> 2)
+ << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (tx_offload.l4_len >> 2)
+ << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ return;
}
/* Enable L4 checksum offloads */
@@ -574,8 +883,9 @@ i40e_xmit_cleanup(struct i40e_tx_queue *txq)
desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
- if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
- rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))) {
+ if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) {
PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
"(port=%d queue=%d)", desc_to_clean_to,
txq->port_id, txq->queue_id);
@@ -619,7 +929,7 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq)
"rxq->nb_rx_desc=%d",
rxq->rx_free_thresh, rxq->nb_rx_desc);
ret = -EINVAL;
- } else if (!(rxq->nb_rx_desc % rxq->rx_free_thresh) == 0) {
+ } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
"rxq->nb_rx_desc=%d, "
"rxq->rx_free_thresh=%d",
@@ -696,30 +1006,29 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
mb = rxep[j].mbuf;
qword1 = rte_le_to_cpu_64(\
rxdp[j].wb.qword1.status_error_len);
- rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
- mb->vlan_tci = rx_status &
- (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
- rte_le_to_cpu_16(\
- rxdp[j].wb.qword0.lo_dword.l2tag1) : 0;
+ mb->ol_flags = 0;
+ i40e_rxd_to_vlan_tci(mb, &rxdp[j]);
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
- pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
-
- mb->packet_type = (uint16_t)((qword1 &
- I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT);
+ mb->packet_type =
+ i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT));
if (pkt_flags & PKT_RX_RSS_HASH)
mb->hash.rss = rte_le_to_cpu_32(\
rxdp[j].wb.qword0.hi_dword.rss);
if (pkt_flags & PKT_RX_FDIR)
pkt_flags |= i40e_rxd_build_fdir(&rxdp[j], mb);
- mb->ol_flags = pkt_flags;
+#ifdef RTE_LIBRTE_IEEE1588
+ pkt_flags |= i40e_get_iee15888_flags(mb, qword1);
+#endif
+ mb->ol_flags |= pkt_flags;
+
}
for (j = 0; j < I40E_LOOK_AHEAD; j++)
@@ -778,6 +1087,10 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
rxdp = &rxq->rx_ring[alloc_idx];
for (i = 0; i < rxq->rx_free_thresh; i++) {
+ if (likely(i < (rxq->rx_free_thresh - 1)))
+ /* Prefetch next mbuf */
+ rte_prefetch0(rxep[i + 1].mbuf);
+
mb = rxep[i].mbuf;
rte_mbuf_refcnt_set(mb, 1);
mb->next = NULL;
@@ -786,7 +1099,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
mb->port = rxq->port_id;
dma_addr = rte_cpu_to_le_64(\
RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
- rxdp[i].read.hdr_addr = dma_addr;
+ rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
@@ -901,6 +1214,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
>> I40E_RXD_QW1_STATUS_SHIFT;
+
/* Check the DD bit first */
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
@@ -932,7 +1246,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxe->mbuf = nmb;
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
- rxdp->read.hdr_addr = dma_addr;
+ rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
@@ -945,22 +1259,23 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->pkt_len = rx_packet_len;
rxm->data_len = rx_packet_len;
rxm->port = rxq->port_id;
-
- rxm->vlan_tci = rx_status &
- (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
- rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
+ rxm->ol_flags = 0;
+ i40e_rxd_to_vlan_tci(rxm, &rxd);
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
- pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
- rxm->packet_type = (uint16_t)((qword1 & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT);
+ rxm->packet_type =
+ i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
if (pkt_flags & PKT_RX_RSS_HASH)
rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
if (pkt_flags & PKT_RX_FDIR)
pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
- rxm->ol_flags = pkt_flags;
+#ifdef RTE_LIBRTE_IEEE1588
+ pkt_flags |= i40e_get_iee15888_flags(rxm, qword1);
+#endif
+ rxm->ol_flags |= pkt_flags;
rx_pkts[nb_rx++] = rxm;
}
@@ -1010,6 +1325,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
+
/* Check the DD bit */
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
@@ -1043,7 +1359,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
/* Set data buffer address and data length of the mbuf */
- rxdp->read.hdr_addr = dma_addr;
+ rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
@@ -1105,22 +1421,23 @@ i40e_recv_scattered_pkts(void *rx_queue,
}
first_seg->port = rxq->port_id;
- first_seg->vlan_tci = (rx_status &
- (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
- rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
+ first_seg->ol_flags = 0;
+ i40e_rxd_to_vlan_tci(first_seg, &rxd);
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
- pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
- first_seg->packet_type = (uint16_t)((qword1 &
- I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT);
+ first_seg->packet_type =
+ i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
if (pkt_flags & PKT_RX_RSS_HASH)
rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
if (pkt_flags & PKT_RX_FDIR)
pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
- first_seg->ol_flags = pkt_flags;
+#ifdef RTE_LIBRTE_IEEE1588
+ pkt_flags |= i40e_get_iee15888_flags(first_seg, qword1);
+#endif
+ first_seg->ol_flags |= pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
@@ -1158,18 +1475,48 @@ i40e_recv_scattered_pkts(void *rx_queue,
static inline uint16_t
i40e_calc_context_desc(uint64_t flags)
{
- uint64_t mask = 0ULL;
-
- if (flags | PKT_TX_UDP_TUNNEL_PKT)
- mask |= PKT_TX_UDP_TUNNEL_PKT;
+ static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TCP_SEG |
+ PKT_TX_QINQ_PKT;
#ifdef RTE_LIBRTE_IEEE1588
mask |= PKT_TX_IEEE1588_TMST;
#endif
- if (flags & mask)
- return 1;
- return 0;
+ return ((flags & mask) ? 1 : 0);
+}
+
+/* set i40e TSO context descriptor */
+static inline uint64_t
+i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
+{
+ uint64_t ctx_desc = 0;
+ uint32_t cd_cmd, hdr_len, cd_tso_len;
+
+ if (!tx_offload.l4_len) {
+ PMD_DRV_LOG(DEBUG, "L4 length set to 0");
+ return ctx_desc;
+ }
+
+ /**
+ * In the case of a non-tunneled packet, outer_l2_len and
+ * outer_l3_len must be 0 so that hdr_len below sums to the
+ * total header length.
+ */
+ hdr_len = tx_offload.outer_l2_len +
+ tx_offload.outer_l3_len +
+ tx_offload.l2_len +
+ tx_offload.l3_len +
+ tx_offload.l4_len;
+
+ cd_cmd = I40E_TX_CTX_DESC_TSO;
+ cd_tso_len = mbuf->pkt_len - hdr_len;
+ ctx_desc |= ((uint64_t)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((uint64_t)cd_tso_len <<
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((uint64_t)mbuf->tso_segsz <<
+ I40E_TXD_CTX_QW1_MSS_SHIFT);
+
+ return ctx_desc;
}
uint16_t
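To make i40e_set_tso_ctx above concrete: for a non-tunneled 9014-byte
TCP/IPv4 frame segmented at an MSS of 1460, the context descriptor carries
the TSO payload length (packet length minus all headers) and the MSS. A
standalone sketch of the arithmetic; the QW1 shift values and the TSO
command bit are assumptions mirroring the driver's i40e_type.h definitions:

#include <stdint.h>
#include <stdio.h>

#define CMD_SHIFT     4      /* assumed I40E_TXD_CTX_QW1_CMD_SHIFT */
#define TSO_LEN_SHIFT 30     /* assumed I40E_TXD_CTX_QW1_TSO_LEN_SHIFT */
#define MSS_SHIFT     50     /* assumed I40E_TXD_CTX_QW1_MSS_SHIFT */
#define CTX_DESC_TSO  0x1ULL /* assumed I40E_TX_CTX_DESC_TSO */

int main(void)
{
    uint32_t l2 = 14, l3 = 20, l4 = 32;   /* TCP header with options */
    uint32_t pkt_len = 9014, mss = 1460;

    uint32_t hdr_len = l2 + l3 + l4;      /* 66, resent per segment */
    uint32_t tso_len = pkt_len - hdr_len; /* 8948 bytes to segment */

    uint64_t qw1 = (CTX_DESC_TSO << CMD_SHIFT) |
                   ((uint64_t)tso_len << TSO_LEN_SHIFT) |
                   ((uint64_t)mss << MSS_SHIFT);

    printf("hdr=%u payload=%u qw1=%#llx\n",
           hdr_len, tso_len, (unsigned long long)qw1);
    return 0;
}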
@@ -1190,15 +1537,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
uint32_t tx_flags;
uint32_t td_tag;
uint64_t ol_flags;
- uint8_t l2_len;
- uint16_t l3_len;
- uint8_t outer_l2_len;
- uint16_t outer_l3_len;
uint16_t nb_used;
uint16_t nb_ctx;
uint16_t tx_last;
uint16_t slen;
uint64_t buf_dma_addr;
+ union i40e_tx_offload tx_offload = {0};
txq = tx_queue;
sw_ring = txq->sw_ring;
@@ -1207,7 +1551,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txe = &sw_ring[tx_id];
/* Check if the descriptor ring needs to be cleaned. */
- if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh)
+ if (txq->nb_tx_free < txq->tx_free_thresh)
i40e_xmit_cleanup(txq);
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
@@ -1220,10 +1564,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
ol_flags = tx_pkt->ol_flags;
- l2_len = tx_pkt->l2_len;
- l3_len = tx_pkt->l3_len;
- outer_l2_len = tx_pkt->outer_l2_len;
- outer_l3_len = tx_pkt->outer_l3_len;
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
+ tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
/* Calculate the number of context descriptors needed. */
nb_ctx = i40e_calc_context_desc(ol_flags);
@@ -1258,9 +1604,9 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* Descriptor based VLAN insertion */
- if (ol_flags & PKT_TX_VLAN_PKT) {
+ if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
tx_flags |= tx_pkt->vlan_tci <<
- I40E_TX_FLAG_L2TAG1_SHIFT;
+ I40E_TX_FLAG_L2TAG1_SHIFT;
tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >>
@@ -1272,12 +1618,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Enable checksum offloading */
cd_tunneling_params = 0;
- i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
- l2_len, l3_len, outer_l2_len,
- outer_l3_len,
- &cd_tunneling_params);
+ if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) {
+ i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
+ tx_offload, &cd_tunneling_params);
+ }
- if (unlikely(nb_ctx)) {
+ if (nb_ctx) {
/* Setup TX context descriptor if required */
volatile struct i40e_tx_context_desc *ctx_txd =
(volatile struct i40e_tx_context_desc *)\
@@ -1292,17 +1638,43 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_pktmbuf_free_seg(txe->mbuf);
txe->mbuf = NULL;
}
-#ifdef RTE_LIBRTE_IEEE1588
- if (ol_flags & PKT_TX_IEEE1588_TMST)
+
+ /* TSO enabled means no timestamp */
+ if (ol_flags & PKT_TX_TCP_SEG)
cd_type_cmd_tso_mss |=
- ((uint64_t)I40E_TX_CTX_DESC_TSYN <<
- I40E_TXD_CTX_QW1_CMD_SHIFT);
+ i40e_set_tso_ctx(tx_pkt, tx_offload);
+ else {
+#ifdef RTE_LIBRTE_IEEE1588
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ cd_type_cmd_tso_mss |=
+ ((uint64_t)I40E_TX_CTX_DESC_TSYN <<
+ I40E_TXD_CTX_QW1_CMD_SHIFT);
#endif
+ }
+
ctx_txd->tunneling_params =
rte_cpu_to_le_32(cd_tunneling_params);
+ if (ol_flags & PKT_TX_QINQ_PKT) {
+ cd_l2tag2 = tx_pkt->vlan_tci_outer;
+ cd_type_cmd_tso_mss |=
+ ((uint64_t)I40E_TX_CTX_DESC_IL2TAG2 <<
+ I40E_TXD_CTX_QW1_CMD_SHIFT);
+ }
ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
ctx_txd->type_cmd_tso_mss =
rte_cpu_to_le_64(cd_type_cmd_tso_mss);
+
+ PMD_TX_LOG(DEBUG, "mbuf: %p, TCD[%u]:\n"
+ "tunneling_params: %#x;\n"
+ "l2tag2: %#hx;\n"
+ "rsvd: %#hx;\n"
+ "type_cmd_tso_mss: %#"PRIx64";\n",
+ tx_pkt, tx_id,
+ ctx_txd->tunneling_params,
+ ctx_txd->l2tag2,
+ ctx_txd->rsvd,
+ ctx_txd->type_cmd_tso_mss);
+
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
@@ -1320,6 +1692,16 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Setup TX Descriptor */
slen = m_seg->data_len;
buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+
+ PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
+ "buf_dma_addr: %#"PRIx64";\n"
+ "td_cmd: %#x;\n"
+ "td_offset: %#x;\n"
+ "td_len: %u;\n"
+ "td_tag: %#x;\n",
+ tx_pkt, tx_id, buf_dma_addr,
+ td_cmd, td_offset, slen, td_tag);
+
txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd,
td_offset, slen, td_tag);
@@ -1370,8 +1752,9 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
struct i40e_tx_entry *txep;
uint16_t i;
- if (!(txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
- rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
return 0;
txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
@@ -1399,9 +1782,6 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
return txq->tx_rs_thresh;
}
-#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
- I40E_TX_DESC_CMD_EOP)
-
/* Populate 4 descriptors with data from 4 mbufs */
static inline void
tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
@@ -1620,7 +2000,8 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
i40e_rx_queue_release_mbufs(rxq);
i40e_reset_rx_queue(rxq);
- }
+ } else
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
}
return err;
@@ -1649,6 +2030,7 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
i40e_rx_queue_release_mbufs(rxq);
i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
return 0;
@@ -1674,6 +2056,8 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (err)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
+ else
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
}
return err;
@@ -1703,6 +2087,7 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
i40e_tx_queue_release_mbufs(txq);
i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
return 0;
@@ -1719,10 +2104,13 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct i40e_vsi *vsi;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct i40e_rx_queue *rxq;
const struct rte_memzone *rz;
uint32_t ring_size;
- uint16_t len;
+ uint16_t len, i;
+ uint16_t base, bsf, tc_mapping;
int use_def_burst_func = 1;
if (hw->mac.type == I40E_MAC_VF) {
@@ -1737,9 +2125,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
"index exceeds the maximum");
return I40E_ERR_PARAM;
}
- if (((nb_desc * sizeof(union i40e_rx_desc)) % I40E_ALIGN) != 0 ||
- (nb_desc > I40E_MAX_RING_DESC) ||
- (nb_desc < I40E_MIN_RING_DESC)) {
+ if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+ (nb_desc > I40E_MAX_RING_DESC) ||
+ (nb_desc < I40E_MIN_RING_DESC)) {
PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
"invalid", nb_desc);
return I40E_ERR_PARAM;
@@ -1781,11 +2169,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
/* Allocate the maximum number of RX ring hardware descriptors. */
ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- rz = i40e_ring_dma_zone_reserve(dev,
- "rx_ring",
- queue_idx,
- ring_size,
- socket_id);
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!rz) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
@@ -1795,12 +2180,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
/* Zero all the descriptors in the ring. */
memset(rz->addr, 0, ring_size);
-#ifdef RTE_LIBRTE_XEN_DOM0
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
- rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
-
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
@@ -1827,13 +2207,12 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
- if (!use_def_burst_func && !dev->data->scattered_rx) {
+ if (!use_def_burst_func) {
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function will be "
"used on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
- dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
} else {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -1841,6 +2220,20 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
"or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
"not enabled on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
+ ad->rx_bulk_alloc_allowed = false;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (!(vsi->enabled_tc & (1 << i)))
+ continue;
+ tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+ base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+ bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+
+ if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
+ rxq->dcb_tc = i;
}
return 0;
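The new loop above assigns the queue to its DCB traffic class: each enabled
TC advertises a base queue offset and a bsf exponent inside tc_mapping, so
the TC owns queues [base, base + 2^bsf). A standalone sketch of the decode;
the shift and mask values are assumptions standing in for the
I40E_AQ_VSI_TC_QUE_* macros:

#include <stdint.h>
#include <stdio.h>

#define QUE_OFFSET_SHIFT 0
#define QUE_OFFSET_MASK  (0x1FFu << QUE_OFFSET_SHIFT)
#define QUE_NUMBER_SHIFT 9
#define QUE_NUMBER_MASK  (0x7u << QUE_NUMBER_SHIFT)

int main(void)
{
    uint16_t tc_mapping = (8 << QUE_OFFSET_SHIFT) | /* base queue 8 */
                          (2 << QUE_NUMBER_SHIFT);  /* bsf 2 -> 4 queues */

    uint16_t base = (tc_mapping & QUE_OFFSET_MASK) >> QUE_OFFSET_SHIFT;
    uint16_t bsf  = (tc_mapping & QUE_NUMBER_MASK) >> QUE_NUMBER_SHIFT;

    /* this TC owns queues [base, base + 2^bsf) = [8, 12) */
    printf("TC queues: [%u, %u)\n", base, base + (1u << bsf));
    return 0;
}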
@@ -1935,6 +2328,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
const struct rte_memzone *tz;
uint32_t ring_size;
uint16_t tx_rs_thresh, tx_free_thresh;
+ uint16_t i, base, bsf, tc_mapping;
if (hw->mac.type == I40E_MAC_VF) {
struct i40e_vf *vf =
@@ -1949,9 +2343,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
return I40E_ERR_PARAM;
}
- if (((nb_desc * sizeof(struct i40e_tx_desc)) % I40E_ALIGN) != 0 ||
- (nb_desc > I40E_MAX_RING_DESC) ||
- (nb_desc < I40E_MIN_RING_DESC)) {
+ if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+ (nb_desc > I40E_MAX_RING_DESC) ||
+ (nb_desc < I40E_MIN_RING_DESC)) {
PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
"invalid", nb_desc);
return I40E_ERR_PARAM;
@@ -2049,11 +2443,8 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
/* Allocate TX hardware ring descriptors. */
ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- tz = i40e_ring_dma_zone_reserve(dev,
- "tx_ring",
- queue_idx,
- ring_size,
- socket_id);
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!tz) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
@@ -2078,11 +2469,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
-#ifdef RTE_LIBRTE_XEN_DOM0
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
- txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/* Allocate software ring */
@@ -2102,13 +2489,19 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
dev->data->tx_queues[queue_idx] = txq;
/* Use a simple TX queue without offloads or multi segs if possible */
- if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) &&
- (txq->tx_rs_thresh >= I40E_TX_MAX_BURST)) {
- PMD_INIT_LOG(INFO, "Using simple tx path");
- dev->tx_pkt_burst = i40e_xmit_pkts_simple;
- } else {
- PMD_INIT_LOG(INFO, "Using full-featured tx path");
- dev->tx_pkt_burst = i40e_xmit_pkts;
+ i40e_set_tx_function_flag(dev, txq);
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (!(vsi->enabled_tc & (1 << i)))
+ continue;
+ tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+ base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+ bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+
+ if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
+ txq->dcb_tc = i;
}
return 0;
@@ -2129,47 +2522,21 @@ i40e_dev_tx_queue_release(void *txq)
rte_free(q);
}
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
- const char *ring_name,
- uint16_t queue_id,
- uint32_t ring_size,
- int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name,
- dev->data->port_id, queue_id);
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(z_name, ring_size,
- socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, I40E_ALIGN);
-#endif
-}
-
const struct rte_memzone *
i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
- const struct rte_memzone *mz = NULL;
+ const struct rte_memzone *mz;
mz = rte_memzone_lookup(name);
if (mz)
return mz;
-#ifdef RTE_LIBRTE_XEN_DOM0
- mz = rte_memzone_reserve_bounded(name, len,
- socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
- mz = rte_memzone_reserve_aligned(name, len,
- socket_id, 0, I40E_ALIGN);
-#endif
+
+ if (rte_xen_dom0_supported())
+ mz = rte_memzone_reserve_bounded(name, len,
+ socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
+ else
+ mz = rte_memzone_reserve_aligned(name, len,
+ socket_id, 0, I40E_RING_BASE_ALIGN);
return mz;
}
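i40e_memzone_reserve now decides the Xen dom0 case at runtime instead of
compile time, and the lookup before the reserve makes the call idempotent:
a repeat caller (for instance after a restart in a multi-process setup)
gets the zone reserved earlier rather than an allocation failure. A
minimal sketch of the same pattern:

#include <rte_memzone.h>

/* Return the named zone if it already exists, otherwise reserve it. */
static const struct rte_memzone *
zone_get_or_reserve(const char *name, size_t len, int socket_id,
                    unsigned int align)
{
    const struct rte_memzone *mz = rte_memzone_lookup(name);

    if (mz != NULL)
        return mz; /* reserved by an earlier call */
    return rte_memzone_reserve_aligned(name, len, socket_id, 0, align);
}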
@@ -2178,6 +2545,12 @@ i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
{
uint16_t i;
+ /* SSE Vector driver has a different way of releasing mbufs.
+ * Check rxq for NULL here as well, since this runs before the
+ * NULL check below. */
+ if (rxq && rxq->rx_using_sse) {
+ i40e_rx_queue_release_mbufs_vec(rxq);
+ return;
+ }
+
if (!rxq || !rxq->sw_ring) {
PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
return;
@@ -2208,6 +2581,11 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
unsigned i;
uint16_t len;
+ if (!rxq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
+ return;
+ }
+
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_I40E_RX_MAX_BURST);
@@ -2231,6 +2609,9 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
+
+ rxq->rxrearm_start = 0;
+ rxq->rxrearm_nb = 0;
}
void
@@ -2305,7 +2686,11 @@ i40e_tx_queue_init(struct i40e_tx_queue *txq)
tx_ctx.new_context = 1;
tx_ctx.base = txq->tx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
tx_ctx.qlen = txq->nb_tx_desc;
- tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[0]);
+
+#ifdef RTE_LIBRTE_IEEE1588
+ tx_ctx.timesync_ena = 1;
+#endif
+ tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[txq->dcb_tc]);
if (vsi->type == I40E_VSI_FDIR)
tx_ctx.fd_ena = TRUE;
@@ -2360,7 +2745,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
rxd = &rxq->rx_ring[i];
rxd->read.pkt_addr = dma_addr;
- rxd->read.hdr_addr = dma_addr;
+ rxd->read.hdr_addr = 0;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
rxd->read.rsvd1 = 0;
rxd->read.rsvd2 = 0;
@@ -2382,11 +2767,10 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
struct i40e_pf *pf = I40E_VSI_TO_PF(rxq->vsi);
struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
struct rte_eth_dev_data *data = pf->dev_data;
- struct rte_pktmbuf_pool_private *mbp_priv =
- rte_mempool_get_priv(rxq->mp);
- uint16_t buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM);
- uint16_t len;
+ uint16_t buf_size, len;
+
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
switch (pf->flags & (I40E_FLAG_HEADER_SPLIT_DISABLED |
I40E_FLAG_HEADER_SPLIT_ENABLED)) {
@@ -2440,11 +2824,9 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
int err = I40E_SUCCESS;
struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi);
- struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
uint16_t pf_q = rxq->reg_idx;
uint16_t buf_size;
struct i40e_hmc_obj_rxq rx_ctx;
- struct rte_pktmbuf_pool_private *mbp_priv;
err = i40e_rx_queue_config(rxq);
if (err < 0) {
@@ -2491,14 +2873,12 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
- mbp_priv = rte_mempool_get_priv(rxq->mp);
- buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM);
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
/* Check if scattered RX needs to be used. */
if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
- dev->rx_pkt_burst = i40e_recv_scattered_pkts;
}
/* Init the RX tail register. */
@@ -2525,6 +2905,26 @@ i40e_dev_clear_queues(struct rte_eth_dev *dev)
}
}
+void
+i40e_dev_free_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
#define I40E_FDIR_NUM_TX_DESC I40E_MIN_RING_DESC
#define I40E_FDIR_NUM_RX_DESC I40E_MIN_RING_DESC
@@ -2556,11 +2956,9 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- tz = i40e_ring_dma_zone_reserve(dev,
- "fdir_tx_ring",
- I40E_FDIR_QUEUE_ID,
- ring_size,
- SOCKET_ID_ANY);
+ tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
+ I40E_FDIR_QUEUE_ID, ring_size,
+ I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
if (!tz) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
@@ -2572,11 +2970,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
txq->vsi = pf->fdir.fdir_vsi;
-#ifdef RTE_LIBRTE_XEN_DOM0
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
- txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/*
* don't need to allocate software ring and reset for the fdir
@@ -2616,11 +3010,9 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- rz = i40e_ring_dma_zone_reserve(dev,
- "fdir_rx_ring",
- I40E_FDIR_QUEUE_ID,
- ring_size,
- SOCKET_ID_ANY);
+ rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
+ I40E_FDIR_QUEUE_ID, ring_size,
+ I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
if (!rz) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
@@ -2632,11 +3024,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
rxq->vsi = pf->fdir.fdir_vsi;
-#ifdef RTE_LIBRTE_XEN_DOM0
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
- rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
/*
@@ -2648,3 +3036,237 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
return I40E_SUCCESS;
}
+
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct i40e_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mp;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct i40e_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+void __attribute__((cold))
+i40e_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ uint16_t rx_using_sse, i;
+ /* Vector Rx requires a few configuration conditions to be met,
+ * and Rx bulk allocation must be allowed.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (i40e_rx_vec_dev_conf_condition_check(dev) ||
+ !ad->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
+ " Vector Rx preconditions",
+ dev->data->port_id);
+
+ ad->rx_vec_allowed = false;
+ }
+ if (ad->rx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq =
+ dev->data->rx_queues[i];
+
+ if (i40e_rxq_vec_setup(rxq)) {
+ ad->rx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (dev->data->scattered_rx) {
+ /* Set the non-LRO scattered callback: there are Vector and
+ * single allocation versions.
+ */
+ if (ad->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+ "callback (port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
+ "allocation callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ }
+ /* If parameters allow we are going to choose between the following
+ * callbacks:
+ * - Vector
+ * - Bulk Allocation
+ * - Single buffer allocation (the simplest one)
+ */
+ } else if (ad->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
+ "burst size no less than %d (port=%d).",
+ RTE_I40E_DESCS_PER_LOOP,
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts_vec;
+ } else if (ad->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function "
+ "will be used on port=%d.",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+ "satisfied, or Scattered Rx is requested "
+ "(port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts;
+ }
+
+ /* Propagate information about RX function choice through all queues. */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rx_using_sse =
+ (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
+
+ rxq->rx_using_sse = rx_using_sse;
+ }
+ }
+}
+
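Stripped of logging and the per-queue bookkeeping, the callback selection
in i40e_set_rx_function above reduces to the following ladder (a condensed
sketch, not the driver code):

/* scattered config wins first, then vector, then bulk alloc */
if (dev->data->scattered_rx)
    dev->rx_pkt_burst = ad->rx_vec_allowed ?
        i40e_recv_scattered_pkts_vec : i40e_recv_scattered_pkts;
else if (ad->rx_vec_allowed)
    dev->rx_pkt_burst = i40e_recv_pkts_vec;
else if (ad->rx_bulk_alloc_allowed)
    dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
else
    dev->rx_pkt_burst = i40e_recv_pkts;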
+void __attribute__((cold))
+i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
+ && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) {
+ if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) {
+ PMD_INIT_LOG(DEBUG, "Vector tx"
+ " can be enabled on this txq.");
+
+ } else {
+ ad->tx_vec_allowed = false;
+ }
+ } else {
+ ad->tx_simple_allowed = false;
+ }
+}
+
+void __attribute__((cold))
+i40e_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int i;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (ad->tx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct i40e_tx_queue *txq =
+ dev->data->tx_queues[i];
+
+ if (i40e_txq_vec_setup(txq)) {
+ ad->tx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (ad->tx_simple_allowed) {
+ if (ad->tx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+ }
+ } else {
+ PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts;
+ }
+}
+
+/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+ return -1;
+}
+
+uint16_t __attribute__((weak))
+i40e_recv_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+i40e_recv_scattered_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+int __attribute__((weak))
+i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
+{
+ return -1;
+}
+
+int __attribute__((weak))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return -1;
+}
+
+void __attribute__((weak))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused *rxq)
+{
+ return;
+}
+
+uint16_t __attribute__((weak))
+i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_rxtx.h b/src/dpdk22/drivers/net/i40e/i40e_rxtx.h
index af932e3d..5c2f5c22 100755..100644
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e_rxtx.h
+++ b/src/dpdk22/drivers/net/i40e/i40e_rxtx.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,13 +44,33 @@
#define I40E_TX_FLAG_INSERT_VLAN ((uint32_t)(1 << 1))
#define I40E_TX_FLAG_TSYN ((uint32_t)(1 << 2))
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
#define RTE_PMD_I40E_RX_MAX_BURST 32
-#endif
+#define RTE_PMD_I40E_TX_MAX_BURST 32
+
+#define RTE_I40E_VPMD_RX_BURST 32
+#define RTE_I40E_VPMD_TX_BURST 32
+#define RTE_I40E_RXQ_REARM_THRESH 32
+#define RTE_I40E_MAX_RX_BURST RTE_I40E_RXQ_REARM_THRESH
+#define RTE_I40E_TX_MAX_FREE_BUF_SZ 64
+#define RTE_I40E_DESCS_PER_LOOP 4
#define I40E_RXBUF_SZ_1024 1024
#define I40E_RXBUF_SZ_2048 2048
+/* In non-PXE mode, QLEN must be a whole multiple of 32 descriptors. */
+#define I40E_ALIGN_RING_DESC 32
+
+#define I40E_MIN_RING_DESC 64
+#define I40E_MAX_RING_DESC 4096
+
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+
+#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
+ I40E_TX_DESC_CMD_EOP)
+
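The container_of redefinition above recovers a pointer to the enclosing
structure from a pointer to one of its members. A small self-contained
usage example (the struct here is hypothetical):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) ({ \
    typeof(((type *)0)->member)(*__mptr) = (ptr); \
    (type *)((char *)__mptr - offsetof(type, member)); })

struct queue { /* hypothetical enclosing structure */
    int id;
    int stats;
};

int main(void)
{
    struct queue q = { .id = 3, .stats = 0 };
    int *member = &q.stats;

    /* walk back from the member to the enclosing struct queue */
    struct queue *owner = container_of(member, struct queue, stats);
    printf("id=%d\n", owner->id); /* 3 */
    return 0;
}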
enum i40e_header_split_mode {
i40e_header_split_none = 0,
i40e_header_split_enabled = 1,
@@ -100,6 +120,11 @@ struct i40e_rx_queue {
struct rte_mbuf fake_mbuf; /**< dummy mbuf */
struct rte_mbuf *rx_stage[RTE_PMD_I40E_RX_MAX_BURST * 2];
#endif
+
+ uint16_t rxrearm_nb; /**< number of remaining to be re-armed */
+ uint16_t rxrearm_start; /**< the idx we start the re-arming from */
+ uint64_t mbuf_initializer; /**< value to init mbufs */
+
uint8_t port_id; /**< device port ID */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise */
uint16_t queue_id; /**< RX queue index */
@@ -113,6 +138,8 @@ struct i40e_rx_queue {
uint8_t hs_mode; /* Header Split mode */
bool q_set; /**< indicate if rx queue has been configured */
bool rx_deferred_start; /**< don't start this queue in dev start */
+ uint16_t rx_using_sse; /**< flag indicating vPMD usage for Rx */
+ uint8_t dcb_tc; /**< Traffic class of rx queue */
};
struct i40e_tx_entry {
@@ -136,8 +163,9 @@ struct i40e_tx_queue {
uint16_t last_desc_cleaned;
/**< Total number of TX descriptors ready to be allocated. */
uint16_t nb_tx_free;
- /**< Number of TX descriptors to use before RS bit is set. */
- uint16_t tx_free_thresh; /**< minimum TX before freeing. */
+ /**< Start freeing TX buffers if there are fewer free descriptors than
+ this value. */
+ uint16_t tx_free_thresh;
/** Number of TX descriptors to use before RS bit is set. */
uint16_t tx_rs_thresh;
uint8_t pthresh; /**< Prefetch threshold register. */
@@ -152,6 +180,20 @@ struct i40e_tx_queue {
uint16_t tx_next_rs;
bool q_set; /**< indicate if tx queue has been configured */
bool tx_deferred_start; /**< don't start this queue in dev start */
+ uint8_t dcb_tc; /**< Traffic class of tx queue */
+};
+
+/** Offload features */
+union i40e_tx_offload {
+ uint64_t data;
+ struct {
+ uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint64_t l4_len:8; /**< L4 Header Length. */
+ uint64_t tso_segsz:16; /**< TCP TSO segment size */
+ uint64_t outer_l2_len:8; /**< outer L2 Header Length */
+ uint64_t outer_l3_len:16; /**< outer L3 Header Length */
+ };
};
int i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
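The bit-field widths in union i40e_tx_offload above are chosen so that all
six fields pack exactly into the single 64-bit data word
(7 + 9 + 8 + 16 + 8 + 16 = 64), which lets the driver initialize or
compare the whole set through .data in one operation. A standalone check
of that budget:

#include <stdint.h>
#include <stdio.h>

union i40e_tx_offload { /* as declared above */
    uint64_t data;
    struct {
        uint64_t l2_len:7;
        uint64_t l3_len:9;
        uint64_t l4_len:8;
        uint64_t tso_segsz:16;
        uint64_t outer_l2_len:8;
        uint64_t outer_l3_len:16;
    };
};

int main(void)
{
    union i40e_tx_offload off = { .data = 0 }; /* clears every field */

    off.l2_len = 14;
    off.l3_len = 20;
    off.l4_len = 20;

    /* still one 8-byte word; a single compare of .data tests it all */
    printf("sizeof=%zu data=%#llx\n", sizeof(off),
           (unsigned long long)off.data);
    return 0;
}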
@@ -185,6 +227,7 @@ int i40e_rx_queue_init(struct i40e_rx_queue *rxq);
void i40e_free_tx_resources(struct i40e_tx_queue *txq);
void i40e_free_rx_resources(struct i40e_rx_queue *rxq);
void i40e_dev_clear_queues(struct rte_eth_dev *dev);
+void i40e_dev_free_queues(struct rte_eth_dev *dev);
void i40e_reset_rx_queue(struct i40e_rx_queue *rxq);
void i40e_reset_tx_queue(struct i40e_tx_queue *txq);
void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
@@ -195,4 +238,20 @@ uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
+int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
+int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
+void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
+uint16_t i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+void i40e_set_rx_function(struct rte_eth_dev *dev);
+void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
+ struct i40e_tx_queue *txq);
+void i40e_set_tx_function(struct rte_eth_dev *dev);
+
#endif /* _I40E_RXTX_H_ */
diff --git a/src/dpdk22/drivers/net/i40e/i40e_rxtx_vec.c b/src/dpdk22/drivers/net/i40e/i40e_rxtx_vec.c
new file mode 100644
index 00000000..047aff53
--- /dev/null
+++ b/src/dpdk22/drivers/net/i40e/i40e_rxtx_vec.c
@@ -0,0 +1,777 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM);
+ __m128i dma_addr0, dma_addr1;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ dma_addr0 = _mm_setzero_si128();
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].read,
+ dma_addr0);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ __m128i vaddr0, vaddr1;
+ uintptr_t p0, p1;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /* Flush mbuf with pkt template.
+ * The data to be rearmed is 6 bytes long, but Rx will
+ * overwrite the adjacent ol_flags anyway, so reset all
+ * 8 bytes with a single 64-bit store: 6 bytes of
+ * rearm_data plus the first 2 bytes of ol_flags.
+ */
+ p0 = (uintptr_t)&mb0->rearm_data;
+ *(uint64_t *)p0 = rxq->mbuf_initializer;
+ p1 = (uintptr_t)&mb1->rearm_data;
+ *(uint64_t *)p1 = rxq->mbuf_initializer;
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+ dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
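For reference, the scalar equivalent of the descriptor half of one rearm
iteration above: each 128-bit store fills both 64-bit address fields of
the 16-byte read descriptor with the buffer's physical address plus
headroom. A sketch using the driver's own types:

/* scalar rendering of one _mm_store_si128 to rxdp->read (sketch) */
static inline void
rearm_desc(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
{
    uint64_t dma = mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;

    rxdp->read.pkt_addr = rte_cpu_to_le_64(dma);
    rxdp->read.hdr_addr = rte_cpu_to_le_64(dma);
}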
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when they are not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet
+ */
+#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+
+static inline void
+desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i vlan0, vlan1, rss;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ /* mask everything except the rss and vlan flags:
+ * bit 2 is the vlan tag, bits 13:12 the rss/fdir status
+ */
+ const __m128i rss_vlan_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x3004, 0x3004, 0x3004, 0x3004);
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, PKT_RX_VLAN_PKT,
+ 0, 0, 0, 0);
+
+ const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_FDIR, 0, PKT_RX_RSS_HASH, 0);
+
+ vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]);
+ vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]);
+ vlan0 = _mm_unpacklo_epi32(vlan0, vlan1);
+
+ vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
+ vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
+
+ rss = _mm_srli_epi16(vlan1, 12);
+ rss = _mm_shuffle_epi8(rss_flags, rss);
+
+ vlan0 = _mm_or_si128(vlan0, rss);
+ vol.dword = _mm_cvtsi128_si64(vlan0);
+
+ rx_pkts[0]->ol_flags = vol.e[0];
+ rx_pkts[1]->ol_flags = vol.e[1];
+ rx_pkts[2]->ol_flags = vol.e[2];
+ rx_pkts[3]->ol_flags = vol.e[3];
+}
+#else
+#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
+#endif
+
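desc_to_olflags_v uses _mm_shuffle_epi8 as a 16-entry byte lookup table:
the masked status bits become indices into vlan_flags and rss_flags,
replacing per-packet branches with two shuffles. A scalar rendering of the
same mapping for one descriptor lane; the flag values are hypothetical
stand-ins for the PKT_RX_* constants:

#include <stdint.h>

#define F_VLAN 0x01 /* stand-in for PKT_RX_VLAN_PKT */
#define F_RSS  0x02 /* stand-in for PKT_RX_RSS_HASH */
#define F_FDIR 0x04 /* stand-in for PKT_RX_FDIR */

/* bit 2 of the status lane reports an L2 tag; bits 13:12 select the
 * filter status entry, per the vlan_flags/rss_flags tables above */
static uint16_t status_to_olflags(uint16_t status)
{
    static const uint8_t rss_tbl[4] = { 0, F_RSS, 0, F_FDIR };
    uint16_t flags = 0;

    if (status & (1 << 2))
        flags |= F_VLAN;
    flags |= rss_tbl[(status >> 12) & 0x3];
    return flags;
}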
+#define PKTLEN_SHIFT (6)
+#define PKTLEN_MASK (0x3FFF)
+/* The pkt len field in the descriptor is not byte-aligned, so a
+ * shift is needed to move it into place
+ */
+static inline void
+desc_pktlen_align(__m128i descs[4])
+{
+ __m128i pktlen0, pktlen1, zero;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ /* mask everything except pktlen field*/
+ const __m128i pktlen_msk = _mm_set_epi32(PKTLEN_MASK, PKTLEN_MASK,
+ PKTLEN_MASK, PKTLEN_MASK);
+
+ pktlen0 = _mm_unpackhi_epi32(descs[0], descs[2]);
+ pktlen1 = _mm_unpackhi_epi32(descs[1], descs[3]);
+ pktlen0 = _mm_unpackhi_epi32(pktlen0, pktlen1);
+
+ zero = _mm_xor_si128(pktlen0, pktlen0);
+
+ pktlen0 = _mm_srli_epi32(pktlen0, PKTLEN_SHIFT);
+ pktlen0 = _mm_and_si128(pktlen0, pktlen_msk);
+
+ pktlen0 = _mm_packs_epi32(pktlen0, zero);
+ vol.dword = _mm_cvtsi128_si64(pktlen0);
+ /* let descriptor bytes 14-15 store the pkt len */
+ *((uint16_t *)&descs[0]+7) = vol.e[0];
+ *((uint16_t *)&descs[1]+7) = vol.e[1];
+ *((uint16_t *)&descs[2]+7) = vol.e[2];
+ *((uint16_t *)&descs[3]+7) = vol.e[3];
+}
+
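desc_pktlen_align does in parallel what the scalar Rx path does per
descriptor: the 14-bit buffer length sits at bit 6 of the high dword of
qword1 (bit 38 of the full quadword), so it only needs a shift and a mask.
The scalar equivalent, as a sketch reusing the PKTLEN_* macros above:

#include <stdint.h>

/* extract the buffer length from the high dword of status_error_len */
static inline uint16_t pktlen_from_hi_dword(uint32_t len_dword)
{
    return (uint16_t)((len_dword >> PKTLEN_SHIFT) & PKTLEN_MASK);
}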
+ /*
+ * Notice:
+ * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
+ * - if nb_pkts > RTE_I40E_VPMD_RX_BURST, only RTE_I40E_VPMD_RX_BURST
+ * DD bits are scanned
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ __m128i shuf_msk;
+
+ __m128i crc_adjust = _mm_set_epi16(
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0 /* ignore pkt_type field */
+ );
+ __m128i dd_check, eop_check;
+
+ /* nb_pkts shall be less than or equal to RTE_I40E_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ _mm_prefetch((const void *)rxdp, _MM_HINT_T0);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+ /* 4 packets EOP mask */
+ eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = _mm_set_epi8(
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 3, 2, /* octet 2~3, low 16 bits vlan_macip */
+ 15, 14, /* octet 15~14, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 15, 14, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF /*pkt_type set as unknown */
+ );
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packet in one loop
+ * [A*. mask out 4 unused dirty field in desc]
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < RTE_I40E_VPMD_RX_BURST;
+ pos += RTE_I40E_DESCS_PER_LOOP,
+ rxdp += RTE_I40E_DESCS_PER_LOOP) {
+ __m128i descs[RTE_I40E_DESCS_PER_LOOP];
+ __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+ __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+
+ /* B.1 load 1 mbuf point */
+ mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+ /* B.1 load 1 mbuf point */
+ mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+
+ descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ /* A.1 load 2 more pkts desc */
+ descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+
+ if (split_packet) {
+ rte_prefetch0(&rx_pkts[pos]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+ }
+
+ /*shift the pktlen field*/
+ desc_pktlen_align(descs);
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
+
+ desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
+ pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
+ pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
+ /* C.2 get 4 pkts staterr value */
+ zero = _mm_xor_si128(dd_check, dd_check);
+ staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
+ pkt_mb4);
+ _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
+ pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ __m128i eop_shuf_mask = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ );
+
+ /* and with mask to extract bits, flipping 1-0 */
+ __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+ /* the staterr values are not in order; the count of
+ * dd bits doesn't care about order. However, for end of
+ * packet tracking we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+ split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = _mm_and_si128(staterr, dd_check);
+ staterr = _mm_packs_epi32(staterr, zero);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
+ pkt_mb2);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+ /* C.4 calc available number of desc */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+ /*
+ * Notice:
+ * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
+ * - if nb_pkts > RTE_I40E_VPMD_RX_BURST, only RTE_I40E_VPMD_RX_BURST
+ * DD bits are scanned
+ */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+static inline uint16_t
+reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /*finished pkts*/
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len) {
+ end->data_len -= rxq->crc_len;
+ } else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ end = secondlast;
+ }
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
+
+ /* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
+ * - if nb_pkts > RTE_I40E_VPMD_RX_BURST, only RTE_I40E_VPMD_RX_BURST
+ * DD bits are scanned
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly*/
+ unsigned i = 0;
+
+ if (rxq->pkt_first_seg == NULL) {
+ /* find the first split flag, and reassemble only from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ __m128i descriptor = _mm_set_epi64x(high_qw,
+ pkt->buf_physaddr + pkt->data_off);
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+static inline int __attribute__((always_inline))
+i40e_tx_free_bufs(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *txep;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bits on threshold descriptor */
+ if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /* first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+ m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool)) {
+ free[nb_free++] = m;
+ } else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free,
+ nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+static inline void __attribute__((always_inline))
+tx_backlog_entry(struct i40e_tx_entry *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
+uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+ int i;
+
+ /* crossing the tx_rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reaching the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
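Once the PMD selects this vector path, applications reach it through the
normal burst API; a minimal usage sketch (port_id, queue_id and the mbuf
array are placeholders, not names from this patch):

	struct rte_mbuf *pkts[32];
	uint16_t sent;

	/* ... allocate and fill pkts[] ... */
	sent = rte_eth_tx_burst(port_id, queue_id, pkts, 32);
	/* dispatches to i40e_xmit_pkts_vec() when the vector TX path is set */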
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ const unsigned mask = rxq->nb_rx_desc - 1;
+ unsigned i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
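The 64-bit mbuf_initializer captured here is the template trick used by the
vector RX paths: rearm_data is positioned in struct rte_mbuf so that data_off,
refcnt, nb_segs and port can all be reset with a single 8-byte store. A sketch
of how the rearm loop is expected to consume it (an assumption based on the
matching vector drivers, not code shown in this hunk):

	struct rte_mbuf *mb = rxq->sw_ring[idx].mbuf;
	*(uint64_t *)&mb->rearm_data = rxq->mbuf_initializer;  /* one store */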
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return 0;
+}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+	/* without Rx ol_flags, no VLAN present (VP) flag report */
+ if (rxmode->hw_vlan_strip != 0 ||
+ rxmode->hw_vlan_extend != 0)
+ return -1;
+#endif
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ /* - no csum error report support
+ * - no header split support
+ */
+ if (rxmode->hw_ip_checksum == 1 ||
+ rxmode->header_split == 1)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}
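For reference, a minimal port configuration that satisfies every check above
(field names as in the rte_eth_conf of this DPDK snapshot; a sketch, not part
of the patch):

	struct rte_eth_conf port_conf = {
		.rxmode = {
			.hw_vlan_strip  = 0,  /* required when RX ol_flags are off */
			.hw_vlan_extend = 0,
			.hw_ip_checksum = 0,  /* no csum error reporting */
			.header_split   = 0,  /* no header split */
		},
		.fdir_conf = { .mode = RTE_FDIR_MODE_NONE },  /* no flow director */
	};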
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/README b/src/dpdk22/drivers/net/ixgbe/base/README
index e0e5f0dc..cec48a6f 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/README
+++ b/src/dpdk22/drivers/net/ixgbe/base/README
@@ -1,7 +1,7 @@
..
BSD LICENSE
- Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -34,7 +34,7 @@ Intel® IXGBE driver
===================
This directory contains source code of FreeBSD ixgbe driver of version
-cid-10g-shared-code.2014.09.04 released by LAD. The sub-directory of lad/
+cid-10g-shared-code.2015.06.05 released by ND. The sub-directory of base/
contains the original source package.
This driver is valid for the product(s) listed below
@@ -52,16 +52,10 @@ This driver is valid for the product(s) listed below
* Intel® Ethernet Server Adapter X520-T2
* Intel® Ethernet Controller X550-BT2
-Updating driver
-===============
-
-The following modifications have been made to this code to integrate it with the
-Intel® DPDK:
-
+Updating the driver
+===================
-ixgbe_osdep.h
--------------
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
-The OS dependency layer has been extensively modified to support the drivers in
-the Intel® DPDK environment. It is expected that these files will not need to be
-changed on updating the driver.
+ ixgbe_osdep.h
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_82598.c
index c8ce893b..9e65fffa 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_82598.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -36,7 +36,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-#ident "$Id: ixgbe_82598.c,v 1.199 2013/05/22 23:26:31 jtkirshe Exp $"
#define IXGBE_82598_MAX_TX_QUEUES 32
#define IXGBE_82598_MAX_RX_QUEUES 64
@@ -128,29 +127,29 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
ret_val = ixgbe_init_ops_generic(hw);
/* PHY */
- phy->ops.init = &ixgbe_init_phy_ops_82598;
+ phy->ops.init = ixgbe_init_phy_ops_82598;
/* MAC */
- mac->ops.start_hw = &ixgbe_start_hw_82598;
- mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
- mac->ops.reset_hw = &ixgbe_reset_hw_82598;
- mac->ops.get_media_type = &ixgbe_get_media_type_82598;
+ mac->ops.start_hw = ixgbe_start_hw_82598;
+ mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
+ mac->ops.reset_hw = ixgbe_reset_hw_82598;
+ mac->ops.get_media_type = ixgbe_get_media_type_82598;
mac->ops.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_82598;
- mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
- mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
- mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
- mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82598;
+ ixgbe_get_supported_physical_layer_82598;
+ mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
+ mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
+ mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
+ mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;
/* RAR, Multicast, VLAN */
- mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
- mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
- mac->ops.set_vfta = &ixgbe_set_vfta_82598;
+ mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
+ mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
+ mac->ops.set_vfta = ixgbe_set_vfta_82598;
mac->ops.set_vlvf = NULL;
- mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
+ mac->ops.clear_vfta = ixgbe_clear_vfta_82598;
/* Flow Control */
- mac->ops.fc_enable = &ixgbe_fc_enable_82598;
+ mac->ops.fc_enable = ixgbe_fc_enable_82598;
mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
@@ -161,15 +160,15 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
/* SFP+ Module */
- phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
- phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598;
+ phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
+ phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;
/* Link */
- mac->ops.check_link = &ixgbe_check_mac_link_82598;
- mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
+ mac->ops.check_link = ixgbe_check_mac_link_82598;
+ mac->ops.setup_link = ixgbe_setup_mac_link_82598;
mac->ops.flap_tx_laser = NULL;
- mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
- mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
+ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
+ mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;
/* Manageability interface */
mac->ops.set_fw_drv_ver = NULL;
@@ -202,20 +201,20 @@ s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
/* Overwrite the link function pointers if copper PHY */
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
- mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
+ mac->ops.setup_link = ixgbe_setup_copper_link_82598;
mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_generic;
+ ixgbe_get_copper_link_capabilities_generic;
}
switch (hw->phy.type) {
case ixgbe_phy_tn:
- phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
- phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+ phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
+ phy->ops.check_link = ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
+ ixgbe_get_phy_firmware_version_tnx;
break;
case ixgbe_phy_nl:
- phy->ops.reset = &ixgbe_reset_phy_nl;
+ phy->ops.reset = ixgbe_reset_phy_nl;
/* Call SFP+ identify routine to get the SFP+ module type */
ret_val = phy->ops.identify_sfp(hw);
@@ -260,6 +259,8 @@ s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_start_hw_82598");
ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val)
+ return ret_val;
/* Disable relaxed ordering */
for (i = 0; ((i < hw->mac.max_tx_queues) &&
@@ -278,8 +279,7 @@ s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
}
/* set the completion timeout for interface */
- if (ret_val == IXGBE_SUCCESS)
- ixgbe_set_pcie_completion_timeout(hw);
+ ixgbe_set_pcie_completion_timeout(hw);
return ret_val;
}
@@ -659,7 +659,7 @@ STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
&adapt_comp_reg);
if (link_up_wait_to_complete) {
- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
if ((link_reg & 1) &&
((adapt_comp_reg & 1) == 0)) {
*link_up = true;
@@ -688,7 +688,7 @@ STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
if (link_up_wait_to_complete) {
- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
if (links_reg & IXGBE_LINKS_UP) {
*link_up = true;
break;
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82598.h b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_82598.h
index 58ce4d12..89dd11a5 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82598.h
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_82598.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,7 +33,6 @@ POSSIBILITY OF SUCH DAMAGE.
#ifndef _IXGBE_82598_H_
#define _IXGBE_82598_H_
-#ident "$Id: ixgbe_82598.h,v 1.3 2012/03/27 22:16:51 jtkirshe Exp $"
u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw);
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_82599.c
index bd65a7f1..f0deb596 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -36,7 +36,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-#ident "$Id: ixgbe_82599.c,v 1.334 2013/12/04 22:34:00 jtkirshe Exp $"
#define IXGBE_82599_MAX_TX_QUEUES 128
#define IXGBE_82599_MAX_RX_QUEUES 128
@@ -71,10 +70,10 @@ void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
!ixgbe_mng_enabled(hw)) {
mac->ops.disable_tx_laser =
- &ixgbe_disable_tx_laser_multispeed_fiber;
+ ixgbe_disable_tx_laser_multispeed_fiber;
mac->ops.enable_tx_laser =
- &ixgbe_enable_tx_laser_multispeed_fiber;
- mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
+ ixgbe_enable_tx_laser_multispeed_fiber;
+ mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;
} else {
mac->ops.disable_tx_laser = NULL;
@@ -84,15 +83,18 @@ void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
if (hw->phy.multispeed_fiber) {
/* Set up dual speed SFP+ support */
- mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+ mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
+ mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
+ mac->ops.set_rate_select_speed =
+ ixgbe_set_hard_rate_select_speed;
} else {
if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
(hw->phy.smart_speed == ixgbe_smart_speed_auto ||
hw->phy.smart_speed == ixgbe_smart_speed_on) &&
!ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
+ mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
} else {
- mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
+ mac->ops.setup_link = ixgbe_setup_mac_link_82599;
}
}
}
@@ -129,8 +131,8 @@ s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
IXGBE_WRITE_FLUSH(hw);
- phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599;
- phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599;
+ phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
+ phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
}
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
@@ -144,18 +146,18 @@ s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
/* If copper media, overwrite with copper function pointers */
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
- mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
+ mac->ops.setup_link = ixgbe_setup_copper_link_82599;
mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_generic;
+ ixgbe_get_copper_link_capabilities_generic;
}
/* Set necessary function pointers based on PHY type */
switch (hw->phy.type) {
case ixgbe_phy_tn:
- phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
- phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+ phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
+ phy->ops.check_link = ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
+ ixgbe_get_phy_firmware_version_tnx;
break;
default:
break;
@@ -325,47 +327,47 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
ret_val = ixgbe_init_ops_generic(hw);
/* PHY */
- phy->ops.identify = &ixgbe_identify_phy_82599;
- phy->ops.init = &ixgbe_init_phy_ops_82599;
+ phy->ops.identify = ixgbe_identify_phy_82599;
+ phy->ops.init = ixgbe_init_phy_ops_82599;
/* MAC */
- mac->ops.reset_hw = &ixgbe_reset_hw_82599;
- mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
- mac->ops.get_media_type = &ixgbe_get_media_type_82599;
+ mac->ops.reset_hw = ixgbe_reset_hw_82599;
+ mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
+ mac->ops.get_media_type = ixgbe_get_media_type_82599;
mac->ops.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_82599;
- mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
- mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
- mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
- mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
- mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
- mac->ops.start_hw = &ixgbe_start_hw_82599;
- mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
- mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
- mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
- mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
- mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
- mac->ops.prot_autoc_read = &prot_autoc_read_82599;
- mac->ops.prot_autoc_write = &prot_autoc_write_82599;
+ ixgbe_get_supported_physical_layer_82599;
+ mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
+ mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
+ mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
+ mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
+ mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
+ mac->ops.start_hw = ixgbe_start_hw_82599;
+ mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
+ mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
+ mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
+ mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
+ mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
+ mac->ops.prot_autoc_read = prot_autoc_read_82599;
+ mac->ops.prot_autoc_write = prot_autoc_write_82599;
/* RAR, Multicast, VLAN */
- mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
- mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
- mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
- mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
+ mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
+ mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
+ mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
+ mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
mac->rar_highwater = 1;
- mac->ops.set_vfta = &ixgbe_set_vfta_generic;
- mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
- mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
- mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
- mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
- mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
- mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
+ mac->ops.set_vfta = ixgbe_set_vfta_generic;
+ mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
+ mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
+ mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
+ mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
+ mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
+ mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;
/* Link */
- mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
- mac->ops.check_link = &ixgbe_check_mac_link_generic;
- mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
+ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
+ mac->ops.check_link = ixgbe_check_mac_link_generic;
+ mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
ixgbe_init_mac_link_ops_82599(hw);
mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
@@ -382,18 +384,18 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
/* EEPROM */
- eeprom->ops.read = &ixgbe_read_eeprom_82599;
- eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
+ eeprom->ops.read = ixgbe_read_eeprom_82599;
+ eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;
/* Manageability interface */
- mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
+ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;
mac->ops.get_thermal_sensor_data =
- &ixgbe_get_thermal_sensor_data_generic;
+ ixgbe_get_thermal_sensor_data_generic;
mac->ops.init_thermal_sensor_thresh =
- &ixgbe_init_thermal_sensor_thresh_generic;
+ ixgbe_init_thermal_sensor_thresh_generic;
- mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;
+ mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
return ret_val;
}
@@ -579,17 +581,13 @@ out:
**/
void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
- u32 autoc2_reg, fwsm;
+ u32 autoc2_reg;
u16 ee_ctrl_2 = 0;
DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
- /* Check to see if MNG FW could be enabled */
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
-
- if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
- !hw->wol_enabled &&
+ if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
@@ -734,172 +732,33 @@ void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
}
}
-
/**
- * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
+ * ixgbe_set_hard_rate_select_speed - Set module link speed
* @hw: pointer to hardware structure
- * @speed: new link speed
- * @autoneg_wait_to_complete: true when waiting for completion is needed
+ * @speed: link speed to set
*
- * Set the link speed in the AUTOC register and restarts link.
- **/
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ * Set module link speed via RS0/RS1 rate select pins.
+ */
+void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
{
- s32 status = IXGBE_SUCCESS;
- ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- u32 speedcnt = 0;
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
- u32 i = 0;
- bool autoneg, link_up = false;
-
- DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
-
- /* Mask off requested but non-supported speeds */
- status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
- if (status != IXGBE_SUCCESS)
- return status;
-
- speed &= link_speed;
-
- /*
- * Try each speed one by one, highest priority first. We do this in
- * software because 10gb fiber doesn't support speed autonegotiation.
- */
- if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
- speedcnt++;
- highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
-
- /* If we already have link at this speed, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, false);
- if (status != IXGBE_SUCCESS)
- return status;
-
- if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
- goto out;
-
- /* Set the module link speed */
- switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber:
- esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
- break;
- case ixgbe_media_type_fiber_qsfp:
- /* QSFP module automatically detects MAC link speed */
- break;
- default:
- DEBUGOUT("Unexpected media type.\n");
- break;
- }
-
- /* Allow module to change analog characteristics (1G->10G) */
- msec_delay(40);
- status = ixgbe_setup_mac_link_82599(hw,
- IXGBE_LINK_SPEED_10GB_FULL,
- autoneg_wait_to_complete);
- if (status != IXGBE_SUCCESS)
- return status;
-
- /* Flap the tx laser if it has not already been done */
- ixgbe_flap_tx_laser(hw);
-
- /*
- * Wait for the controller to acquire link. Per IEEE 802.3ap,
- * Section 73.10.2, we may have to wait up to 500ms if KR is
- * attempted. 82599 uses the same timing for 10g SFI.
- */
- for (i = 0; i < 5; i++) {
- /* Wait for the link partner to also set speed */
- msec_delay(100);
-
- /* If we have link, just jump out */
- status = ixgbe_check_link(hw, &link_speed,
- &link_up, false);
- if (status != IXGBE_SUCCESS)
- return status;
-
- if (link_up)
- goto out;
- }
- }
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
- speedcnt++;
- if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
- highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
-
- /* If we already have link at this speed, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, false);
- if (status != IXGBE_SUCCESS)
- return status;
-
- if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
- goto out;
-
- /* Set the module link speed */
- switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber:
- esdp_reg &= ~IXGBE_ESDP_SDP5;
- esdp_reg |= IXGBE_ESDP_SDP5_DIR;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
- break;
- case ixgbe_media_type_fiber_qsfp:
- /* QSFP module automatically detects link speed */
- break;
- default:
- DEBUGOUT("Unexpected media type.\n");
- break;
- }
-
- /* Allow module to change analog characteristics (10G->1G) */
- msec_delay(40);
-
- status = ixgbe_setup_mac_link_82599(hw,
- IXGBE_LINK_SPEED_1GB_FULL,
- autoneg_wait_to_complete);
- if (status != IXGBE_SUCCESS)
- return status;
-
- /* Flap the Tx laser if it has not already been done */
- ixgbe_flap_tx_laser(hw);
-
- /* Wait for the link partner to also set speed */
- msec_delay(100);
-
- /* If we have link, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, false);
- if (status != IXGBE_SUCCESS)
- return status;
-
- if (link_up)
- goto out;
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ esdp_reg &= ~IXGBE_ESDP_SDP5;
+ esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+ break;
+ default:
+ DEBUGOUT("Invalid fixed module speed\n");
+ return;
}
- /*
- * We didn't get link. Configure back to the highest speed we tried,
- * (if there was more than one). We call ourselves back with just the
- * single highest speed that the user requested.
- */
- if (speedcnt > 1)
- status = ixgbe_setup_mac_link_multispeed_fiber(hw,
- highest_link_speed, autoneg_wait_to_complete);
-
-out:
- /* Set autoneg_advertised value based on input link speed */
- hw->phy.autoneg_advertised = 0;
-
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
-
- return status;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
}
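With this refactor the multispeed retry loop moves to common code and the
82599 keeps only the RS0/RS1 pin handling; the generic routine is expected to
drive it roughly as follows (a sketch of the new split, inferred from the ops
wired up in ixgbe_init_mac_link_ops_82599() above):

	ixgbe_set_rate_select_speed(hw, IXGBE_LINK_SPEED_10GB_FULL);
	msec_delay(40);  /* let the module switch analog characteristics */
	ixgbe_setup_mac_link(hw, IXGBE_LINK_SPEED_10GB_FULL,
			     autoneg_wait_to_complete);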
/**
@@ -1338,14 +1197,15 @@ reset_hw_out:
/**
* ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
* @hw: pointer to hardware structure
+ * @fdircmd: current value of FDIRCMD register
*/
-STATIC s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw)
+STATIC s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
{
int i;
for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
- if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
- IXGBE_FDIRCMD_CMD_MASK))
+ *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+ if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
return IXGBE_SUCCESS;
usec_delay(10);
}
@@ -1362,6 +1222,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
s32 err;
int i;
u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+ u32 fdircmd;
fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
@@ -1370,7 +1231,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
* Before starting reinitialization process,
* FDIRCMD.CMD must be zero.
*/
- err = ixgbe_fdir_check_cmd_complete(hw);
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
if (err) {
DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
return err;
@@ -1626,6 +1487,9 @@ u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
* @input: unique input dword
* @common: compressed common input dword
* @queue: queue index to direct traffic to
+ *
+ * Note that the tunnel bit in input must not be set if the hardware does
+ * not support tunneling.
**/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
@@ -1633,6 +1497,8 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
u8 queue)
{
u64 fdirhashcmd;
+ u8 flow_type;
+ bool tunnel;
u32 fdircmd;
s32 err;
@@ -1643,7 +1509,10 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
* fifth is FDIRCMD.TUNNEL_FILTER
*/
- switch (input.formatted.flow_type) {
+ tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
+ flow_type = input.formatted.flow_type &
+ (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
+ switch (flow_type) {
case IXGBE_ATR_FLOW_TYPE_TCPV4:
case IXGBE_ATR_FLOW_TYPE_UDPV4:
case IXGBE_ATR_FLOW_TYPE_SCTPV4:
@@ -1659,8 +1528,10 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
/* configure FDIRCMD register */
fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
- fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ if (tunnel)
+ fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
/*
* The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
@@ -1670,7 +1541,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
- err = ixgbe_fdir_check_cmd_complete(hw);
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
if (err) {
DEBUGOUT("Flow Director command did not complete!\n");
return err;
@@ -1907,36 +1778,44 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
return IXGBE_ERR_CONFIG;
}
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
+
+ /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSIP4M and
+ * FDIRDIP4M in cloud mode to allow L3/L3 packets to
+ * tunnel.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
}
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
- /* store the TCP/UDP port masks, bit reversed from port layout */
- fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
-
- /* write both the same so that UDP and TCP use the same mask */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
- /* also use it for SCTP */
- switch (hw->mac.type) {
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
- break;
- default:
- break;
- }
-
- /* store source and destination IP masks (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
- ~input_mask->formatted.src_ip[0]);
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
- ~input_mask->formatted.dst_ip[0]);
-
- /* store IPv6 mask */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, 0xffffffff);
+ if (!cloud_mode) {
+ /* store the TCP/UDP port masks, bit reversed from port
+ * layout */
+ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+ /* also use it for SCTP */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
+ break;
+ default:
+ break;
+ }
+		/* store source and destination IP masks (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+ ~input_mask->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+ ~input_mask->formatted.dst_ip[0]);
+ }
return IXGBE_SUCCESS;
}
@@ -1950,26 +1829,30 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
s32 err;
DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
-
- /* currently IPv6 is not supported, must be programmed with 0 */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
- input->formatted.src_ip[0]);
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
- input->formatted.src_ip[1]);
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
- input->formatted.src_ip[2]);
-
- /* record the source address (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
-
- /* record the first 32 bits of the destination address (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
-
- /* record source and destination port (little-endian)*/
- fdirport = IXGBE_NTOHS(input->formatted.dst_port);
- fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
- fdirport |= IXGBE_NTOHS(input->formatted.src_port);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+ if (!cloud_mode) {
+ /* currently IPv6 is not supported, must be programmed with 0 */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
+ input->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
+ input->formatted.src_ip[1]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
+ input->formatted.src_ip[2]);
+
+ /* record the source address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
+ input->formatted.src_ip[0]);
+
+ /* record the first 32 bits of the destination address
+ * (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
+ input->formatted.dst_ip[0]);
+
+ /* record source and destination port (little-endian)*/
+ fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+ fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+ }
/* record VLAN (little-endian) and flex_bytes(big-endian) */
fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
@@ -2016,7 +1899,7 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
- err = ixgbe_fdir_check_cmd_complete(hw);
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
if (err) {
DEBUGOUT("Flow Director command did not complete!\n");
return err;
@@ -2030,7 +1913,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
u16 soft_id)
{
u32 fdirhash;
- u32 fdircmd = 0;
+ u32 fdircmd;
s32 err;
/* configure FDIRHASH register */
@@ -2044,7 +1927,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
/* Query if filter is present */
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
- err = ixgbe_fdir_check_cmd_complete(hw);
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
if (err) {
DEBUGOUT("Flow Director command did not complete!\n");
return err;
@@ -2582,7 +2465,6 @@ reset_pipeline_out:
return ret_val;
}
-
/**
* ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
@@ -2696,4 +2578,3 @@ release_i2c_access:
return status;
}
-
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82599.h b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_82599.h
index 83124196..c034d3d9 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82599.h
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_82599.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,7 +33,6 @@ POSSIBILITY OF SUCH DAMAGE.
#ifndef _IXGBE_82599_H_
#define _IXGBE_82599_H_
-#ident "$Id: ixgbe_82599.h,v 1.12 2013/10/30 10:19:10 jtkirshe Exp $"
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed, bool *autoneg);
@@ -41,9 +40,8 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed);
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_api.c b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_api.c
index 18027604..64579ae8 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_api.c
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_api.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,7 +33,24 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_api.h"
#include "ixgbe_common.h"
-#ident "$Id: ixgbe_api.c,v 1.207 2013/11/22 01:02:01 jtkirshe Exp $"
+
+#define IXGBE_EMPTY_PARAM
+
+static const u32 ixgbe_mvals_base[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(IXGBE_EMPTY_PARAM)
+};
+
+static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(_X540)
+};
+
+static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(_X550)
+};
+
+static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(_X550EM_x)
+};
/**
* ixgbe_dcb_get_rtrup2tc - read rtrup2tc reg
@@ -97,6 +114,7 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
break;
}
+ hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME;
return status;
}
@@ -120,6 +138,8 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
}
+ hw->mvals = ixgbe_mvals_base;
+
switch (hw->device_id) {
case IXGBE_DEV_ID_82598:
case IXGBE_DEV_ID_82598_BX:
@@ -160,27 +180,34 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X540_VF:
case IXGBE_DEV_ID_X540_VF_HV:
hw->mac.type = ixgbe_mac_X540_vf;
+ hw->mvals = ixgbe_mvals_X540;
break;
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
hw->mac.type = ixgbe_mac_X540;
+ hw->mvals = ixgbe_mvals_X540;
break;
case IXGBE_DEV_ID_X550T:
hw->mac.type = ixgbe_mac_X550;
+ hw->mvals = ixgbe_mvals_X550;
break;
- case IXGBE_DEV_ID_X550EM_X:
case IXGBE_DEV_ID_X550EM_X_KX4:
case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_SFP:
hw->mac.type = ixgbe_mac_X550EM_x;
+ hw->mvals = ixgbe_mvals_X550EM_x;
break;
case IXGBE_DEV_ID_X550_VF:
case IXGBE_DEV_ID_X550_VF_HV:
hw->mac.type = ixgbe_mac_X550_vf;
+ hw->mvals = ixgbe_mvals_X550;
break;
case IXGBE_DEV_ID_X550EM_X_VF:
case IXGBE_DEV_ID_X550EM_X_VF_HV:
hw->mac.type = ixgbe_mac_X550EM_x_vf;
+ hw->mvals = ixgbe_mvals_X550EM_x;
break;
default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
@@ -538,6 +565,20 @@ s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_setup_internal_phy - Configure integrated PHY
+ * @hw: pointer to hardware structure
+ *
+ * Reconfigure the integrated PHY in order to enable communication with the
+ * external PHY. Returns success if not implemented, since nothing needs to
+ * be done in this case.
+ */
+s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.setup_internal_link, (hw),
+ IXGBE_SUCCESS);
+}
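All of these thin wrappers go through the ixgbe_call_func() helper, which
dispatches to the op if the MAC/PHY layer installed one and otherwise returns
the supplied fallback; it expands roughly to the following (paraphrased from
ixgbe_api.h, so treat the exact form as an assumption):

	#define ixgbe_call_func(hw, func, params, error) \
		((func) != NULL ? (func) params : (error))

This is why ixgbe_setup_internal_phy() can document "success if not
implemented": its fallback argument is IXGBE_SUCCESS rather than
IXGBE_NOT_IMPLEMENTED.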
+
+/**
* ixgbe_check_phy_link - Determine link and speed status
* @hw: pointer to hardware structure
*
@@ -567,6 +608,17 @@ s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
}
/**
+ * ixgbe_set_phy_power - Control the phy power state
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ */
+s32 ixgbe_set_phy_power(struct ixgbe_hw *hw, bool on)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.set_phy_power, (hw, on),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
* ixgbe_check_link - Get link and speed status
* @hw: pointer to hardware structure
*
@@ -635,6 +687,22 @@ s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
}
/**
+ * ixgbe_setup_mac_link - Set link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ *
+ * Configures link settings. Restarts the link.
+ * Performs autonegotiation if needed.
+ **/
+s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_mac_link, (hw, speed,
+ autoneg_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
* ixgbe_get_link_capabilities - Returns link capabilities
* @hw: pointer to hardware structure
*
@@ -1028,6 +1096,18 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_setup_fc - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
 * ixgbe_set_fw_drv_ver - Try to send the driver version number to FW
* @hw: pointer to hardware structure
* @maj: driver major number to be sent to firmware
@@ -1227,6 +1307,36 @@ void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf)
}
/**
+ * ixgbe_enter_lplu - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0).
+ **/
+s32 ixgbe_enter_lplu(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.enter_lplu, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_handle_lasi - Handle external Base T PHY interrupt
+ * @hw: pointer to hardware structure
+ *
+ * Handle external Base T PHY interrupt. If high temperature
+ * failure alarm then return error, else if link status change
+ * then setup internal/external PHY link
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
+ */
+s32 ixgbe_handle_lasi(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.handle_lasi, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
* ixgbe_read_analog_reg8 - Reads 8 bit analog register
* @hw: pointer to hardware structure
* @reg: analog register to read
@@ -1271,6 +1381,7 @@ s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
* ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: I2C bus address to read from
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface.
@@ -1283,9 +1394,59 @@ s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
}
/**
+ * ixgbe_read_i2c_byte_unlocked - Reads 8 bit word via I2C from device address
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: I2C bus address to read from
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte_unlocked,
+ (hw, byte_offset, dev_addr, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_i2c_combined - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_read_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_combined, (hw, addr,
+ reg, val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_i2c_combined_unlocked - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ **/
+s32 ixgbe_read_i2c_combined_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg,
+ u16 *val)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_combined_unlocked,
+ (hw, addr, reg, val),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
* ixgbe_write_i2c_byte - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: I2C bus address to write to
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface
@@ -1299,6 +1460,55 @@ s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
}
/**
+ * ixgbe_write_i2c_byte_unlocked - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: I2C bus address to write to
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface
+ * at a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte_unlocked,
+ (hw, byte_offset, dev_addr, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_i2c_combined - Perform I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_write_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_combined, (hw, addr,
+ reg, val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_i2c_combined_unlocked - Perform I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ **/
+s32 ixgbe_write_i2c_combined_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg,
+ u16 val)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_combined_unlocked,
+ (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
* ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface
* @hw: pointer to hardware structure
* @byte_offset: EEPROM byte offset to write
@@ -1418,3 +1628,16 @@ void ixgbe_enable_rx(struct ixgbe_hw *hw)
if (hw->mac.ops.enable_rx)
hw->mac.ops.enable_rx(hw);
}
+
+/**
+ * ixgbe_set_rate_select_speed - Set module link speed
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * Set module link speed via the rate select.
+ */
+void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
+{
+ if (hw->mac.ops.set_rate_select_speed)
+ hw->mac.ops.set_rate_select_speed(hw, speed);
+}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_api.h b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_api.h
index 1c12ff6d..bd1208e6 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_api.h
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_api.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define _IXGBE_API_H_
#include "ixgbe_type.h"
-#ident "$Id: ixgbe_api.h,v 1.123 2013/11/22 01:02:01 jtkirshe Exp $"
void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
@@ -71,17 +70,21 @@ s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
u16 phy_data);
s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
+s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw);
s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up);
s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
+s32 ixgbe_set_phy_power(struct ixgbe_hw *, bool on);
void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
@@ -125,6 +128,7 @@ s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool *vfta_changed);
s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
u8 ver);
s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw);
@@ -170,8 +174,18 @@ u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
u8 *data);
+s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 ixgbe_read_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val);
+s32 ixgbe_read_i2c_combined_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg,
+ u16 *val);
s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
u8 data);
+s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+s32 ixgbe_write_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val);
+s32 ixgbe_write_i2c_combined_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg,
+ u16 val);
s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
@@ -197,6 +211,9 @@ void ixgbe_disable_mdd(struct ixgbe_hw *hw);
void ixgbe_enable_mdd(struct ixgbe_hw *hw);
void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap);
void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf);
+s32 ixgbe_enter_lplu(struct ixgbe_hw *hw);
+s32 ixgbe_handle_lasi(struct ixgbe_hw *hw);
+void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed);
void ixgbe_disable_rx(struct ixgbe_hw *hw);
void ixgbe_enable_rx(struct ixgbe_hw *hw);
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_common.c b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_common.c
index 37e5bae5..08754c83 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_common.c
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_common.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -36,7 +36,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_api.h"
-#ident "$Id: ixgbe_common.c,v 1.382 2013/11/22 01:02:01 jtkirshe Exp $"
STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
@@ -75,66 +74,67 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_ops_generic");
/* EEPROM */
- eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
+ eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
if (eec & IXGBE_EEC_PRES) {
- eeprom->ops.read = &ixgbe_read_eerd_generic;
- eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
+ eeprom->ops.read = ixgbe_read_eerd_generic;
+ eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
} else {
- eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
+ eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
eeprom->ops.read_buffer =
- &ixgbe_read_eeprom_buffer_bit_bang_generic;
+ ixgbe_read_eeprom_buffer_bit_bang_generic;
}
- eeprom->ops.write = &ixgbe_write_eeprom_generic;
- eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
+ eeprom->ops.write = ixgbe_write_eeprom_generic;
+ eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
eeprom->ops.validate_checksum =
- &ixgbe_validate_eeprom_checksum_generic;
- eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
- eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
+ ixgbe_validate_eeprom_checksum_generic;
+ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
+ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
/* MAC */
- mac->ops.init_hw = &ixgbe_init_hw_generic;
+ mac->ops.init_hw = ixgbe_init_hw_generic;
mac->ops.reset_hw = NULL;
- mac->ops.start_hw = &ixgbe_start_hw_generic;
- mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
+ mac->ops.start_hw = ixgbe_start_hw_generic;
+ mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
mac->ops.get_media_type = NULL;
mac->ops.get_supported_physical_layer = NULL;
- mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
- mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
- mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
- mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
- mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
- mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
- mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
- mac->ops.prot_autoc_read = &prot_autoc_read_generic;
- mac->ops.prot_autoc_write = &prot_autoc_write_generic;
+ mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
+ mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
+ mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
+ mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
+ mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
+ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
+ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
+ mac->ops.prot_autoc_read = prot_autoc_read_generic;
+ mac->ops.prot_autoc_write = prot_autoc_write_generic;
/* LEDs */
- mac->ops.led_on = &ixgbe_led_on_generic;
- mac->ops.led_off = &ixgbe_led_off_generic;
- mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
- mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
+ mac->ops.led_on = ixgbe_led_on_generic;
+ mac->ops.led_off = ixgbe_led_off_generic;
+ mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
+ mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
/* RAR, Multicast, VLAN */
- mac->ops.set_rar = &ixgbe_set_rar_generic;
- mac->ops.clear_rar = &ixgbe_clear_rar_generic;
+ mac->ops.set_rar = ixgbe_set_rar_generic;
+ mac->ops.clear_rar = ixgbe_clear_rar_generic;
mac->ops.insert_mac_addr = NULL;
mac->ops.set_vmdq = NULL;
mac->ops.clear_vmdq = NULL;
- mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
- mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
- mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
- mac->ops.enable_mc = &ixgbe_enable_mc_generic;
- mac->ops.disable_mc = &ixgbe_disable_mc_generic;
+ mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
+ mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
+ mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
+ mac->ops.enable_mc = ixgbe_enable_mc_generic;
+ mac->ops.disable_mc = ixgbe_disable_mc_generic;
mac->ops.clear_vfta = NULL;
mac->ops.set_vfta = NULL;
mac->ops.set_vlvf = NULL;
mac->ops.init_uta_tables = NULL;
- mac->ops.enable_rx = &ixgbe_enable_rx_generic;
- mac->ops.disable_rx = &ixgbe_disable_rx_generic;
+ mac->ops.enable_rx = ixgbe_enable_rx_generic;
+ mac->ops.disable_rx = ixgbe_disable_rx_generic;
/* Flow Control */
- mac->ops.fc_enable = &ixgbe_fc_enable_generic;
+ mac->ops.fc_enable = ixgbe_fc_enable_generic;
+ mac->ops.setup_fc = ixgbe_setup_fc_generic;
/* Link */
mac->ops.get_link_capabilities = NULL;
@@ -185,6 +185,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
case IXGBE_DEV_ID_X550T:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
supported = true;
break;
default:
@@ -201,19 +202,19 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
}
/**
- * ixgbe_setup_fc - Set up flow control
+ * ixgbe_setup_fc_generic - Set up flow control
* @hw: pointer to hardware structure
*
* Called at init time to set up flow control.
**/
-STATIC s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_SUCCESS;
u32 reg = 0, reg_bp = 0;
u16 reg_cu = 0;
bool locked = false;
- DEBUGFUNC("ixgbe_setup_fc");
+ DEBUGFUNC("ixgbe_setup_fc_generic");
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
@@ -954,7 +955,8 @@ void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
{
struct ixgbe_mac_info *mac = &hw->mac;
- hw->bus.type = ixgbe_bus_type_pci_express;
+ if (hw->bus.type == ixgbe_bus_type_unknown)
+ hw->bus.type = ixgbe_bus_type_pci_express;
switch (link_status & IXGBE_PCI_LINK_WIDTH) {
case IXGBE_PCI_LINK_WIDTH_1:
@@ -1085,7 +1087,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
msec_delay(2);
/*
- * Prevent the PCI-E bus from from hanging by disabling PCI-E master
+ * Prevent the PCI-E bus from hanging by disabling PCI-E master
* access and verify no pending requests
*/
return ixgbe_disable_pcie_master(hw);
@@ -2900,8 +2902,7 @@ STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
(!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
- ERROR_REPORT1(IXGBE_ERROR_POLLING,
- "Auto-Negotiation did not complete or timed out");
+ DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
goto out;
}
@@ -2936,16 +2937,14 @@ STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
*/
links = IXGBE_READ_REG(hw, IXGBE_LINKS);
if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
- ERROR_REPORT1(IXGBE_ERROR_POLLING,
- "Auto-Negotiation did not complete");
+ DEBUGOUT("Auto-Negotiation did not complete\n");
goto out;
}
if (hw->mac.type == ixgbe_mac_82599EB) {
links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
- ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
- "Link partner is not AN enabled");
+ DEBUGOUT("Link partner is not AN enabled\n");
goto out;
}
}
@@ -3143,6 +3142,9 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ if (hw->mac.type >= ixgbe_mac_X550)
+ goto out;
+
/*
* Before proceeding, make sure that the PCIe block does not have
* transactions pending.
@@ -4065,7 +4067,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
}
if (link_up_wait_to_complete) {
- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
if (links_reg & IXGBE_LINKS_UP) {
*link_up = true;
break;
@@ -4082,22 +4084,27 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
*link_up = false;
}
- if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_10G_82599) {
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
- if (hw->mac.type > ixgbe_mac_X550) {
+ if (hw->mac.type >= ixgbe_mac_X550) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
}
- }
- else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_1G_82599)
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_100_82599)
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- else
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
+ break;
+ default:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
return IXGBE_SUCCESS;
}
@@ -4349,6 +4356,7 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* @buffer: contains the command to write and where the return status will
* be placed
* @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
* @return_data: read and return data from the buffer (true) or not (false)
* Needed because FW structures are big endian and decoding of
* these fields can be 8 bit or 16 bit based on command. Decoding
@@ -4360,12 +4368,12 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
**/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
- u32 length, bool return_data)
+ u32 length, u32 timeout, bool return_data)
{
u32 hicr, i, bi, fwsts;
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
u16 buf_len;
- u8 dword_len;
+ u16 dword_len;
DEBUGFUNC("ixgbe_host_interface_command");
@@ -4402,7 +4410,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
- for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
+ for (i = 0; i < timeout; i++) {
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if (!(hicr & IXGBE_HICR_C))
break;
@@ -4410,7 +4418,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
}
/* Check command completion */
- if (i == IXGBE_HI_COMMAND_TIMEOUT ||
+ if ((timeout != 0 && i == timeout) ||
!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
ERROR_REPORT1(IXGBE_ERROR_CAUTION,
"Command has failed with no status valid.\n");
@@ -4495,7 +4503,9 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
- sizeof(fw_cmd), true);
+ sizeof(fw_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
if (ret_val != IXGBE_SUCCESS)
continue;
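The extra parameter lets each caller pick its own polling budget rather than the hard-wired IXGBE_HI_COMMAND_TIMEOUT; a timeout of 0 skips the polling loop entirely, though the status-valid bit is still required. A hedged usage sketch (the command layout is command-specific and only schematic here):

/* Sketch: issue a host interface command with an explicit timeout. */
s32 rc = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
                                      sizeof(fw_cmd),
                                      IXGBE_HI_COMMAND_TIMEOUT, /* in ms */
                                      true /* read back return data */);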
@@ -4582,7 +4592,8 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
**/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
- u32 gcr_ext, hlreg0;
+ u32 gcr_ext, hlreg0, i, poll;
+ u16 value;
/*
* If double reset is not requested then all transactions should
@@ -4599,6 +4610,25 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
+ /* Wait for a last completion before clearing buffers */
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(3);
+
+ /*
+ * Before proceeding, make sure that the PCIe block does not have
+ * transactions pending.
+ */
+ poll = ixgbe_pcie_timeout_poll(hw);
+ for (i = 0; i < poll; i++) {
+ usec_delay(100);
+ value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
+ if (IXGBE_REMOVED(hw->hw_addr))
+ goto out;
+ if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+ goto out;
+ }
+
+out:
/* initiate cleaning flow for buffers in the PCIe transaction layer */
gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
@@ -4842,6 +4872,22 @@ void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_mng_present - returns true when management capability is present
+ * @hw: pointer to hardware structure
+ */
+bool ixgbe_mng_present(struct ixgbe_hw *hw)
+{
+ u32 fwsm;
+
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return false;
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ fwsm &= IXGBE_FWSM_MODE_MASK;
+ return fwsm == IXGBE_FWSM_FW_MODE_PT;
+}
+
+/**
* ixgbe_mng_enabled - Is the manageability engine enabled?
* @hw: pointer to hardware structure
*
@@ -4867,3 +4913,231 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
return true;
}
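Unlike ixgbe_mng_enabled, which applies further checks in the full source, ixgbe_mng_present answers only whether the firmware-mode field of FWSM reports pass-through manageability. A sketch of the intended call site:

/* Sketch: gate manageability-dependent behaviour on FW presence. */
if (ixgbe_mng_present(hw)) {
        /* firmware manageability engine is running in PT mode */
}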
+
+/**
+ * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the MAC and/or PHY register and restarts link.
+ **/
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ s32 status = IXGBE_SUCCESS;
+ u32 speedcnt = 0;
+ u32 i = 0;
+ bool autoneg, link_up = false;
+
+ DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
+
+ /* Mask off requested but non-supported speeds */
+ status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ speed &= link_speed;
+
+ /* Try each speed one by one, highest priority first. We do this in
+ * software because 10Gb fiber doesn't support speed autonegotiation.
+ */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ speedcnt++;
+ highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ ixgbe_set_rate_select_speed(hw,
+ IXGBE_LINK_SPEED_10GB_FULL);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects MAC link speed */
+ break;
+ default:
+ DEBUGOUT("Unexpected media type.\n");
+ break;
+ }
+
+ /* Allow module to change analog characteristics (1G->10G) */
+ msec_delay(40);
+
+ status = ixgbe_setup_mac_link(hw,
+ IXGBE_LINK_SPEED_10GB_FULL,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Flap the Tx laser if it has not already been done */
+ ixgbe_flap_tx_laser(hw);
+
+ /* Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted. 82599 uses the same timing for 10g SFI.
+ */
+ for (i = 0; i < 5; i++) {
+ /* Wait for the link partner to also set speed */
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed,
+ &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ speedcnt++;
+ if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
+ highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ ixgbe_set_rate_select_speed(hw,
+ IXGBE_LINK_SPEED_1GB_FULL);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects link speed */
+ break;
+ default:
+ DEBUGOUT("Unexpected media type.\n");
+ break;
+ }
+
+ /* Allow module to change analog characteristics (10G->1G) */
+ msec_delay(40);
+
+ status = ixgbe_setup_mac_link(hw,
+ IXGBE_LINK_SPEED_1GB_FULL,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Flap the Tx laser if it has not already been done */
+ ixgbe_flap_tx_laser(hw);
+
+ /* Wait for the link partner to also set speed */
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+
+	/* We didn't get link. Configure back to the highest speed we tried
+	 * (if there was more than one), calling ourselves back with just the
+ * single highest speed that the user requested.
+ */
+ if (speedcnt > 1)
+ status = ixgbe_setup_mac_link_multispeed_fiber(hw,
+ highest_link_speed,
+ autoneg_wait_to_complete);
+
+out:
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ return status;
+}
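The routine walks the requested speeds from highest to lowest in software, since 10G fiber has no speed autonegotiation: it sets the module rate, flaps the Tx laser, and polls for link, finally recursing once with only the highest speed if nothing came up. Usage sketch:

/* Sketch: request the best of 10G/1G on a multispeed fiber port. */
s32 rc = ixgbe_setup_mac_link_multispeed_fiber(hw,
                IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
                true /* autoneg_wait_to_complete */);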
+
+/**
+ * ixgbe_set_soft_rate_select_speed - Set module link speed
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * Set module link speed via the soft rate select.
+ */
+void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ s32 status;
+ u8 rs, eeprom_data;
+
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+		/* one-bit mask; the value is the same as the bit being set */
+ rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+ break;
+ default:
+ DEBUGOUT("Invalid fixed module speed\n");
+ return;
+ }
+
+ /* Set RS0 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to read Rx Rate Select RS0\n");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to write Rx Rate Select RS0\n");
+ goto out;
+ }
+
+ /* Set RS1 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to read Rx Rate Select RS1\n");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to write Rx Rate Select RS1\n");
+ goto out;
+ }
+out:
+ return;
+}
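The RS0/RS1 writes follow SFF-8472 soft rate select: OSCB (0x6E) and ESCB (0x76) sit behind the module's diagnostic I2C address, and only the single rate-select bit is rewritten. Callers normally reach this through the helper defined above:

/* Sketch: drop a module to 1G rate select before attempting 1G link. */
ixgbe_set_soft_rate_select_speed(hw, IXGBE_LINK_SPEED_1GB_FULL);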
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_common.h b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_common.h
index bfd41aad..fd67a889 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_common.h
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_common.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define _IXGBE_COMMON_H_
#include "ixgbe_type.h"
-#ident "$Id: ixgbe_common.h,v 1.143 2013/11/22 01:02:01 jtkirshe Exp $"
#define IXGBE_WRITE_REG64(hw, reg, value) \
do { \
IXGBE_WRITE_REG(hw, reg, (u32) value); \
@@ -112,6 +111,7 @@ s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
@@ -157,12 +157,13 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
- u32 length, bool return_data);
+ u32 length, u32 timeout, bool return_data);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
+bool ixgbe_mng_present(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
@@ -179,4 +180,9 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed);
#endif /* IXGBE_COMMON */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.c b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb.c
index 2245f276..dce9e9ab 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.c
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -36,7 +36,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82598.h"
#include "ixgbe_dcb_82599.h"
-#ident "$Id: ixgbe_dcb.c,v 1.55 2013/11/22 01:02:01 jtkirshe Exp $"
/**
* ixgbe_dcb_calculate_tc_credits - This calculates the ieee traffic class
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.h b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb.h
index 633abb2e..41208049 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.h
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -34,8 +34,6 @@ POSSIBILITY OF SUCH DAMAGE.
#ifndef _IXGBE_DCB_H_
#define _IXGBE_DCB_H_
-#ident "$Id: ixgbe_dcb.h,v 1.39 2012/04/17 00:07:40 jtkirshe Exp $"
-
#include "ixgbe_type.h"
/* DCB defines */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.c b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb_82598.c
index a6161cd5..7ff7beb4 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.c
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb_82598.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82598.h"
-#ident "$Id: ixgbe_dcb_82598.c,v 1.29 2012/03/30 06:45:33 jtkirshe Exp $"
/**
* ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.h b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb_82598.h
index 9307644c..eb88b3d3 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.h
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb_82598.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,7 +33,6 @@ POSSIBILITY OF SUCH DAMAGE.
#ifndef _IXGBE_DCB_82598_H_
#define _IXGBE_DCB_82598_H_
-#ident "$Id: ixgbe_dcb_82598.h,v 1.12 2012/03/26 22:28:19 jtkirshe Exp $"
/* DCB register definitions */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.c b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb_82599.c
index e754d1a4..a52f83a0 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.c
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb_82599.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"
-#ident "$Id: ixgbe_dcb_82599.c,v 1.67 2012/03/30 06:45:33 jtkirshe Exp $"
/**
* ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
@@ -299,7 +298,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
*/
reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
- if (hw->mac.type == ixgbe_mac_X540)
+ if (hw->mac.type >= ixgbe_mac_X540)
reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
if (pfc_en)
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.h b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb_82599.h
index 94a5e9e0..dc0fb284 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.h
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_dcb_82599.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,7 +33,6 @@ POSSIBILITY OF SUCH DAMAGE.
#ifndef _IXGBE_DCB_82599_H_
#define _IXGBE_DCB_82599_H_
-#ident "$Id: ixgbe_dcb_82599.h,v 1.34 2013/03/20 21:52:47 jtkirshe Exp $"
/* DCB register definitions */
#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin,
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_mbx.c
index c00c2f7c..e2d969fc 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_mbx.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_mbx.h
index 4594572a..445df105 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_mbx.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_osdep.h
index 7f95ae48..15a3afb5 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -103,11 +103,11 @@ enum {
typedef uint8_t u8;
typedef int8_t s8;
typedef uint16_t u16;
-typedef int16_t s16;
+typedef int16_t s16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;
-
+// trex change
#ifndef __cplusplus
typedef int bool;
#endif
@@ -116,17 +116,19 @@ typedef int bool;
#define wmb() rte_wmb()
#define rmb() rte_rmb()
+#define IOMEM
+
#define prefetch(x) rte_prefetch0(x)
#define IXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
static inline uint32_t ixgbe_read_addr(volatile void* addr)
{
- return IXGBE_PCI_REG(addr);
+ return rte_le_to_cpu_32(IXGBE_PCI_REG(addr));
}
#define IXGBE_PCI_REG_WRITE(reg, value) do { \
- IXGBE_PCI_REG((reg)) = (value); \
+ IXGBE_PCI_REG((reg)) = (rte_cpu_to_le_32(value)); \
} while(0)
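Routing the MMIO access through rte_le_to_cpu_32()/rte_cpu_to_le_32() makes the accessors correct on big-endian CPUs; device registers are little-endian, and on little-endian hosts the conversions compile to nothing. A brief sketch with the updated macros:

/* Sketch: endian-safe register round trip. */
volatile void *ctrl = IXGBE_PCI_REG_ADDR(hw, IXGBE_CTRL);
uint32_t v = ixgbe_read_addr(ctrl);   /* LE register -> CPU byte order */
IXGBE_PCI_REG_WRITE(ctrl, v);         /* CPU byte order -> LE register */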
#define IXGBE_PCI_REG_ADDR(hw, reg) \
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_phy.c
index af8b6d3e..51f32c65 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_phy.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -34,7 +34,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-#ident "$Id: ixgbe_phy.c,v 1.155 2013/08/14 22:34:03 jtkirshe Exp $"
STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw);
STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw);
@@ -101,16 +100,17 @@ STATIC u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
}
/**
- * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation
* @hw: pointer to the hardware structure
* @addr: I2C bus address to read from
* @reg: I2C device register to read from
* @val: pointer to location to receive read value
+ * @lock: true to take and release the semaphore
*
* Returns an error code on error.
*/
-STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
- u16 reg, u16 *val)
+STATIC s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
int max_retry = 10;
@@ -121,11 +121,13 @@ STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
u8 reg_high;
u8 csum;
+ if (hw->mac.type >= ixgbe_mac_X550)
+ max_retry = 3;
reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */
csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
csum = ~csum;
do {
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return IXGBE_ERR_SWFW_SYNC;
ixgbe_i2c_start(hw);
/* Device Address and write indication */
@@ -158,13 +160,15 @@ STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
if (ixgbe_clock_out_i2c_bit(hw, false))
goto fail;
ixgbe_i2c_stop(hw);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
*val = (high_bits << 8) | low_bits;
return 0;
fail:
ixgbe_i2c_bus_clear(hw);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
retry++;
if (retry < max_retry)
DEBUGOUT("I2C byte read combined error - Retrying.\n");
@@ -176,17 +180,50 @@ fail:
}
/**
- * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ **/
+STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val)
+{
+ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
+}
+
+/**
+ * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ **/
+STATIC s32
+ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val)
+{
+ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false);
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
* @hw: pointer to the hardware structure
* @addr: I2C bus address to write to
* @reg: I2C device register to write to
* @val: value to write
+ * @lock: true to take and release the semaphore
*
* Returns an error code on error.
*/
-STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
- u8 addr, u16 reg, u16 val)
+STATIC s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 val, bool lock)
{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
int max_retry = 1;
int retry = 0;
u8 reg_high;
@@ -198,6 +235,8 @@ STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
csum = ~csum;
do {
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
ixgbe_i2c_start(hw);
/* Device Address and write indication */
if (ixgbe_out_i2c_byte_ack(hw, addr))
@@ -218,10 +257,14 @@ STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
if (ixgbe_out_i2c_byte_ack(hw, csum))
goto fail;
ixgbe_i2c_stop(hw);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
return 0;
fail:
ixgbe_i2c_bus_clear(hw);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
retry++;
if (retry < max_retry)
DEBUGOUT("I2C byte write combined error - Retrying.\n");
@@ -233,6 +276,37 @@ fail:
}
/**
+ * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ **/
+STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+ u8 addr, u16 reg, u16 val)
+{
+ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ **/
+STATIC s32
+ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
+ u8 addr, u16 reg, u16 val)
+{
+ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false);
+}
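Splitting each accessor into an _int worker with a lock flag plus thin locked/unlocked wrappers lets X550 paths that already hold the SWFW semaphore reuse the same bit-bang transaction without deadlocking. The shape of the pattern, with an invented worker name:

/* Sketch of the pattern (do_xfer_int is a placeholder name). */
static s32 do_xfer_int(struct ixgbe_hw *hw, bool lock)
{
	u32 mask = hw->phy.phy_semaphore_mask;

	if (lock && hw->mac.ops.acquire_swfw_sync(hw, mask))
		return IXGBE_ERR_SWFW_SYNC;
	/* ... perform the I2C transaction ... */
	if (lock)
		hw->mac.ops.release_swfw_sync(hw, mask);
	return IXGBE_SUCCESS;
}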
+
+/**
* ixgbe_init_phy_ops_generic - Inits PHY function ptrs
* @hw: pointer to the hardware structure
*
@@ -245,27 +319,34 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_phy_ops_generic");
/* PHY */
- phy->ops.identify = &ixgbe_identify_phy_generic;
- phy->ops.reset = &ixgbe_reset_phy_generic;
- phy->ops.read_reg = &ixgbe_read_phy_reg_generic;
- phy->ops.write_reg = &ixgbe_write_phy_reg_generic;
- phy->ops.read_reg_mdi = &ixgbe_read_phy_reg_mdi;
- phy->ops.write_reg_mdi = &ixgbe_write_phy_reg_mdi;
- phy->ops.setup_link = &ixgbe_setup_phy_link_generic;
- phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic;
+ phy->ops.identify = ixgbe_identify_phy_generic;
+ phy->ops.reset = ixgbe_reset_phy_generic;
+ phy->ops.read_reg = ixgbe_read_phy_reg_generic;
+ phy->ops.write_reg = ixgbe_write_phy_reg_generic;
+ phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi;
+ phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi;
+ phy->ops.setup_link = ixgbe_setup_phy_link_generic;
+ phy->ops.setup_link_speed = ixgbe_setup_phy_link_speed_generic;
phy->ops.check_link = NULL;
phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic;
- phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_generic;
- phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_generic;
- phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic;
- phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic;
- phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic;
- phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
- phy->ops.identify_sfp = &ixgbe_identify_module_generic;
+ phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_generic;
+ phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_generic;
+ phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_generic;
+ phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_generic;
+ phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_generic;
+ phy->ops.i2c_bus_clear = ixgbe_i2c_bus_clear;
+ phy->ops.identify_sfp = ixgbe_identify_module_generic;
phy->sfp_type = ixgbe_sfp_type_unknown;
- phy->ops.read_i2c_combined = &ixgbe_read_i2c_combined_generic;
- phy->ops.write_i2c_combined = &ixgbe_write_i2c_combined_generic;
- phy->ops.check_overtemp = &ixgbe_tn_check_overtemp;
+ phy->ops.read_i2c_combined = ixgbe_read_i2c_combined_generic;
+ phy->ops.write_i2c_combined = ixgbe_write_i2c_combined_generic;
+ phy->ops.read_i2c_combined_unlocked =
+ ixgbe_read_i2c_combined_generic_unlocked;
+ phy->ops.write_i2c_combined_unlocked =
+ ixgbe_write_i2c_combined_generic_unlocked;
+ phy->ops.read_i2c_byte_unlocked = ixgbe_read_i2c_byte_generic_unlocked;
+ phy->ops.write_i2c_byte_unlocked =
+ ixgbe_write_i2c_byte_generic_unlocked;
+ phy->ops.check_overtemp = ixgbe_tn_check_overtemp;
return IXGBE_SUCCESS;
}
@@ -284,9 +365,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_identify_phy_generic");
if (!hw->phy.phy_semaphore_mask) {
- hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) &
- IXGBE_STATUS_LAN_ID_1;
- if (hw->phy.lan_id)
+ if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
else
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
@@ -428,7 +507,9 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
- case X550_PHY_ID:
+ case X550_PHY_ID1:
+ case X550_PHY_ID2:
+ case X550_PHY_ID3:
case X540_PHY_ID:
phy_type = ixgbe_phy_aq;
break;
@@ -438,6 +519,9 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case ATH_PHY_ID:
phy_type = ixgbe_phy_nl;
break;
+ case X557_PHY_ID:
+ phy_type = ixgbe_phy_x550em_ext_t;
+ break;
default:
phy_type = ixgbe_phy_unknown;
break;
@@ -745,6 +829,44 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
autoneg_reg);
}
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (speed & IXGBE_LINK_SPEED_5GB_FULL) {
+ /* Set or unset auto-negotiation 5G advertisement */
+ hw->phy.ops.read_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_5GB_FULL)
+ autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) {
+ /* Set or unset auto-negotiation 2.5G advertisement */
+ hw->phy.ops.read_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_2_5GB_FULL)
+ autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+ }
+
if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
/* Set or unset auto-negotiation 1G advertisement */
hw->phy.ops.read_reg(hw,
@@ -816,6 +938,12 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (speed & IXGBE_LINK_SPEED_5GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
@@ -834,7 +962,8 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
* @speed: pointer to link speed
* @autoneg: boolean auto-negotiation value
*
- * Determines the link capabilities by reading the AUTOC register.
+ * Determines the supported link capabilities by reading the PHY auto
+ * negotiation register.
**/
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
@@ -861,6 +990,15 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
*speed |= IXGBE_LINK_SPEED_100_FULL;
}
+ /* Internal PHY does not support 100 Mbps */
+ if (hw->mac.type == ixgbe_mac_X550EM_x)
+ *speed &= ~IXGBE_LINK_SPEED_100_FULL;
+
+ if (hw->mac.type == ixgbe_mac_X550) {
+ *speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
+ *speed |= IXGBE_LINK_SPEED_5GB_FULL;
+ }
+
return status;
}
@@ -1202,6 +1340,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
goto out;
}
+ /* LAN ID is needed for I2C access */
+ hw->mac.ops.set_lan_id(hw);
+
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_IDENTIFIER,
&identifier);
@@ -1209,9 +1350,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
- /* LAN ID is needed for sfp_type determination */
- hw->mac.ops.set_lan_id(hw);
-
if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -1430,7 +1568,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = IXGBE_SUCCESS;
} else {
if (hw->allow_unsupported_sfp == true) {
- printf("SFP+ is not Intel \n");
EWARN(hw, "WARNING: Intel (R) Network "
"Connections are quality tested "
"using Intel (R) Ethernet Optics."
@@ -1443,7 +1580,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
"untested modules.\n", status);
status = IXGBE_SUCCESS;
} else {
- printf("SFP+ module not supported\n");
DEBUGOUT("SFP+ module not supported\n");
hw->phy.type =
ixgbe_phy_sfp_unsupported;
@@ -1558,6 +1694,9 @@ s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
goto out;
}
+ /* LAN ID is needed for I2C access */
+ hw->mac.ops.set_lan_id(hw);
+
status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
&identifier);
@@ -1572,9 +1711,6 @@ s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
hw->phy.id = identifier;
- /* LAN ID is needed for sfp_type determination */
- hw->mac.ops.set_lan_id(hw);
-
status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
&comp_codes_10g);
@@ -1877,16 +2013,32 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
}
/**
- * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
+ * ixgbe_is_sfp_probe - Returns true if SFP is being detected
+ * @hw: pointer to hardware structure
+ * @offset: eeprom offset to be read
+ * @addr: I2C address to be read
+ */
+STATIC bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
+{
+ if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
+ offset == IXGBE_SFF_IDENTIFIER &&
+ hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ return true;
+ return false;
+}
+
+/**
+ * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
* @data: value read
+ * @lock: true to take and release the semaphore
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
**/
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data)
+STATIC s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data, bool lock)
{
s32 status;
u32 max_retry = 10;
@@ -1897,8 +2049,13 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
DEBUGFUNC("ixgbe_read_i2c_byte_generic");
+ if (hw->mac.type >= ixgbe_mac_X550)
+ max_retry = 3;
+ if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
+ max_retry = IXGBE_SFP_DETECT_RETRIES;
+
do {
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return IXGBE_ERR_SWFW_SYNC;
ixgbe_i2c_start(hw);
@@ -1940,13 +2097,16 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
goto fail;
ixgbe_i2c_stop(hw);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
return IXGBE_SUCCESS;
fail:
ixgbe_i2c_bus_clear(hw);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
- msec_delay(100);
+ if (lock) {
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(100);
+ }
retry++;
if (retry < max_retry)
DEBUGOUT("I2C byte read error - Retrying.\n");
@@ -1959,28 +2119,60 @@ fail:
}
/**
- * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
+ * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, true);
+}
+
+/**
+ * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, false);
+}
+
+/**
+ * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
* @data: value to write
+ * @lock: true to take and release the semaphore
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
**/
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data)
+STATIC s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data, bool lock)
{
- s32 status = IXGBE_SUCCESS;
+ s32 status;
u32 max_retry = 1;
u32 retry = 0;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
DEBUGFUNC("ixgbe_write_i2c_byte_generic");
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) {
- status = IXGBE_ERR_SWFW_SYNC;
- goto write_byte_out;
- }
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) !=
+ IXGBE_SUCCESS)
+ return IXGBE_ERR_SWFW_SYNC;
do {
ixgbe_i2c_start(hw);
@@ -2010,7 +2202,8 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
goto fail;
ixgbe_i2c_stop(hw);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
return IXGBE_SUCCESS;
fail:
@@ -2022,17 +2215,50 @@ fail:
DEBUGOUT("I2C byte write error.\n");
} while (retry < max_retry);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-write_byte_out:
return status;
}
/**
+ * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, true);
+}
+
+/**
+ * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, false);
+}
+
+/**
* ixgbe_i2c_start - Sets I2C start condition
* @hw: pointer to hardware structure
*
* Sets I2C start condition (High -> Low on SDA while SCL is High)
+ * Set bit-bang mode on X550 hardware.
**/
STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw)
{
@@ -2040,6 +2266,8 @@ STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_i2c_start");
+ i2cctl |= IXGBE_I2C_BB_EN_BY_MAC(hw);
+
/* Start condition must begin with data and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 1);
ixgbe_raise_i2c_clk(hw, &i2cctl);
@@ -2064,10 +2292,15 @@ STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
* Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ * Disables bit-bang mode and negates data output enable on X550
+ * hardware.
**/
STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw)
{
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+ u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
+ u32 bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw);
DEBUGFUNC("ixgbe_i2c_stop");
@@ -2082,6 +2315,13 @@ STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw)
/* bus free time between stop and start (4.7us)*/
usec_delay(IXGBE_I2C_T_BUF);
+
+ if (bb_en_bit || data_oe_bit || clk_oe_bit) {
+ i2cctl &= ~bb_en_bit;
+ i2cctl |= data_oe_bit | clk_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
}
/**
@@ -2098,6 +2338,7 @@ STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
DEBUGFUNC("ixgbe_clock_in_i2c_byte");
+ *data = 0;
for (i = 7; i >= 0; i--) {
ixgbe_clock_in_i2c_bit(hw, &bit);
*data |= bit << i;
@@ -2133,6 +2374,7 @@ STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
/* Release SDA line (set high) */
i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ i2cctl |= IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
IXGBE_WRITE_FLUSH(hw);
@@ -2147,6 +2389,7 @@ STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
**/
STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
s32 status = IXGBE_SUCCESS;
u32 i = 0;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
@@ -2155,9 +2398,14 @@ STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_i2c_ack");
+ if (data_oe_bit) {
+ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
ixgbe_raise_i2c_clk(hw, &i2cctl);
-
/* Minimum high period of clock is 4us */
usec_delay(IXGBE_I2C_T_HIGH);
@@ -2195,9 +2443,16 @@ STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
DEBUGFUNC("ixgbe_clock_in_i2c_bit");
+ if (data_oe_bit) {
+ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
ixgbe_raise_i2c_clk(hw, &i2cctl);
/* Minimum high period of clock is 4us */
@@ -2256,15 +2511,22 @@ STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
* @i2cctl: Current value of I2CCTL register
*
* Raises the I2C clock line '0'->'1'
+ * Negates the I2C clock output enable on X550 hardware.
**/
STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
+ u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
u32 i = 0;
u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
u32 i2cctl_r = 0;
DEBUGFUNC("ixgbe_raise_i2c_clk");
+ if (clk_oe_bit) {
+ *i2cctl |= clk_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ }
+
for (i = 0; i < timeout; i++) {
*i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
@@ -2285,13 +2547,14 @@ STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
* @i2cctl: Current value of I2CCTL register
*
* Lowers the I2C clock line '1'->'0'
+ * Asserts the I2C clock output enable on X550 hardware.
**/
STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
-
DEBUGFUNC("ixgbe_lower_i2c_clk");
*i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw));
+ *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
@@ -2307,9 +2570,11 @@ STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
* @data: I2C data value (0 or 1) to set
*
* Sets the I2C data bit
+ * Asserts the I2C data output enable on X550 hardware.
**/
STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
s32 status = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_set_i2c_data");
@@ -2318,6 +2583,7 @@ STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
*i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
else
*i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw));
+ *i2cctl &= ~data_oe_bit;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
@@ -2325,6 +2591,14 @@ STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
+ if (!data) /* Can't verify data in this case */
+ return IXGBE_SUCCESS;
+ if (data_oe_bit) {
+ *i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
/* Verify data was set correctly */
*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
@@ -2343,14 +2617,22 @@ STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
* @i2cctl: Current value of I2CCTL register
*
* Returns the I2C data bit value
+ * Negates the I2C data output enable on X550 hardware.
**/
STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
{
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
bool data;
- UNREFERENCED_1PARAMETER(hw);
DEBUGFUNC("ixgbe_get_i2c_data");
+ if (data_oe_bit) {
+ *i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(IXGBE_I2C_T_FALL);
+ }
+
if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw))
data = 1;
else
@@ -2368,12 +2650,13 @@ STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
**/
void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl;
u32 i;
DEBUGFUNC("ixgbe_i2c_bus_clear");
ixgbe_i2c_start(hw);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
ixgbe_set_i2c_data(hw, &i2cctl, 1);
@@ -2423,3 +2706,33 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
out:
return status;
}
+
+/**
+ * ixgbe_set_copper_phy_power - Control power for copper phy
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ */
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
+{
+ u32 status;
+ u16 reg;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ if (on) {
+ reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+ } else {
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+ reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+ }
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+ return status;
+}
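The helper flips the low-power bit in the vendor-specific PHY control register and deliberately refuses to power the PHY down while a reset is blocked, since manageability may still need it. Usage sketch:

/* Sketch: typical stop/start power hooks for a copper PHY. */
ixgbe_set_copper_phy_power(hw, false); /* stop: low power, unless blocked */
ixgbe_set_copper_phy_power(hw, true);  /* start: clear low-power mode */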
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_phy.h
index e262cc41..1f07c1e9 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_phy.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,8 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
#define IXGBE_SFF_SFF_8472_SWAP 0x5C
#define IXGBE_SFF_SFF_8472_COMP 0x5E
+#define IXGBE_SFF_SFF_8472_OSCB 0x6E
+#define IXGBE_SFF_SFF_8472_ESCB 0x76
#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD
#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5
#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6
@@ -70,6 +72,9 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
#define IXGBE_SFF_ADDRESSING_MODE 0x4
#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
@@ -82,10 +87,27 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
-#define IXGBE_CS4227 0x9E /* CS4227 address */
-#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */
+#define IXGBE_CS4227 0xBE /* CS4227 address */
+#define IXGBE_CS4227_GLOBAL_ID_LSB 0
+#define IXGBE_CS4227_SCRATCH 2
+#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5
+#define IXGBE_CS4227_SCRATCH_VALUE 0x5aa5
+#define IXGBE_CS4227_RETRIES 5
+#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */
+#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */
+#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */
+#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */
+#define IXGBE_CS4227_SPEED_1G 0x8000
+#define IXGBE_CS4227_SPEED_10G 0
#define IXGBE_CS4227_EDC_MODE_CX1 0x0002
#define IXGBE_CS4227_EDC_MODE_SR 0x0004
+#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */
+#define IXGBE_CS4227_RESET_DELAY 500 /* milliseconds */
+#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */
+#define IXGBE_PE 0xE0 /* Port expander address */
+#define IXGBE_PE_OUTPUT 1 /* Output register offset */
+#define IXGBE_PE_CONFIG 3 /* Config register offset */
+#define IXGBE_PE_BIT1 (1 << 1)
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
@@ -114,6 +136,10 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_I2C_T_SU_STO 4
#define IXGBE_I2C_T_BUF 5
+#ifndef IXGBE_SFP_DETECT_RETRIES
+#define IXGBE_SFP_DETECT_RETRIES 10
+
+#endif /* IXGBE_SFP_DETECT_RETRIES */
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
@@ -156,6 +182,7 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw);
@@ -166,8 +193,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
+s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
+s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_type.h b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_type.h
index c67d462f..881f34cc 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_type.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -74,7 +74,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_osdep.h"
-#ident "$Id: ixgbe_type.h,v 1.630 2013/11/22 22:48:40 jtkirshe Exp $"
+/* Override this by setting IOMEM in your ixgbe_osdep.h header */
/* Vendor ID */
#define IXGBE_INTEL_VENDOR_ID 0x8086
@@ -109,6 +109,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159
#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D
#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM 0x06EE
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
@@ -126,16 +127,21 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X540_VF_HV 0x1530
#define IXGBE_DEV_ID_X540T1 0x1560
-#define IXGBE_DEV_ID_X550EM_X 0x15A7
-#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
#define IXGBE_DEV_ID_X550T 0x1563
#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
+#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
+#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
+#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_CAT(r, m) IXGBE_##r##m
+
+#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, _IDX)])
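IXGBE_BY_MAC trades the old per-register ternaries on hw->mac.type for a table lookup: each hw instance carries an mvals[] array, filled in once per MAC family, indexed by generated per-register constants. A hedged sketch of the expansion (the IXGBE_I2CCTL_IDX index name is assumed from the macro pasting):

/* Sketch: IXGBE_I2CCTL_BY_MAC(hw) expands through IXGBE_CAT to
 *     hw->mvals[IXGBE_I2CCTL_IDX]
 * which an X550 init path would have set to IXGBE_I2CCTL_X550. */
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));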
+
/* General Registers */
#define IXGBE_CTRL 0x00000
#define IXGBE_STATUS 0x00008
@@ -143,9 +149,11 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_ESDP 0x00020
#define IXGBE_EODSDP 0x00028
#define IXGBE_I2CCTL_82599 0x00028
+#define IXGBE_I2CCTL IXGBE_I2CCTL_82599
+#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_82599
#define IXGBE_I2CCTL_X550 0x15F5C
-#define IXGBE_I2CCTL_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
- IXGBE_I2CCTL_X550 : IXGBE_I2CCTL_82599))
+#define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550
+#define IXGBE_I2CCTL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2CCTL)
#define IXGBE_PHY_GPIO 0x00028
#define IXGBE_MAC_GPIO 0x00030
#define IXGBE_PHYINT_STATUS0 0x00100
@@ -158,18 +166,40 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_EXVET 0x05078
/* NVM Registers */
-#define IXGBE_EEC 0x10010
-#define IXGBE_EERD 0x10014
-#define IXGBE_EEWR 0x10018
-#define IXGBE_FLA 0x1001C
+#define IXGBE_EEC 0x10010
+#define IXGBE_EEC_X540 IXGBE_EEC
+#define IXGBE_EEC_X550 IXGBE_EEC
+#define IXGBE_EEC_X550EM_x IXGBE_EEC
+#define IXGBE_EEC_BY_MAC(_hw) IXGBE_EEC
+
+#define IXGBE_EERD 0x10014
+#define IXGBE_EEWR 0x10018
+
+#define IXGBE_FLA 0x1001C
+#define IXGBE_FLA_X540 IXGBE_FLA
+#define IXGBE_FLA_X550 IXGBE_FLA
+#define IXGBE_FLA_X550EM_x IXGBE_FLA
+#define IXGBE_FLA_BY_MAC(_hw) IXGBE_FLA
+
#define IXGBE_EEMNGCTL 0x10110
#define IXGBE_EEMNGDATA 0x10114
#define IXGBE_FLMNGCTL 0x10118
#define IXGBE_FLMNGDATA 0x1011C
#define IXGBE_FLMNGCNT 0x10120
#define IXGBE_FLOP 0x1013C
-#define IXGBE_GRC 0x10200
-#define IXGBE_SRAMREL 0x10210
+
+#define IXGBE_GRC 0x10200
+#define IXGBE_GRC_X540 IXGBE_GRC
+#define IXGBE_GRC_X550 IXGBE_GRC
+#define IXGBE_GRC_X550EM_x IXGBE_GRC
+#define IXGBE_GRC_BY_MAC(_hw) IXGBE_GRC
+
+#define IXGBE_SRAMREL 0x10210
+#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL
+#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL
+#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL
+#define IXGBE_SRAMREL_BY_MAC(_hw) IXGBE_SRAMREL
+
#define IXGBE_PHYDBG 0x10218
/* General Receive Control */
@@ -180,14 +210,48 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_VPDDIAG1 0x10208
/* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00004000 : 0x00000001)
-#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00000200 : 0x00000002)
-#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00001000 : 0x00000004)
-#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00000400 : 0x00000008)
+#define IXGBE_I2C_CLK_IN 0x00000001
+#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN
+#define IXGBE_I2C_CLK_IN_X550 0x00004000
+#define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550
+#define IXGBE_I2C_CLK_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN)
+
+#define IXGBE_I2C_CLK_OUT 0x00000002
+#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT
+#define IXGBE_I2C_CLK_OUT_X550 0x00000200
+#define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550
+#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT)
+
+#define IXGBE_I2C_DATA_IN 0x00000004
+#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN
+#define IXGBE_I2C_DATA_IN_X550 0x00001000
+#define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550
+#define IXGBE_I2C_DATA_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN)
+
+#define IXGBE_I2C_DATA_OUT 0x00000008
+#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT
+#define IXGBE_I2C_DATA_OUT_X550 0x00000400
+#define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550
+#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT)
+
+#define IXGBE_I2C_DATA_OE_N_EN 0
+#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN
+#define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800
+#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550
+#define IXGBE_I2C_DATA_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN)
+
+#define IXGBE_I2C_BB_EN 0
+#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN
+#define IXGBE_I2C_BB_EN_X550 0x00000100
+#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550
+
+#define IXGBE_I2C_BB_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN)
+
+#define IXGBE_I2C_CLK_OE_N_EN 0
+#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN
+#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000
+#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550
+#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN)
#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
@@ -618,6 +682,7 @@ struct ixgbe_dmac_config {
#define IXGBE_EEER 0x043A0 /* EEE register */
#define IXGBE_EEE_STAT 0x04398 /* EEE Status */
#define IXGBE_EEE_SU 0x04380 /* EEE Set up */
+#define IXGBE_EEE_SU_TEEE_DLY_SHIFT 26
#define IXGBE_TLPIC 0x041F4 /* EEE Tx LPI count */
#define IXGBE_RLPIC 0x041F8 /* EEE Rx LPI count */
@@ -995,14 +1060,34 @@ struct ixgbe_dmac_config {
#define IXGBE_GSCN_2 0x11028
#define IXGBE_GSCN_3 0x1102C
#define IXGBE_FACTPS 0x10150
+#define IXGBE_FACTPS_X540 IXGBE_FACTPS
+#define IXGBE_FACTPS_X550 IXGBE_FACTPS
+#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS
+#define IXGBE_FACTPS_BY_MAC(_hw) IXGBE_FACTPS
+
#define IXGBE_PCIEANACTL 0x11040
#define IXGBE_SWSM 0x10140
+#define IXGBE_SWSM_X540 IXGBE_SWSM
+#define IXGBE_SWSM_X550 IXGBE_SWSM
+#define IXGBE_SWSM_X550EM_x IXGBE_SWSM
+#define IXGBE_SWSM_BY_MAC(_hw) IXGBE_SWSM
+
#define IXGBE_FWSM 0x10148
+#define IXGBE_FWSM_X540 IXGBE_FWSM
+#define IXGBE_FWSM_X550 IXGBE_FWSM
+#define IXGBE_FWSM_X550EM_x IXGBE_FWSM
+#define IXGBE_FWSM_BY_MAC(_hw) IXGBE_FWSM
+
+#define IXGBE_SWFW_SYNC IXGBE_GSSR
+#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC
+#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC
+#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC
+#define IXGBE_SWFW_SYNC_BY_MAC(_hw) IXGBE_SWFW_SYNC
+
#define IXGBE_GSSR 0x10160
#define IXGBE_MREVID 0x11064
#define IXGBE_DCA_ID 0x11070
#define IXGBE_DCA_CTRL 0x11074
-#define IXGBE_SWFW_SYNC IXGBE_GSSR
/* PCI-E registers 82599-Specific */
#define IXGBE_GCR_EXT 0x11050
@@ -1014,14 +1099,18 @@ struct ixgbe_dmac_config {
#define IXGBE_PHYDAT_82599 0x11044
#define IXGBE_PHYCTL_82599 0x11048
#define IXGBE_PBACLR_82599 0x11068
-#define IXGBE_CIAA_82599 0x11088
-#define IXGBE_CIAD_82599 0x1108C
+#define IXGBE_CIAA 0x11088
+#define IXGBE_CIAD 0x1108C
+#define IXGBE_CIAA_82599 IXGBE_CIAA
+#define IXGBE_CIAD_82599 IXGBE_CIAD
+#define IXGBE_CIAA_X540 IXGBE_CIAA
+#define IXGBE_CIAD_X540 IXGBE_CIAD
#define IXGBE_CIAA_X550 0x11508
#define IXGBE_CIAD_X550 0x11510
-#define IXGBE_CIAA_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
- IXGBE_CIAA_X550 : IXGBE_CIAA_82599))
-#define IXGBE_CIAD_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
- IXGBE_CIAD_X550 : IXGBE_CIAD_82599))
+#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550
+#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550
+#define IXGBE_CIAA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAA)
+#define IXGBE_CIAD_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAD)
#define IXGBE_PICAUSE 0x110B0
#define IXGBE_PIENA 0x110B8
#define IXGBE_CDQ_MBR_82599 0x110B4
@@ -1369,6 +1458,10 @@ struct ixgbe_dmac_config {
#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */
+#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */
#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
@@ -1388,11 +1481,35 @@ struct ixgbe_dmac_config {
#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */
#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
-
+#define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */
+#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT Cap */
+#define IXGBE_AUTO_NEG_LP_10GBASE_CAP 0x0800 /* AUTO NEG Rx LP 10GBaseT Cap */
+#define IXGBE_AUTO_NEG_10GBASET_STAT 0x0021 /* AUTO NEG 10G BaseT Stat */
+
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
+#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
+#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */
+#define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide mask */
+#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */
+#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */
+#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */
+#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 /* autoneg vendor alarm int enable */
+#define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */
+#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */
+#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */
+#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */
#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
+#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */
+#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */
+#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Transmit Dis Reg */
+#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Transmit Dis */
#define IXGBE_PCRC8ECL 0x0E810 /* PCR CRC-8 Error Count Lo */
#define IXGBE_PCRC8ECH 0x0E811 /* PCR CRC-8 Error Count Hi */
@@ -1403,6 +1520,24 @@ struct ixgbe_dmac_config {
/* MII clause 22/28 definitions */
#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
+#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register*/
+#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */
+
+#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */
+
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */
+#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */
+
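A minimal decode sketch under the masks above (the helper name is hypothetical, not part of the patch): the low three bits of the vendor status register select one of the speed/duplex codes.

	static inline bool ixgbe_ven_stat_is_10g_full(u16 stat)
	{
		/* compare the 3-bit speed/duplex field to the 10G/full code */
		return (stat & IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK) ==
		       IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL;
	}
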
#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */
#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
@@ -1410,6 +1545,8 @@ struct ixgbe_dmac_config {
#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/
#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
+#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400
+#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800
#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */
#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */
#define IXGBE_MII_RESTART 0x200
@@ -1424,7 +1561,10 @@ struct ixgbe_dmac_config {
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
#define X540_PHY_ID 0x01540200
-#define X550_PHY_ID 0x01540220
+#define X550_PHY_ID1 0x01540220
+#define X550_PHY_ID2 0x01540223
+#define X550_PHY_ID3 0x01540221
+#define X557_PHY_ID 0x01540240
#define AQ_FW_REV 0x20
#define QT2022_PHY_ID 0x0043A400
#define ATH_PHY_ID 0x03429050
@@ -1448,6 +1588,19 @@ struct ixgbe_dmac_config {
#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */
+#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */
+#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */
+#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */
+#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN)
+#define IXGBE_SDP1_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN)
+#define IXGBE_SDP2_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN)
+
#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
@@ -1625,6 +1778,19 @@ enum {
#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */
+#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_X550EM_x IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550EM_x IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550EM_x IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0)
+#define IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1)
+#define IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2)
+
#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
@@ -1643,6 +1809,9 @@ enum {
#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EICS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
+#define IXGBE_EICS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
+#define IXGBE_EICS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
@@ -1662,6 +1831,9 @@ enum {
#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
+#define IXGBE_EIMS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
+#define IXGBE_EIMS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
@@ -1680,6 +1852,9 @@ enum {
#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMC_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
+#define IXGBE_EIMC_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
+#define IXGBE_EIMC_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
@@ -1843,6 +2018,9 @@ enum {
#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
+#define IXGBE_X557_LED_MANUAL_SET_MASK (1 << 8)
+#define IXGBE_X557_MAX_LED_INDEX 3
+#define IXGBE_X557_LED_PROVISIONING 0xC430
/* LED modes */
#define IXGBE_LED_LINK_UP 0x0
@@ -1975,12 +2153,13 @@ enum {
#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
/* SW_FW_SYNC/GSSR definitions */
-#define IXGBE_GSSR_EEP_SM 0x0001
-#define IXGBE_GSSR_PHY0_SM 0x0002
-#define IXGBE_GSSR_PHY1_SM 0x0004
-#define IXGBE_GSSR_MAC_CSR_SM 0x0008
-#define IXGBE_GSSR_FLASH_SM 0x0010
-#define IXGBE_GSSR_SW_MNG_SM 0x0400
+#define IXGBE_GSSR_EEP_SM 0x0001
+#define IXGBE_GSSR_PHY0_SM 0x0002
+#define IXGBE_GSSR_PHY1_SM 0x0004
+#define IXGBE_GSSR_MAC_CSR_SM 0x0008
+#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200
+#define IXGBE_GSSR_SW_MNG_SM 0x0400
#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys and both I2Cs */
#define IXGBE_GSSR_I2C_MASK 0x1800
#define IXGBE_GSSR_NVM_PHY_MASK 0xF
@@ -2101,6 +2280,11 @@ enum {
#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */
#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */
+#define NVM_INIT_CTRL_3 0x38
+#define NVM_INIT_CTRL_3_LPLU 0x8
+#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40
+#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100
+
#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
@@ -2410,7 +2594,7 @@ enum {
#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */
#define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */
-#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */
+#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCEOFe/IPE */
#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */
@@ -2731,6 +2915,10 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
+#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */
+#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */
+#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */
+#define IXGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */
/* CEM Support */
#define FW_CEM_HDR_LEN 0x4
@@ -2751,6 +2939,7 @@ enum ixgbe_fdir_pballoc_type {
#define FW_MAX_READ_BUFFER_SIZE 1024
#define FW_DISABLE_RXEN_CMD 0xDE
#define FW_DISABLE_RXEN_LEN 0x1
+#define FW_PHY_MGMT_REQ_CMD 0x20
/* Host Interface Command Structures */
struct ixgbe_hic_hdr {
@@ -2763,13 +2952,25 @@ struct ixgbe_hic_hdr {
u8 checksum;
};
-struct ixgbe_hic_hdr2 {
+struct ixgbe_hic_hdr2_req {
u8 cmd;
- u8 buf_len1;
- u8 buf_len2;
+ u8 buf_lenh;
+ u8 buf_lenl;
u8 checksum;
};
+struct ixgbe_hic_hdr2_rsp {
+ u8 cmd;
+ u8 buf_lenl;
+ u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
+ u8 checksum;
+};
+
+union ixgbe_hic_hdr2 {
+ struct ixgbe_hic_hdr2_req req;
+ struct ixgbe_hic_hdr2_rsp rsp;
+};
+
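The response header folds the high length bits and a 5-bit status into one byte, per the bit-layout comment above. A hedged decode sketch (helper names hypothetical, not part of the patch):

	static inline u16 ixgbe_hic_rsp_len(const struct ixgbe_hic_hdr2_rsp *rsp)
	{
		/* bits 7-5 of buf_lenh_status are buf_len bits 10-8 */
		return (((u16)rsp->buf_lenh_status & 0xE0) << 3) | rsp->buf_lenl;
	}

	static inline u8 ixgbe_hic_rsp_status(const struct ixgbe_hic_hdr2_rsp *rsp)
	{
		return rsp->buf_lenh_status & 0x1F; /* bits 4-0 */
	}
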
struct ixgbe_hic_drv_info {
struct ixgbe_hic_hdr hdr;
u8 port_num;
@@ -2783,7 +2984,7 @@ struct ixgbe_hic_drv_info {
/* These need to be dword aligned */
struct ixgbe_hic_read_shadow_ram {
- struct ixgbe_hic_hdr2 hdr;
+ union ixgbe_hic_hdr2 hdr;
u32 address;
u16 length;
u16 pad2;
@@ -2792,7 +2993,7 @@ struct ixgbe_hic_read_shadow_ram {
};
struct ixgbe_hic_write_shadow_ram {
- struct ixgbe_hic_hdr2 hdr;
+ union ixgbe_hic_hdr2 hdr;
u32 address;
u16 length;
u16 pad2;
@@ -2962,7 +3163,8 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_UNKNOWN 0
#define IXGBE_LINK_SPEED_100_FULL 0x0008
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
-#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0040
+#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400
+#define IXGBE_LINK_SPEED_5GB_FULL 0x0800
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
IXGBE_LINK_SPEED_10GB_FULL)
@@ -3134,6 +3336,37 @@ union ixgbe_atr_hash_dword {
};
+#define IXGBE_MVALS_INIT(m) \
+ IXGBE_CAT(EEC, m), \
+ IXGBE_CAT(FLA, m), \
+ IXGBE_CAT(GRC, m), \
+ IXGBE_CAT(SRAMREL, m), \
+ IXGBE_CAT(FACTPS, m), \
+ IXGBE_CAT(SWSM, m), \
+ IXGBE_CAT(SWFW_SYNC, m), \
+ IXGBE_CAT(FWSM, m), \
+ IXGBE_CAT(SDP0_GPIEN, m), \
+ IXGBE_CAT(SDP1_GPIEN, m), \
+ IXGBE_CAT(SDP2_GPIEN, m), \
+ IXGBE_CAT(EICR_GPI_SDP0, m), \
+ IXGBE_CAT(EICR_GPI_SDP1, m), \
+ IXGBE_CAT(EICR_GPI_SDP2, m), \
+ IXGBE_CAT(CIAA, m), \
+ IXGBE_CAT(CIAD, m), \
+ IXGBE_CAT(I2C_CLK_IN, m), \
+ IXGBE_CAT(I2C_CLK_OUT, m), \
+ IXGBE_CAT(I2C_DATA_IN, m), \
+ IXGBE_CAT(I2C_DATA_OUT, m), \
+ IXGBE_CAT(I2C_DATA_OE_N_EN, m), \
+ IXGBE_CAT(I2C_BB_EN, m), \
+ IXGBE_CAT(I2C_CLK_OE_N_EN, m), \
+ IXGBE_CAT(I2CCTL, m)
+
+enum ixgbe_mvals {
+ IXGBE_MVALS_INIT(_IDX),
+ IXGBE_MVALS_IDX_LIMIT
+};
+
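Together with IXGBE_BY_MAC, this enum turns every *_BY_MAC macro into an array lookup. A hedged sketch of how a per-MAC table might be built and attached (the ixgbe_mvals_X550 name is illustrative; the patch itself only adds the hw->mvals pointer):

	/* Expanding IXGBE_MVALS_INIT with the _X550 suffix selects the
	 * X550 variant of every register, in enum order.
	 */
	static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
		IXGBE_MVALS_INIT(_X550)
	};

	/* After hw->mvals = ixgbe_mvals_X550, for example:
	 * IXGBE_CIAA_BY_MAC(hw) == hw->mvals[IXGBE_CIAA_IDX] == IXGBE_CIAA_X550
	 */
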
/*
* Unavailable: The FCoE Boot Option ROM is not present in the flash.
* Disabled: Present; boot order is not set for any targets on the port.
@@ -3159,11 +3392,6 @@ enum ixgbe_mac_type {
ixgbe_mac_82599_vf,
ixgbe_mac_X540,
ixgbe_mac_X540_vf,
- /*
- * X550EM MAC type decoder:
- * ixgbe_mac_X550EM_x: "x" = Xeon
- * ixgbe_mac_X550EM_a: "a" = Atom
- */
ixgbe_mac_X550,
ixgbe_mac_X550EM_x,
ixgbe_mac_X550_vf,
@@ -3178,6 +3406,7 @@ enum ixgbe_phy_type {
ixgbe_phy_aq,
ixgbe_phy_x550em_kr,
ixgbe_phy_x550em_kx4,
+ ixgbe_phy_x550em_ext_t,
ixgbe_phy_cu_unknown,
ixgbe_phy_qt,
ixgbe_phy_xaui,
@@ -3265,6 +3494,7 @@ enum ixgbe_bus_type {
ixgbe_bus_type_pci,
ixgbe_bus_type_pcix,
ixgbe_bus_type_pci_express,
+ ixgbe_bus_type_internal,
ixgbe_bus_type_reserved
};
@@ -3457,9 +3687,11 @@ struct ixgbe_mac_operations {
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
+ void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed);
/* Packet Buffer manipulation */
void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int);
@@ -3494,21 +3726,22 @@ struct ixgbe_mac_operations {
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *);
+ s32 (*setup_fc)(struct ixgbe_hw *);
/* Manageability interface */
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
- s32 (*dmac_config)(struct ixgbe_hw *hw);
- s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
- s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map);
- s32 (*setup_eee)(struct ixgbe_hw *hw, bool enable_eee);
- void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
- void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
- unsigned int);
void (*disable_rx)(struct ixgbe_hw *hw);
void (*enable_rx)(struct ixgbe_hw *hw);
+ void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
+ unsigned int);
+ void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
+ s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
+ s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
+ s32 (*dmac_config)(struct ixgbe_hw *hw);
+ s32 (*setup_eee)(struct ixgbe_hw *hw, bool enable_eee);
s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
void (*disable_mdd)(struct ixgbe_hw *hw);
@@ -3527,6 +3760,7 @@ struct ixgbe_phy_operations {
s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
s32 (*setup_link)(struct ixgbe_hw *);
+ s32 (*setup_internal_link)(struct ixgbe_hw *);
s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
@@ -3539,6 +3773,17 @@ struct ixgbe_phy_operations {
s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
s32 (*check_overtemp)(struct ixgbe_hw *);
+ s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
+ s32 (*enter_lplu)(struct ixgbe_hw *);
+ s32 (*handle_lasi)(struct ixgbe_hw *hw);
+ s32 (*read_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ u16 *value);
+ s32 (*write_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ u16 value);
+ s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ u8 *value);
+ s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ u8 value);
};
struct ixgbe_eeprom_info {
@@ -3548,6 +3793,7 @@ struct ixgbe_eeprom_info {
u16 word_size;
u16 address_bits;
u16 word_page_size;
+ u16 ctrl_word_3;
};
#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
@@ -3584,6 +3830,7 @@ struct ixgbe_mac_info {
bool thermal_sensor_enabled;
struct ixgbe_dmac_config dmac_config;
bool set_lben;
+ u32 max_link_up_time;
};
struct ixgbe_phy_info {
@@ -3596,7 +3843,6 @@ struct ixgbe_phy_info {
u32 revision;
enum ixgbe_media_type media_type;
u32 phy_semaphore_mask;
- u8 lan_id;
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
enum ixgbe_smart_speed smart_speed;
@@ -3604,6 +3850,7 @@ struct ixgbe_phy_info {
bool multispeed_fiber;
bool reset_if_overtemp;
bool qsfp_shared_i2c_bus;
+ u32 nw_mng_if_sel;
};
#include "ixgbe_mbx.h"
@@ -3638,7 +3885,7 @@ struct ixgbe_mbx_info {
};
struct ixgbe_hw {
- u8 *hw_addr;
+ u8 IOMEM *hw_addr;
void *back;
struct ixgbe_mac_info mac;
struct ixgbe_addr_filter_info addr_ctrl;
@@ -3647,6 +3894,7 @@ struct ixgbe_hw {
struct ixgbe_eeprom_info eeprom;
struct ixgbe_bus_info bus;
struct ixgbe_mbx_info mbx;
+ const u32 *mvals;
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
@@ -3705,14 +3953,19 @@ struct ixgbe_hw {
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010))
-#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C))
-#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634))
-#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638))
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00))
-#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00))
-#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520))
-#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? (0x5A00) : (0x9A00))
+#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
+#define IXGBE_FUSES0_300MHZ (1 << 5)
+#define IXGBE_FUSES0_REV1 (1 << 6)
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
+#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
+#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
+#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00)
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
+#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
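The rewritten offset macros read more directly: port 0 selects the 0x4xxx register bank and any other port the 0x8xxx bank. For example, straight from the definitions above:

	/* IXGBE_KRM_LINK_CTRL_1(0) == 0x420C, IXGBE_KRM_LINK_CTRL_1(1) == 0x820C */
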
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
@@ -3729,6 +3982,9 @@ struct ixgbe_hw {
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
+#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28)
+#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29)
+
#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
@@ -3743,6 +3999,15 @@ struct ixgbe_hw {
#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3)
#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31)
+#define IXGBE_KX4_LINK_CNTL_1 0x4C
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31)
+
#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
@@ -3762,4 +4027,7 @@ struct ixgbe_hw {
#define IXGBE_SB_IOSF_TARGET_KX4_PHY 1
#define IXGBE_SB_IOSF_TARGET_KX4_PCS 2
+#define IXGBE_NW_MNG_IF_SEL 0x00011178
+#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24)
+
#endif /* _IXGBE_TYPE_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_vf.c
index e6b6c517..50f5e54d 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_vf.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_api.h"
#include "ixgbe_type.h"
#include "ixgbe_vf.h"
-#ident "$Id: ixgbe_vf.c,v 1.62 2013/06/27 21:30:59 jtkirshe Exp $"
#ifndef IXGBE_VFWRITE_REG
#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG
@@ -722,4 +721,3 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
return err;
}
-
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_vf.h
index 3c1c1689..411152a4 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_vf.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,7 +33,6 @@ POSSIBILITY OF SUCH DAMAGE.
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
-#ident "$Id: ixgbe_vf.h,v 1.37 2013/11/07 08:18:53 jtkirshe Exp $"
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
@@ -138,8 +137,4 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc);
-#ifdef IXGBEVF_OSDEP2
-#include "ixgbevf_osdep2.h"
-
-#endif /* IXGBEVF_OSDEP2 */
#endif /* __IXGBE_VF_H__ */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_x540.c
index ab384502..48917029 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_x540.c
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -69,57 +69,59 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
/* EEPROM */
- eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540;
- eeprom->ops.read = &ixgbe_read_eerd_X540;
- eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_X540;
- eeprom->ops.write = &ixgbe_write_eewr_X540;
- eeprom->ops.write_buffer = &ixgbe_write_eewr_buffer_X540;
- eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X540;
- eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X540;
- eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X540;
+ eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
+ eeprom->ops.read = ixgbe_read_eerd_X540;
+ eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540;
+ eeprom->ops.write = ixgbe_write_eewr_X540;
+ eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540;
+ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540;
+ eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540;
+ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540;
/* PHY */
- phy->ops.init = &ixgbe_init_phy_ops_generic;
+ phy->ops.init = ixgbe_init_phy_ops_generic;
phy->ops.reset = NULL;
+ if (!ixgbe_mng_present(hw))
+ phy->ops.set_phy_power = ixgbe_set_copper_phy_power;
/* MAC */
- mac->ops.reset_hw = &ixgbe_reset_hw_X540;
- mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
- mac->ops.get_media_type = &ixgbe_get_media_type_X540;
+ mac->ops.reset_hw = ixgbe_reset_hw_X540;
+ mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
+ mac->ops.get_media_type = ixgbe_get_media_type_X540;
mac->ops.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_X540;
+ ixgbe_get_supported_physical_layer_X540;
mac->ops.read_analog_reg8 = NULL;
mac->ops.write_analog_reg8 = NULL;
- mac->ops.start_hw = &ixgbe_start_hw_X540;
- mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
- mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
- mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
- mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
- mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
- mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540;
- mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync_X540;
- mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
- mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
+ mac->ops.start_hw = ixgbe_start_hw_X540;
+ mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
+ mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
+ mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
+ mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
+ mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
+ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540;
+ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540;
+ mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
+ mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
/* RAR, Multicast, VLAN */
- mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
- mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
- mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
- mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
+ mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
+ mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
+ mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
+ mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
mac->rar_highwater = 1;
- mac->ops.set_vfta = &ixgbe_set_vfta_generic;
- mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
- mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
- mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
- mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
- mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
+ mac->ops.set_vfta = ixgbe_set_vfta_generic;
+ mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
+ mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
+ mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
+ mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
+ mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;
/* Link */
mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_generic;
- mac->ops.setup_link = &ixgbe_setup_mac_link_X540;
- mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
- mac->ops.check_link = &ixgbe_check_mac_link_generic;
+ ixgbe_get_copper_link_capabilities_generic;
+ mac->ops.setup_link = ixgbe_setup_mac_link_X540;
+ mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
+ mac->ops.check_link = ixgbe_check_mac_link_generic;
mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
@@ -145,9 +147,9 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;
/* Manageability interface */
- mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
+ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;
- mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;
+ mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
return ret_val;
}
@@ -737,26 +739,6 @@ STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
}
/**
- * ixgbe_set_mux - Set mux for port 1 access with CS4227
- * @hw: pointer to hardware structure
- * @state: set mux if 1, clear if 0
- */
-STATIC void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
-{
- u32 esdp;
-
- if (!hw->phy.lan_id)
- return;
- esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
- if (state)
- esdp |= IXGBE_ESDP_SDP1;
- else
- esdp &= ~IXGBE_ESDP_SDP1;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
- IXGBE_WRITE_FLUSH(hw);
-}
-
-/**
* ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to acquire
@@ -798,8 +780,6 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
- if (swi2c_mask)
- ixgbe_set_mux(hw, 1);
return IXGBE_SUCCESS;
}
/* Firmware currently using resource (fwmask), hardware
@@ -830,8 +810,6 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
- if (swi2c_mask)
- ixgbe_set_mux(hw, 1);
return IXGBE_SUCCESS;
}
/* If the resource is not released by other SW the SW can assume that
@@ -869,10 +847,8 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
DEBUGFUNC("ixgbe_release_swfw_sync_X540");
- if (mask & IXGBE_GSSR_I2C_MASK) {
+ if (mask & IXGBE_GSSR_I2C_MASK)
swmask |= mask & IXGBE_GSSR_I2C_MASK;
- ixgbe_set_mux(hw, 0);
- }
ixgbe_get_swfw_sync_semaphore(hw);
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
@@ -955,14 +931,14 @@ STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
- swsm &= ~IXGBE_SWSM_SMBI;
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
-
swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
swsm &= ~IXGBE_SWFW_REGSMP;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm &= ~IXGBE_SWSM_SMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
IXGBE_WRITE_FLUSH(hw);
}
@@ -1034,5 +1010,3 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
return IXGBE_SUCCESS;
}
-
-
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_x540.h
index 338c0e6e..42c08a82 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_x540.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define _IXGBE_X540_H_
#include "ixgbe_type.h"
-#ident "$Id: ixgbe_x540.h,v 1.11 2013/10/11 08:36:03 jtkirshe Exp $"
s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
ixgbe_link_speed *speed, bool *autoneg);
diff --git a/src/dpdk22/drivers/net/ixgbe/base/ixgbe_x550.c b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_x550.c
new file mode 100644
index 00000000..48077aec
--- /dev/null
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -0,0 +1,3209 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_x550.h"
+#include "ixgbe_x540.h"
+#include "ixgbe_type.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
+
+/**
+ * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for X550.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_X550");
+
+ ret_val = ixgbe_init_ops_X540(hw);
+ mac->ops.dmac_config = ixgbe_dmac_config_X550;
+ mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
+ mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
+ mac->ops.setup_eee = ixgbe_setup_eee_X550;
+ mac->ops.set_source_address_pruning =
+ ixgbe_set_source_address_pruning_X550;
+ mac->ops.set_ethertype_anti_spoofing =
+ ixgbe_set_ethertype_anti_spoofing_X550;
+
+ mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
+ eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
+ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
+ eeprom->ops.read = ixgbe_read_ee_hostif_X550;
+ eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
+ eeprom->ops.write = ixgbe_write_ee_hostif_X550;
+ eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
+ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
+ eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
+
+ mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
+ mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
+ mac->ops.mdd_event = ixgbe_mdd_event_X550;
+ mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
+ mac->ops.disable_rx = ixgbe_disable_rx_x550;
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
+ hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
+ hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
+ }
+ return ret_val;
+}
+
+/**
+ * ixgbe_read_cs4227 - Read CS4227 register
+ * @hw: pointer to hardware structure
+ * @reg: register number to read
+ * @value: pointer to receive value read
+ *
+ * Returns status code
+ **/
+STATIC s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
+{
+ return ixgbe_read_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, value);
+}
+
+/**
+ * ixgbe_write_cs4227 - Write CS4227 register
+ * @hw: pointer to hardware structure
+ * @reg: register number to write
+ * @value: value to write to register
+ *
+ * Returns status code
+ **/
+STATIC s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
+{
+ return ixgbe_write_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, value);
+}
+
+/**
+ * ixgbe_get_cs4227_status - Return CS4227 status
+ * @hw: pointer to hardware structure
+ *
+ * Performs a diagnostic on the CS4227 chip. Returns an error if it is
+ * not operating correctly.
+ * This function assumes that the caller has acquired the proper semaphore.
+ **/
+STATIC s32 ixgbe_get_cs4227_status(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 value = 0;
+ u16 reg_slice, reg_val;
+ u8 retry;
+
+ /* Check register reads. */
+ for (retry = 0; retry < IXGBE_CS4227_RETRIES; ++retry) {
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_GLOBAL_ID_LSB,
+ &value);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ if (value == IXGBE_CS4227_GLOBAL_ID_VALUE)
+ break;
+ msec_delay(IXGBE_CS4227_CHECK_DELAY);
+ }
+ if (value != IXGBE_CS4227_GLOBAL_ID_VALUE)
+ return IXGBE_ERR_PHY;
+
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If this is the first time after power-on, check the ucode.
+ * Otherwise, this will disrupt link on all ports. Because we
+ * can only do this the first time, we must check all ports,
+ * not just our own.
+ * While we are at it, set the LINE side to 10G SR, which is
+ * what it needs to be regardless of the actual link.
+ */
+ if (value != IXGBE_CS4227_SCRATCH_VALUE) {
+ reg_slice = IXGBE_CS4227_LINE_SPARE22_MSB;
+ reg_val = IXGBE_CS4227_SPEED_10G;
+ status = ixgbe_write_cs4227(hw, reg_slice, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB;
+ reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+ status = ixgbe_write_cs4227(hw, reg_slice, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB;
+ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ status = ixgbe_write_cs4227(hw, reg_slice, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg_slice = IXGBE_CS4227_LINE_SPARE22_MSB + (1 << 12);
+ reg_val = IXGBE_CS4227_SPEED_10G;
+ status = ixgbe_write_cs4227(hw, reg_slice, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (1 << 12);
+ reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+ status = ixgbe_write_cs4227(hw, reg_slice, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB + (1 << 12);
+ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ status = ixgbe_write_cs4227(hw, reg_slice, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ msec_delay(10);
+ }
+
+ /* Verify that the ucode is operational on all ports. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB;
+ reg_val = 0xFFFF;
+ status = ixgbe_read_cs4227(hw, reg_slice, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ if (reg_val != 0)
+ return IXGBE_ERR_PHY;
+
+ reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB;
+ reg_val = 0xFFFF;
+ status = ixgbe_read_cs4227(hw, reg_slice, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ if (reg_val != 0)
+ return IXGBE_ERR_PHY;
+
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (1 << 12);
+ reg_val = 0xFFFF;
+ status = ixgbe_read_cs4227(hw, reg_slice, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ if (reg_val != 0)
+ return IXGBE_ERR_PHY;
+
+ reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB + (1 << 12);
+ reg_val = 0xFFFF;
+ status = ixgbe_read_cs4227(hw, reg_slice, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ if (reg_val != 0)
+ return IXGBE_ERR_PHY;
+
+ /* Set scratch indicating that the diagnostic was successful. */
+ status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
+ IXGBE_CS4227_SCRATCH_VALUE);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ if (value != IXGBE_CS4227_SCRATCH_VALUE)
+ return IXGBE_ERR_PHY;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_pe - Read register from port expander
+ * @hw: pointer to hardware structure
+ * @reg: register number to read
+ * @value: pointer to receive read value
+ *
+ * Returns status code
+ **/
+STATIC s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
+{
+ s32 status;
+
+ status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
+ if (status != IXGBE_SUCCESS)
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "port expander access failed with %d\n", status);
+ return status;
+}
+
+/**
+ * ixgbe_write_pe - Write register to port expander
+ * @hw: pointer to hardware structure
+ * @reg: register number to write
+ * @value: value to write
+ *
+ * Returns status code
+ **/
+STATIC s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
+{
+ s32 status;
+
+ status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
+ if (status != IXGBE_SUCCESS)
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "port expander access failed with %d\n", status);
+ return status;
+}
+
+/**
+ * ixgbe_reset_cs4227 - Reset CS4227 using port expander
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ **/
+STATIC s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u8 reg;
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg |= IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg &= ~IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg &= ~IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ usec_delay(IXGBE_CS4227_RESET_HOLD);
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg |= IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ msec_delay(IXGBE_CS4227_RESET_DELAY);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_check_cs4227 - Check CS4227 and reset as needed
+ * @hw: pointer to hardware structure
+ **/
+STATIC void ixgbe_check_cs4227(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ s32 status;
+ u8 retry;
+
+ for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d\n", status);
+ return;
+ }
+ status = ixgbe_get_cs4227_status(hw);
+ if (status == IXGBE_SUCCESS) {
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(hw->eeprom.semaphore_delay);
+ return;
+ }
+ ixgbe_reset_cs4227(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(hw->eeprom.semaphore_delay);
+ }
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "Unable to initialize CS4227, err=%d\n", status);
+}
+
+/**
+ * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
+ * @hw: pointer to hardware structure
+ **/
+STATIC void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
+{
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ if (hw->bus.lan_id) {
+ esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+ esdp |= IXGBE_ESDP_SDP1_DIR;
+ }
+ esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_identify_phy_x550em - Get PHY type based on device id
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
+{
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ /* set up for CS4227 usage */
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ ixgbe_setup_mux_ctl(hw);
+ ixgbe_check_cs4227(hw);
+
+ return ixgbe_identify_module_generic(hw);
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ hw->phy.type = ixgbe_phy_x550em_kx4;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ hw->phy.type = ixgbe_phy_x550em_kr;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ return ixgbe_identify_phy_generic(hw);
+ default:
+ break;
+ }
+ return IXGBE_SUCCESS;
+}
+
+STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
+ return IXGBE_NOT_IMPLEMENTED;
+}
+
+STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
+ return IXGBE_NOT_IMPLEMENTED;
+}
+
+/**
+ * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for X550EM.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_X550EM");
+
+ /* Similar to X550 so start there. */
+ ret_val = ixgbe_init_ops_X550(hw);
+
+ /* Since this function eventually calls
+	 * ixgbe_init_ops_X540 by design, we are setting
+ * the pointers to NULL explicitly here to overwrite
+ * the values being set in the x540 function.
+ */
+ /* Thermal sensor not supported in x550EM */
+ mac->ops.get_thermal_sensor_data = NULL;
+ mac->ops.init_thermal_sensor_thresh = NULL;
+ mac->thermal_sensor_enabled = false;
+
+ /* FCOE not supported in x550EM */
+ mac->ops.get_san_mac_addr = NULL;
+ mac->ops.set_san_mac_addr = NULL;
+ mac->ops.get_wwn_prefix = NULL;
+ mac->ops.get_fcoe_boot_status = NULL;
+
+ /* IPsec not supported in x550EM */
+ mac->ops.disable_sec_rx_path = NULL;
+ mac->ops.enable_sec_rx_path = NULL;
+
+ /* AUTOC register is not present in x550EM. */
+ mac->ops.prot_autoc_read = NULL;
+ mac->ops.prot_autoc_write = NULL;
+
+	/* X550EM bus type is internal */
+ hw->bus.type = ixgbe_bus_type_internal;
+ mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
+
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
+ mac->ops.get_media_type = ixgbe_get_media_type_X550em;
+ mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
+ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
+ mac->ops.reset_hw = ixgbe_reset_hw_X550em;
+ mac->ops.get_supported_physical_layer =
+ ixgbe_get_supported_physical_layer_X550em;
+
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
+ mac->ops.setup_fc = ixgbe_setup_fc_generic;
+ else
+ mac->ops.setup_fc = ixgbe_setup_fc_X550em;
+
+ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
+ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
+
+ if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR)
+ mac->ops.setup_eee = NULL;
+
+ /* PHY */
+ phy->ops.init = ixgbe_init_phy_ops_X550em;
+ phy->ops.identify = ixgbe_identify_phy_x550em;
+ if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
+ phy->ops.set_phy_power = NULL;
+
+ /* EEPROM */
+ eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
+ eeprom->ops.read = ixgbe_read_ee_hostif_X550;
+ eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
+ eeprom->ops.write = ixgbe_write_ee_hostif_X550;
+ eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
+ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
+ eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
+ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_dmac_config_X550
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing. If enabling dmac, dmac is activated.
+ * When disabling dmac, the dmac enable bit is cleared.
+ **/
+s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
+{
+ u32 reg, high_pri_tc;
+
+ DEBUGFUNC("ixgbe_dmac_config_X550");
+
+ /* Disable DMA coalescing before configuring */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+ reg &= ~IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+ /* Disable DMA Coalescing if the watchdog timer is 0 */
+ if (!hw->mac.dmac_config.watchdog_timer)
+ goto out;
+
+ ixgbe_dmac_config_tcs_X550(hw);
+
+ /* Configure DMA Coalescing Control Register */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+
+ /* Set the watchdog timer in units of 40.96 usec */
+ reg &= ~IXGBE_DMACR_DMACWT_MASK;
+ reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
+
+ reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
+ /* If fcoe is enabled, set high priority traffic class */
+ if (hw->mac.dmac_config.fcoe_en) {
+ high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
+ reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
+ IXGBE_DMACR_HIGH_PRI_TC_MASK);
+ }
+ reg |= IXGBE_DMACR_EN_MNG_IND;
+
+ /* Enable DMA coalescing after configuration */
+ reg |= IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+out:
+ return IXGBE_SUCCESS;
+}
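The watchdog field above is written in 40.96 usec hardware units via (t * 100) / 4096, which implies watchdog_timer is expressed in microseconds. Worked example: t = 4096 us gives (4096 * 100) / 4096 = 100 units, and 100 * 40.96 us = 4096 us, consistent with the input.
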
+
+/**
+ * ixgbe_dmac_config_tcs_X550
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing threshold per TC. The dmac enable bit must
+ * be cleared before configuring.
+ **/
+s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
+{
+ u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
+
+ DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
+
+	/* Select Rx packet buffer headroom based on link speed */
+ switch (hw->mac.dmac_config.link_speed) {
+ case IXGBE_LINK_SPEED_100_FULL:
+ pb_headroom = IXGBE_DMACRXT_100M;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ pb_headroom = IXGBE_DMACRXT_1G;
+ break;
+ default:
+ pb_headroom = IXGBE_DMACRXT_10G;
+ break;
+ }
+
+ maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
+ IXGBE_MHADD_MFS_SHIFT) / 1024);
+
+ /* Set the per Rx packet buffer receive threshold */
+ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
+ reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
+
+ if (tc < hw->mac.dmac_config.num_tcs) {
+ /* Get Rx PB size */
+ rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
+ rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
+ IXGBE_RXPBSIZE_SHIFT;
+
+ /* Calculate receive buffer threshold in kilobytes */
+ if (rx_pb_size > pb_headroom)
+ rx_pb_size = rx_pb_size - pb_headroom;
+ else
+ rx_pb_size = 0;
+
+			/* DMCTH threshold must be at least the max frame size (MFS) */
+ reg |= (rx_pb_size > maxframe_size_kb) ?
+ rx_pb_size : maxframe_size_kb;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
+ }
+ return IXGBE_SUCCESS;
+}
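A worked example for the maxframe_size_kb derivation above, assuming IXGBE_MHADD_MFS_SHIFT is 16 as in the generic ixgbe base code: a programmed max frame of 9728 bytes in MAXFRS yields 9728 / 1024 = 9, so every enabled TC's DMCTH threshold is at least 9 KB.
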
+
+/**
+ * ixgbe_dmac_update_tcs_X550
+ * @hw: pointer to hardware structure
+ *
+ * Disables dmac, updates per TC settings, and then enables dmac.
+ **/
+s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
+
+ /* Disable DMA coalescing before configuring */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+ reg &= ~IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+ ixgbe_dmac_config_tcs_X550(hw);
+
+ /* Enable DMA coalescing after configuration */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+ reg |= IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ DEBUGFUNC("ixgbe_init_eeprom_params_X550");
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+ DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_setup_eee_X550 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enable/disable EEE based on enable_eee flag.
+ * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
+ * are modified.
+ *
+ **/
+s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
+{
+ u32 eeer;
+ u16 autoneg_eee_reg;
+ u32 link_reg;
+ s32 status;
+ u32 fuse;
+
+ DEBUGFUNC("ixgbe_setup_eee_X550");
+
+ eeer = IXGBE_READ_REG(hw, IXGBE_EEER);
+ /* Enable or disable EEE per flag */
+ if (enable_eee) {
+ eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
+
+ if (hw->device_id == IXGBE_DEV_ID_X550T) {
+ /* Advertise EEE capability */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
+
+ autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
+ } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
+ /* Not supported on first revision. */
+ fuse = IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0));
+ if (!(fuse & IXGBE_FUSES0_REV1))
+ return IXGBE_SUCCESS;
+
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
+
+ /* Don't advertise FEC capability when EEE enabled. */
+ link_reg &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
+
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
+ } else {
+ eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
+
+ if (hw->device_id == IXGBE_DEV_ID_X550T) {
+ /* Disable advertised EEE capability */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
+
+ autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
+ } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX);
+
+ /* Advertise FEC capability when EEE is disabled. */
+ link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
+
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_EEER, eeer);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable source address pruning
+ * @pool: Rx pool to set source address pruning for
+ **/
+void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
+ unsigned int pool)
+{
+ u64 pfflp;
+
+ /* max rx pool is 63 */
+ if (pool > 63)
+ return;
+
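+ /* The per-pool pruning enables form a 64-bit bitmap split across two
+ * 32-bit registers: PFFLPL covers pools 0-31, PFFLPH covers pools 32-63.
+ */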
+ pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
+ pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
+
+ if (enable)
+ pfflp |= (1ULL << pool);
+ else
+ pfflp &= ~(1ULL << pool);
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
+ IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
+}
+
+/**
+ * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for Ethertype anti-spoofing
+ * @vf: Virtual Function pool to set for Ethertype anti-spoofing
+ *
+ **/
+void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
+ bool enable, int vf)
+{
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
+ u32 pfvfspoof;
+
+ DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
+
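+ /* Each PFVFSPOOF register carries the bits for 8 VFs: vf >> 3 selects the
+ * register, and vf % 8 selects the bit within the Ethertype anti-spoofing field.
+ */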
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+ if (enable)
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
+
+/**
+ * ixgbe_iosf_wait - Wait for IOSF command completion
+ * @hw: pointer to hardware structure
+ * @ctrl: pointer to location to receive final IOSF control value
+ *
+ * Returns failing status on timeout
+ *
+ * Note: ctrl can be NULL if the IOSF control register value is not needed
+ **/
+STATIC s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+{
+ u32 i, command = 0;
+
+ /* Check every 10 usec to see if the address cycle completed.
+ * The SB IOSF BUSY bit will clear when the operation is
+ * complete
+ */
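+ /* Worst-case wait is IXGBE_MDIO_COMMAND_TIMEOUT polls of 10 usec each */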
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+ if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
+ break;
+ usec_delay(10);
+ }
+ if (ctrl)
+ *ctrl = command;
+ if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_iosf_sb_reg_x550 - Write a value to a specified register of the
+ * IOSF device
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Data to write to the register
+ **/
+s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 data)
+{
+ u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
+ u32 command, error;
+ s32 ret;
+
+ ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
+ if (ret != IXGBE_SUCCESS)
+ return ret;
+
+ ret = ixgbe_iosf_wait(hw, NULL);
+ if (ret != IXGBE_SUCCESS)
+ goto out;
+
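+ /* Indirect write: program the control register with the target address
+ * and device, write the data register, then poll until BUSY clears.
+ */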
+ command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+ (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+ /* Write IOSF control register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+ /* Write IOSF data register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
+
+ ret = ixgbe_iosf_wait(hw, &command);
+
+ if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+ error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+ IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Failed to write, error %x\n", error);
+ ret = IXGBE_ERR_PHY;
+ }
+
+out:
+ ixgbe_release_swfw_semaphore(hw, gssr);
+ return ret;
+}
+
+/**
+ * ixgbe_read_iosf_sb_reg_x550 - Read a value from a specified register of the
+ * IOSF device
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to read
+ * @device_type: 3 bit device type
+ * @data: pointer to location to receive the register value
+ **/
+s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *data)
+{
+ u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
+ u32 command, error;
+ s32 ret;
+
+ ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
+ if (ret != IXGBE_SUCCESS)
+ return ret;
+
+ ret = ixgbe_iosf_wait(hw, NULL);
+ if (ret != IXGBE_SUCCESS)
+ goto out;
+
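+ /* Indirect read: program the control register, poll until BUSY clears,
+ * then fetch the result from the data register.
+ */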
+ command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+ (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+ /* Write IOSF control register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+ ret = ixgbe_iosf_wait(hw, &command);
+
+ if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+ error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+ IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Failed to read, error %x\n", error);
+ ret = IXGBE_ERR_PHY;
+ }
+
+ if (ret == IXGBE_SUCCESS)
+ *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
+
+out:
+ ixgbe_release_swfw_semaphore(hw, gssr);
+ return ret;
+}
+
+/**
+ * ixgbe_disable_mdd_X550
+ * @hw: pointer to hardware structure
+ *
+ * Disable malicious driver detection
+ **/
+void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("ixgbe_disable_mdd_X550");
+
+ /* Disable MDD for TX DMA and interrupt */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+
+ /* Disable MDD for RX and interrupt */
+ reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
+}
+
+/**
+ * ixgbe_enable_mdd_X550
+ * @hw: pointer to hardware structure
+ *
+ * Enable malicious driver detection
+ **/
+void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("ixgbe_enable_mdd_X550");
+
+ /* Enable MDD for TX DMA and interrupt */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+
+ /* Enable MDD for RX and interrupt */
+ reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
+}
+
+/**
+ * ixgbe_restore_mdd_vf_X550
+ * @hw: pointer to hardware structure
+ * @vf: vf index
+ *
+ * Restore VF that was disabled during malicious driver detection event
+ **/
+void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
+{
+ u32 idx, reg, num_qs, start_q, bitmask;
+
+ DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
+
+ /* Map VF to queues */
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ switch (reg & IXGBE_MRQC_MRQE_MASK) {
+ case IXGBE_MRQC_VMDQRT8TCEN:
+ num_qs = 8; /* 16 VFs / pools */
+ bitmask = 0x000000FF;
+ break;
+ case IXGBE_MRQC_VMDQRSS32EN:
+ case IXGBE_MRQC_VMDQRT4TCEN:
+ num_qs = 4; /* 32 VFs / pools */
+ bitmask = 0x0000000F;
+ break;
+ default: /* 64 VFs / pools */
+ num_qs = 2;
+ bitmask = 0x00000003;
+ break;
+ }
+ start_q = vf * num_qs;
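+ /* Example: with num_qs == 4, VF 10 owns queues 40-43, so idx == 1
+ * and the bitmask is shifted left by 8.
+ */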
+
+ /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
+ idx = start_q / 32;
+ reg = 0;
+ reg |= (bitmask << (start_q % 32));
+ IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
+ IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
+}
+
+/**
+ * ixgbe_mdd_event_X550
+ * @hw: pointer to hardware structure
+ * @vf_bitmap: vf bitmap of malicious vfs
+ *
+ * Handle malicious driver detection event.
+ **/
+void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
+{
+ u32 wqbr;
+ u32 i, j, reg, q, shift, vf, idx;
+
+ DEBUGFUNC("ixgbe_mdd_event_X550");
+
+ /* figure out pool size for mapping to vf's */
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ switch (reg & IXGBE_MRQC_MRQE_MASK) {
+ case IXGBE_MRQC_VMDQRT8TCEN:
+ shift = 3; /* 16 VFs / pools */
+ break;
+ case IXGBE_MRQC_VMDQRSS32EN:
+ case IXGBE_MRQC_VMDQRT4TCEN:
+ shift = 2; /* 32 VFs / pools */
+ break;
+ default:
+ shift = 1; /* 64 VFs / pools */
+ break;
+ }
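+ /* A queue number maps to its owning VF as vf = q >> shift,
+ * e.g. shift == 2 maps queue 40 to VF 10.
+ */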
+
+ /* Read WQBR_TX and WQBR_RX and check for malicious queues */
+ for (i = 0; i < 4; i++) {
+ wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
+ wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
+
+ if (!wqbr)
+ continue;
+
+ /* Get malicious queue */
+ for (j = 0; j < 32 && wqbr; j++) {
+
+ if (!(wqbr & (1 << j)))
+ continue;
+
+ /* Get queue from bitmask */
+ q = j + (i * 32);
+
+ /* Map queue to vf */
+ vf = (q >> shift);
+
+ /* Set vf bit in vf_bitmap */
+ idx = vf / 32;
+ vf_bitmap[idx] |= (1 << (vf % 32));
+ wqbr &= ~(1 << j);
+ }
+ }
+}
+
+/**
+ * ixgbe_get_media_type_X550em - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ */
+enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ DEBUGFUNC("ixgbe_get_media_type_X550em");
+
+ /* Detect if there is a copper PHY attached. */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ media_type = ixgbe_media_type_backplane;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ media_type = ixgbe_media_type_fiber;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ media_type = ixgbe_media_type_copper;
+ break;
+ default:
+ media_type = ixgbe_media_type_unknown;
+ break;
+ }
+ return media_type;
+}
+
+/**
+ * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
+ * @hw: pointer to hardware structure
+ * @linear: true if SFP module is linear
+ */
+STATIC s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
+{
+ DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
+
+ switch (hw->phy.sfp_type) {
+ case ixgbe_sfp_type_not_present:
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+ case ixgbe_sfp_type_da_cu_core0:
+ case ixgbe_sfp_type_da_cu_core1:
+ *linear = true;
+ break;
+ case ixgbe_sfp_type_srlr_core0:
+ case ixgbe_sfp_type_srlr_core1:
+ case ixgbe_sfp_type_da_act_lmt_core0:
+ case ixgbe_sfp_type_da_act_lmt_core1:
+ case ixgbe_sfp_type_1g_sx_core0:
+ case ixgbe_sfp_type_1g_sx_core1:
+ case ixgbe_sfp_type_1g_lx_core0:
+ case ixgbe_sfp_type_1g_lx_core1:
+ *linear = false;
+ break;
+ case ixgbe_sfp_type_unknown:
+ case ixgbe_sfp_type_1g_cu_core0:
+ case ixgbe_sfp_type_1g_cu_core1:
+ default:
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the SFP module and assigns appropriate PHY type.
+ **/
+s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ bool linear;
+
+ DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
+
+ status = ixgbe_identify_module_generic(hw);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Check if SFP module is supported */
+ status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
+ * @hw: pointer to hardware structure
+ */
+s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ bool linear;
+
+ DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
+
+ /* Check if SFP module is supported */
+ status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_init_mac_link_ops_X550em(hw);
+ hw->phy.ops.reset = NULL;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
+ * @hw: pointer to hardware structure
+ */
+void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
+
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ /* CS4227 does not support autoneg, so disable the laser control
+ * functions for SFP+ fiber
+ */
+ mac->ops.disable_tx_laser = NULL;
+ mac->ops.enable_tx_laser = NULL;
+ mac->ops.flap_tx_laser = NULL;
+ mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
+ mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em;
+ mac->ops.set_rate_select_speed =
+ ixgbe_set_soft_rate_select_speed;
+ break;
+ case ixgbe_media_type_copper:
+ mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+ mac->ops.check_link = ixgbe_check_link_t_X550em;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ixgbe_get_link_capabilities_X550em - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ */
+s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
+
+ /* SFP */
+ if (hw->phy.media_type == ixgbe_media_type_fiber) {
+
+ /* CS4227 SFP must not enable auto-negotiation */
+ *autoneg = false;
+
+ /* Check if 1G SFP module. */
+ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ return IXGBE_SUCCESS;
+ }
+
+ /* Link capabilities are based on SFP */
+ if (hw->phy.multispeed_fiber)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ } else {
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
+ * @hw: pointer to hardware structure
+ * @lsc: pointer to boolean flag which indicates whether external Base T
+ * PHY interrupt is lsc
+ *
+ * Determine if the external Base T PHY interrupt cause is high temperature
+ * failure alarm or link status change.
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
+ */
+STATIC s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
+{
+ u32 status;
+ u16 reg;
+
+ *lsc = false;
+
+ /* Vendor alarm triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS ||
+ !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
+ return status;
+
+ /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS ||
+ !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+ IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
+ return status;
+
+ /* High temperature failure alarm triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If high temperature failure, then return over temp error and exit */
+ if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
+ /* power down the PHY in case the PHY FW didn't already */
+ ixgbe_set_copper_phy_power(hw, false);
+ return IXGBE_ERR_OVERTEMP;
+ }
+
+ /* Vendor alarm 2 triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+ if (status != IXGBE_SUCCESS ||
+ !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
+ return status;
+
+ /* link connect/disconnect event occurred */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Indicate LSC */
+ if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
+ *lsc = true;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
+ * @hw: pointer to hardware structure
+ *
+ * Enable link status change and temperature failure alarm for the external
+ * Base T PHY
+ *
+ * Returns PHY access status
+ */
+STATIC s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+{
+ u32 status;
+ u16 reg;
+ bool lsc;
+
+ /* Clear interrupt flags */
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+
+ /* Enable link status change alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Enable high temperature failure alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg |= IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN;
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+ IXGBE_MDIO_GLOBAL_ALARM_1_INT);
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Enable chip-wide vendor alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
+ * @hw: pointer to hardware structure
+ * @speed: link speed
+ *
+ * Configures the integrated KR PHY.
+ **/
+STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ s32 status;
+ u32 reg_val;
+
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+
+ /* Advertise 10G support. */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+
+ /* Advertise 1G support. */
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+
+ /* Restart auto-negotiation. */
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ return status;
+}
+
+/**
+ * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ */
+s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+ ixgbe_link_speed speed;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_phy_ops_X550em");
+
+ hw->mac.ops.set_lan_id(hw);
+
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
+ phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ ixgbe_setup_mux_ctl(hw);
+
+ /* Save NW management interface connected on board. This is used
+ * to determine internal PHY mode.
+ */
+ phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If internal PHY mode is KR, then initialize KR link */
+ if (phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) {
+ speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
+ }
+
+ phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
+ }
+
+ /* Identify the PHY or SFP module */
+ ret_val = phy->ops.identify(hw);
+ if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ return ret_val;
+
+ /* Setup function pointers based on detected hardware */
+ ixgbe_init_mac_link_ops_X550em(hw);
+ if (phy->sfp_type != ixgbe_sfp_type_unknown)
+ phy->ops.reset = NULL;
+
+ /* Set functions pointers based on phy type */
+ switch (hw->phy.type) {
+ case ixgbe_phy_x550em_kx4:
+ phy->ops.setup_link = ixgbe_setup_kx4_x550em;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
+ case ixgbe_phy_x550em_kr:
+ phy->ops.setup_link = ixgbe_setup_kr_x550em;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
+ case ixgbe_phy_x550em_ext_t:
+ /* Save NW management interface connected on board. This is used
+ * to determine internal PHY mode
+ */
+ phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If internal link mode is XFI, then setup iXFI internal link,
+ * else setup KR now.
+ */
+ if (!(phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ phy->ops.setup_internal_link =
+ ixgbe_setup_internal_phy_t_x550em;
+ } else {
+ speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
+ }
+
+ /* setup SW LPLU only for first revision */
+ if (!(IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw,
+ IXGBE_FUSES0_GROUP(0))))
+ phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
+
+ phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
+ phy->ops.reset = ixgbe_reset_phy_t_X550em;
+ break;
+ default:
+ break;
+ }
+ return ret_val;
+}
+
+/**
+ * ixgbe_reset_hw_X550em - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
+ * reset.
+ */
+s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+{
+ ixgbe_link_speed link_speed;
+ s32 status;
+ u32 ctrl = 0;
+ u32 i;
+ u32 hlreg0;
+ bool link_up = false;
+
+ DEBUGFUNC("ixgbe_reset_hw_X550em");
+
+ /* Call adapter stop to disable Tx/Rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+ /* PHY ops must be identified and initialized prior to reset */
+
+ /* Identify PHY and related function pointers */
+ status = hw->phy.ops.init(hw);
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ return status;
+
+ /* start the external PHY */
+ if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
+ status = ixgbe_init_ext_t_x550em(hw);
+ if (status)
+ return status;
+ }
+
+ /* Setup SFP module if there is one present. */
+ if (hw->phy.sfp_setup_needed) {
+ status = hw->mac.ops.setup_sfp(hw);
+ hw->phy.sfp_setup_needed = false;
+ }
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ return status;
+
+ /* Reset PHY */
+ if (!hw->phy.reset_disable && hw->phy.ops.reset)
+ hw->phy.ops.reset(hw);
+
+mac_reset_top:
+ /* Issue global reset to the MAC. Needs to be SW reset if link is up.
+ * If link reset is used when link is up, it might reset the PHY when
+ * mng is using it. If link is down or the flag to force full link
+ * reset is set, then perform link reset.
+ */
+ ctrl = IXGBE_CTRL_LNK_RST;
+ if (!hw->force_full_reset) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up)
+ ctrl = IXGBE_CTRL_RST;
+ }
+
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear meaning reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ DEBUGOUT("Reset polling failed to complete.\n");
+ }
+
+ msec_delay(50);
+
+ /* Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to
+ * allow time for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /* Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
+ /* Config MDIO clock speed. */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ }
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+ ixgbe_setup_mux_ctl(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
+ * @hw: pointer to hardware structure
+ */
+s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
+{
+ u32 status;
+ u16 reg;
+
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_TX_VENDOR_ALARMS_3,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If PHY FW reset completed bit is set then this is the first
+ * SW instance after a power on so the PHY FW must be un-stalled.
+ */
+ if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_GLOBAL_RES_PR_10,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg &= ~IXGBE_MDIO_POWER_UP_STALL;
+
+ status = hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_GLOBAL_RES_PR_10,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_kr_x550em - Configure the KR PHY.
+ * @hw: pointer to hardware structure
+ *
+ * Configures the integrated KR PHY.
+ **/
+s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
+{
+ return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
+}
+
+/**
+ * ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
+ * @hw: pointer to hardware structure
+ *
+ * Configures the integrated KX4 PHY.
+ **/
+s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 reg_val;
+
+ status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
+ IXGBE_SB_IOSF_TARGET_KX4_PCS, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 |
+ IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX);
+
+ reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE;
+
+ /* Advertise 10G support. */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4;
+
+ /* Advertise 1G support. */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX;
+
+ /* Restart auto-negotiation. */
+ reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
+ status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
+ IXGBE_SB_IOSF_TARGET_KX4_PCS, reg_val);
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_mac_link_sfp_x550em - Set up the internal/external PHY for SFP
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Configure the external PHY and the integrated KR PHY for SFP support.
+ **/
+s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 ret_val;
+ u16 reg_slice, reg_val;
+ bool setup_linear = false;
+ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+ /* Check if SFP module is supported and linear */
+ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module is present, return success: there is no reason to
+ * configure the CS4227, and an SFP-not-present condition is not treated
+ * as an error in the setup MAC link flow.
+ */
+ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
+ return IXGBE_SUCCESS;
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
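+ /* CS4227 registers are banked per LAN port; lan_id << 12 selects this
+ * port's 0x1000-byte register window.
+ */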
+ /* Configure CS4227 LINE side to 10G SR. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12);
+ reg_val = IXGBE_CS4227_SPEED_10G;
+ ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
+ reg_val);
+
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12);
+ reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+ ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
+ reg_val);
+
+ /* Configure CS4227 for HOST connection rate then type. */
+ reg_slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12);
+ reg_val = (speed & IXGBE_LINK_SPEED_10GB_FULL) ?
+ IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G;
+ ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
+ reg_val);
+
+ reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12);
+ if (setup_linear)
+ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
+ reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+ ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
+ reg_val);
+
+ /* If internal link mode is XFI, then setup XFI internal link. */
+ if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))
+ ret_val = ixgbe_setup_ixfi_x550em(hw, &speed);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ *
+ * Configures the integrated KR PHY to use iXFI mode. Used to connect an
+ * internal and external PHY at a specific speed, without autonegotiation.
+ **/
+STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+{
+ s32 status;
+ u32 reg_val;
+
+ /* Disable AN and force speed to 10G Serial. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+
+ /* Select forced link speed for internal PHY. */
+ switch (*speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ break;
+ default:
+ /* Other link speeds are not supported by internal KR PHY. */
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Disable training protocol FSM. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Disable Flex from training TXFFE. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Enable override for coefficients. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Toggle port SW reset by AN reset. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ return status;
+}
+
+/**
+ * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
+ * @hw: address of hardware structure
+ * @link_up: address of boolean to indicate link status
+ *
+ * Returns error code if unable to get link status.
+ */
+STATIC s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
+{
+ u32 ret;
+ u16 autoneg_status;
+
+ *link_up = false;
+
+ /* Link status is latched; read the register twice back to back so the
+ * second read returns the current status.
+ */
+ ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (ret != IXGBE_SUCCESS)
+ return ret;
+
+ ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (ret != IXGBE_SUCCESS)
+ return ret;
+
+ *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
+ * @hw: point to hardware structure
+ *
+ * Configures the link between the integrated KR PHY and the external X557 PHY
+ * The driver will call this function when it gets a link status change
+ * interrupt from the X557 PHY. This function configures the link speed
+ * between the PHYs to match the link speed of the BASE-T link.
+ *
+ * A return of a non-zero value indicates an error, and the base driver should
+ * not report link up.
+ */
+s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
+{
+ ixgbe_link_speed force_speed;
+ bool link_up;
+ u32 status;
+ u16 speed;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+ return IXGBE_ERR_CONFIG;
+
+ /* If link is not up, then there is no setup necessary so return */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (!link_up)
+ return IXGBE_SUCCESS;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &speed);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If link is still not up, then no setup is necessary so return */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ if (!link_up)
+ return IXGBE_SUCCESS;
+
+ /* clear everything but the speed and duplex bits */
+ speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
+
+ switch (speed) {
+ case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
+ force_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
+ force_speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ default:
+ /* Internal PHY does not support anything else */
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ return ixgbe_setup_ixfi_x550em(hw, &force_speed);
+}
+
+/**
+ * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
+ * @hw: pointer to hardware structure
+ *
+ * Configures the integrated KR PHY to use internal loopback mode.
+ **/
+s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 reg_val;
+
+ /* Disable AN and force speed to 10G Serial. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Set near-end loopback clocks. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
+ reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Set loopback enable. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Training bypass. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
+ * command, assuming that the semaphore is already obtained.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 *data)
+{
+ s32 status;
+ struct ixgbe_hic_read_shadow_ram buffer;
+
+ DEBUGFUNC("ixgbe_read_ee_hostif_data_X550");
+ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* convert offset from words to bytes */
+ buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
+ /* one word */
+ buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+
+ if (status)
+ return status;
+
+ *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ FW_NVM_DATA_OFFSET);
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_read_ee_hostif_X550");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS) {
+ status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads one or more 16 bit words from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ struct ixgbe_hic_read_shadow_ram buffer;
+ u32 current_word = 0;
+ u16 words_to_read;
+ s32 status;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
+
+ /* Take semaphore for the entire operation. */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ if (status) {
+ DEBUGOUT("EEPROM read buffer - semaphore failed\n");
+ return status;
+ }
+ while (words) {
+ if (words > FW_MAX_READ_BUFFER_SIZE / 2)
+ words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
+ else
+ words_to_read = words;
+
+ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* convert offset from words to bytes */
+ buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
+ buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ false);
+
+ if (status) {
+ DEBUGOUT("Host interface command failed\n");
+ goto out;
+ }
+
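+ /* Each 32-bit FLEX_MNG read returns two EEPROM words: the low 16 bits
+ * first, then the high 16 bits.
+ */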
+ for (i = 0; i < words_to_read; i++) {
+ u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
+ 2 * i;
+ u32 value = IXGBE_READ_REG(hw, reg);
+
+ data[current_word] = (u16)(value & 0xffff);
+ current_word++;
+ i++;
+ if (i < words_to_read) {
+ value >>= 16;
+ data[current_word] = (u16)(value & 0xffff);
+ current_word++;
+ }
+ }
+ words -= words_to_read;
+ }
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 data)
+{
+ s32 status;
+ struct ixgbe_hic_write_shadow_ram buffer;
+
+ DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
+
+ buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* one word */
+ buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
+ buffer.data = data;
+ buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_write_ee_hostif_X550");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS) {
+ status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ DEBUGOUT("write ee hostif failed to get semaphore");
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes one or more 16 bit words to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 i = 0;
+
+ DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
+
+ /* Take semaphore for the entire operation. */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("EEPROM write buffer - semaphore failed\n");
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
+ data[i]);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Eeprom buffered write failed\n");
+ break;
+ }
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+out:
+
+ return status;
+}
+
+/**
+ * ixgbe_checksum_ptr_x550 - Checksum one pointer region
+ * @hw: pointer to hardware structure
+ * @ptr: pointer offset in eeprom
+ * @size: size of section pointed by ptr, if 0 first word will be used as size
+ * @csum: address of checksum to update
+ *
+ * Returns error status for any failure
+ */
+STATIC s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+ u16 size, u16 *csum, u16 *buffer,
+ u32 buffer_size)
+{
+ u16 buf[256];
+ s32 status;
+ u16 length, bufsz, i, start;
+ u16 *local_buffer;
+
+ bufsz = sizeof(buf) / sizeof(buf[0]);
+
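+ /* With no caller-supplied buffer, the region is streamed through the
+ * 256-word stack buffer in bufsz chunks.
+ */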
+ /* Read a chunk at the pointer location */
+ if (!buffer) {
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
+ if (status) {
+ DEBUGOUT("Failed to read EEPROM image\n");
+ return status;
+ }
+ local_buffer = buf;
+ } else {
+ if (buffer_size < ptr)
+ return IXGBE_ERR_PARAM;
+ local_buffer = &buffer[ptr];
+ }
+
+ if (size) {
+ start = 0;
+ length = size;
+ } else {
+ start = 1;
+ length = local_buffer[0];
+
+ /* Skip pointer section if length is invalid. */
+ if (length == 0xFFFF || length == 0 ||
+ (ptr + length) >= hw->eeprom.word_size)
+ return IXGBE_SUCCESS;
+ }
+
+ if (buffer && ((u32)start + (u32)length > buffer_size))
+ return IXGBE_ERR_PARAM;
+
+ for (i = start; length; i++, length--) {
+ if (i == bufsz && !buffer) {
+ ptr += bufsz;
+ i = 0;
+ if (length < bufsz)
+ bufsz = length;
+
+ /* Read a chunk at the pointer location */
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
+ bufsz, buf);
+ if (status) {
+ DEBUGOUT("Failed to read EEPROM image\n");
+ return status;
+ }
+ }
+ *csum += local_buffer[i];
+ }
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @buffer: pointer to a buffer holding the EEPROM image to checksum, or
+ * NULL to read the image from hardware
+ * @buffer_size: size of buffer
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
+{
+ u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
+ u16 *local_buffer;
+ s32 status;
+ u16 checksum = 0;
+ u16 pointer, i, size;
+
+ DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (!buffer) {
+ /* Read pointer area */
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
+ IXGBE_EEPROM_LAST_WORD + 1,
+ eeprom_ptrs);
+ if (status) {
+ DEBUGOUT("Failed to read EEPROM image\n");
+ return status;
+ }
+ local_buffer = eeprom_ptrs;
+ } else {
+ if (buffer_size < IXGBE_EEPROM_LAST_WORD)
+ return IXGBE_ERR_PARAM;
+ local_buffer = buffer;
+ }
+
+ /*
+ * For X550 hardware include 0x0-0x41 in the checksum, skip the
+ * checksum word itself
+ */
+ for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
+ if (i != IXGBE_EEPROM_CHECKSUM)
+ checksum += local_buffer[i];
+
+ /*
+ * Include all data from pointers 0x3, 0x6-0xE. This excludes the
+ * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+ */
+ for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
+ if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+ continue;
+
+ pointer = local_buffer[i];
+
+ /* Skip pointer section if the pointer is invalid. */
+ if (pointer == 0xFFFF || pointer == 0 ||
+ pointer >= hw->eeprom.word_size)
+ continue;
+
+ switch (i) {
+ case IXGBE_PCIE_GENERAL_PTR:
+ size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
+ break;
+ case IXGBE_PCIE_CONFIG0_PTR:
+ case IXGBE_PCIE_CONFIG1_PTR:
+ size = IXGBE_PCIE_CONFIG_SIZE;
+ break;
+ default:
+ size = 0;
+ break;
+ }
+
+ status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
+ buffer, buffer_size);
+ if (status)
+ return status;
+ }
+
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return (s32)checksum;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+ return ixgbe_calc_checksum_X550(hw, NULL, 0);
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+ &read_checksum);
+ if (status)
+ return status;
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum) {
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Invalid EEPROM checksum");
+ }
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
+ return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to shadow RAM using EEWR register, software calculates
+ * checksum and updates the EEPROM and instructs the hardware to update
+ * the flash.
+ **/
+s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum = 0;
+
+ DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ status = ixgbe_calc_eeprom_checksum_X550(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+ checksum);
+ if (status)
+ return status;
+
+ status = ixgbe_update_flash_X550(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
+ **/
+s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ union ixgbe_hic_hdr2 buffer;
+
+ DEBUGFUNC("ixgbe_update_flash_X550");
+
+ buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
+ buffer.req.buf_lenh = 0;
+ buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
+ buffer.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u16 ext_ability = 0;
+
+ DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
+
+ hw->phy.ops.identify(hw);
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_x550em_kr:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
+ IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
+ case ixgbe_phy_x550em_kx4:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
+ IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
+ case ixgbe_phy_x550em_ext_t:
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &ext_ability);
+ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ break;
+ default:
+ break;
+ }
+
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
+ physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_get_bus_info_X550em - Set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets bus link width and speed to unknown because X550em is
+ * not a PCI device.
+ **/
+s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
+{
+ DEBUGFUNC("ixgbe_get_bus_info_X550em");
+
+ hw->bus.width = ixgbe_bus_width_unknown;
+ hw->bus.speed = ixgbe_bus_speed_unknown;
+
+ hw->mac.ops.set_lan_id(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_disable_rx_x550 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disables the Rx DMA unit for x550
+ **/
+void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
+{
+ u32 rxctrl, pfdtxgswc;
+ s32 status;
+ struct ixgbe_hic_disable_rxen fw_cmd;
+
+ DEBUGFUNC("ixgbe_enable_rx_dma_x550");
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
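+ /* Save and clear the VT loopback enable bit; set_lben records the
+ * original state so it can be restored when Rx is re-enabled.
+ */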
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
+ fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
+ fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ fw_cmd.port_number = (u8)hw->bus.lan_id;
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(struct ixgbe_hic_disable_rxen),
+ IXGBE_HI_COMMAND_TIMEOUT, true);
+
+ /* If the FW command fails, disable Rx using a register write */
+ if (status) {
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+ }
+ }
+}
+
+/**
+ * ixgbe_enter_lplu_t_x550em - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
+ * X557 PHY immediately prior to entering LPLU.
+ **/
+s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
+{
+ u16 an_10g_cntl_reg, autoneg_reg, speed;
+ s32 status;
+ ixgbe_link_speed lcd_speed;
+ u32 save_autoneg;
+ bool link_up;
+
+ /* SW LPLU not required on later HW revisions. */
+ if (IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))
+ return IXGBE_SUCCESS;
+
+ /* If blocked by MNG FW, then don't restart AN */
+ if (ixgbe_check_reset_blocked(hw))
+ return IXGBE_SUCCESS;
+
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If link is down, LPLU is disabled in NVM, or both WoL and
+ * manageability are disabled, then force link down by entering low
+ * power mode.
+ */
+ if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
+ !(hw->wol_enabled || ixgbe_mng_present(hw)))
+ return ixgbe_set_copper_phy_power(hw, FALSE);
+
+ /* Determine LCD */
+ status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If no valid LCD link speed, then force link down and exit. */
+ if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
+ return ixgbe_set_copper_phy_power(hw, FALSE);
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &speed);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If no link now, speed is invalid so take link down */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status != IXGBE_SUCCESS)
+ return ixgbe_set_copper_phy_power(hw, false);
+
+ /* clear everything but the speed bits */
+ speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
+
+ /* If current speed is already LCD, then exit. */
+ if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
+ (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
+ ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
+ (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
+ return status;
+
+ /* Clear AN completed indication */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &an_10g_cntl_reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ save_autoneg = hw->phy.autoneg_advertised;
+
+ /* Setup link at lowest common link speed */
+ status = hw->mac.ops.setup_link(hw, lcd_speed, false);
+
+ /* restore autoneg from before setting lplu speed */
+ hw->phy.autoneg_advertised = save_autoneg;
+
+ return status;
+}
+
+/**
+ * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
+ * @hw: pointer to hardware structure
+ * @lcd_speed: pointer to lowest common link speed
+ *
+ * Determine lowest common link speed with link partner.
+ **/
+s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
+{
+ u16 an_lp_status;
+ s32 status;
+ u16 word = hw->eeprom.ctrl_word_3;
+
+ *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &an_lp_status);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If link partner advertised 1G, return 1G */
+ if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
+ *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
+ return status;
+ }
+
+ /* If 10G is disabled for LPLU via NVM D10GMP, then return no valid LCD */
+ if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
+ (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
+ return status;
+
+ /* Link partner not capable of lower speeds, return 10G */
+ *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ return status;
+}
+
+/**
+ * ixgbe_setup_fc_X550em - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 pause, asm_dir, reg_val;
+
+ DEBUGFUNC("ixgbe_setup_fc_X550em");
+
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /* 10gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /* Determine PAUSE and ASM_DIR bits. */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ pause = 0;
+ asm_dir = 0;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause = 0;
+ asm_dir = 1;
+ break;
+ case ixgbe_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE, as such we fall
+ * through to the fc_full statement. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ case ixgbe_fc_full:
+ pause = 1;
+ asm_dir = 1;
+ break;
+ default:
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+ "Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ }
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
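+ /* Program the SYM/ASM PAUSE advertisement bits in the KR PHY
+ * through the IOSF sideband interface.
+ */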
+ ret_val = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+ reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
+ if (pause)
+ reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
+ if (asm_dir)
+ reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ ret_val = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* This device does not fully support AN. */
+ hw->fc.disable_fc_autoneg = true;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_set_mux - Set mux for port 1 access with CS4227
+ * @hw: pointer to hardware structure
+ * @state: set mux if 1, clear if 0
+ */
+STATIC void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
+{
+ u32 esdp;
+
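+ /* Only port 1 (lan_id != 0) reaches the CS4227 through the mux. */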
+ if (!hw->bus.lan_id)
+ return;
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (state)
+ esdp |= IXGBE_ESDP_SDP1;
+ else
+ esdp &= ~IXGBE_ESDP_SDP1;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and sets the I2C MUX
+ **/
+s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
+
+ status = ixgbe_acquire_swfw_sync_X540(hw, mask);
+ if (status)
+ return status;
+
+ if (mask & IXGBE_GSSR_I2C_MASK)
+ ixgbe_set_mux(hw, 1);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and sets the I2C MUX
+ **/
+void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+{
+ DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
+
+ if (mask & IXGBE_GSSR_I2C_MASK)
+ ixgbe_set_mux(hw, 0);
+
+ ixgbe_release_swfw_sync_X540(hw, mask);
+}
+
+/**
+ * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
+ * @hw: pointer to hardware structure
+ *
+ * Handle external Base T PHY interrupt. If the interrupt is a high
+ * temperature failure alarm, return an error; if it is a link status
+ * change, set up the internal/external PHY link.
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
+ */
+s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+{
+ bool lsc;
+ u32 status;
+
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (lsc)
+ return ixgbe_setup_internal_phy(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Setup internal/external PHY link speed based on link speed, then set
+ * external PHY auto advertised link speed.
+ *
+ * Returns error status for any failure
+ **/
+s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 status;
+ ixgbe_link_speed force_speed;
+
+ DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
+
+ /* Setup internal/external PHY link speed to iXFI (10G), unless
+ * only 1G is auto advertised, in which case setup the KX link.
+ */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ force_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ force_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* If internal link mode is XFI, then setup XFI internal link. */
+ if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
+
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
+}
+
+/**
+ * ixgbe_check_link_t_X550em - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Check that both the MAC and X557 external PHY have link.
+ **/
+s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ u32 status;
+ u16 autoneg_status;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+ return IXGBE_ERR_CONFIG;
+
+ status = ixgbe_check_mac_link_generic(hw, speed, link_up,
+ link_up_wait_to_complete);
+
+ /* If check link fails or MAC link is not up, then return */
+ if (status != IXGBE_SUCCESS || !(*link_up))
+ return status;
+
+ /* MAC link is up, so check external PHY link.
+ * The link status bit is latched, so read the register twice
+ * back to back to get the current status.
+ */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If external PHY link is not up, then indicate link not up */
+ if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
+ *link_up = false;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ status = ixgbe_reset_phy_generic(hw);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Configure Link Status Alarm and Temperature Threshold interrupts */
+ return ixgbe_enable_lasi_ext_t_x550em(hw);
+}
+
+/**
+ * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @led_idx: led number to turn on
+ **/
+s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
+{
+ u16 phy_data;
+
+ DEBUGFUNC("ixgbe_led_on_t_X550em");
+
+ if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
+ return IXGBE_ERR_PARAM;
+
+ /* To turn on the LED, set mode to ON. */
+ ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
+ phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
+ ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @led_idx: led number to turn off
+ **/
+s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
+{
+ u16 phy_data;
+
+ DEBUGFUNC("ixgbe_led_off_t_X550em");
+
+ if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
+ return IXGBE_ERR_PARAM;
+
+ /* To turn off the LED, set mode to OFF. */
+ ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
+ phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
+ ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
+
+ return IXGBE_SUCCESS;
+}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x550.h b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_x550.h
index e8de1343..4f986e87 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x550.h
+++ b/src/dpdk22/drivers/net/ixgbe/base/ixgbe_x550.h
@@ -1,6 +1,6 @@
/*******************************************************************************
-Copyright (c) 2001-2014, Intel Corporation
+Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -40,11 +40,12 @@ s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw);
s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw);
s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw);
+s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw);
s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw);
s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw);
-s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
- u16 *checksum_val);
+s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size);
+s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val);
s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw);
s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data);
@@ -80,9 +81,28 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw);
-s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw);
void ixgbe_disable_rx_x550(struct ixgbe_hw *hw);
+s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed);
+s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask);
+s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx);
+s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx);
#endif /* _IXGBE_X550_H_ */
-
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass.h b/src/dpdk22/drivers/net/ixgbe/ixgbe_bypass.h
index fcd97743..fcd97743 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass.h
+++ b/src/dpdk22/drivers/net/ixgbe/ixgbe_bypass.h
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass_api.h b/src/dpdk22/drivers/net/ixgbe/ixgbe_bypass_api.h
index b4a73864..b4a73864 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass_api.h
+++ b/src/dpdk22/drivers/net/ixgbe/ixgbe_bypass_api.h
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass_defines.h b/src/dpdk22/drivers/net/ixgbe/ixgbe_bypass_defines.h
index 22570acf..22570acf 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass_defines.h
+++ b/src/dpdk22/drivers/net/ixgbe/ixgbe_bypass_defines.h
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_ethdev.c b/src/dpdk22/drivers/net/ixgbe/ixgbe_ethdev.c
index 3fc37384..4c4c6dfb 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/src/dpdk22/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -52,7 +52,6 @@
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
@@ -63,12 +62,15 @@
#include <rte_dev.h>
#include "ixgbe_logs.h"
-#include "ixgbe/ixgbe_api.h"
-#include "ixgbe/ixgbe_vf.h"
-#include "ixgbe/ixgbe_common.h"
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_vf.h"
+#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_phy.h"
+#include "ixgbe_regs.h"
/*
* High threshold controlling when to start sending XOFF frames. Must be at
@@ -83,6 +85,9 @@
*/
#define IXGBE_FC_LO 0x40
+/* Default minimum inter-interrupt interval for EITR configuration */
+#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT 0x79E
+
/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
@@ -92,6 +97,7 @@
#define IXGBE_MMW_SIZE_DEFAULT 0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14
+#define IXGBE_MAX_RING_DESC 4096 /* replicate define from rxtx */
/*
* Default values for RX/TX configuration
@@ -117,8 +123,23 @@
#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
-static int eth_ixgbe_dev_init(struct eth_driver *eth_drv,
- struct rte_eth_dev *eth_dev);
+#define IXGBE_HKEY_MAX_INDEX 10
+
+/* Additional timesync values. */
+#define NSEC_PER_SEC 1000000000L
+#define IXGBE_INCVAL_10GB 0x66666666
+#define IXGBE_INCVAL_1GB 0x40000000
+#define IXGBE_INCVAL_100 0x50000000
+#define IXGBE_INCVAL_SHIFT_10GB 28
+#define IXGBE_INCVAL_SHIFT_1GB 24
+#define IXGBE_INCVAL_SHIFT_100 21
+#define IXGBE_INCVAL_SHIFT_82599 7
+#define IXGBE_INCPER_SHIFT_82599 24
+
+#define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL
+
+static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
@@ -133,7 +154,12 @@ static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
+static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstats *xstats, unsigned n);
+static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstats *xstats, unsigned n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
uint16_t queue_id,
uint8_t stat_idx,
@@ -173,6 +199,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
@@ -181,16 +208,19 @@ static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
+static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
/* For Virtual Function support */
-static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
- struct rte_eth_dev *eth_dev);
+static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
+static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
@@ -200,6 +230,13 @@ static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
uint16_t queue, int on);
static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector);
+static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
@@ -212,10 +249,17 @@ static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
uint64_t pool_mask,uint8_t vlan_on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
- struct rte_eth_vmdq_mirror_conf *mirror_conf,
+ struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
uint8_t rule_id);
+static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector);
+static void ixgbe_configure_msix(struct rte_eth_dev *dev);
static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t tx_rate);
@@ -226,33 +270,81 @@ static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
-static int ixgbe_add_syn_filter(struct rte_eth_dev *dev,
- struct rte_syn_filter *filter, uint16_t rx_queue);
-static int ixgbe_remove_syn_filter(struct rte_eth_dev *dev);
-static int ixgbe_get_syn_filter(struct rte_eth_dev *dev,
- struct rte_syn_filter *filter, uint16_t *rx_queue);
-static int ixgbe_add_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
- struct rte_ethertype_filter *filter, uint16_t rx_queue);
-static int ixgbe_remove_ethertype_filter(struct rte_eth_dev *dev,
- uint16_t index);
-static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
- struct rte_ethertype_filter *filter, uint16_t *rx_queue);
-static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
- struct rte_5tuple_filter *filter, uint16_t rx_queue);
-static int ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
- uint16_t index);
-static int ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
- struct rte_5tuple_filter *filter, uint16_t *rx_queue);
-
+static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add);
+static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter);
+static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter);
+static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter);
+static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *filter,
+ bool add);
+static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *filter);
+static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter);
+static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
+static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info);
+
+static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
+static int ixgbe_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs);
+static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
+static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom);
+static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom);
+
+static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
+static int ixgbevf_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs);
+
+static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
+static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
+static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags);
+static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
+ const struct timespec *timestamp);
+
/*
* Define VF Stats MACRO for Non "cleared on read" register
*/
-#define UPDATE_VF_STAT(reg, last, cur) \
+#define UPDATE_VF_STAT(reg, last, cur) \
{ \
- u32 latest = IXGBE_READ_REG(hw, reg); \
- cur += latest - last; \
+ uint32_t latest = IXGBE_READ_REG(hw, reg); \
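+ /* mask keeps the delta correct across 32-bit wrap-around */ \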
+ cur += (latest - last) & UINT_MAX; \
last = latest; \
}
@@ -286,7 +378,7 @@ static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
/*
* The set of PCI devices this driver supports
*/
-static struct rte_pci_id pci_id_ixgbe_map[] = {
+static const struct rte_pci_id pci_id_ixgbe_map[] = {
#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
@@ -298,7 +390,7 @@ static struct rte_pci_id pci_id_ixgbe_map[] = {
/*
* The set of PCI devices this driver supports (for 82599 VF)
*/
-static struct rte_pci_id pci_id_ixgbevf_map[] = {
+static const struct rte_pci_id pci_id_ixgbevf_map[] = {
#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
@@ -306,7 +398,19 @@ static struct rte_pci_id pci_id_ixgbevf_map[] = {
};
-static struct eth_dev_ops ixgbe_eth_dev_ops = {
+static const struct rte_eth_desc_lim rx_desc_lim = {
+ .nb_max = IXGBE_MAX_RING_DESC,
+ .nb_min = IXGBE_MIN_RING_DESC,
+ .nb_align = IXGBE_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+ .nb_max = IXGBE_MAX_RING_DESC,
+ .nb_min = IXGBE_MIN_RING_DESC,
+ .nb_align = IXGBE_TXD_ALIGN,
+};
+
+static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.dev_configure = ixgbe_dev_configure,
.dev_start = ixgbe_dev_start,
.dev_stop = ixgbe_dev_stop,
@@ -319,7 +423,9 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
.allmulticast_disable = ixgbe_dev_allmulticast_disable,
.link_update = ixgbe_dev_link_update,
.stats_get = ixgbe_dev_stats_get,
+ .xstats_get = ixgbe_dev_xstats_get,
.stats_reset = ixgbe_dev_stats_reset,
+ .xstats_reset = ixgbe_dev_xstats_reset,
.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
.dev_infos_get = ixgbe_dev_info_get,
.mtu_set = ixgbe_dev_mtu_set,
@@ -332,6 +438,8 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
.tx_queue_start = ixgbe_dev_tx_queue_start,
.tx_queue_stop = ixgbe_dev_tx_queue_stop,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
+ .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
.rx_queue_release = ixgbe_dev_rx_queue_release,
.rx_queue_count = ixgbe_dev_rx_queue_count,
.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
@@ -344,6 +452,7 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
.mac_addr_add = ixgbe_add_rar,
.mac_addr_remove = ixgbe_remove_rar,
+ .mac_addr_set = ixgbe_set_default_mac_addr,
.uc_hash_table_set = ixgbe_uc_hash_table_set,
.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
.mirror_rule_set = ixgbe_mirror_rule_set,
@@ -354,14 +463,6 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
.set_vf_vlan_filter = ixgbe_set_pool_vlan_filter,
.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
.set_vf_rate_limit = ixgbe_set_vf_rate_limit,
- .fdir_add_signature_filter = ixgbe_fdir_add_signature_filter,
- .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
- .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
- .fdir_infos_get = ixgbe_fdir_info_get,
- .fdir_add_perfect_filter = ixgbe_fdir_add_perfect_filter,
- .fdir_update_perfect_filter = ixgbe_fdir_update_perfect_filter,
- .fdir_remove_perfect_filter = ixgbe_fdir_remove_perfect_filter,
- .fdir_set_masks = ixgbe_fdir_set_masks,
.reta_update = ixgbe_dev_rss_reta_update,
.reta_query = ixgbe_dev_rss_reta_query,
#ifdef RTE_NIC_BYPASS
@@ -377,29 +478,38 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
#endif /* RTE_NIC_BYPASS */
.rss_hash_update = ixgbe_dev_rss_hash_update,
.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
- .add_syn_filter = ixgbe_add_syn_filter,
- .remove_syn_filter = ixgbe_remove_syn_filter,
- .get_syn_filter = ixgbe_get_syn_filter,
- .add_ethertype_filter = ixgbe_add_ethertype_filter,
- .remove_ethertype_filter = ixgbe_remove_ethertype_filter,
- .get_ethertype_filter = ixgbe_get_ethertype_filter,
- .add_5tuple_filter = ixgbe_add_5tuple_filter,
- .remove_5tuple_filter = ixgbe_remove_5tuple_filter,
- .get_5tuple_filter = ixgbe_get_5tuple_filter,
+ .filter_ctrl = ixgbe_dev_filter_ctrl,
+ .set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+ .rxq_info_get = ixgbe_rxq_info_get,
+ .txq_info_get = ixgbe_txq_info_get,
+ .timesync_enable = ixgbe_timesync_enable,
+ .timesync_disable = ixgbe_timesync_disable,
+ .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
+ .get_reg_length = ixgbe_get_reg_length,
+ .get_reg = ixgbe_get_regs,
+ .get_eeprom_length = ixgbe_get_eeprom_length,
+ .get_eeprom = ixgbe_get_eeprom,
+ .set_eeprom = ixgbe_set_eeprom,
+ .get_dcb_info = ixgbe_dev_get_dcb_info,
+ .timesync_adjust_time = ixgbe_timesync_adjust_time,
+ .timesync_read_time = ixgbe_timesync_read_time,
+ .timesync_write_time = ixgbe_timesync_write_time,
};
/*
* dev_ops for virtual function, bare necessities for basic vf
* operation have been implemented
*/
-static struct eth_dev_ops ixgbevf_eth_dev_ops = {
-
+static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.dev_configure = ixgbevf_dev_configure,
.dev_start = ixgbevf_dev_start,
.dev_stop = ixgbevf_dev_stop,
.link_update = ixgbe_dev_link_update,
.stats_get = ixgbevf_dev_stats_get,
+ .xstats_get = ixgbevf_dev_xstats_get,
.stats_reset = ixgbevf_dev_stats_reset,
+ .xstats_reset = ixgbevf_dev_stats_reset,
.dev_close = ixgbevf_dev_close,
.dev_infos_get = ixgbevf_dev_info_get,
.mtu_set = ixgbevf_dev_set_mtu,
@@ -408,12 +518,143 @@ static struct eth_dev_ops ixgbevf_eth_dev_ops = {
.vlan_offload_set = ixgbevf_vlan_offload_set,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
.rx_queue_release = ixgbe_dev_rx_queue_release,
+ .rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
+ .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
.mac_addr_add = ixgbevf_add_mac_addr,
.mac_addr_remove = ixgbevf_remove_mac_addr,
+ .set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+ .rxq_info_get = ixgbe_rxq_info_get,
+ .txq_info_get = ixgbe_txq_info_get,
+ .mac_addr_set = ixgbevf_set_default_mac_addr,
+ .get_reg_length = ixgbevf_get_reg_length,
+ .get_reg = ixgbevf_get_regs,
+ .reta_update = ixgbe_dev_rss_reta_update,
+ .reta_query = ixgbe_dev_rss_reta_query,
+ .rss_hash_update = ixgbe_dev_rss_hash_update,
+ .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
+};
+
+/* store statistics names and their offsets in the stats structure */
+struct rte_ixgbe_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
+ {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
+ {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
+ {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
+ {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
+ {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
+ {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
+ {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
+ {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
+ {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
+ {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
+ {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
+ {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
+ {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
+ {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
+ {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
+ prc1023)},
+ {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
+ prc1522)},
+ {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
+ {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
+ {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
+ {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
+ {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
+ {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
+ {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
+ {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
+ {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
+ {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
+ {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
+ {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
+ {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
+ {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
+ {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
+ {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
+ {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
+ ptc1023)},
+ {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
+ ptc1522)},
+ {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
+ {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
+ {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
+ {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
+
+ {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
+ fdirustat_add)},
+ {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
+ fdirustat_remove)},
+ {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
+ fdirfstat_fadd)},
+ {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
+ fdirfstat_fremove)},
+ {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
+ fdirmatch)},
+ {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
+ fdirmiss)},
+
+ {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
+ {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
+ {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
+ fclast)},
+ {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
+ {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
+ {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
+ {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
+ {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
+ fcoe_noddp)},
+ {"rx_fcoe_no_direct_data_placement_ext_buff",
+ offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},
+
+ {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
+ lxontxc)},
+ {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
+ lxonrxc)},
+ {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
+ lxofftxc)},
+ {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
+ lxoffrxc)},
+ {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
+};
+
+#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
+ sizeof(rte_ixgbe_stats_strings[0]))
+
+/* Per-queue statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
+ {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
+ {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
+ {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
+ {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
+};
+
+#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
+ sizeof(rte_ixgbe_rxq_strings[0]))
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
+ {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
+ {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
+ {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
+ pxon2offc)},
};
+#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
+ sizeof(rte_ixgbe_txq_strings[0]))
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
+ {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
+};
+
+#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
+ sizeof(rte_ixgbevf_stats_strings[0]))
+
/**
* Atomically reads the link status information from global
* structure rte_eth_dev.
@@ -467,7 +708,7 @@ rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
}
/*
- * This function is the same as ixgbe_is_sfp() in ixgbe/ixgbe.h.
+ * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
*/
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -515,7 +756,7 @@ ixgbe_enable_intr(struct rte_eth_dev *dev)
}
/*
- * This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
+ * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
*/
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
@@ -574,7 +815,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
(hw->mac.type != ixgbe_mac_X550EM_x))
return -ENOSYS;
- PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
+ PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
(int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
queue_id, stat_idx);
@@ -600,20 +841,20 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
else
stat_mappings->rqsmr[n] |= qsmr_mask;
- PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
+ PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
(int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
queue_id, stat_idx);
- PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+ PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
/* Now write the mapping in the appropriate register */
if (is_rx) {
- PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
+ PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
stat_mappings->rqsmr[n], n);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
}
else {
- PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
+ PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
stat_mappings->tqsm[n], n);
IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
}
@@ -716,12 +957,11 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
}
/*
- * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
+ * This function is based on code in ixgbe_attach() in base/ixgbe.c.
* It returns 0 on success.
*/
static int
-eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
- struct rte_eth_dev *eth_dev)
+eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
struct ixgbe_hw *hw =
@@ -732,6 +972,8 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
struct ixgbe_dcb_config *dcb_config =
IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
uint32_t ctrl_ext;
uint16_t csum;
int diag, i;
@@ -748,24 +990,26 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
* RX and TX function.
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY){
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
/* TX queue function in primary, set by last queue initialized
 * Tx queue may not be initialized by primary process */
if (eth_dev->data->tx_queues) {
txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
- set_tx_function(eth_dev, txq);
+ ixgbe_set_tx_function(eth_dev, txq);
} else {
/* Use default TX function if we get here */
- PMD_INIT_LOG(INFO, "No TX queues configured yet. "
- "Using default TX function.");
+ PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
+ "Using default TX function.");
}
- if (eth_dev->data->scattered_rx)
- eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+ ixgbe_set_rx_function(eth_dev);
+
return 0;
}
pci_dev = eth_dev->pci_dev;
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
@@ -845,6 +1089,9 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
return -EIO;
}
+ /* Reset the hw statistics */
+ ixgbe_dev_stats_reset(eth_dev);
+
/* disable interrupt */
ixgbe_disable_intr(hw);
@@ -904,18 +1151,64 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
- rte_intr_callback_register(&(pci_dev->intr_handle),
- ixgbe_dev_interrupt_handler, (void *)eth_dev);
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ ixgbe_dev_interrupt_handler,
+ (void *)eth_dev);
- /* enable uio intr after callback register */
- rte_intr_enable(&(pci_dev->intr_handle));
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(&pci_dev->intr_handle);
/* enable support intr */
ixgbe_enable_intr(eth_dev);
+ /* initialize 5tuple filter list */
+ TAILQ_INIT(&filter_info->fivetuple_list);
+ memset(filter_info->fivetuple_mask, 0,
+ sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
return 0;
}
+static int
+eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct ixgbe_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ pci_dev = eth_dev->pci_dev;
+
+ if (hw->adapter_stopped == 0)
+ ixgbe_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ /* Unlock any pending hardware semaphore */
+ ixgbe_swfw_lock_reset(hw);
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(&(pci_dev->intr_handle));
+ rte_intr_callback_unregister(&(pci_dev->intr_handle),
+ ixgbe_dev_interrupt_handler, (void *)eth_dev);
+
+ /* uninitialize PF if max_vfs not zero */
+ ixgbe_pf_host_uninit(eth_dev);
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ rte_free(eth_dev->data->hash_mac_addrs);
+ eth_dev->data->hash_mac_addrs = NULL;
+
+ return 0;
+}
/*
* Negotiate mailbox API version with the PF.
@@ -962,8 +1255,7 @@ generate_random_mac_addr(struct ether_addr *mac_addr)
* Virtual Function device init
*/
static int
-eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
- struct rte_eth_dev *eth_dev)
+eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
{
int diag;
uint32_t tc, tcs;
@@ -987,12 +1279,14 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
* RX function */
if (rte_eal_process_type() != RTE_PROC_PRIMARY){
if (eth_dev->data->scattered_rx)
- eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+ eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
return 0;
}
pci_dev = eth_dev->pci_dev;
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
@@ -1013,6 +1307,9 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
/* init_mailbox_params */
hw->mbx.ops.init_params(hw);
+ /* Reset the hw statistics */
+ ixgbevf_dev_stats_reset(eth_dev);
+
/* Disable the interrupts for VF */
ixgbevf_intr_disable(hw);
@@ -1087,13 +1384,58 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
return 0;
}
+/* Virtual Function device uninit */
+
+static int
+eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_hw *hw;
+ unsigned i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ if (hw->adapter_stopped == 0)
+ ixgbevf_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ /* Disable the interrupts for VF */
+ ixgbevf_intr_disable(hw);
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ ixgbe_dev_rx_queue_release(eth_dev->data->rx_queues[i]);
+ eth_dev->data->rx_queues[i] = NULL;
+ }
+ eth_dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ ixgbe_dev_tx_queue_release(eth_dev->data->tx_queues[i]);
+ eth_dev->data->tx_queues[i] = NULL;
+ }
+ eth_dev->data->nb_tx_queues = 0;
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
static struct eth_driver rte_ixgbe_pmd = {
- {
+ .pci_drv = {
.name = "rte_ixgbe_pmd",
.id_table = pci_id_ixgbe_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_DETACHABLE,
},
.eth_dev_init = eth_ixgbe_dev_init,
+ .eth_dev_uninit = eth_ixgbe_dev_uninit,
.dev_private_size = sizeof(struct ixgbe_adapter),
};
@@ -1101,12 +1443,13 @@ static struct eth_driver rte_ixgbe_pmd = {
* virtual function driver struct
*/
static struct eth_driver rte_ixgbevf_pmd = {
- {
+ .pci_drv = {
.name = "rte_ixgbevf_pmd",
.id_table = pci_id_ixgbevf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
},
.eth_dev_init = eth_ixgbevf_dev_init,
+ .eth_dev_uninit = eth_ixgbevf_dev_uninit,
.dev_private_size = sizeof(struct ixgbe_adapter),
};
@@ -1249,7 +1592,7 @@ ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
if (hw->mac.type == ixgbe_mac_82598EB) {
/* No queue level support */
- PMD_INIT_LOG(INFO, "82598EB not support queue level hw strip");
+ PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
return;
}
else {
@@ -1273,7 +1616,7 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
if (hw->mac.type == ixgbe_mac_82598EB) {
/* No queue level supported */
- PMD_INIT_LOG(INFO, "82598EB not support queue level hw strip");
+ PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
return;
}
else {
@@ -1425,16 +1768,196 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
}
static int
+ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
+{
+ switch (nb_rx_q) {
+ case 1:
+ case 2:
+ RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+ break;
+ case 4:
+ RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
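+ /* PF default pool queues start after the last VF pool's queues. */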
+ RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
+
+ return 0;
+}
+
+static int
+ixgbe_check_mq_mode(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
+
+ if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+ /* check multi-queue mode */
+ switch (dev_conf->rxmode.mq_mode) {
+ case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_VMDQ_DCB_RSS:
+ /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
+ PMD_INIT_LOG(ERR, "SRIOV active,"
+ " unsupported mq_mode rx %d.",
+ dev_conf->rxmode.mq_mode);
+ return -EINVAL;
+ case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_VMDQ_RSS:
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+ if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
+ if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " invalid queue number"
+ " for VMDQ RSS, allowed"
+ " value are 1, 2 or 4.");
+ return -EINVAL;
+ }
+ break;
+ case ETH_MQ_RX_VMDQ_ONLY:
+ case ETH_MQ_RX_NONE:
+ /* if no mq mode is configured, use the default scheme */
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+ if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+ break;
+ default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+ /* SRIOV only works in VMDq enable mode */
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " wrong mq_mode rx %d.",
+ dev_conf->rxmode.mq_mode);
+ return -EINVAL;
+ }
+
+ switch (dev_conf->txmode.mq_mode) {
+ case ETH_MQ_TX_VMDQ_DCB:
+ /* DCB VMDQ in SRIOV mode, not implemented yet */
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " unsupported VMDQ mq_mode tx %d.",
+ dev_conf->txmode.mq_mode);
+ return -EINVAL;
+ default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+ dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+ break;
+ }
+
+ /* check valid queue number */
+ if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
+ (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " queue number must less equal to %d.",
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+ return -EINVAL;
+ }
+ } else {
+ /* check configuration for vmdq+dcb mode */
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_conf *conf;
+
+ if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
+ IXGBE_VMDQ_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
+ if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+ " nb_queue_pools must be %d or %d.",
+ ETH_16_POOLS, ETH_32_POOLS);
+ return -EINVAL;
+ }
+ }
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_tx_conf *conf;
+
+ if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
+ IXGBE_VMDQ_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
+ if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+ " nb_queue_pools != %d and"
+ " nb_queue_pools != %d.",
+ ETH_16_POOLS, ETH_32_POOLS);
+ return -EINVAL;
+ }
+ }
+
+ /* For DCB mode check our configuration before we go further */
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+ const struct rte_eth_dcb_rx_conf *conf;
+
+ if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
+ IXGBE_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
+ if (!(conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+ " and nb_tcs != %d.",
+ ETH_4_TCS, ETH_8_TCS);
+ return -EINVAL;
+ }
+ }
+
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ const struct rte_eth_dcb_tx_conf *conf;
+
+ if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
+ IXGBE_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
+ if (!(conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+ " and nb_tcs != %d.",
+ ETH_4_TCS, ETH_8_TCS);
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
+
+static int
ixgbe_dev_configure(struct rte_eth_dev *dev)
{
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+ int ret;
PMD_INIT_FUNC_TRACE();
+ /* multiple queue mode checking */
+ ret = ixgbe_check_mq_mode(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
+ ret);
+ return ret;
+ }
/* set flag to update link status after init */
intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ /*
+ * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
+ * allocation or vector Rx preconditions, we will reset it.
+ */
+ adapter->rx_bulk_alloc_allowed = true;
+ adapter->rx_vec_allowed = true;
+
return 0;
}
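/*
 * Illustrative aside: the rte_eth_conf knobs consumed by the paths added
 * above. Setting intr_conf.rxq makes ixgbe_dev_start() build the
 * queue-to-vector eventfd mapping; intr_conf.lsc requests link-state
 * interrupts. The values are assumptions for the example.
 */
static const struct rte_eth_conf example_conf = {
	.rxmode = { .mq_mode = ETH_MQ_RX_NONE },
	.intr_conf = {
		.lsc = 1, /* async link status change interrupt */
		.rxq = 1, /* per-queue Rx interrupts via MSI-X */
	},
};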
@@ -1449,6 +1972,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ uint32_t intr_vector = 0;
int err, link_up = 0, negotiate = 0;
uint32_t speed = 0;
int mask = 0;
@@ -1466,8 +1991,11 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
return -EINVAL;
}
+ /* disable uio/vfio intr/eventfd mapping */
+ rte_intr_disable(intr_handle);
+
/* stop adapter */
- hw->adapter_stopped = FALSE;
+ hw->adapter_stopped = 0;
ixgbe_stop_adapter(hw);
/* reinitialize adapter
@@ -1481,6 +2009,29 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
/* configure PF module if SRIOV enabled */
ixgbe_pf_host_configure(dev);
+ /* check and configure queue intr-vector mapping */
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec\n", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ /* configure MSI-X so cores can sleep until an Rx interrupt arrives */
+ ixgbe_configure_msix(dev);
+
/* initialize transmission unit */
ixgbe_dev_tx_init(dev);
@@ -1491,7 +2042,16 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
goto error;
}
- ixgbe_dev_rxtx_start(dev);
+ err = ixgbe_dev_rxtx_start(dev);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
+ goto error;
+ }
+
+ /* Skip link setup if loopback mode is enabled for 82599. */
+ if (hw->mac.type == ixgbe_mac_82599EB &&
+ dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+ goto skip_link_setup;
if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
err = hw->mac.ops.setup_sfp(hw);
@@ -1499,17 +2059,19 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
goto error;
}
- /* Turn on the laser */
- ixgbe_enable_tx_laser(hw);
-
- /* Skip link setup if loopback mode is enabled for 82599. */
- if (hw->mac.type == ixgbe_mac_82599EB &&
- dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
- goto skip_link_setup;
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ /* Turn on the copper */
+ ixgbe_set_phy_power(hw, true);
+ } else {
+ /* Turn on the laser */
+ ixgbe_enable_tx_laser(hw);
+ }
err = ixgbe_check_link(hw, &speed, &link_up, 0);
if (err)
goto error;
+ dev->data->dev_link.link_status = link_up;
+
err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
if (err)
goto error;
@@ -1546,9 +2108,26 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
skip_link_setup:
- /* check if lsc interrupt is enabled */
- if (dev->data->dev_conf.intr_conf.lsc != 0)
- ixgbe_dev_lsc_interrupt_setup(dev);
+ if (rte_intr_allow_others(intr_handle)) {
+ /* check if lsc interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ ixgbe_dev_lsc_interrupt_setup(dev);
+ } else {
+ rte_intr_callback_unregister(intr_handle,
+ ixgbe_dev_interrupt_handler,
+ (void *)dev);
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ PMD_INIT_LOG(INFO, "lsc won't enable because of"
+ " no intr multiplex\n");
+ }
+
+ /* check if rxq interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.rxq != 0 &&
+ rte_intr_dp_is_en(intr_handle))
+ ixgbe_dev_rxq_interrupt_setup(dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
/* resume enabled intr since hw reset */
ixgbe_enable_intr(dev);
@@ -1602,6 +2181,10 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
int vf;
PMD_INIT_FUNC_TRACE();
@@ -1609,9 +2192,12 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
/* disable interrupts */
ixgbe_disable_intr(hw);
+ /* disable intr eventfd mapping */
+ rte_intr_disable(intr_handle);
+
/* reset the NIC */
ixgbe_pf_reset_hw(hw);
- hw->adapter_stopped = FALSE;
+ hw->adapter_stopped = 0;
/* stop adapter */
ixgbe_stop_adapter(hw);
@@ -1620,21 +2206,51 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
vf < dev->pci_dev->max_vfs; vf++)
vfinfo[vf].clear_to_send = false;
- /* Turn off the laser */
- ixgbe_disable_tx_laser(hw);
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ /* Turn off the copper */
+ ixgbe_set_phy_power(hw, false);
+ } else {
+ /* Turn off the laser */
+ ixgbe_disable_tx_laser(hw);
+ }
ixgbe_dev_clear_queues(dev);
/* Clear stored conf */
dev->data->scattered_rx = 0;
+ dev->data->lro = 0;
/* Clear recorded link status */
memset(&link, 0, sizeof(link));
rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+
+ /* Remove all ntuple filters of the device */
+ for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
+ p_5tuple != NULL; p_5tuple = p_5tuple_next) {
+ p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
+ TAILQ_REMOVE(&filter_info->fivetuple_list,
+ p_5tuple, entries);
+ rte_free(p_5tuple);
+ }
+ memset(filter_info->fivetuple_mask, 0,
+ sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+ if (!rte_intr_allow_others(intr_handle))
+ /* revert to the default handler */
+ rte_intr_callback_register(intr_handle,
+ ixgbe_dev_interrupt_handler,
+ (void *)dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
}
/*
- * Set device link up: enable tx laser.
+ * Set device link up: enable tx.
*/
static int
ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
@@ -1650,18 +2266,21 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
return -ENOTSUP;
}
#endif
+ }
+
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ /* Turn on the copper */
+ ixgbe_set_phy_power(hw, true);
+ } else {
/* Turn on the laser */
ixgbe_enable_tx_laser(hw);
- return 0;
}
- PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x",
- hw->device_id);
- return -ENOTSUP;
+ return 0;
}
/*
- * Set device link down: disable tx laser.
+ * Set device link down: disable tx.
*/
static int
ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
@@ -1677,14 +2296,17 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
return -ENOTSUP;
}
#endif
+ }
+
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ /* Turn off the copper */
+ ixgbe_set_phy_power(hw, false);
+ } else {
/* Turn off the laser */
ixgbe_disable_tx_laser(hw);
- return 0;
}
- PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x",
- hw->device_id);
- return -ENOTSUP;
+ return 0;
}
/*
@@ -1703,29 +2325,29 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
ixgbe_dev_stop(dev);
hw->adapter_stopped = 1;
+ ixgbe_dev_free_queues(dev);
+
ixgbe_disable_pcie_master(hw);
/* reprogram the RAR[0] in case user changed it. */
ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
-/*
- * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
- */
static void
-ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+ixgbe_read_stats_registers(struct ixgbe_hw *hw,
+ struct ixgbe_hw_stats *hw_stats,
+ uint64_t *total_missed_rx, uint64_t *total_qbrc,
+ uint64_t *total_qprc, uint64_t *total_qprdc)
{
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_hw_stats *hw_stats =
- IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
uint32_t bprc, lxon, lxoff, total;
- uint64_t total_missed_rx, total_qbrc, total_qprc;
+ uint32_t delta_gprc = 0;
unsigned i;
-
- total_missed_rx = 0;
- total_qbrc = 0;
- total_qprc = 0;
+ /* Workaround for RX byte count not including CRC bytes when CRC
+ * strip is enabled. CRC bytes are removed from counters when crc_strip
+ * is disabled.
+ */
+ int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
+ IXGBE_HLREG0_RXCRCSTRP);
hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
@@ -1738,41 +2360,62 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* global total per queue */
hw_stats->mpc[i] += mp;
/* Running comprehensive total for stats display */
- total_missed_rx += hw_stats->mpc[i];
- if (hw->mac.type == ixgbe_mac_82598EB)
+ *total_missed_rx += hw_stats->mpc[i];
+ if (hw->mac.type == ixgbe_mac_82598EB) {
hw_stats->rnbc[i] +=
IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ hw_stats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+ hw_stats->pxoffrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ } else {
+ hw_stats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ hw_stats->pxoffrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+ hw_stats->pxon2offc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+ }
hw_stats->pxontxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
- hw_stats->pxonrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
hw_stats->pxofftxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
- hw_stats->pxoffrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
- hw_stats->pxon2offc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
}
for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
- hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+ uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+
+ delta_gprc += delta_qprc;
+
+ hw_stats->qprc[i] += delta_qprc;
+ hw_stats->qptc[i] += delta_qptc;
+
hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
hw_stats->qbrc[i] +=
((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
+ if (crc_strip == 0)
+ hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
+
hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
hw_stats->qbtc[i] +=
((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
- hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
- total_qprc += hw_stats->qprc[i];
- total_qbrc += hw_stats->qbrc[i];
+ hw_stats->qprdc[i] += delta_qprdc;
+ *total_qprdc += hw_stats->qprdc[i];
+
+ *total_qprc += hw_stats->qprc[i];
+ *total_qbrc += hw_stats->qbrc[i];
}
hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
- /* Note that gprc counts missed packets */
- hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+ /*
+ * An errata states that gprc actually counts good + missed packets:
+ * as a workaround, set gprc to the sum of the per-queue receive counts
+ */
+ hw_stats->gprc = *total_qprc;
if (hw->mac.type != ixgbe_mac_82598EB) {
hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
@@ -1791,6 +2434,18 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
}
+ uint64_t old_tpr = hw_stats->tpr;
+
+ hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+ hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
+
+ if (crc_strip == 0)
+ hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
+
+ uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
+ hw_stats->gptc += delta_gptc;
+ hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
+ hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
/*
* Workaround: mprc hardware is incorrectly counting
@@ -1815,7 +2470,6 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
hw_stats->lxofftxc += lxoff;
total = lxon + lxoff;
- hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
hw_stats->gptc -= total;
@@ -1830,8 +2484,6 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
- hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
- hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
@@ -1850,6 +2502,32 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
}
+ /* Flow Director Stats registers */
+ hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+}
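/*
 * Worked example of the CRC corrections above: with CRC stripping
 * disabled (crc_strip == 0), the hardware byte counters include the
 * 4-byte FCS, so after 1000 packets arrive on a queue the driver
 * subtracts 1000 * ETHER_CRC_LEN = 4000 bytes from qbrc (and likewise
 * from gorc); on the transmit/total side, gotc and tor are always
 * corrected by the per-interval packet counts times ETHER_CRC_LEN.
 */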
+
+/*
+ * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
+ */
+static void
+ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_stats *hw_stats =
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
+ unsigned i;
+
+ total_missed_rx = 0;
+ total_qbrc = 0;
+ total_qprc = 0;
+ total_qprdc = 0;
+
+ ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
+ &total_qprc, &total_qprdc);
+
if (stats == NULL)
return;
@@ -1858,7 +2536,6 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->ibytes = total_qbrc;
stats->opackets = hw_stats->gptc;
stats->obytes = hw_stats->gotc;
- stats->imcasts = hw_stats->mprc;
for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
stats->q_ipackets[i] = hw_stats->qprc[i];
@@ -1869,28 +2546,21 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
}
/* Rx Errors */
- stats->ibadcrc = hw_stats->crcerrs;
- stats->ibadlen = hw_stats->rlec + hw_stats->ruc + hw_stats->roc;
stats->imissed = total_missed_rx;
- stats->ierrors = stats->ibadcrc +
- stats->ibadlen +
- stats->imissed +
- hw_stats->illerrc + hw_stats->errbc;
+ stats->ierrors = hw_stats->crcerrs +
+ hw_stats->mspdc +
+ hw_stats->rlec +
+ hw_stats->ruc +
+ hw_stats->roc +
+ total_missed_rx +
+ hw_stats->illerrc +
+ hw_stats->errbc +
+ hw_stats->rfc +
+ hw_stats->fccrc +
+ hw_stats->fclast;
/* Tx Errors */
stats->oerrors = 0;
-
- /* XON/XOFF pause frames */
- stats->tx_pause_xon = hw_stats->lxontxc;
- stats->rx_pause_xon = hw_stats->lxonrxc;
- stats->tx_pause_xoff = hw_stats->lxofftxc;
- stats->rx_pause_xoff = hw_stats->lxoffrxc;
-
- /* Flow Director Stats registers */
- hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
- hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
- stats->fdirmatch = hw_stats->fdirmatch;
- stats->fdirmiss = hw_stats->fdirmiss;
}
static void
@@ -1906,8 +2576,99 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
memset(stats, 0, sizeof(*stats));
}
+/* This function calculates the number of xstats based on the current config */
+static unsigned
+ixgbe_xstats_calc_num(void) {
+ return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) +
+ (IXGBE_NB_TXQ_PRIO_STATS * 8);
+}
+
+static int
+ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ unsigned n)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_stats *hw_stats =
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
+ unsigned i, stat, count = 0;
+
+ count = ixgbe_xstats_calc_num();
+
+ if (n < count)
+ return count;
+
+ total_missed_rx = 0;
+ total_qbrc = 0;
+ total_qprc = 0;
+ total_qprdc = 0;
+
+ ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
+ &total_qprc, &total_qprdc);
+
+ /* If this is a reset, xstats is NULL and we have already cleared
+ * the registers by reading them.
+ */
+ if (!xstats)
+ return 0;
+
+ /* Extended stats from ixgbe_hw_stats */
+ count = 0;
+ for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+ snprintf(xstats[count].name, sizeof(xstats[count].name), "%s",
+ rte_ixgbe_stats_strings[i].name);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_stats_strings[i].offset);
+ count++;
+ }
+
+ /* RX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < 8; i++) {
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "rx_priority%u_%s", i,
+ rte_ixgbe_rxq_strings[stat].name);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_rxq_strings[stat].offset +
+ (sizeof(uint64_t) * i));
+ count++;
+ }
+ }
+
+ /* TX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < 8; i++) {
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "tx_priority%u_%s", i,
+ rte_ixgbe_txq_strings[stat].name);
+ xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_txq_strings[stat].offset +
+ (sizeof(uint64_t) * i));
+ count++;
+ }
+ }
+
+ return count;
+}
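/*
 * Illustrative aside: the two-call pattern this return convention enables
 * at the ethdev layer -- a first call sized too small returns the required
 * count, so the caller can allocate before the real query. Port id 0 and
 * the NULL/0 probe are assumptions for the example.
 */
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void dump_xstats(uint8_t port_id)
{
	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
	struct rte_eth_xstats *xs;

	if (n <= 0)
		return;
	xs = calloc(n, sizeof(*xs));
	if (xs == NULL)
		return;
	n = rte_eth_xstats_get(port_id, xs, n);
	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n", xs[i].name, xs[i].value);
	free(xs);
}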
+
+static void
+ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw_stats *stats =
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ unsigned count = ixgbe_xstats_calc_num();
+
+ /* HW registers are cleared on read */
+ ixgbe_dev_xstats_get(dev, NULL, count);
+
+ /* Reset software totals */
+ memset(stats, 0, sizeof(*stats));
+}
+
static void
-ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+ixgbevf_update_stats(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
@@ -1932,16 +2693,52 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 /* Rx Multicast Packets */
UPDATE_VF_STAT(IXGBE_VFMPRC,
hw_stats->last_vfmprc, hw_stats->vfmprc);
+}
+
+static int
+ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ unsigned n)
+{
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ unsigned i;
+
+ if (n < IXGBEVF_NB_XSTATS)
+ return IXGBEVF_NB_XSTATS;
+
+ ixgbevf_update_stats(dev);
+
+ if (!xstats)
+ return 0;
+
+ /* Extended stats */
+ for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
+ snprintf(xstats[i].name, sizeof(xstats[i].name),
+ "%s", rte_ixgbevf_stats_strings[i].name);
+ xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbevf_stats_strings[i].offset);
+ }
+
+ return IXGBEVF_NB_XSTATS;
+}
+
+static void
+ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ ixgbevf_update_stats(dev);
if (stats == NULL)
return;
- memset(stats, 0, sizeof(*stats));
stats->ipackets = hw_stats->vfgprc;
stats->ibytes = hw_stats->vfgorc;
stats->opackets = hw_stats->vfgptc;
stats->obytes = hw_stats->vfgotc;
stats->imcasts = hw_stats->vfmprc;
+ /* stats->imcasts should be removed as imcasts is deprecated */
}
static void
@@ -1984,6 +2781,16 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ /*
+ * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
+ * mode.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540) &&
+ !RTE_ETH_DEV_SRIOV(dev).active)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
@@ -2013,7 +2820,13 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS,
};
- dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+
+ dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
+ dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
+ dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
}
static void
@@ -2041,7 +2854,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM;
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -2064,6 +2878,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
}
/* return 0 means link status changed, -1 means not changed */
@@ -2082,11 +2899,14 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
memset(&old, 0, sizeof(old));
rte_ixgbe_dev_atomic_read_link_status(dev, &old);
+ hw->mac.get_link_status = true;
+
/* check if it needs to wait to complete, if lsc interrupt is enabled */
if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
else
diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
+
if (diag != 0) {
link.link_speed = ETH_LINK_SPEED_100;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
@@ -2096,12 +2916,6 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
return 0;
}
- if (link_speed == IXGBE_LINK_SPEED_UNKNOWN &&
- !hw->mac.get_link_status) {
- memcpy(&link, &old, sizeof(link));
- return -1;
- }
-
if (link_up == 0) {
rte_ixgbe_dev_atomic_write_link_status(dev, &link);
if (link.link_status == old.link_status)
@@ -2212,6 +3026,28 @@ ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
return 0;
}
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called only once during NIC initialization.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ intr->mask |= IXGBE_EICR_RTX_QUEUE;
+
+ return 0;
+}
+
/*
* It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
*
@@ -2235,13 +3071,13 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
/* read-on-clear nic registers here */
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
- PMD_DRV_LOG(INFO, "eicr %x", eicr);
+ PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
intr->flags = 0;
- if (eicr & IXGBE_EICR_LSC) {
- /* set flag for async link update */
+
+ /* set flag for async link update */
+ if (eicr & IXGBE_EICR_LSC)
intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
- }
if (eicr & IXGBE_EICR_MAILBOX)
intr->flags |= IXGBE_FLAG_MAILBOX;
@@ -2276,7 +3112,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
PMD_INIT_LOG(INFO, " Port %d: Link Down",
(int)(dev->data->port_id));
}
- PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+ PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
dev->pci_dev->addr.domain,
dev->pci_dev->addr.bus,
dev->pci_dev->addr.devid,
@@ -2398,9 +3234,10 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
*/
static void
ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+ void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
ixgbe_dev_interrupt_get_status(dev);
ixgbe_dev_interrupt_action(dev);
}
@@ -2490,8 +3327,6 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
- return -ENOTSUP;
rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
@@ -2512,6 +3347,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
hw->fc.high_water[0] = fc_conf->high_water;
hw->fc.low_water[0] = fc_conf->low_water;
hw->fc.send_xon = fc_conf->send_xon;
+ hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
err = ixgbe_fc_enable(hw);
@@ -2753,12 +3589,22 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
uint32_t reta, r;
uint16_t idx, shift;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t sp_reta_size;
+ uint32_t reta_reg;
PMD_INIT_FUNC_TRACE();
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+
+ if (!ixgbe_rss_update_sp(hw->mac.type)) {
+ PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
+ "NIC.");
+ return -ENOTSUP;
+ }
+
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+ if (reta_size != sp_reta_size) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)\n", reta_size, sp_reta_size);
return -EINVAL;
}
@@ -2769,10 +3615,11 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
IXGBE_4_BIT_MASK);
if (!mask)
continue;
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
if (mask == IXGBE_4_BIT_MASK)
r = 0;
else
- r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+ r = IXGBE_READ_REG(hw, reta_reg);
for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
if (mask & (0x1 << j))
reta |= reta_conf[idx].reta[shift + j] <<
@@ -2781,7 +3628,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
reta |= r & (IXGBE_8_BIT_MASK <<
(CHAR_BIT * j));
}
- IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+ IXGBE_WRITE_REG(hw, reta_reg, reta);
}
return 0;
@@ -2796,16 +3643,19 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
uint32_t reta;
uint16_t idx, shift;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t sp_reta_size;
+ uint32_t reta_reg;
PMD_INIT_FUNC_TRACE();
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+ if (reta_size != sp_reta_size) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)\n", reta_size, sp_reta_size);
return -EINVAL;
}
- for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) {
+ for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
@@ -2813,7 +3663,8 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
if (!mask)
continue;
- reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+ reta = IXGBE_READ_REG(hw, reta_reg);
for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
if (mask & (0x1 << j))
reta_conf[idx].reta[shift + j] =
@@ -2843,6 +3694,14 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
ixgbe_clear_rar(hw, index);
}
+static void
+ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ ixgbe_remove_rar(dev, 0);
+
+ ixgbe_add_rar(dev, addr, 0, 0);
+}
+
static int
ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
@@ -2903,10 +3762,25 @@ ixgbevf_intr_disable(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
}
+static void
+ixgbevf_intr_enable(struct ixgbe_hw *hw)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ /* VF enable interrupt autoclean */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
static int
ixgbevf_dev_configure(struct rte_eth_dev *dev)
{
struct rte_eth_conf* conf = &dev->data->dev_conf;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
dev->data->port_id);
@@ -2917,16 +3791,23 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
*/
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
if (!conf->rxmode.hw_strip_crc) {
- PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
+ PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
conf->rxmode.hw_strip_crc = 1;
}
#else
if (conf->rxmode.hw_strip_crc) {
- PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
+ PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
conf->rxmode.hw_strip_crc = 0;
}
#endif
+ /*
+ * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
+ * allocation or vector Rx preconditions, we will reset it.
+ */
+ adapter->rx_bulk_alloc_allowed = true;
+ adapter->rx_vec_allowed = true;
+
return 0;
}
@@ -2935,6 +3816,9 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t intr_vector = 0;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
int err, mask = 0;
PMD_INIT_FUNC_TRACE();
@@ -2965,6 +3849,30 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
ixgbevf_dev_rxtx_start(dev);
+ /* check and configure queue intr-vector mapping */
+ if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec\n", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+ ixgbevf_configure_msix(dev);
+
+ rte_intr_enable(intr_handle);
+
+ /* Re-enable interrupt for VF */
+ ixgbevf_intr_enable(hw);
+
return 0;
}
@@ -2972,10 +3880,11 @@ static void
ixgbevf_dev_stop(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
- hw->adapter_stopped = TRUE;
+ hw->adapter_stopped = 1;
ixgbe_stop_adapter(hw);
/*
@@ -2988,6 +3897,16 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
dev->data->scattered_rx = 0;
ixgbe_dev_clear_queues(dev);
+
+ /* disable intr eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
}
static void
@@ -3001,6 +3920,8 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
ixgbevf_dev_stop(dev);
+ ixgbe_dev_free_queues(dev);
+
/* reprogram the RAR[0] in case user changed it. */
ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
@@ -3346,9 +4267,17 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
return ret;
}
+#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */
+#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */
+#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */
+#define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */
+#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
+ ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
+ ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
+
static int
ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
- struct rte_eth_vmdq_mirror_conf *mirror_conf,
+ struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on)
{
uint32_t mr_ctl,vlvf;
@@ -3370,32 +4299,37 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint8_t mirror_type = 0;
if (ixgbe_vmdq_mode_check(hw) < 0)
- return (-ENOTSUP);
+ return -ENOTSUP;
+
+ if (rule_id >= IXGBE_MAX_MIRROR_RULES)
+ return -EINVAL;
- /* Check if vlan mask is valid */
- if ((mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) && (on)) {
- if (mirror_conf->vlan.vlan_mask == 0)
- return (-EINVAL);
+ if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
+ PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
+ mirror_conf->rule_type);
+ return -EINVAL;
}
- /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */
- if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+ if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+ mirror_type |= IXGBE_MRCTL_VLME;
+ /* Check if vlan id is valid and find the corresponding VLAN ID index in VLVF */
for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) {
if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
/* search vlan id related pool vlan filter index */
reg_index = ixgbe_find_vlvf_slot(hw,
mirror_conf->vlan.vlan_id[i]);
if(reg_index < 0)
- return (-EINVAL);
+ return -EINVAL;
vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
if ((vlvf & IXGBE_VLVF_VIEN) &&
- ((vlvf & IXGBE_VLVF_VLANID_MASK)
- == mirror_conf->vlan.vlan_id[i]))
+ ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
+ mirror_conf->vlan.vlan_id[i]))
vlan_mask |= (1ULL << reg_index);
else
- return (-EINVAL);
+ return -EINVAL;
}
}
@@ -3423,7 +4357,8 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 * if pool mirroring is enabled, write the related pool mask register;
 * if it is disabled, clear the PFMRVM register
*/
- if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+ if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+ mirror_type |= IXGBE_MRCTL_VPME;
if (on) {
mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
@@ -3436,31 +4371,35 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
mr_info->mr_conf[rule_id].pool_mask = 0;
}
}
+ if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
+ mirror_type |= IXGBE_MRCTL_UPME;
+ if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
+ mirror_type |= IXGBE_MRCTL_DPME;
/* read mirror control register and recalculate it */
- mr_ctl = IXGBE_READ_REG(hw,IXGBE_MRCTL(rule_id));
+ mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
if (on) {
- mr_ctl |= mirror_conf->rule_type_mask;
+ mr_ctl |= mirror_type;
mr_ctl &= mirror_rule_mask;
mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
} else
- mr_ctl &= ~(mirror_conf->rule_type_mask & mirror_rule_mask);
+ mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
- mr_info->mr_conf[rule_id].rule_type_mask = (uint8_t)(mr_ctl & mirror_rule_mask);
+ mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
 /* write mirror control register */
IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
- /* write pool mirrror control register */
- if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+ /* write pool mirror control register */
+ if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
mp_msb);
}
 /* write VLAN mirror control register */
- if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+ if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
mv_msb);
@@ -3486,7 +4425,7 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
return (-ENOTSUP);
memset(&mr_info->mr_conf[rule_id], 0,
- sizeof(struct rte_eth_vmdq_mirror_conf));
+ sizeof(struct rte_eth_mirror_conf));
/* clear PFVMCTL register */
IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
@@ -3502,6 +4441,265 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
return 0;
}
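/*
 * Illustrative aside: reaching ixgbe_mirror_rule_set() through the ethdev
 * API with the new rte_eth_mirror_conf layout -- mirroring the traffic of
 * VF pools 0 and 1 into pool 3. The port id, rule id and pool numbers are
 * assumptions for the example.
 */
#include <rte_ethdev.h>

static int mirror_pools(uint8_t port_id)
{
	struct rte_eth_mirror_conf mc = {
		.rule_type = ETH_MIRROR_VIRTUAL_POOL_UP,
		.dst_pool = 3,
		.pool_mask = (1ULL << 0) | (1ULL << 1),
	};

	return rte_eth_mirror_rule_set(port_id, &mc, 0 /* rule_id */, 1 /* on */);
}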
+static int
+ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ uint32_t mask;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+ mask |= (1 << IXGBE_MISC_VEC_ID);
+ RTE_SET_USED(queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+ rte_intr_enable(&dev->pci_dev->intr_handle);
+
+ return 0;
+}
+
+static int
+ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ uint32_t mask;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+ mask &= ~(1 << IXGBE_MISC_VEC_ID);
+ RTE_SET_USED(queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+ return 0;
+}
+
+static int
+ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ uint32_t mask;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (queue_id < 16) {
+ ixgbe_disable_intr(hw);
+ intr->mask |= (1 << queue_id);
+ ixgbe_enable_intr(dev);
+ } else if (queue_id < 32) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+ mask &= (1 << queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+ } else if (queue_id < 64) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+ mask &= (1 << (queue_id - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ }
+ rte_intr_enable(&dev->pci_dev->intr_handle);
+
+ return 0;
+}
+
+static int
+ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ uint32_t mask;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (queue_id < 16) {
+ ixgbe_disable_intr(hw);
+ intr->mask &= ~(1 << queue_id);
+ ixgbe_enable_intr(dev);
+ } else if (queue_id < 32) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+ mask &= ~(1 << queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+ } else if (queue_id < 64) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+ mask &= ~(1 << (queue_id - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ }
+
+ return 0;
+}
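/*
 * Illustrative aside: how an application drives the intr_enable/disable
 * callbacks above to sleep until traffic arrives. It assumes the queue's
 * epoll event was registered beforehand (e.g. via rte_eth_dev_rx_intr_ctl_q)
 * and that the port/queue ids are valid; this is a sketch, not a full loop.
 */
#include <rte_ethdev.h>
#include <rte_interrupts.h>

static void wait_for_rx(uint8_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event ev;

	/* unmask the queue vector; lands in ixgbe_dev_rx_queue_intr_enable() */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);

	/* block until the eventfd mapped to this queue fires */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1 /* no timeout */);

	/* mask it again before going back to polling the ring */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}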
+
+static void
+ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector)
+{
+ uint32_t tmp, idx;
+
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+ tmp &= ~0xFF;
+ tmp |= msix_vector;
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
+ } else {
+ /* rx or tx cause */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ idx = ((16 * (queue & 1)) + (8 * direction));
+ tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
+ }
+}
+
+/**
+ * set the IVAR registers, mapping interrupt causes to vectors
+ * @param hw
+ * pointer to ixgbe_hw struct
+ * @direction
+ * 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue
+ * queue to map the corresponding interrupt to
+ * @msix_vector
+ * the vector to map to the corresponding queue
+ */
+static void
+ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector)
+{
+ uint32_t tmp, idx;
+
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ if (direction == -1)
+ direction = 0;
+ idx = (((direction * 64) + queue) >> 2) & 0x1F;
+ tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
+ tmp &= ~(0xFF << (8 * (queue & 0x3)));
+ tmp |= (msix_vector << (8 * (queue & 0x3)));
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
+ } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
+ (hw->mac.type == ixgbe_mac_X540)) {
+ if (direction == -1) {
+ /* other causes */
+ idx = ((queue & 1) * 8);
+ tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
+ } else {
+ /* rx or tx causes */
+ idx = ((16 * (queue & 1)) + (8 * direction));
+ tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
+ }
+ }
+}
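/*
 * Worked example of the IVAR indexing above (82599/X540 path): mapping
 * Rx queue 5 (direction 0) to MSI-X vector 3 gives
 *   idx = (16 * (5 & 1)) + (8 * 0) = 16
 * so bits 23:16 of IVAR(5 >> 1) = IVAR(2) are set to
 * (3 | IXGBE_IVAR_ALLOC_VAL). Each IVAR register therefore packs four
 * 8-bit cause-to-vector entries: Rx even, Tx even, Rx odd, Tx odd.
 */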
+
+static void
+ixgbevf_configure_msix(struct rte_eth_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t q_idx;
+ uint32_t vector_idx = IXGBE_MISC_VEC_ID;
+
+ /* don't configure the MSI-X register if no mapping has been done
+ * between interrupt vectors and event fds.
+ */
+ if (!rte_intr_dp_is_en(intr_handle))
+ return;
+
+ /* Configure all RX queues of VF */
+ for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
+ /* Force all queues to use vector 0,
+ * as IXGBE_VF_MAXMSIVECOTR = 1
+ */
+ ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
+ intr_handle->intr_vec[q_idx] = vector_idx;
+ }
+
+ /* Configure VF other cause ivar */
+ ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
+}
+
+/**
+ * Sets up the hardware to properly generate MSI-X interrupts
+ * @dev
+ * pointer to the rte_eth_dev structure
+ */
+static void
+ixgbe_configure_msix(struct rte_eth_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
+ uint32_t vec = IXGBE_MISC_VEC_ID;
+ uint32_t mask;
+ uint32_t gpie;
+
+ /* don't configure the MSI-X register if no mapping has been done
+ * between interrupt vectors and event fds
+ */
+ if (!rte_intr_dp_is_en(intr_handle))
+ return;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = base = IXGBE_RX_VEC_START;
+
+ /* setup GPIE for MSI-x mode */
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
+ IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
+ /* auto clearing and auto setting corresponding bits in EIMS
+ * when MSI-X interrupt is triggered
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /* Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+ queue_id++) {
+ /* by default, 1:1 mapping */
+ ixgbe_set_ivar_map(hw, 0, queue_id, vec);
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
+ IXGBE_MISC_VEC_ID);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
+ break;
+ default:
+ break;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
+ IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
+
+ /* set up to autoclear timer, and the vectors */
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ mask &= ~(IXGBE_EIMS_OTHER |
+ IXGBE_EIMS_MAILBOX |
+ IXGBE_EIMS_LSC);
+
+ IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
+}
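/*
 * Worked example of the queue-to-vector loop above: with
 * rte_intr_allow_others() true, base = vec = IXGBE_RX_VEC_START; given,
 * say, nb_efd = 4, Rx queues 0..3 get vectors base..base+3 and any
 * further queues all share the last vector, since vec stops
 * incrementing at base + nb_efd - 1.
 */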
+
static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t tx_rate)
{
@@ -3673,449 +4871,1323 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
}
}
-/*
- * add syn filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * filter: ponter to the filter that will be added.
- * rx_queue: the queue id the filter assigned to.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
+static void
+ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
+}
+
+#define MAC_TYPE_FILTER_SUP(type) do {\
+ if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
+ (type) != ixgbe_mac_X550)\
+ return -ENOTSUP;\
+} while (0)
+
static int
-ixgbe_add_syn_filter(struct rte_eth_dev *dev,
- struct rte_syn_filter *filter, uint16_t rx_queue)
+ixgbe_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t synqf;
- if (hw->mac.type != ixgbe_mac_82599EB)
- return -ENOSYS;
-
- if (rx_queue >= IXGBE_MAX_RX_QUEUE_NUM)
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
return -EINVAL;
synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
- if (synqf & IXGBE_SYN_FILTER_ENABLE)
+ if (add) {
+ if (synqf & IXGBE_SYN_FILTER_ENABLE)
+ return -EINVAL;
+ synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
+ IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
+
+ if (filter->hig_pri)
+ synqf |= IXGBE_SYN_FILTER_SYNQFP;
+ else
+ synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
+ } else {
+ if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
+ return -ENOENT;
+ synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+ IXGBE_WRITE_FLUSH(hw);
+ return 0;
+}
+
+static int
+ixgbe_syn_filter_get(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+ if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+ filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
+ filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static int
+ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
+ filter_op);
return -EINVAL;
+ }
- synqf = (uint32_t)(((rx_queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
- IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = ixgbe_syn_filter_set(dev,
+ (struct rte_eth_syn_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = ixgbe_syn_filter_set(dev,
+ (struct rte_eth_syn_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = ixgbe_syn_filter_get(dev,
+ (struct rte_eth_syn_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ ret = -EINVAL;
+ break;
+ }
- if (filter->hig_pri)
- synqf |= IXGBE_SYN_FILTER_SYNQFP;
- else
- synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
+ return ret;
+}
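/*
 * Illustrative aside: reaching ixgbe_syn_filter_handle() through the
 * generic filter API -- steering TCP SYN packets to queue 1. The port
 * and queue ids are assumptions for the example.
 */
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>

static int add_syn_filter(uint8_t port_id)
{
	struct rte_eth_syn_filter f = {
		.hig_pri = 1, /* match SYN ahead of other filters */
		.queue = 1,
	};

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_SYN,
				       RTE_ETH_FILTER_ADD, &f);
}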
- IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
- return 0;
+
+static inline enum ixgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+ if (protocol_value == IPPROTO_TCP)
+ return IXGBE_FILTER_PROTOCOL_TCP;
+ else if (protocol_value == IPPROTO_UDP)
+ return IXGBE_FILTER_PROTOCOL_UDP;
+ else if (protocol_value == IPPROTO_SCTP)
+ return IXGBE_FILTER_PROTOCOL_SCTP;
+ else
+ return IXGBE_FILTER_PROTOCOL_NONE;
}
/*
- * remove syn filter
+ * add a 5tuple filter
*
* @param
* dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
static int
-ixgbe_remove_syn_filter(struct rte_eth_dev *dev)
+ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t synqf;
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i, idx, shift;
+ uint32_t ftqf, sdpqf;
+ uint32_t l34timir = 0;
+ uint8_t mask = 0xff;
- if (hw->mac.type != ixgbe_mac_82599EB)
+ /*
+ * look for an unused 5tuple filter index,
+ * and insert the filter into the list.
+ */
+ for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
+ idx = i / (sizeof(uint32_t) * NBBY);
+ shift = i % (sizeof(uint32_t) * NBBY);
+ if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
+ filter_info->fivetuple_mask[idx] |= 1 << shift;
+ filter->index = i;
+ TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
+ filter,
+ entries);
+ break;
+ }
+ }
+ if (i >= IXGBE_MAX_FTQF_FILTERS) {
+ PMD_DRV_LOG(ERR, "5tuple filters are full.");
return -ENOSYS;
+ }
- synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+ sdpqf = (uint32_t)(filter->filter_info.dst_port <<
+ IXGBE_SDPQF_DSTPORT_SHIFT);
+ sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
- synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
+ ftqf = (uint32_t)(filter->filter_info.proto &
+ IXGBE_FTQF_PROTOCOL_MASK);
+ ftqf |= (uint32_t)((filter->filter_info.priority &
+ IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
+ if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+ mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+ if (filter->filter_info.dst_ip_mask == 0)
+ mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+ if (filter->filter_info.src_port_mask == 0)
+ mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+ if (filter->filter_info.dst_port_mask == 0)
+ mask &= IXGBE_FTQF_DEST_PORT_MASK;
+ if (filter->filter_info.proto_mask == 0)
+ mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+ ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+ ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+ ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+ IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
+ IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
+
+ l34timir |= IXGBE_L34T_IMIR_RESERVE;
+ l34timir |= (uint32_t)(filter->queue <<
+ IXGBE_L34T_IMIR_QUEUE_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
return 0;
}
/*
- * get the syn filter's info
+ * remove a 5tuple filter
*
* @param
* dev: Pointer to struct rte_eth_dev.
- * filter: ponter to the filter that returns.
- * *rx_queue: pointer to the queue id the filter assigned to.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
+ * filter: pointer to the filter that will be removed.
*/
+static void
+ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint16_t index = filter->index;
+
+ filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
+ ~(1 << (index % (sizeof(uint32_t) * NBBY)));
+ TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+ rte_free(filter);
+
+ IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
+}
+
static int
-ixgbe_get_syn_filter(struct rte_eth_dev *dev,
- struct rte_syn_filter *filter, uint16_t *rx_queue)
+ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct ixgbe_hw *hw;
+ uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+ return -EINVAL;
+
+ /* refuse an MTU that requires scattered-packet support when this
+ * feature has not been enabled beforehand. */
+ if (!dev->data->scattered_rx &&
+ (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+ return -EINVAL;
+
+ /*
+ * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
+ * request of the version 2.0 of the mailbox API.
+ * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
+ * of the mailbox API.
+ * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
+ * prior to 3.11.33 which contains the following change:
+ * "ixgbe: Enable jumbo frames support w/ SR-IOV"
+ */
+ ixgbevf_rlpml_set_vf(hw, max_frame);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+ return 0;
+}
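/*
 * Worked example for the checks above: the standard MTU of 1500 gives
 * max_frame = 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518 bytes,
 * while a 9000-byte jumbo MTU gives 9018 and additionally requires either
 * scattered Rx or mbuf data rooms of at least
 * 9018 + 2 * IXGBE_VLAN_TAG_SIZE + RTE_PKTMBUF_HEADROOM bytes.
 */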
+#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
+ if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
+ return -ENOTSUP;\
+} while (0)
+
+static inline struct ixgbe_5tuple_filter *
+ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
+ struct ixgbe_5tuple_filter_info *key)
{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t synqf;
+ struct ixgbe_5tuple_filter *it;
- if (hw->mac.type != ixgbe_mac_82599EB)
- return -ENOSYS;
+ TAILQ_FOREACH(it, filter_list, entries) {
+ if (memcmp(key, &it->filter_info,
+ sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
+ return it;
+ }
+ }
+ return NULL;
+}
- synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
- if (synqf & IXGBE_SYN_FILTER_ENABLE) {
- filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
- *rx_queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
- return 0;
+/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
+static inline int
+ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
+ struct ixgbe_5tuple_filter_info *filter_info)
+{
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+ filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+ filter->priority < IXGBE_5TUPLE_MIN_PRI)
+ return -EINVAL;
+
+ switch (filter->dst_ip_mask) {
+ case UINT32_MAX:
+ filter_info->dst_ip_mask = 0;
+ filter_info->dst_ip = filter->dst_ip;
+ break;
+ case 0:
+ filter_info->dst_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
+ return -EINVAL;
}
- return -ENOENT;
+
+ switch (filter->src_ip_mask) {
+ case UINT32_MAX:
+ filter_info->src_ip_mask = 0;
+ filter_info->src_ip = filter->src_ip;
+ break;
+ case 0:
+ filter_info->src_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->dst_port_mask) {
+ case UINT16_MAX:
+ filter_info->dst_port_mask = 0;
+ filter_info->dst_port = filter->dst_port;
+ break;
+ case 0:
+ filter_info->dst_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_port_mask) {
+ case UINT16_MAX:
+ filter_info->src_port_mask = 0;
+ filter_info->src_port = filter->src_port;
+ break;
+ case 0:
+ filter_info->src_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->proto_mask) {
+ case UINT8_MAX:
+ filter_info->proto_mask = 0;
+ filter_info->proto =
+ convert_protocol_type(filter->proto);
+ break;
+ case 0:
+ filter_info->proto_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
+ return -EINVAL;
+ }
+
+ filter_info->priority = (uint8_t)filter->priority;
+ return 0;
}
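/*
 * Illustrative aside: an rte_eth_ntuple_filter accepted by the conversion
 * above -- a full mask (UINT*_MAX) means "compare this field", zero means
 * "ignore it". The addresses, ports, queue and network byte order are
 * assumptions for the example.
 */
#include <netinet/in.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_byteorder.h>
#include <rte_ip.h>

static int add_ntuple_filter(uint8_t port_id)
{
	struct rte_eth_ntuple_filter nf = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_cpu_to_be_32(IPv4(192, 168, 0, 1)),
		.dst_ip_mask = UINT32_MAX,   /* compare destination IP */
		.src_ip_mask = 0,            /* ignore source IP */
		.dst_port = rte_cpu_to_be_16(80),
		.dst_port_mask = UINT16_MAX, /* compare destination port */
		.src_port_mask = 0,          /* ignore source port */
		.proto = IPPROTO_TCP,
		.proto_mask = UINT8_MAX,     /* compare protocol */
		.priority = 1,               /* within IXGBE_5TUPLE_{MIN,MAX}_PRI */
		.queue = 2,
	};

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
				       RTE_ETH_FILTER_ADD, &nf);
}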
/*
- * add an ethertype filter
+ * add or delete a ntuple filter
*
* @param
* dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- * filter: ponter to the filter that will be added.
- * rx_queue: the queue id the filter assigned to.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * add: if true, add the filter; if false, remove it
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
static int
-ixgbe_add_ethertype_filter(struct rte_eth_dev *dev,
- uint16_t index, struct rte_ethertype_filter *filter,
- uint16_t rx_queue)
+ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter,
+ bool add)
{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t etqf, etqs = 0;
-
- if (hw->mac.type != ixgbe_mac_82599EB)
- return -ENOSYS;
-
- if (index >= IXGBE_MAX_ETQF_FILTERS ||
- rx_queue >= IXGBE_MAX_RX_QUEUE_NUM)
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter_info filter_5tuple;
+ struct ixgbe_5tuple_filter *filter;
+ int ret;
+
+ if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
return -EINVAL;
+ }
- etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index));
- if (etqf & IXGBE_ETQF_FILTER_EN)
- return -EINVAL; /* filter index is in use. */
-
- etqf = 0;
- etqf |= IXGBE_ETQF_FILTER_EN;
- etqf |= (uint32_t)filter->ethertype;
+ memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
+ ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+ if (ret < 0)
+ return ret;
- if (filter->priority_en) {
- if (filter->priority > IXGBE_ETQF_MAX_PRI)
- return -EINVAL;
- etqf |= (uint32_t)((filter->priority << IXGBE_ETQF_SHIFT) & IXGBE_ETQF_UP);
- etqf |= IXGBE_ETQF_UP_EN;
+ filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+ &filter_5tuple);
+ if (filter != NULL && add) {
+ PMD_DRV_LOG(ERR, "filter exists.");
+ return -EEXIST;
+ }
+ if (filter == NULL && !add) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
}
- etqs |= (uint32_t)((rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT) & IXGBE_ETQS_RX_QUEUE);
- etqs |= IXGBE_ETQS_QUEUE_EN;
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), etqf);
- IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), etqs);
+ if (add) {
+ filter = rte_zmalloc("ixgbe_5tuple_filter",
+ sizeof(struct ixgbe_5tuple_filter), 0);
+ if (filter == NULL)
+ return -ENOMEM;
+ (void)rte_memcpy(&filter->filter_info,
+ &filter_5tuple,
+ sizeof(struct ixgbe_5tuple_filter_info));
+ filter->queue = ntuple_filter->queue;
+ ret = ixgbe_add_5tuple_filter(dev, filter);
+ if (ret < 0) {
+ rte_free(filter);
+ return ret;
+ }
+ } else
+ ixgbe_remove_5tuple_filter(dev, filter);
+
return 0;
}
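Applications do not call ixgbe_add_del_ntuple_filter() directly; it is reached through the generic filter-control entry point registered further below. A hedged sketch of the application-side call, reusing the filter built in the previous sketch:

    #include <rte_ethdev.h>
    #include <rte_eth_ctrl.h>

    static int
    add_5tuple_filter_example(uint8_t port_id, struct rte_eth_ntuple_filter *f)
    {
            /* Dispatches into ixgbe_ntuple_filter_handle() via dev ops. */
            return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
                                           RTE_ETH_FILTER_ADD, f);
    }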
/*
- * remove an ethertype filter
+ * get an ntuple filter
*
* @param
* dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
static int
-ixgbe_remove_ethertype_filter(struct rte_eth_dev *dev,
- uint16_t index)
+ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter)
{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (hw->mac.type != ixgbe_mac_82599EB)
- return -ENOSYS;
-
- if (index >= IXGBE_MAX_ETQF_FILTERS)
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter_info filter_5tuple;
+ struct ixgbe_5tuple_filter *filter;
+ int ret;
+
+ if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
return -EINVAL;
+ }
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), 0);
- IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), 0);
+ memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
+ ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+ if (ret < 0)
+ return ret;
+ filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+ &filter_5tuple);
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+ ntuple_filter->queue = filter->queue;
return 0;
}
/*
- * get an ethertype filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- * filter: ponter to the filter that will be gotten.
- * *rx_queue: the ponited of the queue id the filter assigned to.
+ * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
static int
-ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
- uint16_t index, struct rte_ethertype_filter *filter,
- uint16_t *rx_queue)
+ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t etqf, etqs;
+ int ret;
- if (hw->mac.type != ixgbe_mac_82599EB)
- return -ENOSYS;
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
- if (index >= IXGBE_MAX_ETQF_FILTERS)
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
return -EINVAL;
+ }
- etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index));
- etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(index));
- if (etqf & IXGBE_ETQF_FILTER_EN) {
- filter->ethertype = etqf & IXGBE_ETQF_ETHERTYPE;
- filter->priority_en = (etqf & IXGBE_ETQF_UP_EN) ? 1 : 0;
- if (filter->priority_en)
- filter->priority = (etqf & IXGBE_ETQF_UP) >> 16;
- *rx_queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> IXGBE_ETQS_RX_QUEUE_SHIFT;
- return 0;
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = ixgbe_add_del_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = ixgbe_add_del_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = ixgbe_get_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
}
- return -ENOENT;
+ return ret;
}
-static inline enum ixgbe_5tuple_protocol
-convert_protocol_type(uint8_t protocol_value)
+static inline int
+ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
+ uint16_t ethertype)
{
- if (protocol_value == IPPROTO_TCP)
- return IXGBE_FILTER_PROTOCOL_TCP;
- else if (protocol_value == IPPROTO_UDP)
- return IXGBE_FILTER_PROTOCOL_UDP;
- else if (protocol_value == IPPROTO_SCTP)
- return IXGBE_FILTER_PROTOCOL_SCTP;
- else
- return IXGBE_FILTER_PROTOCOL_NONE;
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_filters[i] == ethertype &&
+ (filter_info->ethertype_mask & (1 << i)))
+ return i;
+ }
+ return -1;
}
-static inline uint8_t
-revert_protocol_type(enum ixgbe_5tuple_protocol protocol)
+static inline int
+ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
+ uint16_t ethertype)
{
- if (protocol == IXGBE_FILTER_PROTOCOL_TCP)
- return IPPROTO_TCP;
- else if (protocol == IXGBE_FILTER_PROTOCOL_UDP)
- return IPPROTO_UDP;
- else if (protocol == IXGBE_FILTER_PROTOCOL_SCTP)
- return IPPROTO_SCTP;
- else
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (!(filter_info->ethertype_mask & (1 << i))) {
+ filter_info->ethertype_mask |= 1 << i;
+ filter_info->ethertype_filters[i] = ethertype;
+ return i;
+ }
+ }
+ return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
+ uint8_t idx)
+{
+ if (idx >= IXGBE_MAX_ETQF_FILTERS)
+ return -1;
+ filter_info->ethertype_mask &= ~(1 << idx);
+ filter_info->ethertype_filters[idx] = 0;
+ return idx;
+}
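ethertype_mask is a small bitmap over the IXGBE_MAX_ETQF_FILTERS ETQF slots: lookup scans for a used slot holding the given ethertype, insert claims the first free bit, and remove clears it so the slot can be reused. A worked trace from an empty table (ethertype values illustrative):

    /* insert(0x88CC) -> slot 0, ethertype_mask = 0x01
     * insert(0x8906) -> slot 1, ethertype_mask = 0x03
     * remove(0)      -> ethertype_mask = 0x02, slot 0 free again
     * insert(0x22F0) -> slot 0 reused (first free bit wins)
     */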
+
+static int
+ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t etqf = 0;
+ uint32_t etqs = 0;
+ int ret;
+
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+ " ethertype filter.", filter->ether_type);
+ return -EINVAL;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+ return -EINVAL;
+ }
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ PMD_DRV_LOG(ERR, "drop option is unsupported.");
+ return -EINVAL;
+ }
+
+ ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+ if (ret >= 0 && add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+ filter->ether_type);
+ return -EEXIST;
+ }
+ if (ret < 0 && !add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+ filter->ether_type);
+ return -ENOENT;
+ }
+
+ if (add) {
+ ret = ixgbe_ethertype_filter_insert(filter_info,
+ filter->ether_type);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype filters are full.");
+ return -ENOSYS;
+ }
+ etqf = IXGBE_ETQF_FILTER_EN;
+ etqf |= (uint32_t)filter->ether_type;
+ etqs |= (uint32_t)((filter->queue <<
+ IXGBE_ETQS_RX_QUEUE_SHIFT) &
+ IXGBE_ETQS_RX_QUEUE);
+ etqs |= IXGBE_ETQS_QUEUE_EN;
+ } else {
+ ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
+ if (ret < 0)
+ return -ENOSYS;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
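As with the ntuple path, applications reach this handler through rte_eth_dev_filter_ctrl(). A minimal hedged sketch; the LLDP ethertype and queue are illustrative, and the MAC-compare and drop flags must stay clear since the checks above reject them:

    static int
    add_ethertype_filter_example(uint8_t port_id, uint16_t queue)
    {
            struct rte_eth_ethertype_filter f = {
                    .ether_type = 0x88CC,   /* LLDP, illustrative */
                    .flags = 0,
                    .queue = queue,
            };
            return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
                                           RTE_ETH_FILTER_ADD, &f);
    }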
+
+static int
+ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t etqf, etqs;
+ int ret;
+
+ ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+ filter->ether_type);
+ return -ENOENT;
+ }
+
+ etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
+ if (etqf & IXGBE_ETQF_FILTER_EN) {
+ etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
+ filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
+ filter->flags = 0;
+ filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
+ IXGBE_ETQS_RX_QUEUE_SHIFT;
return 0;
+ }
+ return -ENOENT;
}
/*
- * add a 5tuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- * filter: ponter to the filter that will be added.
- * rx_queue: the queue id the filter assigned to.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
+ * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
*/
static int
-ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
- struct rte_5tuple_filter *filter, uint16_t rx_queue)
+ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t ftqf, sdpqf = 0;
- uint32_t l34timir = 0;
- uint8_t mask = 0xff;
+ int ret;
- if (hw->mac.type != ixgbe_mac_82599EB)
- return -ENOSYS;
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
- if (index >= IXGBE_MAX_FTQF_FILTERS ||
- rx_queue >= IXGBE_MAX_RX_QUEUE_NUM ||
- filter->priority > IXGBE_5TUPLE_MAX_PRI ||
- filter->priority < IXGBE_5TUPLE_MIN_PRI)
- return -EINVAL; /* filter index is out of range. */
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
- if (filter->tcp_flags) {
- PMD_INIT_LOG(INFO, "82599EB not tcp flags in 5tuple");
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
return -EINVAL;
}
- ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
- if (ftqf & IXGBE_FTQF_QUEUE_ENABLE)
- return -EINVAL; /* filter index is in use. */
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = ixgbe_get_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
- ftqf = 0;
- sdpqf = (uint32_t)(filter->dst_port << IXGBE_SDPQF_DSTPORT_SHIFT);
- sdpqf = sdpqf | (filter->src_port & IXGBE_SDPQF_SRCPORT);
+static int
+ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = -EINVAL;
- ftqf |= (uint32_t)(convert_protocol_type(filter->protocol) &
- IXGBE_FTQF_PROTOCOL_MASK);
- ftqf |= (uint32_t)((filter->priority & IXGBE_FTQF_PRIORITY_MASK) <<
- IXGBE_FTQF_PRIORITY_SHIFT);
- if (filter->src_ip_mask == 0) /* 0 means compare. */
- mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
- if (filter->dst_ip_mask == 0)
- mask &= IXGBE_FTQF_DEST_ADDR_MASK;
- if (filter->src_port_mask == 0)
- mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
- if (filter->dst_port_mask == 0)
- mask &= IXGBE_FTQF_DEST_PORT_MASK;
- if (filter->protocol_mask == 0)
- mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
- ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
- ftqf |= IXGBE_FTQF_POOL_MASK_EN;
- ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_SYN:
+ ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ break;
+ }
+
+ return ret;
+}
- IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), filter->dst_ip);
- IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), filter->src_ip);
- IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), sdpqf);
- IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), ftqf);
+static u8 *
+ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
+ u8 **mc_addr_ptr, u32 *vmdq)
+{
+ u8 *mc_addr;
+
+ *vmdq = 0;
+ mc_addr = *mc_addr_ptr;
+ *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
+ return mc_addr;
+}
+
+static int
+ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct ixgbe_hw *hw;
+ u8 *mc_addr_list;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mc_addr_list = (u8 *)mc_addr_set;
+ return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
+ ixgbe_dev_addr_list_itr, TRUE);
+}
+
+static uint64_t
+ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t systime_cycles;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ /* SYSTIML stores ns and SYSTIMH stores seconds. */
+ systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
+ * NSEC_PER_SEC;
+ break;
+ default:
+ systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
+ << 32;
+ }
+
+ return systime_cycles;
+}
+
+static uint64_t
+ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rx_tstamp_cycles;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ /* RXSTMPL stores ns and RXSTMPH stores seconds. */
+ rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
+ rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
+ * NSEC_PER_SEC;
+ break;
+ default:
+ /* RXSTMPL stores ns and RXSTMPH stores seconds. */
+ rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
+ rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
+ << 32;
+ }
+
+ return rx_tstamp_cycles;
+}
+
+static uint64_t
+ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t tx_tstamp_cycles;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ /* TXSTMPL stores ns and TXSTMPH stores seconds. */
+ tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
+ tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
+ * NSEC_PER_SEC;
+ break;
+ default:
+ /* TXSTMPL stores ns and TXSTMPH stores seconds. */
+ tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
+ tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
+ << 32;
+ }
+
+ return tx_tstamp_cycles;
+}
+
+static void
+ixgbe_start_timecounters(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+ struct rte_eth_link link;
+ uint32_t incval = 0;
+ uint32_t shift = 0;
+
+ /* Get current link speed. */
+ memset(&link, 0, sizeof(link));
+ ixgbe_dev_link_update(dev, 1);
+ rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+
+ switch (link.link_speed) {
+ case ETH_LINK_SPEED_100:
+ incval = IXGBE_INCVAL_100;
+ shift = IXGBE_INCVAL_SHIFT_100;
+ break;
+ case ETH_LINK_SPEED_1000:
+ incval = IXGBE_INCVAL_1GB;
+ shift = IXGBE_INCVAL_SHIFT_1GB;
+ break;
+ case ETH_LINK_SPEED_10000:
+ default:
+ incval = IXGBE_INCVAL_10GB;
+ shift = IXGBE_INCVAL_SHIFT_10GB;
+ break;
+ }
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ /* Independent of link speed. */
+ incval = 1;
+ /* Cycles read will be interpreted as ns. */
+ shift = 0;
+ /* Fall-through */
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
+ break;
+ case ixgbe_mac_82599EB:
+ incval >>= IXGBE_INCVAL_SHIFT_82599;
+ shift -= IXGBE_INCVAL_SHIFT_82599;
+ IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
+ (1 << IXGBE_INCPER_SHIFT_82599) | incval);
+ break;
+ default:
+ /* Not supported. */
+ return;
+ }
+
+ memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
+ adapter->systime_tc.cc_shift = shift;
+ adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
+
+ adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
+ adapter->rx_tstamp_tc.cc_shift = shift;
+ adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+ adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
+ adapter->tx_tstamp_tc.cc_shift = shift;
+ adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+}
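The incval/shift pairs program TIMINCA so that one SYSTIME tick equals (1 ns << shift) at any link speed, except on X550 where ticks are already nanoseconds and shift is 0. The timecounters initialized here are consumed by the read paths below; a sketch of that flow, assuming the counter is polled often enough that it cannot wrap more than once between reads:

    /* Raw hardware value; layout depends on the MAC type. */
    uint64_t raw = ixgbe_read_systime_cyclecounter(dev);
    /* Accumulate the delta and scale by 2^-cc_shift into ns. */
    uint64_t ns = rte_timecounter_update(&adapter->systime_tc, raw);
    struct timespec ts = rte_ns_to_timespec(ns);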
+
+static int
+ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+
+ adapter->systime_tc.nsec += delta;
+ adapter->rx_tstamp_tc.nsec += delta;
+ adapter->tx_tstamp_tc.nsec += delta;
- l34timir |= IXGBE_L34T_IMIR_RESERVE;
- l34timir |= (uint32_t)(rx_queue << IXGBE_L34T_IMIR_QUEUE_SHIFT);
- IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), l34timir);
return 0;
}
-/*
- * remove a 5tuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
static int
-ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
- uint16_t index)
+ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ uint64_t ns;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+
+ ns = rte_timespec_to_ns(ts);
+ /* Set the timecounters to a new value. */
+ adapter->systime_tc.nsec = ns;
+ adapter->rx_tstamp_tc.nsec = ns;
+ adapter->tx_tstamp_tc.nsec = ns;
+
+ return 0;
+}
+
+static int
+ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ uint64_t ns, systime_cycles;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+
+ systime_cycles = ixgbe_read_systime_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+ixgbe_timesync_enable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl;
+ uint32_t tsauxc;
+
+ /* Stop the timesync system time. */
+ IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
+ /* Reset the timesync system time value. */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
+
+ /* Enable system time for platforms where it isn't on by default. */
+ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+ tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
+
+ ixgbe_start_timecounters(dev);
+
+ /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
+ (ETHER_TYPE_1588 |
+ IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_1588));
+
+ /* Enable timestamping of received PTP packets. */
+ tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+ tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
+ IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
+
+ /* Enable timestamping of transmitted PTP packets. */
+ tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+ tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
+ IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
- if (hw->mac.type != ixgbe_mac_82599EB)
- return -ENOSYS;
+ IXGBE_WRITE_FLUSH(hw);
- if (index >= IXGBE_MAX_FTQF_FILTERS)
- return -EINVAL; /* filter index is out of range. */
+ return 0;
+}
+
+static int
+ixgbe_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl;
+
+ /* Disable timestamping of transmitted PTP packets. */
+ tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+ tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
+ IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
+
+ /* Disable timestamping of received PTP packets. */
+ tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+ tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
+ IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
+
+ /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
+
+ /* Stop incrementing the System Time registers. */
+ IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
- IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
- IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
- IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
- IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
- IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
return 0;
}
-/*
- * get a 5tuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates
- * filter: ponter to the filter that returns.
- * *rx_queue: pointer of the queue id the filter assigned to.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
static int
-ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
- struct rte_5tuple_filter *filter, uint16_t *rx_queue)
+ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags __rte_unused)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t sdpqf, ftqf, l34timir;
- uint8_t mask;
- enum ixgbe_5tuple_protocol proto;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+ uint32_t tsync_rxctl;
+ uint64_t rx_tstamp_cycles;
+ uint64_t ns;
+
+ tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+ if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
+ return -EINVAL;
- if (hw->mac.type != ixgbe_mac_82599EB)
- return -ENOSYS;
+ rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+ uint32_t tsync_txctl;
+ uint64_t tx_tstamp_cycles;
+ uint64_t ns;
+
+ tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+ if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
+ return -EINVAL;
+
+ tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
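Together these handlers back the generic rte_eth_timesync_*() API. A hedged application-side sketch for fetching the Rx timestamp of a PTP frame; port_id is illustrative and error handling is trimmed:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    ptp_rx_example(uint8_t port_id)
    {
            struct timespec ts;

            rte_eth_timesync_enable(port_id);
            /* ... receive a PTP frame on this port, then: */
            if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
                    printf("rx tstamp %ld.%09ld\n",
                           (long)ts.tv_sec, (long)ts.tv_nsec);
    }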
+
+static int
+ixgbe_get_reg_length(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int count = 0;
+ int g_ind = 0;
+ const struct reg_info *reg_group;
+ const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
+ ixgbe_regs_mac_82598EB : ixgbe_regs_others;
- if (index >= IXGBE_MAX_FTQF_FILTERS)
- return -EINVAL; /* filter index is out of range. */
-
- ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
- if (ftqf & IXGBE_FTQF_QUEUE_ENABLE) {
- proto = (enum ixgbe_5tuple_protocol)(ftqf & IXGBE_FTQF_PROTOCOL_MASK);
- filter->protocol = revert_protocol_type(proto);
- filter->priority = (ftqf >> IXGBE_FTQF_PRIORITY_SHIFT) &
- IXGBE_FTQF_PRIORITY_MASK;
- mask = (uint8_t)((ftqf >> IXGBE_FTQF_5TUPLE_MASK_SHIFT) &
- IXGBE_FTQF_5TUPLE_MASK_MASK);
- filter->src_ip_mask =
- (mask & IXGBE_FTQF_SOURCE_ADDR_MASK) ? 1 : 0;
- filter->dst_ip_mask =
- (mask & IXGBE_FTQF_DEST_ADDR_MASK) ? 1 : 0;
- filter->src_port_mask =
- (mask & IXGBE_FTQF_SOURCE_PORT_MASK) ? 1 : 0;
- filter->dst_port_mask =
- (mask & IXGBE_FTQF_DEST_PORT_MASK) ? 1 : 0;
- filter->protocol_mask =
- (mask & IXGBE_FTQF_PROTOCOL_COMP_MASK) ? 1 : 0;
-
- sdpqf = IXGBE_READ_REG(hw, IXGBE_SDPQF(index));
- filter->dst_port = (sdpqf & IXGBE_SDPQF_DSTPORT) >>
- IXGBE_SDPQF_DSTPORT_SHIFT;
- filter->src_port = sdpqf & IXGBE_SDPQF_SRCPORT;
- filter->dst_ip = IXGBE_READ_REG(hw, IXGBE_DAQF(index));
- filter->src_ip = IXGBE_READ_REG(hw, IXGBE_SAQF(index));
-
- l34timir = IXGBE_READ_REG(hw, IXGBE_L34T_IMIR(index));
- *rx_queue = (l34timir & IXGBE_L34T_IMIR_QUEUE) >>
- IXGBE_L34T_IMIR_QUEUE_SHIFT;
+ while ((reg_group = reg_set[g_ind++]))
+ count += ixgbe_regs_group_count(reg_group);
+
+ return count;
+}
+
+static int
+ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+ int count = 0;
+ int g_ind = 0;
+ const struct reg_info *reg_group;
+
+ while ((reg_group = ixgbevf_regs[g_ind++]))
+ count += ixgbe_regs_group_count(reg_group);
+
+ return count;
+}
+
+static int
+ixgbe_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *data = regs->data;
+ int g_ind = 0;
+ int count = 0;
+ const struct reg_info *reg_group;
+ const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
+ ixgbe_regs_mac_82598EB : ixgbe_regs_others;
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
+ regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+ hw->device_id;
+ while ((reg_group = reg_set[g_ind++]))
+ count += ixgbe_read_regs_group(dev, &data[count],
+ reg_group);
return 0;
}
- return -ENOENT;
+
+ return -ENOTSUP;
}
static int
-ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+ixgbevf_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
{
- struct ixgbe_hw *hw;
- uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *data = regs->data;
+ int g_ind = 0;
+ int count = 0;
+ const struct reg_info *reg_group;
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
+ regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+ hw->device_id;
+ while ((reg_group = ixgbevf_regs[g_ind++]))
+ count += ixgbe_read_regs_group(dev, &data[count],
+ reg_group);
+ return 0;
+ }
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ return -ENOTSUP;
+}
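Both register-dump handlers support only a full dump, so a caller first sizes the buffer with the reg-length callback. A hedged sketch of the application-side pairing:

    #include <stdlib.h>
    #include <rte_ethdev.h>

    static int
    dump_regs_example(uint8_t port_id)
    {
            struct rte_dev_reg_info info = { 0 };
            int ret;
            int len = rte_eth_dev_get_reg_length(port_id);

            if (len <= 0)
                    return -1;
            info.data = calloc(len, sizeof(uint32_t));
            if (info.data == NULL)
                    return -1;
            info.length = len;      /* partial lengths yield -ENOTSUP */
            ret = rte_eth_dev_get_reg_info(port_id, &info);
            /* ... inspect info.data / info.version ... */
            free(info.data);
            return ret;
    }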
- if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+static int
+ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Return unit is byte count */
+ return hw->eeprom.word_size * 2;
+}
+
+static int
+ixgbe_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ uint16_t *data = in_eeprom->data;
+ int first, length;
+
+ first = in_eeprom->offset >> 1;
+ length = in_eeprom->length >> 1;
+ if ((first > hw->eeprom.word_size) ||
+ ((first + length) > hw->eeprom.word_size))
return -EINVAL;
- /* refuse mtu that requires the support of scattered packets when this
- * feature has not been enabled before. */
- if (!dev->data->scattered_rx &&
- (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
- dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+ in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ return eeprom->ops.read_buffer(hw, first, length, data);
+}
+
+static int
+ixgbe_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ uint16_t *data = in_eeprom->data;
+ int first, length;
+
+ first = in_eeprom->offset >> 1;
+ length = in_eeprom->length >> 1;
+ if ((first > hw->eeprom.word_size) ||
+ ((first + length) > hw->eeprom.word_size))
return -EINVAL;
- /*
- * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
- * request of the version 2.0 of the mailbox API.
- * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
- * of the mailbox API.
- * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
- * prior to 3.11.33 which contains the following change:
- * "ixgbe: Enable jumbo frames support w/ SR-IOV"
- */
- ixgbevf_rlpml_set_vf(hw, max_frame);
+ in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
- /* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+ return eeprom->ops.write_buffer(hw, first, length, data);
+}
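At the ethdev API level, EEPROM offsets and lengths are in bytes; the driver converts to the 16-bit words the base code reads and writes, hence the >> 1 shifts. A hedged sketch reading the first EEPROM word:

    static int
    read_eeprom_word0(uint8_t port_id, uint16_t *word)
    {
            struct rte_dev_eeprom_info info = {
                    .data = word,
                    .offset = 0,    /* byte offset */
                    .length = 2,    /* one 16-bit word */
            };
            return rte_eth_dev_get_eeprom(port_id, &info);
    }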
+
+uint16_t
+ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
+ switch (mac_type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ return ETH_RSS_RETA_SIZE_512;
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ return ETH_RSS_RETA_SIZE_64;
+ default:
+ return ETH_RSS_RETA_SIZE_128;
+ }
+}
+
+uint32_t
+ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
+ switch (mac_type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ if (reta_idx < ETH_RSS_RETA_SIZE_128)
+ return IXGBE_RETA(reta_idx >> 2);
+ else
+ return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ return IXGBE_VFRETA(reta_idx >> 2);
+ default:
+ return IXGBE_RETA(reta_idx >> 2);
+ }
+}
+
+uint32_t
+ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
+ switch (mac_type) {
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ return IXGBE_VFMRQC;
+ default:
+ return IXGBE_MRQC;
+ }
+}
+
+uint32_t
+ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
+ switch (mac_type) {
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ return IXGBE_VFRSSRK(i);
+ default:
+ return IXGBE_RSSRK(i);
+ }
+}
+
+bool
+ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
+ switch (mac_type) {
+ case ixgbe_mac_82599_vf:
+ case ixgbe_mac_X540_vf:
+ return 0;
+ default:
+ return 1;
+ }
+}
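These helpers hide the MAC-family differences in RSS register layout. Each 32-bit RETA register packs four one-byte queue indexes, hence the reta_idx >> 2; on X550 the table grows to 512 entries, with entries 128 and above living in the ERETA bank. Worked examples:

    /* X550, reta_size = 512:
     *   reta_idx = 100 -> IXGBE_RETA(25)       (100 >> 2)
     *   reta_idx = 200 -> IXGBE_ERETA(18)      ((200 - 128) >> 2)
     * The entry's byte lane within the register is (reta_idx & 3).
     */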
+
+static int
+ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info)
+{
+ struct ixgbe_dcb_config *dcb_config =
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
+ else
+ dcb_info->nb_tcs = 1;
+
+ if (dcb_config->vt_mode) { /* vt is enabled */
+ struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
+ for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+ for (j = 0; j < dcb_info->nb_tcs; j++) {
+ dcb_info->tc_queue.tc_rxq[i][j].base =
+ i * dcb_info->nb_tcs + j;
+ dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
+ dcb_info->tc_queue.tc_txq[i][j].base =
+ i * dcb_info->nb_tcs + j;
+ dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+ }
+ }
+ } else { /* vt is disabled */
+ struct rte_eth_dcb_rx_conf *rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
+ if (dcb_info->nb_tcs == ETH_4_TCS) {
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+ }
+ dcb_info->tc_queue.tc_txq[0][0].base = 0;
+ dcb_info->tc_queue.tc_txq[0][1].base = 64;
+ dcb_info->tc_queue.tc_txq[0][2].base = 96;
+ dcb_info->tc_queue.tc_txq[0][3].base = 112;
+ dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
+ dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+ } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+ }
+ dcb_info->tc_queue.tc_txq[0][0].base = 0;
+ dcb_info->tc_queue.tc_txq[0][1].base = 32;
+ dcb_info->tc_queue.tc_txq[0][2].base = 64;
+ dcb_info->tc_queue.tc_txq[0][3].base = 80;
+ dcb_info->tc_queue.tc_txq[0][4].base = 96;
+ dcb_info->tc_queue.tc_txq[0][5].base = 104;
+ dcb_info->tc_queue.tc_txq[0][6].base = 112;
+ dcb_info->tc_queue.tc_txq[0][7].base = 120;
+ dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
+ }
+ }
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
+ }
return 0;
}
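This handler backs rte_eth_dev_get_dcb_info(). A hedged sketch that reads back the per-TC transmit bandwidth shares; port_id is illustrative:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static int
    show_dcb_example(uint8_t port_id)
    {
            struct rte_eth_dcb_info info;
            int i, ret = rte_eth_dev_get_dcb_info(port_id, &info);

            if (ret != 0)
                    return ret;
            for (i = 0; i < info.nb_tcs; i++)
                    printf("TC%d: %u%% of Tx bandwidth\n",
                           i, (unsigned)info.tc_bws[i]);
            return 0;
    }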
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_ethdev.h b/src/dpdk22/drivers/net/ixgbe/ixgbe_ethdev.h
index ca991701..d26771a7 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_ethdev.h
+++ b/src/dpdk22/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,10 +33,11 @@
#ifndef _IXGBE_ETHDEV_H_
#define _IXGBE_ETHDEV_H_
-#include "ixgbe/ixgbe_dcb.h"
-#include "ixgbe/ixgbe_dcb_82599.h"
-#include "ixgbe/ixgbe_dcb_82598.h"
+#include "base/ixgbe_dcb.h"
+#include "base/ixgbe_dcb_82599.h"
+#include "base/ixgbe_dcb_82598.h"
#include "ixgbe_bypass.h"
+#include <rte_time.h>
/* need update link, bit flag */
#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
@@ -57,11 +58,22 @@
#define IXGBE_VFTA_SIZE 128
#define IXGBE_VLAN_TAG_SIZE 4
#define IXGBE_MAX_RX_QUEUE_NUM 128
+#define IXGBE_VMDQ_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM
+#define IXGBE_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM
+
#ifndef NBBY
#define NBBY 8 /* number of bits in a byte */
#endif
#define IXGBE_HWSTRIP_BITMAP_SIZE (IXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))
+/* EITR interval is in 2048ns units for 1G and 10G links */
+#define IXGBE_EITR_INTERVAL_UNIT_NS 2048
+#define IXGBE_EITR_ITR_INT_SHIFT 3
+#define IXGBE_EITR_INTERVAL_US(us) \
+ (((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << IXGBE_EITR_ITR_INT_SHIFT) & \
+ IXGBE_EITR_ITR_INT_MASK)
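A worked expansion of the interval macro; the final width is whatever IXGBE_EITR_ITR_INT_MASK allows:

    /* IXGBE_EITR_INTERVAL_US(10):
     *   10 * 1000 = 10000 ns; 10000 / 2048 = 4 (integer division);
     *   4 << 3 = 0x20, then AND with IXGBE_EITR_ITR_INT_MASK.
     * A requested 10 us interval thus programs 4 * 2048 ns = ~8.2 us.
     */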
+
+
/* Loopback operation modes */
/* 82599 specific loopback operation types */
#define IXGBE_LPBK_82599_NONE 0x0 /* Default value. Loopback is disabled. */
@@ -98,10 +110,44 @@
#define IXGBE_5TUPLE_MAX_PRI 7
#define IXGBE_5TUPLE_MIN_PRI 1
+#define IXGBE_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_IPV6_EX | \
+ ETH_RSS_IPV6_TCP_EX | \
+ ETH_RSS_IPV6_UDP_EX)
+
+#define IXGBE_VF_IRQ_ENABLE_MASK 3 /* vf irq enable mask */
+#define IXGBE_VF_MAXMSIVECTOR 1
+
+#define IXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define IXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
/*
* Information about the fdir mode.
*/
+
+struct ixgbe_hw_fdir_mask {
+ uint16_t vlan_tci_mask;
+ uint32_t src_ipv4_mask;
+ uint32_t dst_ipv4_mask;
+ uint16_t src_ipv6_mask;
+ uint16_t dst_ipv6_mask;
+ uint16_t src_port_mask;
+ uint16_t dst_port_mask;
+ uint16_t flex_bytes_mask;
+ uint8_t mac_addr_byte_mask;
+ uint32_t tunnel_id_mask;
+ uint8_t tunnel_type_mask;
+};
+
struct ixgbe_hw_fdir_info {
+ struct ixgbe_hw_fdir_mask mask;
+ uint8_t flex_bytes_offset;
uint16_t collision;
uint16_t free;
uint16_t maxhash;
@@ -144,8 +190,10 @@ struct ixgbe_uta_info {
uint32_t uta_shadow[IXGBE_MAX_UTA];
};
+#define IXGBE_MAX_MIRROR_RULES 4 /* Maximum nb. of mirror rules. */
+
struct ixgbe_mirror_info {
- struct rte_eth_vmdq_mirror_conf mr_conf[ETH_VMDQ_NUM_MIRROR_RULE];
+ struct rte_eth_mirror_conf mr_conf[IXGBE_MAX_MIRROR_RULES];
/**< store PF mirror rules configuration*/
};
@@ -159,6 +207,58 @@ struct ixgbe_vf_info {
uint16_t tx_rate[IXGBE_MAX_QUEUE_NUM_PER_VF];
uint16_t vlan_count;
uint8_t spoofchk_enabled;
+ uint8_t api_version;
+};
+
+/*
+ * Possible l4type of 5tuple filters.
+ */
+enum ixgbe_5tuple_protocol {
+ IXGBE_FILTER_PROTOCOL_TCP = 0,
+ IXGBE_FILTER_PROTOCOL_UDP,
+ IXGBE_FILTER_PROTOCOL_SCTP,
+ IXGBE_FILTER_PROTOCOL_NONE,
+};
+
+TAILQ_HEAD(ixgbe_5tuple_filter_list, ixgbe_5tuple_filter);
+
+struct ixgbe_5tuple_filter_info {
+ uint32_t dst_ip;
+ uint32_t src_ip;
+ uint16_t dst_port;
+ uint16_t src_port;
+ enum ixgbe_5tuple_protocol proto; /* l4 protocol. */
+ uint8_t priority; /* seven levels (001b-111b), 111b is highest,
+ used when more than one filter matches. */
+ uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */
+ src_ip_mask:1, /* if mask is 1b, do not compare src ip. */
+ dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
+ src_port_mask:1, /* if mask is 1b, do not compare src port. */
+ proto_mask:1; /* if mask is 1b, do not compare protocol. */
+};
+
+/* 5tuple filter structure */
+struct ixgbe_5tuple_filter {
+ TAILQ_ENTRY(ixgbe_5tuple_filter) entries;
+ uint16_t index; /* the index of 5tuple filter */
+ struct ixgbe_5tuple_filter_info filter_info;
+ uint16_t queue; /* rx queue assigned to */
+};
+
+#define IXGBE_5TUPLE_ARRAY_SIZE \
+ (RTE_ALIGN(IXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
+ (sizeof(uint32_t) * NBBY))
+
+/*
+ * Structure to store filters' info.
+ */
+struct ixgbe_filter_info {
+ uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
+ /* store used ethertype filters */
+ uint16_t ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
+ /* Bit mask for every used 5tuple filter */
+ uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
+ struct ixgbe_5tuple_filter_list fivetuple_list;
};
/*
@@ -179,16 +279,13 @@ struct ixgbe_adapter {
#ifdef RTE_NIC_BYPASS
struct ixgbe_bypass_info bps;
#endif /* RTE_NIC_BYPASS */
-};
+ struct ixgbe_filter_info filter;
-/*
- * Possible l4type of 5tuple filters.
- */
-enum ixgbe_5tuple_protocol {
- IXGBE_FILTER_PROTOCOL_TCP = 0,
- IXGBE_FILTER_PROTOCOL_UDP,
- IXGBE_FILTER_PROTOCOL_SCTP,
- IXGBE_FILTER_PROTOCOL_NONE,
+ bool rx_bulk_alloc_allowed;
+ bool rx_vec_allowed;
+ struct rte_timecounter systime_tc;
+ struct rte_timecounter rx_tstamp_tc;
+ struct rte_timecounter tx_tstamp_tc;
};
#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
@@ -224,11 +321,16 @@ enum ixgbe_5tuple_protocol {
#define IXGBE_DEV_PRIVATE_TO_UTA(adapter) \
(&((struct ixgbe_adapter *)adapter)->uta_info)
+#define IXGBE_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->filter)
+
/*
* RX/TX function prototypes
*/
void ixgbe_dev_clear_queues(struct rte_eth_dev *dev);
+void ixgbe_dev_free_queues(struct rte_eth_dev *dev);
+
void ixgbe_dev_rx_queue_release(void *rxq);
void ixgbe_dev_tx_queue_release(void *txq);
@@ -246,12 +348,13 @@ uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int ixgbevf_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
int ixgbe_dev_rx_init(struct rte_eth_dev *dev);
void ixgbe_dev_tx_init(struct rte_eth_dev *dev);
-void ixgbe_dev_rxtx_start(struct rte_eth_dev *dev);
+int ixgbe_dev_rxtx_start(struct rte_eth_dev *dev);
int ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
@@ -261,6 +364,12 @@ int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+
+void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+
int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);
void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
@@ -270,12 +379,9 @@ void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
-#endif
-
-uint16_t ixgbe_recv_scattered_pkts(void *rx_queue,
+uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -290,36 +396,20 @@ int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
int ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
-/*
- * Flow director function prototypes
- */
-int ixgbe_fdir_configure(struct rte_eth_dev *dev);
-
-int ixgbe_fdir_add_signature_filter(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_filter, uint8_t queue);
-
-int ixgbe_fdir_update_signature_filter(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_filter, uint8_t queue);
+uint16_t ixgbe_reta_size_get(enum ixgbe_mac_type mac_type);
-int ixgbe_fdir_remove_signature_filter(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_filter);
+uint32_t ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx);
-void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
- struct rte_eth_fdir *fdir);
+uint32_t ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type);
-int ixgbe_fdir_add_perfect_filter(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
- uint8_t queue, uint8_t drop);
+uint32_t ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i);
-int ixgbe_fdir_update_perfect_filter(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_filter,uint16_t soft_id,
- uint8_t queue, uint8_t drop);
+bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type);
-int ixgbe_fdir_remove_perfect_filter(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_filter, uint16_t soft_id);
-
-int ixgbe_fdir_set_masks(struct rte_eth_dev *dev,
- struct rte_fdir_masks *fdir_masks);
+/*
+ * Flow director function prototypes
+ */
+int ixgbe_fdir_configure(struct rte_eth_dev *dev);
void ixgbe_configure_dcb(struct rte_eth_dev *dev);
@@ -336,9 +426,14 @@ void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
+void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev);
+
void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
+
+int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op, void *arg);
#endif /* _IXGBE_ETHDEV_H_ */
diff --git a/src/dpdk22/drivers/net/ixgbe/ixgbe_fdir.c b/src/dpdk22/drivers/net/ixgbe/ixgbe_fdir.c
new file mode 100644
index 00000000..e03219b1
--- /dev/null
+++ b/src/dpdk22/drivers/net/ixgbe/ixgbe_fdir.c
@@ -0,0 +1,1361 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+
+#include "ixgbe_logs.h"
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+
+/* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */
+#define FDIRCTRL_PBALLOC_MASK 0x03
+
+/* For calculating memory required for FDIR filters */
+#define PBALLOC_SIZE_SHIFT 15
+
+/* Number of bits used to mask bucket hash for different pballoc sizes */
+#define PERFECT_BUCKET_64KB_HASH_MASK 0x07FF /* 11 bits */
+#define PERFECT_BUCKET_128KB_HASH_MASK 0x0FFF /* 12 bits */
+#define PERFECT_BUCKET_256KB_HASH_MASK 0x1FFF /* 13 bits */
+#define SIG_BUCKET_64KB_HASH_MASK 0x1FFF /* 13 bits */
+#define SIG_BUCKET_128KB_HASH_MASK 0x3FFF /* 14 bits */
+#define SIG_BUCKET_256KB_HASH_MASK 0x7FFF /* 15 bits */
+#define IXGBE_DEFAULT_FLEXBYTES_OFFSET 12 /* default flexbytes offset in bytes */
+#define IXGBE_FDIR_MAX_FLEX_LEN 2 /* len in bytes of flexbytes */
+#define IXGBE_MAX_FLX_SOURCE_OFF 62
+#define IXGBE_FDIRCTRL_FLEX_MASK (0x1F << IXGBE_FDIRCTRL_FLEX_SHIFT)
+#define IXGBE_FDIRCMD_CMD_INTERVAL_US 10
+
+#define IXGBE_FDIR_FLOW_TYPES ( \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+ (1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
+
+#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
+ uint8_t ipv6_addr[16]; \
+ uint8_t i; \
+ rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
+ (ipv6m) = 0; \
+ for (i = 0; i < sizeof(ipv6_addr); i++) { \
+ if (ipv6_addr[i] == UINT8_MAX) \
+ (ipv6m) |= 1 << i; \
+ else if (ipv6_addr[i] != 0) { \
+ PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \
+ return -EINVAL; \
+ } \
+ } \
+} while (0)
+
+#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
+ uint8_t ipv6_addr[16]; \
+ uint8_t i; \
+ for (i = 0; i < sizeof(ipv6_addr); i++) { \
+ if ((ipv6m) & (1 << i)) \
+ ipv6_addr[i] = UINT8_MAX; \
+ else \
+ ipv6_addr[i] = 0; \
+ } \
+ rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
+} while (0)
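The hardware keeps a single mask bit per IPv6 address byte, so IPV6_ADDR_TO_MASK() compresses a 16-byte mask into 16 bits and rejects anything with a partial byte; IPV6_MASK_TO_ADDR() is its inverse. A worked example:

    /* A /64 source prefix mask (first 8 bytes 0xFF, rest 0x00):
     *   IPV6_ADDR_TO_MASK(addr, m) -> m == 0x00FF (bits 0-7 set)
     * A byte such as 0xF0 trips the -EINVAL branch, since sub-byte
     * granularity cannot be expressed here.
     */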
+
+#define DEFAULT_VXLAN_PORT 4789
+#define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4
+
+static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
+static int fdir_set_input_mask(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask);
+static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask);
+static int fdir_set_input_mask_x550(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask);
+static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
+static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
+static int ixgbe_fdir_filter_to_atr_input(
+ const struct rte_eth_fdir_filter *fdir_filter,
+ union ixgbe_atr_input *input,
+ enum rte_fdir_mode mode);
+static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+ uint32_t key);
+static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc);
+static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc);
+static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input, uint8_t queue,
+ uint32_t fdircmd, uint32_t fdirhash,
+ enum rte_fdir_mode mode);
+static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
+ uint32_t fdirhash);
+static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ bool del,
+ bool update);
+static int ixgbe_fdir_flush(struct rte_eth_dev *dev);
+static void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_fdir_info *fdir_info);
+static void ixgbe_fdir_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_fdir_stats *fdir_stats);
+
+/**
+ * This function is based on ixgbe_fdir_enable_82599() in base/ixgbe_82599.c.
+ * It adds extra configuration of fdirctrl that is common for all filter types.
+ *
+ * Initialize Flow Director control registers
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register
+ **/
+static int
+fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Prime the keys for hashing */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ /*
+ * Continue setup of fdirctrl register bits:
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 filters are left
+ */
+ fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+ /*
+ * Poll init-done after we write the register. Estimated times:
+ * 10G: PBALLOC = 11b, timing is 60us
+ * 1G: PBALLOC = 11b, timing is 600us
+ * 100M: PBALLOC = 11b, timing is 6ms
+ *
+ * Multiply these timings by 4 if under full Rx load
+ *
+ * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+ * 1 msec per poll time. If we're at line rate and drop to 100M, then
+ * this might not finish in our poll time, but we can live with that
+ * for now.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ msec_delay(1);
+ }
+
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
+ PMD_INIT_LOG(ERR, "Flow Director poll time exceeded "
+ "during enabling!");
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/*
+ * Set appropriate bits in fdirctrl for: variable reporting levels, moving
+ * flexbytes matching field, and drop queue (only for perfect matching mode).
+ */
+static inline int
+configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
+{
+ *fdirctrl = 0;
+
+ switch (conf->pballoc) {
+ case RTE_FDIR_PBALLOC_64K:
+ /* 8k - 1 signature filters */
+ *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
+ break;
+ case RTE_FDIR_PBALLOC_128K:
+ /* 16k - 1 signature filters */
+ *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
+ break;
+ case RTE_FDIR_PBALLOC_256K:
+ /* 32k - 1 signature filters */
+ *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
+ break;
+ default:
+ /* bad value */
+ PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
+ return -EINVAL;
+ };
+
+ /* status flags: write hash & swindex in the rx descriptor */
+ switch (conf->status) {
+ case RTE_FDIR_NO_REPORT_STATUS:
+ /* do nothing, default mode */
+ break;
+ case RTE_FDIR_REPORT_STATUS:
+ /* report status when the packet matches a fdir rule */
+ *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
+ break;
+ case RTE_FDIR_REPORT_STATUS_ALWAYS:
+ /* always report status */
+ *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
+ break;
+ default:
+ /* bad value */
+ PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
+ return -EINVAL;
+ };
+
+ *fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
+ IXGBE_FDIRCTRL_FLEX_SHIFT;
+
+ if (conf->mode >= RTE_FDIR_MODE_PERFECT &&
+ conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
+ *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+ if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
+ *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
+ << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
+ else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+ *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
+ << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
+ }
+
+ return 0;
+}
+
+/**
+ * Reverse the bits in FDIR registers that store 2 x 16 bit masks.
+ *
+ * @hi_dword: Bits 31:16 mask to be bit swapped.
+ * @lo_dword: Bits 15:0 mask to be bit swapped.
+ *
+ * Flow director uses several registers to store 2 x 16 bit masks with the
+ * bits reversed such as FDIRTCPM, FDIRUDPM. The LS bit of the
+ * mask affects the MS bit/byte of the target. This function reverses the
+ * bits in these masks.
+ */
+static inline uint32_t
+reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
+{
+ uint32_t mask = hi_dword << 16;
+ mask |= lo_dword;
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
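Because the swap stages stop at the byte level, each 16-bit half is bit-reversed in place rather than the whole 32-bit word. Worked examples:

    /* reverse_fdir_bitmasks(0x0000, 0x0001) == 0x00008000
     *   (bit 0 of lo_dword -> bit 15)
     * reverse_fdir_bitmasks(0x0001, 0x0000) == 0x80000000
     *   (bit 0 of hi_dword -> bit 31)
     * The callers then write the complement (~fdirtcpm) into the
     * FDIRTCPM/FDIRUDPM/FDIRSCTPM registers.
     */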
+
+/*
+ * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
+ * but makes use of the rte_fdir_masks structure to see which bits to set.
+ */
+static int
+fdir_set_input_mask_82599(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ /*
+ * Mask the VM pool and DIPv6 fields since they are currently not supported.
+ * The FLEX byte is masked here too; its mask is set in flex_conf.
+ */
+ uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
+ uint32_t fdirtcpm; /* TCP source and destination port masks. */
+ uint32_t fdiripv6m; /* IPv6 source and destination masks. */
+ uint16_t dst_ipv6m = 0;
+ uint16_t src_ipv6m = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ */
+ if (input_mask->dst_port_mask == 0 && input_mask->src_port_mask == 0)
+ /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+ fdirm |= IXGBE_FDIRM_L4P;
+
+ if (input_mask->vlan_tci_mask == 0x0FFF)
+ /* mask VLAN Priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+ else if (input_mask->vlan_tci_mask == 0xE000)
+ /* mask VLAN ID */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ else if (input_mask->vlan_tci_mask == 0)
+ /* mask VLAN ID and Priority */
+ fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
+ else if (input_mask->vlan_tci_mask != 0xEFFF) {
+ PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
+ return -EINVAL;
+ }
+ info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask,
+ input_mask->src_port_mask);
+
+ /* write all the same so that UDP, TCP and SCTP use the same mask */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
+ info->mask.src_port_mask = input_mask->src_port_mask;
+ info->mask.dst_port_mask = input_mask->dst_port_mask;
+
+ /* Store source and destination IPv4 masks (big-endian) */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, ~(input_mask->ipv4_mask.src_ip));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, ~(input_mask->ipv4_mask.dst_ip));
+ info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
+ info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+
+ if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
+ /*
+ * Store source and destination IPv6 masks (bit reversed)
+ */
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
+ fdiripv6m = (dst_ipv6m << 16) | src_ipv6m;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
+ info->mask.src_ipv6_mask = src_ipv6m;
+ info->mask.dst_ipv6_mask = dst_ipv6m;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/*
+ * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
+ * but makes use of the rte_fdir_masks structure to see which bits to set.
+ */
+static int
+fdir_set_input_mask_x550(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ /* Mask the VM pool and DIPv6 fields since they are currently not supported.
+ * The FLEX byte is masked here too; its mask is set in flex_conf.
+ */
+ uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
+ IXGBE_FDIRM_FLEX;
+ uint32_t fdiripv6m;
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+ uint16_t mac_mask;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* set the default UDP port for VxLAN */
+ if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);
+
+ /* some bits must be set for mac vlan or tunnel mode */
+ fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
+
+ if (input_mask->vlan_tci_mask == 0x0FFF)
+ /* mask VLAN Priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+ else if (input_mask->vlan_tci_mask == 0xE000)
+ /* mask VLAN ID */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ else if (input_mask->vlan_tci_mask == 0)
+ /* mask VLAN ID and Priority */
+ fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
+ else if (input_mask->vlan_tci_mask != 0xEFFF) {
+ PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
+ return -EINVAL;
+ }
+ info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+ fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
+ fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
+ if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
+ fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
+ IXGBE_FDIRIP6M_TNI_VNI;
+
+ mac_mask = input_mask->mac_addr_byte_mask;
+ fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
+ & IXGBE_FDIRIP6M_INNER_MAC;
+ info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
+
+ if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ switch (input_mask->tunnel_type_mask) {
+ case 0:
+ /* Mask tunnel type */
+ fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
+ break;
+ case 1:
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
+ return -EINVAL;
+ }
+ info->mask.tunnel_type_mask =
+ input_mask->tunnel_type_mask;
+
+ switch (input_mask->tunnel_id_mask & 0xFFFFFFFF) {
+ case 0x0:
+ /* Mask vxlan id */
+ fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
+ break;
+ case 0x00FFFFFF:
+ fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
+ break;
+ case 0xFFFFFFFF:
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
+ return -EINVAL;
+ }
+ info->mask.tunnel_id_mask =
+ input_mask->tunnel_id_mask;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
+
+ return IXGBE_SUCCESS;
+}
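Only coarse, all-or-nothing choices survive the two switches above. A sketch of a mask setup that the perfect-tunnel path accepts (field values illustrative, set before rte_eth_dev_configure()):

    struct rte_eth_conf port_conf;

    memset(&port_conf, 0, sizeof(port_conf));
    port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
    /* 1 = compare the tunnel type, 0 = ignore it; anything else -> -EINVAL */
    port_conf.fdir_conf.mask.tunnel_type_mask = 1;
    /* accepted tunnel_id_mask values per the switch above:
     *   0x00000000 - ignore the tunnel ID entirely
     *   0x00FFFFFF - compare only the 24-bit TNI/VNI
     *   0xFFFFFFFF - compare the full 32-bit field */
    port_conf.fdir_conf.mask.tunnel_id_mask = 0x00FFFFFF;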
+
+static int
+fdir_set_input_mask(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (mode >= RTE_FDIR_MODE_SIGNATURE &&
+ mode <= RTE_FDIR_MODE_PERFECT)
+ return fdir_set_input_mask_82599(dev, input_mask);
+ else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+ mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ return fdir_set_input_mask_x550(dev, input_mask);
+
+ PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
+ return -ENOTSUP;
+}
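Which helper runs is decided purely by the mode chosen in rte_eth_conf before rte_eth_dev_configure(). A minimal sketch (the helper name example_fdir_conf and the field values are illustrative):

    #include <string.h>
    #include <rte_ethdev.h>

    static void
    example_fdir_conf(struct rte_eth_conf *conf)
    {
        memset(conf, 0, sizeof(*conf));
        /* SIGNATURE and PERFECT route to fdir_set_input_mask_82599();
         * PERFECT_MAC_VLAN and PERFECT_TUNNEL route to the x550 helper. */
        conf->fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
        conf->fdir_conf.pballoc = RTE_FDIR_PBALLOC_64K;
        conf->fdir_conf.drop_queue = 127;
        conf->fdir_conf.mask.ipv4_mask.src_ip = 0xFFFFFFFF; /* exact match */
        conf->fdir_conf.mask.ipv4_mask.dst_ip = 0xFFFFFFFF;
        conf->fdir_conf.mask.src_port_mask = 0xFFFF;
        conf->fdir_conf.mask.dst_port_mask = 0xFFFF;
        conf->fdir_conf.mask.vlan_tci_mask = 0x0;           /* ignore VLAN */
    }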
+
+/*
+ * ixgbe_set_fdir_flex_conf - check that the flex payload and mask
+ * configuration arguments are valid, and program them
+ */
+static int
+ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ const struct rte_eth_flex_payload_cfg *flex_cfg;
+ const struct rte_eth_fdir_flex_mask *flex_mask;
+ uint32_t fdirm;
+ uint16_t flexbytes = 0;
+ uint16_t i;
+
+ fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM);
+
+ if (conf == NULL) {
+ PMD_DRV_LOG(ERR, "NULL pointer.");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < conf->nb_payloads; i++) {
+ flex_cfg = &conf->flex_set[i];
+ if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
+ PMD_DRV_LOG(ERR, "unsupported payload type.");
+ return -EINVAL;
+ }
+ if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
+ (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
+ (flex_cfg->src_offset[0] <= IXGBE_MAX_FLX_SOURCE_OFF)) {
+ *fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
+ *fdirctrl |=
+ (flex_cfg->src_offset[0] / sizeof(uint16_t)) <<
+ IXGBE_FDIRCTRL_FLEX_SHIFT;
+ } else {
+ PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < conf->nb_flexmasks; i++) {
+ flex_mask = &conf->flex_mask[i];
+ if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
+ PMD_DRV_LOG(ERR, "flexmask should be set globally.");
+ return -EINVAL;
+ }
+ flexbytes = (uint16_t)(((flex_mask->mask[0] << 8) & 0xFF00) |
+ ((flex_mask->mask[1]) & 0xFF));
+ if (flexbytes == UINT16_MAX)
+ fdirm &= ~IXGBE_FDIRM_FLEX;
+ else if (flexbytes != 0) {
+ /* IXGBE_FDIRM_FLEX is set by default when the mask is set */
+ PMD_DRV_LOG(ERR, "invalid flexbytes mask arguments.");
+ return -EINVAL;
+ }
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+ info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
+ info->flex_bytes_offset = (uint8_t)((*fdirctrl &
+ IXGBE_FDIRCTRL_FLEX_MASK) >>
+ IXGBE_FDIRCTRL_FLEX_SHIFT);
+ return 0;
+}
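Concretely, the checks above admit only a single two-byte raw-payload field at an even offset, masked all-or-nothing. Continuing the example_fdir_conf() sketch above (the offset 12 is illustrative):

    struct rte_eth_fdir_flex_conf *fc = &conf->fdir_conf.flex_conf;

    fc->nb_payloads = 1;
    fc->flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
    fc->flex_set[0].src_offset[0] = 12; /* even, <= IXGBE_MAX_FLX_SOURCE_OFF */
    fc->flex_set[0].src_offset[1] = 13; /* must be src_offset[0] + 1 */
    fc->nb_flexmasks = 1;
    fc->flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN; /* ixgbe: global mask only */
    fc->flex_mask[0].mask[0] = 0xFF; /* both 0xFF to match the flex bytes, */
    fc->flex_mask[0].mask[1] = 0xFF; /* or both 0x00 to ignore them */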
+
+int
+ixgbe_fdir_configure(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int err;
+ uint32_t fdirctrl, pbsize;
+ int i;
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x)
+ return -ENOSYS;
+
+ /* x550 supports mac-vlan and tunnel modes, but other NICs do not */
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ mode != RTE_FDIR_MODE_SIGNATURE &&
+ mode != RTE_FDIR_MODE_PERFECT)
+ return -ENOSYS;
+
+ err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
+ if (err)
+ return err;
+
+ /*
+ * Before enabling Flow Director, the Rx Packet Buffer size
+ * must be reduced. The new value is the current size minus
+ * flow director memory usage size.
+ */
+ pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
+ (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
+
+ /*
+ * The defaults in the HW for RX PB 1-7 are not zero and so should be
+ * initialized to zero for non-DCB mode; otherwise the actual total RX PB
+ * would be bigger than programmed and filter space would run into
+ * the PB 0 region.
+ */
+ for (i = 1; i < 8; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+
+ err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, " Error on setting FD mask");
+ return err;
+ }
+ err = ixgbe_set_fdir_flex_conf(dev,
+ &dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
+ return err;
+ }
+
+ err = fdir_enable_82599(hw, fdirctrl);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, " Error on enabling FD.");
+ return err;
+ }
+ return 0;
+}
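For the packet-buffer arithmetic above: the pballoc field of fdirctrl encodes how much filter memory is carved out of Rx packet buffer 0. Assuming PBALLOC_SIZE_SHIFT is 15 and FDIRCTRL_PBALLOC_MASK is 0x3 (which the 64/128/256 KB allocation sizes imply), the carve-out works out as:

    /* fdirctrl pballoc field -> bytes subtracted from IXGBE_RXPBSIZE(0):
     *   1 -> 1 << (15 + 1) =  64 KB
     *   2 -> 1 << (15 + 2) = 128 KB
     *   3 -> 1 << (15 + 3) = 256 KB */
    uint32_t pbsize = 1u << (15 + (fdirctrl & 0x3));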
+
+/*
+ * Convert DPDK rte_eth_fdir_filter struct to ixgbe_atr_input union that is used
+ * by the IXGBE driver code.
+ */
+static int
+ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
+ union ixgbe_atr_input *input, enum rte_fdir_mode mode)
+{
+ input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
+ input->formatted.flex_bytes = (uint16_t)(
+ (fdir_filter->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
+ (fdir_filter->input.flow_ext.flexbytes[0] & 0xFF));
+
+ switch (fdir_filter->input.flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV6;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV6;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
+ break;
+ default:
+ break;
+ }
+
+ switch (fdir_filter->input.flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ input->formatted.src_port =
+ fdir_filter->input.flow.udp4_flow.src_port;
+ input->formatted.dst_port =
+ fdir_filter->input.flow.udp4_flow.dst_port;
+ /* for SCTP flow type, port and verify_tag are meaningless in ixgbe. */
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ input->formatted.src_ip[0] =
+ fdir_filter->input.flow.ip4_flow.src_ip;
+ input->formatted.dst_ip[0] =
+ fdir_filter->input.flow.ip4_flow.dst_ip;
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ input->formatted.src_port =
+ fdir_filter->input.flow.udp6_flow.src_port;
+ input->formatted.dst_port =
+ fdir_filter->input.flow.udp6_flow.dst_port;
+ /* for SCTP flow type, port and verify_tag are meaningless in ixgbe. */
+ case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ rte_memcpy(input->formatted.src_ip,
+ fdir_filter->input.flow.ipv6_flow.src_ip,
+ sizeof(input->formatted.src_ip));
+ rte_memcpy(input->formatted.dst_ip,
+ fdir_filter->input.flow.ipv6_flow.dst_ip,
+ sizeof(input->formatted.dst_ip));
+ break;
+ default:
+ break;
+ }
+
+ if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ rte_memcpy(
+ input->formatted.inner_mac,
+ fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes,
+ sizeof(input->formatted.inner_mac));
+ } else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ rte_memcpy(
+ input->formatted.inner_mac,
+ fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
+ sizeof(input->formatted.inner_mac));
+ input->formatted.tunnel_type =
+ fdir_filter->input.flow.tunnel_flow.tunnel_type;
+ input->formatted.tni_vni =
+ fdir_filter->input.flow.tunnel_flow.tunnel_id;
+ }
+
+ return 0;
+}
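A sketch of the rte_eth_fdir_filter this conversion consumes, here for IPv4/UDP (the helper name, addresses, and ports are illustrative; the flow fields are expected in network byte order):

    #include <string.h>
    #include <rte_ethdev.h>
    #include <rte_ip.h>
    #include <rte_byteorder.h>

    static void
    example_udp4_filter(struct rte_eth_fdir_filter *f)
    {
        memset(f, 0, sizeof(*f));
        f->soft_id = 1;
        f->input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        f->input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(IPv4(192, 168, 0, 1));
        f->input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 1));
        f->input.flow.udp4_flow.src_port = rte_cpu_to_be_16(1024);
        f->input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(53);
        f->action.behavior = RTE_ETH_FDIR_ACCEPT;
        f->action.report_status = RTE_ETH_FDIR_REPORT_ID;
        f->action.rx_queue = 3;
    }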
+
+/*
+ * The function below is taken from the FreeBSD IXGBE driver, release
+ * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
+ * before returning, as the signature hash can use all 16 bits.
+ *
+ * The newer driver has optimised functions for calculating bucket and
+ * signature hashes. However, they don't support IPv6-type packets for
+ * signature filters, so they are not used here.
+ *
+ * Note that the bkt_hash field in the ixgbe_atr_input structure is also never
+ * set.
+ *
+ * Compute the hashes for SW ATR
+ * @stream: input bitstream to compute the hash on
+ * @key: 32-bit hash key
+ **/
+static uint32_t
+ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+ uint32_t key)
+{
+ /*
+ * The algorithm is as follows:
+ * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
+ * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
+ * and A[n] x B[n] is bitwise AND between same length strings
+ *
+ * K[n] is 16 bits, defined as:
+ * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
+ * for n modulo 32 < 15, K[n] =
+ * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
+ *
+ * S[n] is 16 bits, defined as:
+ * for n >= 15, S[n] = S[n:n - 15]
+ * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
+ *
+ * To simplify for programming, the algorithm is implemented
+ * in software this way:
+ *
+ * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
+ *
+ * for (i = 0; i < 352; i+=32)
+ * hi_hash_dword[31:0] ^= Stream[(i+31):i];
+ *
+ * lo_hash_dword[15:0] ^= Stream[15:0];
+ * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
+ * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
+ *
+ * hi_hash_dword[31:0] ^= Stream[351:320];
+ *
+ * if(key[0])
+ * hash[15:0] ^= Stream[15:0];
+ *
+ * for (i = 0; i < 16; i++) {
+ * if (key[i])
+ * hash[15:0] ^= lo_hash_dword[(i+15):i];
+ * if (key[i + 16])
+ * hash[15:0] ^= hi_hash_dword[(i+15):i];
+ * }
+ *
+ */
+ __be32 common_hash_dword = 0;
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 hash_result = 0;
+ u8 i;
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);
+
+ /* generate common hash dword */
+ for (i = 1; i <= 13; i++)
+ common_hash_dword ^= atr_input->dword_stream[i];
+
+ hi_hash_dword = IXGBE_NTOHL(common_hash_dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ if (key & 0x0001) hash_result ^= lo_hash_dword;
+ if (key & 0x00010000) hash_result ^= hi_hash_dword;
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword; we had to
+ * delay this because bit 0 of the stream should not be processed,
+ * so we do not add the VLAN until after bit 0 has been processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+
+ /* process the remaining 30 bits in the key 2 bits at a time */
+ for (i = 15; i; i--) {
+ if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
+ if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
+ }
+
+ return hash_result;
+}
+
+static uint32_t
+atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc)
+{
+ if (pballoc == RTE_FDIR_PBALLOC_256K)
+ return ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ PERFECT_BUCKET_256KB_HASH_MASK;
+ else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ return ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ PERFECT_BUCKET_128KB_HASH_MASK;
+ else
+ return ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ PERFECT_BUCKET_64KB_HASH_MASK;
+}
+
+/**
+ * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
+ * @hw: pointer to hardware structure
+ */
+static inline int
+ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+ *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+ if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+ return 0;
+ rte_delay_us(IXGBE_FDIRCMD_CMD_INTERVAL_US);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Calculate the hash value needed for signature-match filters. In the FreeBSD
+ * driver, this is done by the optimised function
+ * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
+ * doesn't support calculating a hash for an IPv6 filter.
+ */
+static uint32_t
+atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc)
+{
+ uint32_t bucket_hash, sig_hash;
+
+ if (pballoc == RTE_FDIR_PBALLOC_256K)
+ bucket_hash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ SIG_BUCKET_256KB_HASH_MASK;
+ else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ bucket_hash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ SIG_BUCKET_128KB_HASH_MASK;
+ else
+ bucket_hash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ SIG_BUCKET_64KB_HASH_MASK;
+
+ sig_hash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
+}
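The composed value mirrors the FDIRHASH register layout. A tiny sketch, assuming the 16-bit signature shift and a 15-bit bucket field (the actual bucket width varies with pballoc, as the three masked cases above show):

    /* FDIRHASH-style layout: signature hash in [31:16],
     * bucket index in the low bits. */
    static uint32_t
    compose_fdirhash(uint16_t sig_hash, uint16_t bucket_hash)
    {
        return ((uint32_t)sig_hash << 16) | (bucket_hash & 0x7FFF);
    }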
+
+/*
+ * This is based on ixgbe_fdir_write_perfect_filter_82599() in
+ * base/ixgbe_82599.c, with the ability to set extra flags in the FDIRCMD
+ * register added, and IPv6 support added as well. The hash value is
+ * pre-calculated here, as the pballoc value is needed to compute it.
+ */
+static int
+fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input, uint8_t queue,
+ uint32_t fdircmd, uint32_t fdirhash,
+ enum rte_fdir_mode mode)
+{
+ uint32_t fdirport, fdirvlan;
+ u32 addr_low, addr_high;
+ u32 tunnel_type = 0;
+ int err = 0;
+
+ if (mode == RTE_FDIR_MODE_PERFECT) {
+ /* record the IPv4 address (big-endian) */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA,
+ input->formatted.src_ip[0]);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA,
+ input->formatted.dst_ip[0]);
+
+ /* record source and destination port (little-endian)*/
+ fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+ fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+ } else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+ mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ /* for mac vlan and tunnel modes */
+ addr_low = ((u32)input->formatted.inner_mac[0] |
+ ((u32)input->formatted.inner_mac[1] << 8) |
+ ((u32)input->formatted.inner_mac[2] << 16) |
+ ((u32)input->formatted.inner_mac[3] << 24));
+ addr_high = ((u32)input->formatted.inner_mac[4] |
+ ((u32)input->formatted.inner_mac[5] << 8));
+
+ if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
+ } else {
+ /* tunnel mode */
+ if (input->formatted.tunnel_type !=
+ RTE_FDIR_TUNNEL_TYPE_NVGRE)
+ tunnel_type = 0x80000000;
+ tunnel_type |= addr_high;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
+ input->formatted.tni_vni);
+ }
+ }
+
+ /* record vlan (little-endian) and flex_bytes(big-endian) */
+ fdirvlan = input->formatted.flex_bytes;
+ fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+ fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+ /* configure FDIRHASH register */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /*
+ * flush all previous writes to make certain registers are
+ * programmed prior to issuing the command
+ */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* configure FDIRCMD register */
+ fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ fdircmd |= (uint32_t)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+
+ PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
+
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0)
+ PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");
+
+ return err;
+}
+
+/**
+ * This function is based on ixgbe_atr_add_signature_filter_82599() in
+ * base/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
+ * setting extra fields in the FDIRCMD register, and removes the code that was
+ * verifying the flow_type field. According to the documentation, a flow type of
+ * 00 (i.e. not TCP, UDP, or SCTP) is not supported; however, it appears to
+ * work OK...
+ *
+ * Adds a signature hash filter
+ * @hw: pointer to hardware structure
+ * @input: unique input dword
+ * @queue: queue index to direct traffic to
+ * @fdircmd: any extra flags to set in fdircmd register
+ * @fdirhash: pre-calculated hash value for the filter
+ **/
+static int
+fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
+ uint32_t fdirhash)
+{
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* configure FDIRCMD register */
+ fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+
+ PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
+
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0)
+ PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");
+
+ return err;
+}
+
+/*
+ * This is based on ixgbe_fdir_erase_perfect_filter_82599() in
+ * base/ixgbe_82599.c. It is modified to take in the hash as a parameter so
+ * that it can be used for removing signature and perfect filters.
+ */
+static int
+fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
+{
+ uint32_t fdircmd = 0;
+ int err = 0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /* flush hash to HW */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Query if filter is present */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
+ return err;
+ }
+
+ /* if filter exists in hardware then remove it */
+ if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+ }
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0)
+ PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");
+ return err;
+
+}
+
+/*
+ * ixgbe_add_del_fdir_filter - add or remove a flow director filter.
+ * @dev: pointer to the structure rte_eth_dev
+ * @fdir_filter: fdir filter entry
+ * @del: 1 - delete, 0 - add
+ * @update: 1 - update
+ */
+static int
+ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ bool del,
+ bool update)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fdircmd_flags;
+ uint32_t fdirhash;
+ union ixgbe_atr_input input;
+ uint8_t queue;
+ bool is_perfect = FALSE;
+ int err;
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (fdir_mode == RTE_FDIR_MODE_NONE)
+ return -ENOTSUP;
+
+ /*
+ * Sanity check for x550.
+ * When adding a new filter with flow type set to IPv4-other,
+ * the flow director masks should be configured beforehand,
+ * with the L4 protocol and ports masked.
+ */
+ if ((!del) &&
+ (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x) &&
+ (fdir_filter->input.flow_type ==
+ RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
+ (info->mask.src_port_mask != 0 ||
+ info->mask.dst_port_mask != 0)) {
+ PMD_DRV_LOG(ERR, "By this device,"
+ " IPv4-other is not supported without"
+ " L4 protocol and ports masked!");
+ return -ENOTSUP;
+ }
+
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+ fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ is_perfect = TRUE;
+
+ memset(&input, 0, sizeof(input));
+
+ err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
+ fdir_mode);
+ if (err)
+ return err;
+
+ if (is_perfect) {
+ if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
+ PMD_DRV_LOG(ERR, "IPv6 is not supported in"
+ " perfect mode!");
+ return -ENOTSUP;
+ }
+ fdirhash = atr_compute_perfect_hash_82599(&input,
+ dev->data->dev_conf.fdir_conf.pballoc);
+ fdirhash |= fdir_filter->soft_id <<
+ IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ } else
+ fdirhash = atr_compute_sig_hash_82599(&input,
+ dev->data->dev_conf.fdir_conf.pballoc);
+
+ if (del) {
+ err = fdir_erase_filter_82599(hw, fdirhash);
+ if (err < 0)
+ PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
+ else
+ PMD_DRV_LOG(DEBUG, "Success to delete FDIR filter!");
+ return err;
+ }
+ /* add or update an fdir filter */
+ fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
+ if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) {
+ if (is_perfect) {
+ queue = dev->data->dev_conf.fdir_conf.drop_queue;
+ fdircmd_flags |= IXGBE_FDIRCMD_DROP;
+ } else {
+ PMD_DRV_LOG(ERR, "Drop option is not supported in"
+ " signature mode.");
+ return -EINVAL;
+ }
+ } else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
+ fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
+ queue = (uint8_t)fdir_filter->action.rx_queue;
+ else
+ return -EINVAL;
+
+ if (is_perfect) {
+ err = fdir_write_perfect_filter_82599(hw, &input, queue,
+ fdircmd_flags, fdirhash,
+ fdir_mode);
+ } else {
+ err = fdir_add_signature_filter_82599(hw, &input, queue,
+ fdircmd_flags, fdirhash);
+ }
+ if (err < 0)
+ PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
+ else
+ PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
+
+ return err;
+}
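Applications reach this function through the generic filter API rather than calling it directly. A usage sketch (port 0 is illustrative, and example_udp4_filter() is the hypothetical helper from the earlier sketch):

    struct rte_eth_fdir_filter f;
    int ret;

    example_udp4_filter(&f);
    ret = rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_FDIR,
                                  RTE_ETH_FILTER_ADD, &f);
    if (ret < 0)
        printf("fdir add failed: %d\n", ret);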
+
+static int
+ixgbe_fdir_flush(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ int ret;
+
+ ret = ixgbe_reinit_fdir_tables_82599(hw);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
+ return ret;
+ }
+
+ info->f_add = 0;
+ info->f_remove = 0;
+ info->add = 0;
+ info->remove = 0;
+
+ return ret;
+}
+
+#define FDIRENTRIES_NUM_SHIFT 10
+static void
+ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ uint32_t fdirctrl, max_num;
+ uint8_t offset;
+
+ fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+ offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >>
+ IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t);
+
+ fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
+ max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
+ (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
+ if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT &&
+ fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ fdir_info->guarant_spc = max_num;
+ else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
+ fdir_info->guarant_spc = max_num * 4;
+
+ fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask;
+ fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask;
+ fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask;
+ IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask,
+ fdir_info->mask.ipv6_mask.src_ip);
+ IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask,
+ fdir_info->mask.ipv6_mask.dst_ip);
+ fdir_info->mask.src_port_mask = info->mask.src_port_mask;
+ fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
+ fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask;
+ fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
+ fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
+ fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;
+
+ if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
+ fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+ fdir_info->flow_types_mask[0] = 0;
+ else
+ fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
+
+ fdir_info->flex_payload_unit = sizeof(uint16_t);
+ fdir_info->max_flex_payload_segment_num = 1;
+ fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
+ fdir_info->flex_conf.nb_payloads = 1;
+ fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
+ fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
+ fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1;
+ fdir_info->flex_conf.nb_flexmasks = 1;
+ fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
+ fdir_info->flex_conf.flex_mask[0].mask[0] =
+ (uint8_t)(info->mask.flex_bytes_mask & 0x00FF);
+ fdir_info->flex_conf.flex_mask[0].mask[1] =
+ (uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8);
+}
+
+static void
+ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ uint32_t reg, max_num;
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ /* Get the information from registers */
+ reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
+ info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
+ IXGBE_FDIRFREE_COLL_SHIFT);
+ info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
+ IXGBE_FDIRFREE_FREE_SHIFT);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
+ info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
+ IXGBE_FDIRLEN_MAXHASH_SHIFT);
+ info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
+ IXGBE_FDIRLEN_MAXLEN_SHIFT);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
+ info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
+ IXGBE_FDIRUSTAT_REMOVE_SHIFT;
+ info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
+ IXGBE_FDIRUSTAT_ADD_SHIFT;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
+ info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
+ IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
+ info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
+ IXGBE_FDIRFSTAT_FADD_SHIFT;
+
+ /* Copy the new information in the fdir parameter */
+ fdir_stats->collision = info->collision;
+ fdir_stats->free = info->free;
+ fdir_stats->maxhash = info->maxhash;
+ fdir_stats->maxlen = info->maxlen;
+ fdir_stats->remove = info->remove;
+ fdir_stats->add = info->add;
+ fdir_stats->f_remove = info->f_remove;
+ fdir_stats->f_add = info->f_add;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+ max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
+ (reg & FDIRCTRL_PBALLOC_MASK)));
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+ fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ fdir_stats->guarant_cnt = max_num - fdir_stats->free;
+ else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
+ fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
+
+}
+
+/*
+ * ixgbe_fdir_ctrl_func - handle all operations on the flow director.
+ * @dev: pointer to the structure rte_eth_dev
+ * @filter_op: the operation to be taken
+ * @arg: a pointer to the specific structure corresponding to filter_op
+ */
+int
+ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op, void *arg)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret = 0;
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x)
+ return -ENOTSUP;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
+ return -EINVAL;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = ixgbe_add_del_fdir_filter(dev,
+ (struct rte_eth_fdir_filter *)arg, FALSE, FALSE);
+ break;
+ case RTE_ETH_FILTER_UPDATE:
+ ret = ixgbe_add_del_fdir_filter(dev,
+ (struct rte_eth_fdir_filter *)arg, FALSE, TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = ixgbe_add_del_fdir_filter(dev,
+ (struct rte_eth_fdir_filter *)arg, TRUE, FALSE);
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ ret = ixgbe_fdir_flush(dev);
+ break;
+ case RTE_ETH_FILTER_INFO:
+ ixgbe_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
+ break;
+ case RTE_ETH_FILTER_STATS:
+ ixgbe_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
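The INFO and STATS operations fill caller-provided structures. A sketch of querying them through the same generic API (port 0 illustrative):

    struct rte_eth_fdir_info info;
    struct rte_eth_fdir_stats stats;

    memset(&info, 0, sizeof(info));
    memset(&stats, 0, sizeof(stats));
    rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_FDIR, RTE_ETH_FILTER_INFO, &info);
    rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_FDIR, RTE_ETH_FILTER_STATS, &stats);
    printf("fdir: mode %d, %u guaranteed entries, %u free, %u collisions\n",
           info.mode, info.guarant_spc, stats.free, stats.collision);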
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_logs.h b/src/dpdk22/drivers/net/ixgbe/ixgbe_logs.h
index 4f224ecf..53ba42d9 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_logs.h
+++ b/src/dpdk22/drivers/net/ixgbe/ixgbe_logs.h
@@ -34,9 +34,8 @@
#ifndef _IXGBE_LOGS_H_
#define _IXGBE_LOGS_H_
-
-#define PMD_INIT_LOG(level, fmt, args...) RTE_LOG(level, PMD," " fmt "\n", ##args)
-
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
#ifdef RTE_LIBRTE_IXGBE_DEBUG_INIT
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_pf.c b/src/dpdk22/drivers/net/ixgbe/ixgbe_pf.c
index 51da1fd1..2ffbd1fa 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_pf.c
+++ b/src/dpdk22/drivers/net/ixgbe/ixgbe_pf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,10 +49,13 @@
#include <rte_malloc.h>
#include <rte_random.h>
-#include "ixgbe/ixgbe_common.h"
+#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#define IXGBE_MAX_VFTA (128)
+#define IXGBE_VF_MSG_SIZE_DEFAULT 1
+#define IXGBE_VF_GET_QUEUE_MSG_SIZE 5
+#define IXGBE_ETHERTYPE_FLOW_CTRL 0x8808
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
@@ -142,6 +145,69 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
return;
}
+void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_vf_info **vfinfo;
+ uint16_t vf_num;
+
+ PMD_INIT_FUNC_TRACE();
+
+ vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
+
+ RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
+ RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;
+
+ vf_num = dev_num_vf(eth_dev);
+ if (vf_num == 0)
+ return;
+
+ rte_free(*vfinfo);
+ *vfinfo = NULL;
+}
+
+static void
+ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ uint16_t vf_num;
+ int i;
+
+ if (!hw->mac.ops.set_ethertype_anti_spoofing) {
+ RTE_LOG(INFO, PMD, "ether type anti-spoofing is not"
+ " supported.\n");
+ return;
+ }
+
+ /* occupy an entry of the ether type filter table */
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (!(filter_info->ethertype_mask & (1 << i))) {
+ filter_info->ethertype_mask |= 1 << i;
+ filter_info->ethertype_filters[i] =
+ IXGBE_ETHERTYPE_FLOW_CTRL;
+ break;
+ }
+ }
+ if (i == IXGBE_MAX_ETQF_FILTERS) {
+ RTE_LOG(ERR, PMD, "Cannot find an unused ether type filter"
+ " entity for flow control.\n");
+ return;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
+ (IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ IXGBE_ETHERTYPE_FLOW_CTRL));
+
+ vf_num = dev_num_vf(eth_dev);
+ for (i = 0; i < vf_num; i++)
+ hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
+}
+
int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
{
uint32_t vtctl, fcrth;
@@ -238,6 +304,8 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
}
+ ixgbe_add_tx_flow_control_drop_filter(eth_dev);
+
return 0;
}
@@ -469,9 +537,63 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *ms
}
static int
+ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ uint32_t api_version = msgbuf[1];
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+
+ switch (api_version) {
+ case ixgbe_mbox_api_10:
+ case ixgbe_mbox_api_11:
+ vfinfo[vf].api_version = (uint8_t)api_version;
+ return 0;
+ default:
+ break;
+ }
+
+ RTE_LOG(ERR, PMD, "Negotiate invalid api version %u from VF %d\n",
+ api_version, vf);
+
+ return -1;
+}
+
+static int
+ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+
+ /* Verify whether the PF supports this mbox API version */
+ switch (vfinfo[vf].api_version) {
+ case ixgbe_mbox_api_20:
+ case ixgbe_mbox_api_11:
+ break;
+ default:
+ return -1;
+ }
+
+ /* Notify VF of Rx and Tx queue number */
+ msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+
+ /* Notify VF of default queue */
+ msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;
+
+ /*
+ * FIXME: fill msgbuf[IXGBE_VF_TRANS_VLAN] if needed,
+ * for VLAN strip or VMDQ_DCB or VMDQ_DCB_RSS
+ */
+
+ return 0;
+}
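From the VF side these handlers are driven over the mailbox: dword 0 carries the opcode, later dwords the arguments, and the PF ORs an ACK/NACK flag back into dword 0. A sketch of the negotiate request as it would appear in the buffer (constants from the shared mbox header):

    uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];

    msgbuf[0] = IXGBE_VF_API_NEGOTIATE;  /* opcode dispatched above */
    msgbuf[1] = ixgbe_mbox_api_11;       /* requested API version */
    /* On return, msgbuf[0] has IXGBE_VT_MSGTYPE_ACK (success) or
     * IXGBE_VT_MSGTYPE_NACK ORed onto the opcode; a subsequent
     * IXGBE_VF_GET_QUEUES reply carries 5 dwords (queue counts etc.). */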
+
+static int
ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
+ uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT;
uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
int32_t retval;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -512,6 +634,13 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
case IXGBE_VF_SET_VLAN:
retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
break;
+ case IXGBE_VF_API_NEGOTIATE:
+ retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
+ break;
+ case IXGBE_VF_GET_QUEUES:
+ retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
+ msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE;
+ break;
default:
PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
retval = IXGBE_ERR_MBX;
@@ -526,7 +655,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
- ixgbe_write_mbx(hw, msgbuf, 1, vf);
+ ixgbe_write_mbx(hw, msgbuf, msg_size, vf);
return retval;
}
diff --git a/src/dpdk22/drivers/net/ixgbe/ixgbe_regs.h b/src/dpdk22/drivers/net/ixgbe/ixgbe_regs.h
new file mode 100644
index 00000000..c7457a6f
--- /dev/null
+++ b/src/dpdk22/drivers/net/ixgbe/ixgbe_regs.h
@@ -0,0 +1,376 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _IXGBE_REGS_H_
+#define _IXGBE_REGS_H_
+
+#include "ixgbe_ethdev.h"
+
+struct ixgbe_hw;
+struct reg_info {
+ uint32_t base_addr;
+ uint32_t count;
+ uint32_t stride;
+ const char *name;
+};
+
+static const struct reg_info ixgbe_regs_general[] = {
+ {IXGBE_CTRL, 1, 1, "IXGBE_CTRL"},
+ {IXGBE_STATUS, 1, 1, "IXGBE_STATUS"},
+ {IXGBE_CTRL_EXT, 1, 1, "IXGBE_CTRL_EXT"},
+ {IXGBE_ESDP, 1, 1, "IXGBE_ESDP"},
+ {IXGBE_EODSDP, 1, 1, "IXGBE_EODSDP"},
+ {IXGBE_LEDCTL, 1, 1, "IXGBE_LEDCTL"},
+ {IXGBE_FRTIMER, 1, 1, "IXGBE_FRTIMER"},
+ {IXGBE_TCPTIMER, 1, 1, "IXGBE_TCPTIMER"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbevf_regs_general[] = {
+ {IXGBE_CTRL, 1, 1, "IXGBE_CTRL"},
+ {IXGBE_STATUS, 1, 1, "IXGBE_STATUS"},
+ {IXGBE_VFLINKS, 1, 1, "IXGBE_VFLINKS"},
+ {IXGBE_FRTIMER, 1, 1, "IXGBE_FRTIMER"},
+ {IXGBE_VFMAILBOX, 1, 1, "IXGBE_VFMAILBOX"},
+ {IXGBE_VFMBMEM, 16, 4, "IXGBE_VFMBMEM"},
+ {IXGBE_VFRXMEMWRAP, 1, 1, "IXGBE_VFRXMEMWRAP"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_nvm[] = {
+ {IXGBE_EEC, 1, 1, "IXGBE_EEC"},
+ {IXGBE_EERD, 1, 1, "IXGBE_EERD"},
+ {IXGBE_FLA, 1, 1, "IXGBE_FLA"},
+ {IXGBE_EEMNGCTL, 1, 1, "IXGBE_EEMNGCTL"},
+ {IXGBE_EEMNGDATA, 1, 1, "IXGBE_EEMNGDATA"},
+ {IXGBE_FLMNGCTL, 1, 1, "IXGBE_FLMNGCTL"},
+ {IXGBE_FLMNGDATA, 1, 1, "IXGBE_FLMNGDATA"},
+ {IXGBE_FLMNGCNT, 1, 1, "IXGBE_FLMNGCNT"},
+ {IXGBE_FLOP, 1, 1, "IXGBE_FLOP"},
+ {IXGBE_GRC, 1, 1, "IXGBE_GRC"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_interrupt[] = {
+ {IXGBE_EICS, 1, 1, "IXGBE_EICS"},
+ {IXGBE_EIMS, 1, 1, "IXGBE_EIMS"},
+ {IXGBE_EIMC, 1, 1, "IXGBE_EIMC"},
+ {IXGBE_EIAC, 1, 1, "IXGBE_EIAC"},
+ {IXGBE_EIAM, 1, 1, "IXGBE_EIAM"},
+ {IXGBE_EITR(0), 24, 4, "IXGBE_EITR"},
+ {IXGBE_IVAR(0), 24, 4, "IXGBE_IVAR"},
+ {IXGBE_MSIXT, 1, 1, "IXGBE_MSIXT"},
+ {IXGBE_MSIXPBA, 1, 1, "IXGBE_MSIXPBA"},
+ {IXGBE_PBACL(0), 1, 4, "IXGBE_PBACL"},
+ {IXGBE_GPIE, 1, 1, ""},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbevf_regs_interrupt[] = {
+ {IXGBE_VTEICR, 1, 1, "IXGBE_VTEICR"},
+ {IXGBE_VTEICS, 1, 1, "IXGBE_VTEICS"},
+ {IXGBE_VTEIMS, 1, 1, "IXGBE_VTEIMS"},
+ {IXGBE_VTEIMC, 1, 1, "IXGBE_VTEIMC"},
+ {IXGBE_VTEIAM, 1, 1, "IXGBE_VTEIAM"},
+ {IXGBE_VTEITR(0), 2, 4, "IXGBE_VTEITR"},
+ {IXGBE_VTIVAR(0), 4, 4, "IXGBE_VTIVAR"},
+ {IXGBE_VTIVAR_MISC, 1, 1, "IXGBE_VTIVAR_MISC"},
+ {IXGBE_VTRSCINT(0), 2, 4, "IXGBE_VTRSCINT"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_fctl_mac_82598EB[] = {
+ {IXGBE_PFCTOP, 1, 1, ""},
+ {IXGBE_FCTTV(0), 4, 4, ""},
+ {IXGBE_FCRTV, 1, 1, ""},
+ {IXGBE_TFCS, 1, 1, ""},
+ {IXGBE_FCRTL(0), 8, 8, "IXGBE_FCRTL"},
+ {IXGBE_FCRTH(0), 8, 8, "IXGBE_FCRTH"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_fctl_others[] = {
+ {IXGBE_PFCTOP, 1, 1, ""},
+ {IXGBE_FCTTV(0), 4, 4, ""},
+ {IXGBE_FCRTV, 1, 1, ""},
+ {IXGBE_TFCS, 1, 1, ""},
+ {IXGBE_FCRTL_82599(0), 8, 4, "IXGBE_FCRTL"},
+ {IXGBE_FCRTH_82599(0), 8, 4, "IXGBE_FCRTH"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_rxdma[] = {
+ {IXGBE_RDBAL(0), 64, 0x40, "IXGBE_RDBAL"},
+ {IXGBE_RDBAH(0), 64, 0x40, "IXGBE_RDBAH"},
+ {IXGBE_RDLEN(0), 64, 0x40, "IXGBE_RDLEN"},
+ {IXGBE_RDH(0), 64, 0x40, "IXGBE_RDH"},
+ {IXGBE_RDT(0), 64, 0x40, "IXGBE_RDT"},
+ {IXGBE_RXDCTL(0), 64, 0x40, "IXGBE_RXDCTL"},
+ {IXGBE_SRRCTL(0), 16, 0x4, "IXGBE_SRRCTL"},
+ {IXGBE_DCA_RXCTRL(0), 16, 4, "IXGBE_DCA_RXCTRL"},
+ {IXGBE_RDRXCTL, 1, 1, "IXGBE_RDRXCTL"},
+ {IXGBE_RXPBSIZE(0), 8, 4, "IXGBE_RXPBSIZE"},
+ {IXGBE_RXCTRL, 1, 1, "IXGBE_RXCTRL"},
+ {IXGBE_DROPEN, 1, 1, "IXGBE_DROPEN"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbevf_regs_rxdma[] = {
+ {IXGBE_RDBAL(0), 8, 0x40, "IXGBE_RDBAL"},
+ {IXGBE_RDBAH(0), 8, 0x40, "IXGBE_RDBAH"},
+ {IXGBE_RDLEN(0), 8, 0x40, "IXGBE_RDLEN"},
+ {IXGBE_RDH(0), 8, 0x40, "IXGBE_RDH"},
+ {IXGBE_RDT(0), 8, 0x40, "IXGBE_RDT"},
+ {IXGBE_RXDCTL(0), 8, 0x40, "IXGBE_RXDCTL"},
+ {IXGBE_SRRCTL(0), 8, 0x40, "IXGBE_SRRCTL"},
+ {IXGBE_VFPSRTYPE, 1, 1, "IXGBE_VFPSRTYPE"},
+ {IXGBE_VFRSCCTL(0), 8, 0x40, "IXGBE_VFRSCCTL"},
+ {IXGBE_PVFDCA_RXCTRL(0), 8, 0x40, "IXGBE_PVFDCA_RXCTRL"},
+ {IXGBE_PVFDCA_TXCTRL(0), 8, 0x40, "IXGBE_PVFDCA_TXCTRL"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_rx[] = {
+ {IXGBE_RXCSUM, 1, 1, "IXGBE_RXCSUM"},
+ {IXGBE_RFCTL, 1, 1, "IXGBE_RFCTL"},
+ {IXGBE_RAL(0), 16, 8, "IXGBE_RAL"},
+ {IXGBE_RAH(0), 16, 8, "IXGBE_RAH"},
+ {IXGBE_PSRTYPE(0), 1, 4, "IXGBE_PSRTYPE"},
+ {IXGBE_FCTRL, 1, 1, "IXGBE_FCTRL"},
+ {IXGBE_VLNCTRL, 1, 1, "IXGBE_VLNCTRL"},
+ {IXGBE_MCSTCTRL, 1, 1, "IXGBE_MCSTCTRL"},
+ {IXGBE_MRQC, 1, 1, "IXGBE_MRQC"},
+ {IXGBE_VMD_CTL, 1, 1, "IXGBE_VMD_CTL"},
+ {IXGBE_IMIR(0), 8, 4, "IXGBE_IMIR"},
+ {IXGBE_IMIREXT(0), 8, 4, "IXGBE_IMIREXT"},
+ {IXGBE_IMIRVP, 1, 1, "IXGBE_IMIRVP"},
+ {0, 0, 0, ""}
+};
+
+static struct reg_info ixgbe_regs_tx[] = {
+ {IXGBE_TDBAL(0), 32, 0x40, "IXGBE_TDBAL"},
+ {IXGBE_TDBAH(0), 32, 0x40, "IXGBE_TDBAH"},
+ {IXGBE_TDLEN(0), 32, 0x40, "IXGBE_TDLEN"},
+ {IXGBE_TDH(0), 32, 0x40, "IXGBE_TDH"},
+ {IXGBE_TDT(0), 32, 0x40, "IXGBE_TDT"},
+ {IXGBE_TXDCTL(0), 32, 0x40, "IXGBE_TXDCTL"},
+ {IXGBE_TDWBAL(0), 32, 0x40, "IXGBE_TDWBAL"},
+ {IXGBE_TDWBAH(0), 32, 0x40, "IXGBE_TDWBAH"},
+ {IXGBE_DTXCTL, 1, 1, "IXGBE_DTXCTL"},
+ {IXGBE_DCA_TXCTRL(0), 16, 4, "IXGBE_DCA_TXCTRL"},
+ {IXGBE_TXPBSIZE(0), 8, 4, "IXGBE_TXPBSIZE"},
+ {IXGBE_MNGTXMAP, 1, 1, "IXGBE_MNGTXMAP"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbevf_regs_tx[] = {
+ {IXGBE_TDBAL(0), 4, 0x40, "IXGBE_TDBAL"},
+ {IXGBE_TDBAH(0), 4, 0x40, "IXGBE_TDBAH"},
+ {IXGBE_TDLEN(0), 4, 0x40, "IXGBE_TDLEN"},
+ {IXGBE_TDH(0), 4, 0x40, "IXGBE_TDH"},
+ {IXGBE_TDT(0), 4, 0x40, "IXGBE_TDT"},
+ {IXGBE_TXDCTL(0), 4, 0x40, "IXGBE_TXDCTL"},
+ {IXGBE_TDWBAL(0), 4, 0x40, "IXGBE_TDWBAL"},
+ {IXGBE_TDWBAH(0), 4, 0x40, "IXGBE_TDWBAH"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_wakeup[] = {
+ {IXGBE_WUC, 1, 1, "IXGBE_WUC"},
+ {IXGBE_WUFC, 1, 1, "IXGBE_WUFC"},
+ {IXGBE_WUS, 1, 1, "IXGBE_WUS"},
+ {IXGBE_IPAV, 1, 1, "IXGBE_IPAV"},
+ {IXGBE_IP4AT, 1, 1, "IXGBE_IP4AT"},
+ {IXGBE_IP6AT, 1, 1, "IXGBE_IP6AT"},
+ {IXGBE_WUPL, 1, 1, "IXGBE_WUPL"},
+ {IXGBE_WUPM, 1, 1, "IXGBE_WUPM"},
+ {IXGBE_FHFT(0), 1, 1, "IXGBE_FHFT"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_dcb[] = {
+ {IXGBE_RMCS, 1, 1, "IXGBE_RMCS"},
+ {IXGBE_DPMCS, 1, 1, "IXGBE_DPMCS"},
+ {IXGBE_PDPMCS, 1, 1, "IXGBE_PDPMCS"},
+ {IXGBE_RUPPBMR, 1, 1, "IXGBE_RUPPBMR"},
+ {IXGBE_RT2CR(0), 8, 4, "IXGBE_RT2CR"},
+ {IXGBE_RT2SR(0), 8, 4, "IXGBE_RT2SR"},
+ {IXGBE_TDTQ2TCCR(0), 8, 0x40, "IXGBE_TDTQ2TCCR"},
+ {IXGBE_TDTQ2TCSR(0), 8, 0x40, "IXGBE_TDTQ2TCSR"},
+ {IXGBE_TDPT2TCCR(0), 8, 4, "IXGBE_TDPT2TCCR"},
+ {IXGBE_TDPT2TCSR(0), 8, 4, "IXGBE_TDPT2TCSR"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_mac[] = {
+ {IXGBE_PCS1GCFIG, 1, 1, "IXGBE_PCS1GCFIG"},
+ {IXGBE_PCS1GLCTL, 1, 1, "IXGBE_PCS1GLCTL"},
+ {IXGBE_PCS1GLSTA, 1, 1, "IXGBE_PCS1GLSTA"},
+ {IXGBE_PCS1GDBG0, 1, 1, "IXGBE_PCS1GDBG0"},
+ {IXGBE_PCS1GDBG1, 1, 1, "IXGBE_PCS1GDBG1"},
+ {IXGBE_PCS1GANA, 1, 1, "IXGBE_PCS1GANA"},
+ {IXGBE_PCS1GANLP, 1, 1, "IXGBE_PCS1GANLP"},
+ {IXGBE_PCS1GANNP, 1, 1, "IXGBE_PCS1GANNP"},
+ {IXGBE_PCS1GANLPNP, 1, 1, "IXGBE_PCS1GANLPNP"},
+ {IXGBE_HLREG0, 1, 1, "IXGBE_HLREG0"},
+ {IXGBE_HLREG1, 1, 1, "IXGBE_HLREG1"},
+ {IXGBE_PAP, 1, 1, "IXGBE_PAP"},
+ {IXGBE_MACA, 1, 1, "IXGBE_MACA"},
+ {IXGBE_APAE, 1, 1, "IXGBE_APAE"},
+ {IXGBE_ARD, 1, 1, "IXGBE_ARD"},
+ {IXGBE_AIS, 1, 1, "IXGBE_AIS"},
+ {IXGBE_MSCA, 1, 1, "IXGBE_MSCA"},
+ {IXGBE_MSRWD, 1, 1, "IXGBE_MSRWD"},
+ {IXGBE_MLADD, 1, 1, "IXGBE_MLADD"},
+ {IXGBE_MHADD, 1, 1, "IXGBE_MHADD"},
+ {IXGBE_TREG, 1, 1, "IXGBE_TREG"},
+ {IXGBE_PCSS1, 1, 1, "IXGBE_PCSS1"},
+ {IXGBE_PCSS2, 1, 1, "IXGBE_PCSS2"},
+ {IXGBE_XPCSS, 1, 1, "IXGBE_XPCSS"},
+ {IXGBE_SERDESC, 1, 1, "IXGBE_SERDESC"},
+ {IXGBE_MACS, 1, 1, "IXGBE_MACS"},
+ {IXGBE_AUTOC, 1, 1, "IXGBE_AUTOC"},
+ {IXGBE_LINKS, 1, 1, "IXGBE_LINKS"},
+ {IXGBE_AUTOC2, 1, 1, "IXGBE_AUTOC2"},
+ {IXGBE_AUTOC3, 1, 1, "IXGBE_AUTOC3"},
+ {IXGBE_ANLP1, 1, 1, "IXGBE_ANLP1"},
+ {IXGBE_ANLP2, 1, 1, "IXGBE_ANLP2"},
+ {IXGBE_ATLASCTL, 1, 1, "IXGBE_ATLASCTL"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info ixgbe_regs_diagnostic[] = {
+ {IXGBE_RDSTATCTL, 1, 1, "IXGBE_RDSTATCTL"},
+ {IXGBE_RDSTAT(0), 8, 4, "IXGBE_RDSTAT"},
+ {IXGBE_RDHMPN, 1, 1, "IXGBE_RDHMPN"},
+ {IXGBE_RIC_DW(0), 4, 4, "IXGBE_RIC_DW"},
+ {IXGBE_RDPROBE, 1, 1, "IXGBE_RDPROBE"},
+ {IXGBE_TDHMPN, 1, 1, "IXGBE_TDHMPN"},
+ {IXGBE_TIC_DW(0), 4, 4, "IXGBE_TIC_DW"},
+ {IXGBE_TDPROBE, 1, 1, "IXGBE_TDPROBE"},
+ {IXGBE_TXBUFCTRL, 1, 1, "IXGBE_TXBUFCTRL"},
+ {IXGBE_TXBUFDATA0, 1, 1, "IXGBE_TXBUFDATA0"},
+ {IXGBE_TXBUFDATA1, 1, 1, "IXGBE_TXBUFDATA1"},
+ {IXGBE_TXBUFDATA2, 1, 1, "IXGBE_TXBUFDATA2"},
+ {IXGBE_TXBUFDATA3, 1, 1, "IXGBE_TXBUFDATA3"},
+ {IXGBE_RXBUFCTRL, 1, 1, "IXGBE_RXBUFCTRL"},
+ {IXGBE_RXBUFDATA0, 1, 1, "IXGBE_RXBUFDATA0"},
+ {IXGBE_RXBUFDATA1, 1, 1, "IXGBE_RXBUFDATA1"},
+ {IXGBE_RXBUFDATA2, 1, 1, "IXGBE_RXBUFDATA2"},
+ {IXGBE_RXBUFDATA3, 1, 1, "IXGBE_RXBUFDATA3"},
+ {IXGBE_PCIE_DIAG(0), 8, 4, ""},
+ {IXGBE_RFVAL, 1, 1, "IXGBE_RFVAL"},
+ {IXGBE_MDFTC1, 1, 1, "IXGBE_MDFTC1"},
+ {IXGBE_MDFTC2, 1, 1, "IXGBE_MDFTC2"},
+ {IXGBE_MDFTFIFO1, 1, 1, "IXGBE_MDFTFIFO1"},
+ {IXGBE_MDFTFIFO2, 1, 1, "IXGBE_MDFTFIFO2"},
+ {IXGBE_MDFTS, 1, 1, "IXGBE_MDFTS"},
+ {IXGBE_PCIEECCCTL, 1, 1, "IXGBE_PCIEECCCTL"},
+ {IXGBE_PBTXECC, 1, 1, "IXGBE_PBTXECC"},
+ {IXGBE_PBRXECC, 1, 1, "IXGBE_PBRXECC"},
+ {IXGBE_MFLCN, 1, 1, "IXGBE_MFLCN"},
+ {0, 0, 0, ""},
+};
+
+/* PF registers */
+static const struct reg_info *ixgbe_regs_others[] = {
+ ixgbe_regs_general,
+ ixgbe_regs_nvm, ixgbe_regs_interrupt,
+ ixgbe_regs_fctl_others,
+ ixgbe_regs_rxdma,
+ ixgbe_regs_rx,
+ ixgbe_regs_tx,
+ ixgbe_regs_wakeup,
+ ixgbe_regs_dcb,
+ ixgbe_regs_mac,
+ ixgbe_regs_diagnostic,
+ NULL};
+
+static const struct reg_info *ixgbe_regs_mac_82598EB[] = {
+ ixgbe_regs_general,
+ ixgbe_regs_nvm,
+ ixgbe_regs_interrupt,
+ ixgbe_regs_fctl_mac_82598EB,
+ ixgbe_regs_rxdma,
+ ixgbe_regs_rx,
+ ixgbe_regs_tx,
+ ixgbe_regs_wakeup,
+ ixgbe_regs_dcb,
+ ixgbe_regs_mac,
+ ixgbe_regs_diagnostic,
+ NULL};
+
+/* VF registers */
+static const struct reg_info *ixgbevf_regs[] = {
+ ixgbevf_regs_general,
+ ixgbevf_regs_interrupt,
+ ixgbevf_regs_rxdma,
+ ixgbevf_regs_tx,
+ NULL};
+
+static inline int
+ixgbe_read_regs(struct ixgbe_hw *hw, const struct reg_info *reg,
+ uint32_t *reg_buf)
+{
+ unsigned int i;
+
+ for (i = 0; i < reg->count; i++)
+ reg_buf[i] = IXGBE_READ_REG(hw,
+ reg->base_addr + i * reg->stride);
+ return reg->count;
+}
+
+static inline int
+ixgbe_regs_group_count(const struct reg_info *regs)
+{
+ int count = 0;
+ int i = 0;
+
+ while (regs[i].count)
+ count += regs[i++].count;
+ return count;
+}
+
+static inline int
+ixgbe_read_regs_group(struct rte_eth_dev *dev, uint32_t *reg_buf,
+ const struct reg_info *regs)
+{
+ int count = 0;
+ int i = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ while (regs[i].count)
+ count += ixgbe_read_regs(hw, &regs[i++], &reg_buf[count]);
+ return count;
+}
+
+#endif /* _IXGBE_REGS_H_ */
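A sketch of how the tables and inline helpers combine, sizing and then dumping the whole PF register map (the function names here are illustrative; the real consumers are the ethdev get-regs callbacks):

    /* Total number of 32-bit registers described by the PF map. */
    static int
    pf_regs_count(void)
    {
        const struct reg_info **group;
        int count = 0;

        for (group = ixgbe_regs_others; *group != NULL; group++)
            count += ixgbe_regs_group_count(*group);
        return count;
    }

    /* Read every group into one flat buffer sized by pf_regs_count(). */
    static int
    pf_regs_dump(struct rte_eth_dev *dev, uint32_t *buf)
    {
        const struct reg_info **group;
        int count = 0;

        for (group = ixgbe_regs_others; *group != NULL; group++)
            count += ixgbe_read_regs_group(dev, &buf[count], *group);
        return count;
    }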
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx.c b/src/dpdk22/drivers/net/ixgbe/ixgbe_rxtx.c
index e10d6a21..52a263c2 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/src/dpdk22/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* Copyright 2014 6WIND S.A.
* All rights reserved.
*
@@ -53,7 +53,6 @@
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
@@ -71,26 +70,16 @@
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
+#include <rte_ip.h>
#include "ixgbe_logs.h"
-#include "ixgbe/ixgbe_api.h"
-#include "ixgbe/ixgbe_vf.h"
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_vf.h"
#include "ixgbe_ethdev.h"
-#include "ixgbe/ixgbe_dcb.h"
-#include "ixgbe/ixgbe_common.h"
+#include "base/ixgbe_dcb.h"
+#include "base/ixgbe_common.h"
#include "ixgbe_rxtx.h"
-#define IXGBE_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_IPV4_TCP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV4_UDP | \
- ETH_RSS_IPV6_UDP | \
- ETH_RSS_IPV6_UDP_EX)
-
/* Bit Mask to indicate what bits required for building TX context */
#define IXGBE_TX_OFFLOAD_MASK ( \
PKT_TX_VLAN_PKT | \
@@ -133,15 +122,15 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
* Return the total number of buffers freed.
*/
static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
- struct igb_tx_entry *txep;
+ struct ixgbe_tx_entry *txep;
uint32_t status;
int i;
/* check DD bit on threshold descriptor */
status = txq->tx_ring[txq->tx_next_dd].wb.status;
- if (! (status & IXGBE_ADVTXD_STAT_DD))
+ if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
return 0;
/*
@@ -186,11 +175,14 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
- txdp->read.buffer_addr = buf_dma_addr;
+ txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
txdp->read.cmd_type_len =
- ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+ rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
txdp->read.olinfo_status =
- (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
rte_prefetch0(&(*pkts)->pool);
}
}
@@ -206,11 +198,11 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
- txdp->read.buffer_addr = buf_dma_addr;
+ txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txdp->read.cmd_type_len =
- ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+ rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
txdp->read.olinfo_status =
- (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
rte_prefetch0(&(*pkts)->pool);
}
@@ -219,11 +211,11 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
* Copy mbuf pointers to the S/W ring.
*/
static inline void
-ixgbe_tx_fill_hw_ring(struct igb_tx_queue *txq, struct rte_mbuf **pkts,
+ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
uint16_t nb_pkts)
{
volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
- struct igb_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+ struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
const int N_PER_LOOP = 4;
const int N_PER_LOOP_MASK = N_PER_LOOP-1;
int mainpart, leftover;
@@ -255,7 +247,7 @@ static inline uint16_t
tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
uint16_t n = 0;
@@ -363,7 +355,7 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
}
static inline void
-ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
+ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
{
@@ -386,10 +378,15 @@ ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
/* check if TCP segmentation required for this packet */
if (ol_flags & PKT_TX_TCP_SEG) {
- /* implies IP cksum and TCP cksum */
- type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
- IXGBE_ADVTXD_TUCMD_L4T_TCP |
- IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+ /* implies IP cksum in IPv4 */
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
+ IXGBE_ADVTXD_TUCMD_L4T_TCP |
+ IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+ else
+ type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
+ IXGBE_ADVTXD_TUCMD_L4T_TCP |
+ IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
@@ -418,7 +415,6 @@ ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
- tx_offload_mask.l4_len |= ~0;
break;
case PKT_TX_SCTP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
@@ -453,7 +449,7 @@ ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
* or create a new context descriptor.
*/
static inline uint32_t
-what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
+what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
union ixgbe_tx_offload tx_offload)
{
/* If match with the current used context */
@@ -509,14 +505,15 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
/* Reset transmit descriptors after they have been used */
static inline int
-ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
+ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
{
- struct igb_tx_entry *sw_ring = txq->sw_ring;
+ struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
uint16_t desc_to_clean_to;
uint16_t nb_tx_to_clean;
+ uint32_t status;
/* Determine the last descriptor needing to be cleaned */
desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
@@ -525,7 +522,8 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
/* Check to make sure the last descriptor to clean is done */
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
- if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
+ status = txr[desc_to_clean_to].wb.status;
+ if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
{
PMD_TX_FREE_LOG(DEBUG,
"TX descriptor %4u is not done"
@@ -570,11 +568,11 @@ uint16_t
ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct igb_tx_queue *txq;
- struct igb_tx_entry *sw_ring;
- struct igb_tx_entry *txe, *txn;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_tx_entry *sw_ring;
+ struct ixgbe_tx_entry *txe, *txn;
volatile union ixgbe_adv_tx_desc *txr;
- volatile union ixgbe_adv_tx_desc *txd;
+ volatile union ixgbe_adv_tx_desc *txd, *txp;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
uint64_t buf_dma_addr;
@@ -590,18 +588,18 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint64_t tx_ol_req;
uint32_t ctx = 0;
uint32_t new_ctx;
- union ixgbe_tx_offload tx_offload = { .data = 0 };
+ union ixgbe_tx_offload tx_offload = {0};
txq = tx_queue;
sw_ring = txq->sw_ring;
txr = txq->tx_ring;
tx_id = txq->tx_tail;
txe = &sw_ring[tx_id];
+ txp = NULL;
/* Determine if the descriptor ring needs to be cleaned. */
- if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) {
+ if (txq->nb_tx_free < txq->tx_free_thresh)
ixgbe_xmit_cleanup(txq);
- }
rte_prefetch0(&txe->mbuf->pool);
@@ -641,6 +639,12 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
*/
nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
+ if (txp != NULL &&
+ nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
+ /* set RS on the previous packet in the burst */
+ txp->read.cmd_type_len |=
+ rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
+
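This hunk coalesces the RS (Report Status) bit: instead of requesting a descriptor write-back per packet, the driver requests one roughly every tx_rs_thresh descriptors. A sketch of the rule it implements, as a hypothetical stand-alone helper:

/* Returns nonzero when the pending descriptor count is about to cross
 * the write-back threshold, i.e. when RS must be set on the previous
 * packet's last descriptor. */
static inline int tx_needs_rs(uint16_t nb_used, uint16_t nb_tx_used,
			      uint16_t tx_rs_thresh)
{
	return nb_used + nb_tx_used >= tx_rs_thresh;
}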
/*
* The number of descriptors that must be allocated for a
* packet is the number of segments of that packet, plus 1
@@ -843,10 +847,18 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Update txq RS bit counters */
txq->nb_tx_used = 0;
- }
+ txp = NULL;
+ } else
+ txp = txd;
+
txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
}
+
end_of_tx:
+ /* set RS on last packet in the burst */
+ if (txp != NULL)
+ txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
+
rte_wmb();
/*
@@ -866,40 +878,107 @@ end_of_tx:
* RX functions
*
**********************************************************************/
-static inline uint64_t
-rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
+#define IXGBE_PACKET_TYPE_IPV4 0X01
+#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
+#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
+#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
+#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
+#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
+#define IXGBE_PACKET_TYPE_IPV6 0X04
+#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
+#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
+#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
+#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
+#define IXGBE_PACKET_TYPE_MAX 0X80
+#define IXGBE_PACKET_TYPE_MASK 0X7F
+#define IXGBE_PACKET_TYPE_SHIFT 0X04
+static inline uint32_t
+ixgbe_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
{
- uint64_t pkt_flags;
-
- static uint64_t ip_pkt_types_map[16] = {
- 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
- PKT_RX_IPV6_HDR, 0, 0, 0,
- PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
- PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+ static const uint32_t
+ ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
+ [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4,
+ [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
};
+ if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
+ return RTE_PTYPE_UNKNOWN;
+
+ pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) &
+ IXGBE_PACKET_TYPE_MASK;
- static uint64_t ip_rss_types_map[16] = {
+ return ptype_table[pkt_info];
+}
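A worked example of the decode above: a descriptor with pkt_info = 0x0110 and no ETQF match yields (0x0110 >> 4) & 0x7F = 0x11 = IXGBE_PACKET_TYPE_IPV4_TCP:

/* illustrative value only */
uint32_t ptype = ixgbe_rxd_pkt_info_to_pkt_type(0x0110);
/* ptype == (RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP) */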
+
+static inline uint64_t
+ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
+{
+ static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, 0, 0, 0,
0, 0, 0, PKT_RX_FDIR,
};
-
#ifdef RTE_LIBRTE_IEEE1588
static uint64_t ip_pkt_etqf_map[8] = {
0, 0, 0, PKT_RX_IEEE1588_PTP,
0, 0, 0, 0,
};
- pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
- ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
- ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+ if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
+ return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
+ ip_rss_types_map[pkt_info & 0XF];
+ else
+ return ip_rss_types_map[pkt_info & 0XF];
#else
- pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
- ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
-
+ return ip_rss_types_map[pkt_info & 0XF];
#endif
- return pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF];
}
static inline uint64_t
@@ -936,7 +1015,6 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/*
* LOOK_AHEAD defines how many desc statuses to check beyond the
* current descriptor.
@@ -949,23 +1027,26 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
#endif
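The scan below batches status reads: it snapshots LOOK_AHEAD (8) descriptor statuses, then counts the leading DD (Descriptor Done) bits so only fully completed packets are harvested. A minimal sketch of that leading-count step (the counting loop itself is elided from this hunk, so this is an assumption about its shape):

uint32_t s[LOOK_AHEAD];	/* statuses snapshotted from the ring */
int nb_dd;

/* count contiguous completed descriptors from the front of the window */
for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
     (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
	;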
static inline int
-ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
+ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *rxep;
+ struct ixgbe_rx_entry *rxep;
struct rte_mbuf *mb;
uint16_t pkt_len;
uint64_t pkt_flags;
- int s[LOOK_AHEAD], nb_dd;
+ int nb_dd;
+ uint32_t s[LOOK_AHEAD];
+ uint16_t pkt_info[LOOK_AHEAD];
int i, j, nb_rx = 0;
-
+ uint32_t status;
/* get references to current descriptor and S/W ring entry */
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
+ status = rxdp->wb.upper.status_error;
/* check to make sure there is at least 1 packet to receive */
- if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
+ if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
return 0;
/*
@@ -977,7 +1058,11 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
{
/* Read desc statuses backwards to avoid race condition */
for (j = LOOK_AHEAD-1; j >= 0; --j)
- s[j] = rxdp[j].wb.upper.status_error;
+ s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
+
+ for (j = LOOK_AHEAD - 1; j >= 0; --j)
+ pkt_info[j] = rxdp[j].wb.lower.lo_dword.
+ hs_rss.pkt_info;
/* Compute how many status bits were set */
nb_dd = 0;
@@ -989,27 +1074,30 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
/* Translate descriptor info to mbuf format */
for (j = 0; j < nb_dd; ++j) {
mb = rxep[j].mbuf;
- pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
+ pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
+ rxq->crc_len;
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
- mb->vlan_tci = rxdp[j].wb.upper.vlan;
+ mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
/* convert descriptor fields to rte mbuf flags */
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(
- rxdp[j].wb.lower.lo_dword.data);
- /* reuse status field from scan list */
- pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
+ pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+ pkt_flags |=
+ ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
mb->ol_flags = pkt_flags;
+ mb->packet_type =
+ ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+ mb->hash.rss = rte_le_to_cpu_32(
+ rxdp[j].wb.lower.hi_dword.rss);
else if (pkt_flags & PKT_RX_FDIR) {
- mb->hash.fdir.hash =
- (uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
- & IXGBE_ATR_HASH_MASK);
- mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+ mb->hash.fdir.hash = rte_le_to_cpu_16(
+ rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
+ IXGBE_ATR_HASH_MASK;
+ mb->hash.fdir.id = rte_le_to_cpu_16(
+ rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
}
}
@@ -1033,18 +1121,17 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
}
static inline int
-ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
+ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *rxep;
+ struct ixgbe_rx_entry *rxep;
struct rte_mbuf *mb;
uint16_t alloc_idx;
- uint64_t dma_addr;
+ __le64 dma_addr;
int diag, i;
/* allocate buffers in bulk directly into the S/W ring */
- alloc_idx = (uint16_t)(rxq->rx_free_trigger -
- (rxq->rx_free_thresh - 1));
+ alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
rxep = &rxq->sw_ring[alloc_idx];
diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
rxq->rx_free_thresh);
@@ -1055,34 +1142,32 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
for (i = 0; i < rxq->rx_free_thresh; ++i) {
/* populate the static rte mbuf fields */
mb = rxep[i].mbuf;
+ if (reset_mbuf) {
+ mb->next = NULL;
+ mb->nb_segs = 1;
+ mb->port = rxq->port_id;
+ }
+
rte_mbuf_refcnt_set(mb, 1);
- mb->next = NULL;
mb->data_off = RTE_PKTMBUF_HEADROOM;
- mb->nb_segs = 1;
- mb->port = rxq->port_id;
/* populate the descriptors */
- dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
- rxdp[i].read.hdr_addr = dma_addr;
+ dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
+ rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
- /* update tail pointer */
- rte_wmb();
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rxq->rx_free_trigger);
-
/* update state of internal queue structure */
- rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_trigger +
- rxq->rx_free_thresh);
+ rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
- rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+ rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
/* no errors */
return 0;
}
static inline uint16_t
-ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
@@ -1106,7 +1191,7 @@ static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq = (struct igb_rx_queue *)rx_queue;
+ struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
uint16_t nb_rx = 0;
/* Any previously recv'd pkts will be returned from the Rx stage */
@@ -1123,7 +1208,9 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* if required, allocate new buffers to replenish descriptors */
if (rxq->rx_tail > rxq->rx_free_trigger) {
- if (ixgbe_rx_alloc_bufs(rxq) != 0) {
+ uint16_t cur_free_trigger = rxq->rx_free_trigger;
+
+ if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
int i, j;
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned) rxq->port_id,
@@ -1143,6 +1230,10 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return 0;
}
+
+ /* update tail pointer */
+ rte_wmb();
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
}
if (rxq->rx_tail >= rxq->nb_rx_desc)
@@ -1156,7 +1247,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
}
/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
-uint16_t
+static uint16_t
ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
@@ -1182,23 +1273,22 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
return nb_rx;
}
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
uint16_t
ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
volatile union ixgbe_adv_rx_desc *rx_ring;
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *sw_ring;
- struct igb_rx_entry *rxe;
+ struct ixgbe_rx_entry *sw_ring;
+ struct ixgbe_rx_entry *rxe;
struct rte_mbuf *rxm;
struct rte_mbuf *nmb;
union ixgbe_adv_rx_desc rxd;
uint64_t dma_addr;
uint32_t staterr;
- uint32_t hlen_type_rss;
+ uint32_t pkt_info;
uint16_t pkt_len;
uint16_t rx_id;
uint16_t nb_rx;
@@ -1222,7 +1312,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
rxdp = &rx_ring[rx_id];
staterr = rxdp->wb.upper.status_error;
- if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+ if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
break;
rxd = *rxdp;
@@ -1290,7 +1380,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxe->mbuf = nmb;
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
- rxdp->read.hdr_addr = dma_addr;
+ rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
/*
@@ -1316,22 +1406,27 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->data_len = pkt_len;
rxm->port = rxq->port_id;
- hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+ pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss.
+ pkt_info);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr);
pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags = pkt_flags |
+ ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
rxm->ol_flags = pkt_flags;
+ rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+ rxm->hash.rss = rte_le_to_cpu_32(
+ rxd.wb.lower.hi_dword.rss);
else if (pkt_flags & PKT_RX_FDIR) {
- rxm->hash.fdir.hash =
- (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
- & IXGBE_ATR_HASH_MASK);
- rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+ rxm->hash.fdir.hash = rte_le_to_cpu_16(
+ rxd.wb.lower.hi_dword.csum_ip.csum) &
+ IXGBE_ATR_HASH_MASK;
+ rxm->hash.fdir.id = rte_le_to_cpu_16(
+ rxd.wb.lower.hi_dword.csum_ip.ip_id);
}
/*
* Store the mbuf address into the next entry of the array
@@ -1366,130 +1461,259 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return (nb_rx);
}
-uint16_t
-ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+/**
+ * Detect an RSC descriptor.
+ */
+static inline uint32_t
+ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
{
- struct igb_rx_queue *rxq;
- volatile union ixgbe_adv_rx_desc *rx_ring;
- volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *sw_ring;
- struct igb_rx_entry *rxe;
- struct rte_mbuf *first_seg;
- struct rte_mbuf *last_seg;
- struct rte_mbuf *rxm;
- struct rte_mbuf *nmb;
- union ixgbe_adv_rx_desc rxd;
- uint64_t dma; /* Physical address of mbuf data buffer */
- uint32_t staterr;
- uint32_t hlen_type_rss;
- uint16_t rx_id;
- uint16_t nb_rx;
- uint16_t nb_hold;
- uint16_t data_len;
+ return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
+ IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
+}
+
+/**
+ * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
+ *
+ * Fill the following info in the HEAD buffer of the Rx cluster:
+ * - RX port identifier
+ * - hardware offload data, if any:
+ * - RSS flag & hash
+ * - IP checksum flag
+ * - VLAN TCI, if any
+ * - error flags
+ * @head HEAD of the packet cluster
+ * @desc HW descriptor to get data from
+ * @port_id Port ID of the Rx queue
+ */
+static inline void
+ixgbe_fill_cluster_head_buf(
+ struct rte_mbuf *head,
+ union ixgbe_adv_rx_desc *desc,
+ uint8_t port_id,
+ uint32_t staterr)
+{
+ uint16_t pkt_info;
uint64_t pkt_flags;
- nb_rx = 0;
- nb_hold = 0;
- rxq = rx_queue;
- rx_id = rxq->rx_tail;
- rx_ring = rxq->rx_ring;
- sw_ring = rxq->sw_ring;
+ head->port = port_id;
- /*
- * Retrieve RX context of current packet, if any.
+ /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
+ * set in the pkt_flags field.
*/
- first_seg = rxq->pkt_first_seg;
- last_seg = rxq->pkt_last_seg;
+ head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
+ pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.hs_rss.pkt_info);
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
+ head->ol_flags = pkt_flags;
+ head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
+ else if (pkt_flags & PKT_RX_FDIR) {
+ head->hash.fdir.hash =
+ rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
+ & IXGBE_ATR_HASH_MASK;
+ head->hash.fdir.id =
+ rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
+ }
+}
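Once ixgbe_fill_cluster_head_buf() has run, the HEAD mbuf carries all per-packet metadata. A minimal consumer sketch on the application side; the helper name is hypothetical:

#include <stdio.h>
#include <rte_mbuf.h>

static void show_rx_meta(const struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_RSS_HASH)
		printf("rss=0x%08x\n", (unsigned)m->hash.rss);
	else if (m->ol_flags & PKT_RX_FDIR)
		printf("fdir hash=0x%04x id=%u\n",
		       (unsigned)m->hash.fdir.hash,
		       (unsigned)m->hash.fdir.id);
}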
- while (nb_rx < nb_pkts) {
- next_desc:
- /*
- * The order of operations here is important as the DD status
- * bit must not be read after any other descriptor fields.
- * rx_ring and rxdp are pointing to volatile data so the order
- * of accesses cannot be reordered by the compiler. If they were
- * not volatile, they could be reordered which could lead to
- * using invalid descriptor fields when read from rxd.
- */
- rxdp = &rx_ring[rx_id];
- staterr = rxdp->wb.upper.status_error;
- if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
- break;
- rxd = *rxdp;
+/**
+ * ixgbe_recv_pkts_lro - receive handler for the LRO case.
+ *
+ * @rx_queue Rx queue handle
+ * @rx_pkts table of received packets
+ * @nb_pkts size of rx_pkts table
+ * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
+ *
+ * Handles the Rx HW ring completions when RSC feature is configured. Uses an
+ * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
+ *
+ * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
+ * 1) When non-EOP RSC completion arrives:
+ * a) Update the HEAD of the current RSC aggregation cluster with the new
+ * segment's data length.
+ * b) Set the "next" pointer of the current segment to point to the segment
+ * at the NEXTP index.
+ * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
+ * in the sw_rsc_ring.
+ * 2) When EOP arrives we just update the cluster's total length and offload
+ * flags and deliver the cluster up to the upper layers. In our case - put it
+ * in the rx_pkts table.
+ *
+ * Returns the number of received packets/clusters (according to the "bulk
+ * receive" interface).
+ */
+static inline uint16_t
+ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+ bool bulk_alloc)
+{
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
+ struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
+ struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
+ uint16_t rx_id = rxq->rx_tail;
+ uint16_t nb_rx = 0;
+ uint16_t nb_hold = rxq->nb_rx_hold;
+ uint16_t prev_id = rxq->rx_tail;
+ while (nb_rx < nb_pkts) {
+ bool eop;
+ struct ixgbe_rx_entry *rxe;
+ struct ixgbe_scattered_rx_entry *sc_entry;
+ struct ixgbe_scattered_rx_entry *next_sc_entry;
+ struct ixgbe_rx_entry *next_rxe;
+ struct rte_mbuf *first_seg;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ union ixgbe_adv_rx_desc rxd;
+ uint16_t data_len;
+ uint16_t next_id;
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ uint32_t staterr;
+
+next_desc:
/*
- * Descriptor done.
+ * The code in this whole file uses the volatile pointer to
+ * ensure the read ordering of the status and the rest of the
+ * descriptor fields (on the compiler level only!!!). This is so
+ * UGLY - why not just use the compiler barrier instead? DPDK
+ * even has the rte_compiler_barrier() for that.
*
- * Allocate a new mbuf to replenish the RX ring descriptor.
- * If the allocation fails:
- * - arrange for that RX descriptor to be the first one
- * being parsed the next time the receive function is
- * invoked [on the same queue].
+ * But most importantly this is just wrong because this doesn't
+ * ensure memory ordering in the general case at all. For
+ * instance, DPDK is supposed to work on Power CPUs where
+ * a compiler barrier may just not be enough!
*
- * - Stop parsing the RX ring and return immediately.
+ * I tried to write only this function properly to have a
+ * starting point (as a part of an LRO/RSC series) but the
+ * compiler cursed at me when I tried to cast away the
+ * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
+ * keeping it the way it is for now.
*
- * This policy does not drop the packet received in the RX
- * descriptor for which the allocation of a new mbuf failed.
- * Thus, it allows that packet to be later retrieved if
- * mbuf have been freed in the mean time.
- * As a side effect, holding RX descriptors instead of
- * systematically giving them back to the NIC may lead to
- * RX ring exhaustion situations.
- * However, the NIC can gracefully prevent such situations
- * to happen by sending specific "back-pressure" flow control
- * frames to its peer(s).
+ * The code in this file is broken in so many other places and
+ * will just not work on a big endian CPU anyway therefore the
+ * lines below will have to be revisited together with the rest
+ * of the ixgbe PMD.
+ *
+ * TODO:
+ * - Get rid of "volatile" crap and let the compiler do its
+ * job.
+ * - Use the proper memory barrier (rte_rmb()) to ensure the
+ * memory ordering below.
*/
- PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
- "staterr=0x%x data_len=%u",
- (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
- (unsigned) rx_id, (unsigned) staterr,
- (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+ rxdp = &rx_ring[rx_id];
+ staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
- nmb = rte_rxmbuf_alloc(rxq->mb_pool);
- if (nmb == NULL) {
- PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
- "queue_id=%u", (unsigned) rxq->port_id,
- (unsigned) rxq->queue_id);
- rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ if (!(staterr & IXGBE_RXDADV_STAT_DD))
break;
+
+ rxd = *rxdp;
+
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "staterr=0x%x data_len=%u",
+ rxq->port_id, rxq->queue_id, rx_id, staterr,
+ rte_le_to_cpu_16(rxd.wb.upper.length));
+
+ if (!bulk_alloc) {
+ nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
+ "port_id=%u queue_id=%u",
+ rxq->port_id, rxq->queue_id);
+
+ rte_eth_devices[rxq->port_id].data->
+ rx_mbuf_alloc_failed++;
+ break;
+ }
+ }
+ else if (nb_hold > rxq->rx_free_thresh) {
+ uint16_t next_rdt = rxq->rx_free_trigger;
+
+ if (!ixgbe_rx_alloc_bufs(rxq, false)) {
+ rte_wmb();
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
+ next_rdt);
+ nb_hold -= rxq->rx_free_thresh;
+ } else {
+ PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
+ "port_id=%u queue_id=%u",
+ rxq->port_id, rxq->queue_id);
+
+ rte_eth_devices[rxq->port_id].data->
+ rx_mbuf_alloc_failed++;
+ break;
+ }
}
nb_hold++;
rxe = &sw_ring[rx_id];
- rx_id++;
- if (rx_id == rxq->nb_rx_desc)
- rx_id = 0;
+ eop = staterr & IXGBE_RXDADV_STAT_EOP;
+
+ next_id = rx_id + 1;
+ if (next_id == rxq->nb_rx_desc)
+ next_id = 0;
/* Prefetch next mbuf while processing current one. */
- rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
+ rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
/*
* When next RX descriptor is on a cache-line boundary,
- * prefetch the next 4 RX descriptors and the next 8 pointers
+ * prefetch the next 4 RX descriptors and the next 4 pointers
* to mbufs.
*/
- if ((rx_id & 0x3) == 0) {
- rte_ixgbe_prefetch(&rx_ring[rx_id]);
- rte_ixgbe_prefetch(&sw_ring[rx_id]);
+ if ((next_id & 0x3) == 0) {
+ rte_ixgbe_prefetch(&rx_ring[next_id]);
+ rte_ixgbe_prefetch(&sw_ring[next_id]);
}
- /*
- * Update RX descriptor with the physical address of the new
- * data buffer of the new allocated mbuf.
- */
rxm = rxe->mbuf;
- rxe->mbuf = nmb;
- dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
- rxdp->read.hdr_addr = dma;
- rxdp->read.pkt_addr = dma;
+
+ if (!bulk_alloc) {
+ __le64 dma =
+ rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ /*
+ * Update RX descriptor with the physical address of the
+ * new data buffer of the new allocated mbuf.
+ */
+ rxe->mbuf = nmb;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma;
+ } else
+ rxe->mbuf = NULL;
/*
* Set data length & data buffer address of mbuf.
*/
data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
rxm->data_len = data_len;
- rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ if (!eop) {
+ uint16_t nextp_id;
+ /*
+ * Get next descriptor index:
+ * - For RSC it's in the NEXTP field.
+ * - For a scattered packet - it's just a following
+ * descriptor.
+ */
+ if (ixgbe_rsc_count(&rxd))
+ nextp_id =
+ (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
+ IXGBE_RXDADV_NEXTP_SHIFT;
+ else
+ nextp_id = next_id;
+
+ next_sc_entry = &sw_sc_ring[nextp_id];
+ next_rxe = &sw_ring[nextp_id];
+ rte_ixgbe_prefetch(next_rxe);
+ }
+
+ sc_entry = &sw_sc_ring[rx_id];
+ first_seg = sc_entry->fbuf;
+ sc_entry->fbuf = NULL;
/*
* If this is the first buffer of the received packet,
@@ -1504,80 +1728,52 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
first_seg->pkt_len = data_len;
first_seg->nb_segs = 1;
} else {
- first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
- + data_len);
+ first_seg->pkt_len += data_len;
first_seg->nb_segs++;
- last_seg->next = rxm;
}
+ prev_id = rx_id;
+ rx_id = next_id;
+
/*
- * If this is not the last buffer of the received packet,
- * update the pointer to the last mbuf of the current scattered
- * packet and continue to parse the RX ring.
+ * If this is not the last buffer of the received packet, update
+ * the pointer to the first mbuf at the NEXTP entry in the
+ * sw_sc_ring and continue to parse the RX ring.
*/
- if (! (staterr & IXGBE_RXDADV_STAT_EOP)) {
- last_seg = rxm;
+ if (!eop) {
+ rxm->next = next_rxe->mbuf;
+ next_sc_entry->fbuf = first_seg;
goto next_desc;
}
/*
- * This is the last buffer of the received packet.
- * If the CRC is not stripped by the hardware:
- * - Subtract the CRC length from the total packet length.
- * - If the last buffer only contains the whole CRC or a part
- * of it, free the mbuf associated to the last buffer.
- * If part of the CRC is also contained in the previous
- * mbuf, subtract the length of that CRC part from the
- * data length of the previous mbuf.
+ * This is the last buffer of the received packet - return
+ * the current cluster to the user.
*/
rxm->next = NULL;
- if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt_len -= ETHER_CRC_LEN;
- if (data_len <= ETHER_CRC_LEN) {
- rte_pktmbuf_free_seg(rxm);
- first_seg->nb_segs--;
- last_seg->data_len = (uint16_t)
- (last_seg->data_len -
- (ETHER_CRC_LEN - data_len));
- last_seg->next = NULL;
- } else
- rxm->data_len =
- (uint16_t) (data_len - ETHER_CRC_LEN);
- }
- /*
- * Initialize the first mbuf of the returned packet:
- * - RX port identifier,
- * - hardware offload data, if any:
- * - RSS flag & hash,
- * - IP checksum flag,
- * - VLAN TCI, if any,
- * - error flags.
- */
- first_seg->port = rxq->port_id;
+ /* Initialize the first mbuf of the returned packet */
+ ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
+ staterr);
/*
- * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
- * set in the pkt_flags field.
+ * Deal with the case when HW CRC stripping is disabled.
+ * That can't happen when LRO is enabled, but still could
+ * happen for scattered RX mode.
*/
- first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
- hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags = (pkt_flags |
- rx_desc_status_to_pkt_flags(staterr));
- pkt_flags = (pkt_flags |
- rx_desc_error_to_pkt_flags(staterr));
- first_seg->ol_flags = pkt_flags;
+ first_seg->pkt_len -= rxq->crc_len;
+ if (unlikely(rxm->data_len <= rxq->crc_len)) {
+ struct rte_mbuf *lp;
- if (likely(pkt_flags & PKT_RX_RSS_HASH))
- first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
- else if (pkt_flags & PKT_RX_FDIR) {
- first_seg->hash.fdir.hash =
- (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
- & IXGBE_ATR_HASH_MASK);
- first_seg->hash.fdir.id =
- rxd.wb.lower.hi_dword.csum_ip.ip_id;
- }
+ for (lp = first_seg; lp->next != rxm; lp = lp->next)
+ ;
+
+ first_seg->nb_segs--;
+ lp->data_len -= rxq->crc_len - rxm->data_len;
+ lp->next = NULL;
+ rte_pktmbuf_free_seg(rxm);
+ } else
+ rxm->data_len -= rxq->crc_len;
/* Prefetch data of first segment, if configured to do so. */
rte_packet_prefetch((char *)first_seg->buf_addr +
@@ -1588,11 +1784,6 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* of returned packets.
*/
rx_pkts[nb_rx++] = first_seg;
-
- /*
- * Setup receipt context for a new packet.
- */
- first_seg = NULL;
}
/*
@@ -1601,12 +1792,6 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxq->rx_tail = rx_id;
/*
- * Save receive context.
- */
- rxq->pkt_first_seg = first_seg;
- rxq->pkt_last_seg = last_seg;
-
- /*
* If the number of free RX descriptors is greater than the RX free
* threshold of the queue, advance the Receive Descriptor Tail (RDT)
* register.
@@ -1615,20 +1800,32 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
- nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
- if (nb_hold > rxq->rx_free_thresh) {
+ if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
"nb_hold=%u nb_rx=%u",
- (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
- (unsigned) rx_id, (unsigned) nb_hold,
- (unsigned) nb_rx);
- rx_id = (uint16_t) ((rx_id == 0) ?
- (rxq->nb_rx_desc - 1) : (rx_id - 1));
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
+
+ rte_wmb();
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
nb_hold = 0;
}
+
rxq->nb_rx_hold = nb_hold;
- return (nb_rx);
+ return nb_rx;
+}
+
+uint16_t
+ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
+}
+
+uint16_t
+ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
}
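The two thin wrappers above let the driver publish either variant as the device's rx_pkt_burst handler (presumably selected elsewhere based on adapter->rx_bulk_alloc_allowed); the application-facing call is unchanged. A minimal sketch, assuming an already configured and started port; the helper name is hypothetical:

#include <rte_ethdev.h>

static uint16_t poll_lro_queue(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);

	/* pkts[0..nb-1] are reassembled LRO/RSC clusters, one mbuf chain each */
	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(pkts[i]);
	return nb;
}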
/*********************************************************************
@@ -1637,56 +1834,8 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*
**********************************************************************/
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
- * also optimize cache line size effect. H/W supports up to cache line size 128.
- */
-#define IXGBE_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * descriptors should meet the following condition:
- * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
- */
-#define IXGBE_MIN_RING_DESC 32
-#define IXGBE_MAX_RING_DESC 4096
-
-/*
- * Create memzone for HW rings. malloc can't be used as the physical address is
- * needed. If the memzone is already created, then this function returns a ptr
- * to the old one.
- */
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name,
- dev->data->port_id, queue_id);
-
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(z_name, ring_size,
- socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, IXGBE_ALIGN);
-#endif
-}
-
-static void
-ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+static void __attribute__((cold))
+ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
{
unsigned i;
@@ -1700,16 +1849,16 @@ ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
}
}
-static void
-ixgbe_tx_free_swring(struct igb_tx_queue *txq)
+static void __attribute__((cold))
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
if (txq != NULL &&
txq->sw_ring != NULL)
rte_free(txq->sw_ring);
}
-static void
-ixgbe_tx_queue_release(struct igb_tx_queue *txq)
+static void __attribute__((cold))
+ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
{
if (txq != NULL && txq->ops != NULL) {
txq->ops->release_mbufs(txq);
@@ -1718,19 +1867,18 @@ ixgbe_tx_queue_release(struct igb_tx_queue *txq)
}
}
-void
+void __attribute__((cold))
ixgbe_dev_tx_queue_release(void *txq)
{
ixgbe_tx_queue_release(txq);
}
-/* (Re)set dynamic igb_tx_queue fields to defaults */
-static void
-ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
+/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
+static void __attribute__((cold))
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
- static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
- .buffer_addr = 0}};
- struct igb_tx_entry *txe = txq->sw_ring;
+ static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
+ struct ixgbe_tx_entry *txe = txq->sw_ring;
uint16_t prev, i;
/* Zero out HW ring memory */
@@ -1742,7 +1890,7 @@ ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
prev = (uint16_t) (txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
- txd->wb.status = IXGBE_TXD_STAT_DD;
+ txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
txe[i].mbuf = NULL;
txe[i].last_id = i;
txe[prev].next_id = i;
@@ -1765,7 +1913,7 @@ ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}
-static struct ixgbe_txq_ops def_txq_ops = {
+static const struct ixgbe_txq_ops def_txq_ops = {
.release_mbufs = ixgbe_tx_queue_release_mbufs,
.free_swring = ixgbe_tx_free_swring,
.reset = ixgbe_reset_tx_queue,
@@ -1775,29 +1923,29 @@ static struct ixgbe_txq_ops def_txq_ops = {
* the queue parameters. Used in tx_queue_setup by primary process and then
* in dev_init by secondary process when attaching to an existing ethdev.
*/
-void
-set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq)
+void __attribute__((cold))
+ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
- PMD_INIT_LOG(INFO, "Using simple tx code path");
+ PMD_INIT_LOG(DEBUG, "Using simple tx code path");
#ifdef RTE_IXGBE_INC_VECTOR
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
ixgbe_txq_vec_setup(txq) == 0)) {
- PMD_INIT_LOG(INFO, "Vector tx enabled.");
+ PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
} else
#endif
dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
} else {
- PMD_INIT_LOG(INFO, "Using full-featured tx code path");
- PMD_INIT_LOG(INFO,
+ PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
+ PMD_INIT_LOG(DEBUG,
" - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
(unsigned long)txq->txq_flags,
(unsigned long)IXGBE_SIMPLE_FLAGS);
- PMD_INIT_LOG(INFO,
+ PMD_INIT_LOG(DEBUG,
" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
(unsigned long)txq->tx_rs_thresh,
(unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
@@ -1805,7 +1953,7 @@ set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq)
}
}
-int
+int __attribute__((cold))
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
@@ -1813,7 +1961,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
const struct rte_eth_txconf *tx_conf)
{
const struct rte_memzone *tz;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
struct ixgbe_hw *hw;
uint16_t tx_rs_thresh, tx_free_thresh;
@@ -1825,9 +1973,9 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
* It must not exceed hardware maximum, and must be multiple
* of IXGBE_ALIGN.
*/
- if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
- (nb_desc > IXGBE_MAX_RING_DESC) ||
- (nb_desc < IXGBE_MIN_RING_DESC)) {
+ if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
+ (nb_desc > IXGBE_MAX_RING_DESC) ||
+ (nb_desc < IXGBE_MIN_RING_DESC)) {
return -EINVAL;
}
@@ -1857,9 +2005,16 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
if (tx_rs_thresh >= (nb_desc - 2)) {
PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
- "of TX descriptors minus 2. (tx_rs_thresh=%u "
- "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
- (int)dev->data->port_id, (int)queue_idx);
+ "of TX descriptors minus 2. (tx_rs_thresh=%u "
+ "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+ if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to %u. "
+ "(tx_rs_thresh=%u port=%d queue=%d)",
+ DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
if (tx_free_thresh >= (nb_desc - 3)) {
@@ -1910,7 +2065,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
}
/* First allocate the tx queue data structure */
- txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue),
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (txq == NULL)
return (-ENOMEM);
@@ -1920,9 +2075,9 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
- socket_id);
+ IXGBE_ALIGN, socket_id);
if (tz == NULL) {
ixgbe_tx_queue_release(txq);
return (-ENOMEM);
@@ -1946,20 +2101,19 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
* Modification to set VFTDT for virtual function if vf is detected
*/
if (hw->mac.type == ixgbe_mac_82599_vf ||
- hw->mac.type == ixgbe_mac_X540_vf)
+ hw->mac.type == ixgbe_mac_X540_vf ||
+ hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_X550EM_x_vf)
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
else
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-#else
+
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#endif
txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
/* Allocate software ring */
txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
- sizeof(struct igb_tx_entry) * nb_desc,
+ sizeof(struct ixgbe_tx_entry) * nb_desc,
RTE_CACHE_LINE_SIZE, socket_id);
if (txq->sw_ring == NULL) {
ixgbe_tx_queue_release(txq);
@@ -1969,7 +2123,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
/* set up vector or scalar TX function as appropriate */
- set_tx_function(dev, txq);
+ ixgbe_set_tx_function(dev, txq);
txq->ops->reset(txq);
@@ -1979,11 +2133,44 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
return (0);
}
-static void
-ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
+/**
+ * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
+ *
+ * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
+ * in the sw_rsc_ring is not set to NULL but rather points to the next
+ * mbuf of this RSC aggregation (that has not been completed yet and still
+ * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we
+ * will just free first "nb_segs" segments of the cluster explicitly by calling
+ * an rte_pktmbuf_free_seg().
+ *
+ * @m scattered cluster head
+ */
+static void __attribute__((cold))
+ixgbe_free_sc_cluster(struct rte_mbuf *m)
+{
+ uint8_t i, nb_segs = m->nb_segs;
+ struct rte_mbuf *next_seg;
+
+ for (i = 0; i < nb_segs; i++) {
+ next_seg = m->next;
+ rte_pktmbuf_free_seg(m);
+ m = next_seg;
+ }
+}
+
+static void __attribute__((cold))
+ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
{
unsigned i;
+#ifdef RTE_IXGBE_INC_VECTOR
+ /* SSE Vector driver has a different way of releasing mbufs. */
+ if (rxq->rx_using_sse) {
+ ixgbe_rx_queue_release_mbufs_vec(rxq);
+ return;
+ }
+#endif
+
if (rxq->sw_ring != NULL) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
if (rxq->sw_ring[i].mbuf != NULL) {
@@ -1991,7 +2178,6 @@ ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
rxq->sw_ring[i].mbuf = NULL;
}
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
if (rxq->rx_nb_avail) {
for (i = 0; i < rxq->rx_nb_avail; ++i) {
struct rte_mbuf *mb;
@@ -2000,21 +2186,28 @@ ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
}
rxq->rx_nb_avail = 0;
}
-#endif
}
+
+ if (rxq->sw_sc_ring)
+ for (i = 0; i < rxq->nb_rx_desc; i++)
+ if (rxq->sw_sc_ring[i].fbuf) {
+ ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
+ rxq->sw_sc_ring[i].fbuf = NULL;
+ }
}
-static void
-ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
+static void __attribute__((cold))
+ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
{
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
rte_free(rxq->sw_ring);
+ rte_free(rxq->sw_sc_ring);
rte_free(rxq);
}
}
-void
+void __attribute__((cold))
ixgbe_dev_rx_queue_release(void *rxq)
{
ixgbe_rx_queue_release(rxq);
@@ -2028,12 +2221,8 @@ ixgbe_dev_rx_queue_release(void *rxq)
* -EINVAL: the preconditions are NOT satisfied and the default Rx burst
* function must be used.
*/
-static inline int
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)
-#else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
-#endif
+static inline int __attribute__((cold))
+check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
{
int ret = 0;
@@ -2046,7 +2235,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
* Scattered packets are not supported. This should be checked
* outside of this function.
*/
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
"rxq->rx_free_thresh=%d, "
@@ -2075,21 +2263,17 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
RTE_PMD_IXGBE_RX_MAX_BURST);
ret = -EINVAL;
}
-#else
- ret = -EINVAL;
-#endif
return ret;
}
-/* Reset dynamic igb_rx_queue fields back to defaults */
-static void
-ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
+/* Reset dynamic ixgbe_rx_queue fields back to defaults */
+static void __attribute__((cold))
+ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
{
- static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
- .pkt_addr = 0}};
+ static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
unsigned i;
- uint16_t len;
+ uint16_t len = rxq->nb_rx_desc;
/*
* By default, the Rx queue setup function allocates enough memory for
@@ -2101,14 +2285,9 @@ ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
* constraints here to see if we need to zero out memory after the end
* of the H/W descriptor ring.
*/
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
- if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
+ if (adapter->rx_bulk_alloc_allowed)
/* zero out extra memory */
- len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
- else
-#endif
- /* do not zero out extra memory */
- len = rxq->nb_rx_desc;
+ len += RTE_PMD_IXGBE_RX_MAX_BURST;
/*
* Zero out HW ring memory. Zero out extra memory at the end of
@@ -2119,27 +2298,30 @@ ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
rxq->rx_ring[i] = zeroed_desc;
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/*
* initialize extra software ring entries. Space for these extra
* entries is always allocated
*/
memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
- for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST; ++i) {
- rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+ for (i = rxq->nb_rx_desc; i < len; ++i) {
+ rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
}
rxq->rx_nb_avail = 0;
rxq->rx_next_avail = 0;
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
+
+#ifdef RTE_IXGBE_INC_VECTOR
+ rxq->rxrearm_start = 0;
+ rxq->rxrearm_nb = 0;
+#endif
}
-int
+int __attribute__((cold))
ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
@@ -2148,10 +2330,11 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct rte_mempool *mp)
{
const struct rte_memzone *rz;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
struct ixgbe_hw *hw;
- int use_def_burst_func = 1;
uint16_t len;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2161,9 +2344,9 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
* It must not exceed hardware maximum, and must be multiple
* of IXGBE_ALIGN.
*/
- if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
- (nb_desc > IXGBE_MAX_RING_DESC) ||
- (nb_desc < IXGBE_MIN_RING_DESC)) {
+ if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
+ (nb_desc > IXGBE_MAX_RING_DESC) ||
+ (nb_desc < IXGBE_MIN_RING_DESC)) {
return (-EINVAL);
}
@@ -2174,7 +2357,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
/* First allocate the rx queue data structure */
- rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue),
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (rxq == NULL)
return (-ENOMEM);
@@ -2195,8 +2378,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
- RX_RING_SZ, socket_id);
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ RX_RING_SZ, IXGBE_ALIGN, socket_id);
if (rz == NULL) {
ixgbe_rx_queue_release(rxq);
return (-ENOMEM);
@@ -2211,7 +2394,9 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
* Modified to setup VFRDT for Virtual Function
*/
if (hw->mac.type == ixgbe_mac_82599_vf ||
- hw->mac.type == ixgbe_mac_X540_vf) {
+ hw->mac.type == ixgbe_mac_X540_vf ||
+ hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_X550EM_x_vf) {
rxq->rdt_reg_addr =
IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
rxq->rdh_reg_addr =
@@ -2223,68 +2408,74 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rdh_reg_addr =
IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
}
-#ifndef RTE_LIBRTE_XEN_DOM0
- rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
-#else
+
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#endif
rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
/*
+ * Certain constraints must be met in order to use the bulk buffer
+ * allocation Rx burst function. If any of Rx queues doesn't meet them
+ * the feature should be disabled for the whole port.
+ */
+ if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
+ PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
+ "preconditions - canceling the feature for "
+ "the whole port[%d]",
+ rxq->queue_id, rxq->port_id);
+ adapter->rx_bulk_alloc_allowed = false;
+ }
+
+ /*
* Allocate software ring. Allow for space at the end of the
* S/W ring to make sure look-ahead logic in bulk alloc Rx burst
* function does not access an invalid memory region.
*/
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
- len = (uint16_t)(nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
-#else
len = nb_desc;
-#endif
+ if (adapter->rx_bulk_alloc_allowed)
+ len += RTE_PMD_IXGBE_RX_MAX_BURST;
+
rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
- sizeof(struct igb_rx_entry) * len,
+ sizeof(struct ixgbe_rx_entry) * len,
RTE_CACHE_LINE_SIZE, socket_id);
- if (rxq->sw_ring == NULL) {
+ if (!rxq->sw_ring) {
ixgbe_rx_queue_release(rxq);
return (-ENOMEM);
}
- PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
- rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
/*
- * Certain constraints must be met in order to use the bulk buffer
- * allocation Rx burst function.
+ * Always allocate even if it's not going to be needed in order to
+ * simplify the code.
+ *
+ * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
+ * be requested in ixgbe_dev_rx_init(), which is called later from
+ * the dev_start() flow.
*/
- use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
-
-#ifdef RTE_IXGBE_INC_VECTOR
- ixgbe_rxq_vec_setup(rxq);
-#endif
- /* Check if pre-conditions are satisfied, and no Scattered Rx */
- if (!use_def_burst_func && !dev->data->scattered_rx) {
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "satisfied. Rx Burst Bulk Alloc function will be "
- "used on port=%d, queue=%d.",
- rxq->port_id, rxq->queue_id);
- dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
-#ifdef RTE_IXGBE_INC_VECTOR
- if (!ixgbe_rx_vec_condition_check(dev)) {
- PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
- "sure RX burst size no less than 32.");
- dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
- }
-#endif
-#endif
- } else {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
- "are not satisfied, Scattered Rx is requested, "
- "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
- "enabled (port=%d, queue=%d).",
- rxq->port_id, rxq->queue_id);
+ rxq->sw_sc_ring =
+ rte_zmalloc_socket("rxq->sw_sc_ring",
+ sizeof(struct ixgbe_scattered_rx_entry) * len,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq->sw_sc_ring) {
+ ixgbe_rx_queue_release(rxq);
+ return (-ENOMEM);
}
+
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
+ "dma_addr=0x%"PRIx64,
+ rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
+ rxq->rx_ring_phys_addr);
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
+ "preconditions - canceling the feature for "
+ "the whole port[%d]",
+ rxq->queue_id, rxq->port_id);
+ adapter->rx_vec_allowed = false;
+ } else
+ ixgbe_rxq_vec_setup(rxq);
+
dev->data->rx_queues[queue_idx] = rxq;
- ixgbe_reset_rx_queue(rxq);
+ ixgbe_reset_rx_queue(adapter, rxq);
return 0;
}
@@ -2294,7 +2485,7 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#define IXGBE_RXQ_SCAN_INTERVAL 4
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
uint32_t desc = 0;
if (rx_queue_id >= dev->data->nb_rx_queues) {
@@ -2306,7 +2497,8 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
- (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
+ (rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
desc += IXGBE_RXQ_SCAN_INTERVAL;
rxdp += IXGBE_RXQ_SCAN_INTERVAL;
if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
@@ -2321,7 +2513,7 @@ int
ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_queue *rxq = rx_queue;
+ struct ixgbe_rx_queue *rxq = rx_queue;
uint32_t desc;
if (unlikely(offset >= rxq->nb_rx_desc))
@@ -2331,18 +2523,21 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
desc -= rxq->nb_rx_desc;
rxdp = &rxq->rx_ring[desc];
- return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
+ return !!(rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
}
-void
+void __attribute__((cold))
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
unsigned i;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- struct igb_tx_queue *txq = dev->data->tx_queues[i];
+ struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
if (txq != NULL) {
txq->ops->release_mbufs(txq);
txq->ops->reset(txq);
@@ -2350,14 +2545,34 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- struct igb_rx_queue *rxq = dev->data->rx_queues[i];
+ struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
- ixgbe_reset_rx_queue(rxq);
+ ixgbe_reset_rx_queue(adapter, rxq);
}
}
}
+void
+ixgbe_dev_free_queues(struct rte_eth_dev *dev)
+{
+ unsigned i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
/*********************************************************************
*
* Device RX/TX init functions
@@ -2399,11 +2614,13 @@ ixgbe_rss_disable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
uint32_t mrqc;
+ uint32_t mrqc_reg;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+ mrqc = IXGBE_READ_REG(hw, mrqc_reg);
mrqc &= ~IXGBE_MRQC_RSSEN;
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
}
static void
@@ -2414,6 +2631,11 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
uint32_t rss_key;
uint64_t rss_hf;
uint16_t i;
+ uint32_t mrqc_reg;
+ uint32_t rssrk_reg;
+
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+ rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
hash_key = rss_conf->rss_key;
if (hash_key != NULL) {
@@ -2423,7 +2645,7 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
rss_key |= hash_key[(i * 4) + 1] << 8;
rss_key |= hash_key[(i * 4) + 2] << 16;
rss_key |= hash_key[(i * 4) + 3] << 24;
- IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
+ IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
}
}
@@ -2432,23 +2654,23 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
if (rss_hf & ETH_RSS_IPV4)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
- if (rss_hf & ETH_RSS_IPV4_TCP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
if (rss_hf & ETH_RSS_IPV6)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
if (rss_hf & ETH_RSS_IPV6_EX)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
- if (rss_hf & ETH_RSS_IPV6_TCP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
if (rss_hf & ETH_RSS_IPV6_TCP_EX)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
- if (rss_hf & ETH_RSS_IPV4_UDP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
- if (rss_hf & ETH_RSS_IPV6_UDP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
if (rss_hf & ETH_RSS_IPV6_UDP_EX)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
}
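These per-MAC register lookups are driven from the ethdev RSS API. A minimal sketch of updating the hash configuration from the application, assuming the DPDK 2.2 ethdev API; the helper name and key bytes are illustrative:

#include <rte_ethdev.h>

static int set_rss(uint8_t port_id)	/* hypothetical helper */
{
	static uint8_t key[40];		/* 40 key bytes fill 10 RSSRK registers */
	struct rte_eth_rss_conf conf = {
		.rss_key = key,
		.rss_key_len = sizeof(key),
		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
	};
	return rte_eth_dev_rss_hash_update(port_id, &conf);
}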
int
@@ -2458,9 +2680,17 @@ ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
struct ixgbe_hw *hw;
uint32_t mrqc;
uint64_t rss_hf;
+ uint32_t mrqc_reg;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!ixgbe_rss_update_sp(hw->mac.type)) {
+ PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
+ "NIC.");
+ return -ENOTSUP;
+ }
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+
/*
* Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
* "RSS enabling cannot be done dynamically while it must be
@@ -2471,7 +2701,7 @@ ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
* disabled at initialization time.
*/
rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
- mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc = IXGBE_READ_REG(hw, mrqc_reg);
if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
if (rss_hf != 0) /* Enable RSS */
return -(EINVAL);
@@ -2494,13 +2724,17 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
uint32_t rss_key;
uint64_t rss_hf;
uint16_t i;
+ uint32_t mrqc_reg;
+ uint32_t rssrk_reg;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+ rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
hash_key = rss_conf->rss_key;
if (hash_key != NULL) {
/* Return RSS hash key */
for (i = 0; i < 10; i++) {
- rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i);
+ rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
hash_key[(i * 4)] = rss_key & 0x000000FF;
hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
@@ -2509,7 +2743,7 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
}
/* Get RSS functions configured in MRQC register */
- mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc = IXGBE_READ_REG(hw, mrqc_reg);
if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
rss_conf->rss_hf = 0;
return 0;
@@ -2518,19 +2752,19 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
rss_hf |= ETH_RSS_IPV4;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
- rss_hf |= ETH_RSS_IPV4_TCP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
rss_hf |= ETH_RSS_IPV6;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
rss_hf |= ETH_RSS_IPV6_EX;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
- rss_hf |= ETH_RSS_IPV6_TCP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
rss_hf |= ETH_RSS_IPV6_TCP_EX;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
- rss_hf |= ETH_RSS_IPV4_UDP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
- rss_hf |= ETH_RSS_IPV6_UDP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
rss_hf |= ETH_RSS_IPV6_UDP_EX;
rss_conf->rss_hf = rss_hf;
@@ -2545,22 +2779,28 @@ ixgbe_rss_configure(struct rte_eth_dev *dev)
uint32_t reta;
uint16_t i;
uint16_t j;
+ uint16_t sp_reta_size;
+ uint32_t reta_reg;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+
/*
* Fill in redirection table
* The byte-swap is needed because NIC registers are in
* little-endian order.
*/
reta = 0;
- for (i = 0, j = 0; i < 128; i++, j++) {
+ for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
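+ /* Four 8-bit queue indices are packed into each 32-bit RETA
+ * register; the register is written out on every fourth
+ * entry, i.e. when (i & 3) == 3 below.
+ */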
if (j == dev->data->nb_rx_queues)
j = 0;
reta = (reta << 8) | j;
if ((i & 3) == 3)
- IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),
+ IXGBE_WRITE_REG(hw, reta_reg,
rte_bswap32(reta));
}
@@ -2580,6 +2820,7 @@ ixgbe_rss_configure(struct rte_eth_dev *dev)
#define NUM_VFTA_REGISTERS 128
#define NIC_RX_BUFFER_SIZE 0x200
+#define X550_RX_BUFFER_SIZE 0x180
static void
ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
@@ -2608,7 +2849,15 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
* RXPBSIZE
* split rx buffer up into sections, each for 1 traffic class
*/
- pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
+ break;
+ default:
+ pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+ break;
+ }
for (i = 0 ; i < nb_tcs; i++) {
uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
@@ -2646,7 +2895,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
* mapping is done with 3 bits per priority,
* so shift by i*3 each time
*/
- queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));
+ queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
@@ -2781,7 +3030,7 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
}
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = vmdq_rx_conf->dcb_queue[i];
+ j = vmdq_rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
@@ -2809,7 +3058,7 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = vmdq_tx_conf->dcb_queue[i];
+ j = vmdq_tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
@@ -2831,7 +3080,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = rx_conf->dcb_queue[i];
+ j = rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
@@ -2852,7 +3101,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = tx_conf->dcb_queue[i];
+ j = tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
@@ -2887,9 +3136,13 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
IXGBE_MRQC_VMDQRT4TCEN;
else {
+ /* Whether the mode is DCB or DCB_RSS, just
+ * set MRQE to RSSXTCEN; RSS itself is controlled
+ * by the RSS_FIELD bits.
+ */
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
- IXGBE_MRQC_RT4TCEN;
+ IXGBE_MRQC_RTRSS4TCEN;
}
}
if (dcb_config->num_tcs.pg_tcs == 8) {
@@ -2899,7 +3152,7 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
else {
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
- IXGBE_MRQC_RT8TCEN;
+ IXGBE_MRQC_RTRSS8TCEN;
}
}
@@ -2982,7 +3235,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
{
int ret = 0;
uint8_t i,pfc_en,nb_tcs;
- uint16_t pbsize;
+ uint16_t pbsize, rx_buffer_size;
uint8_t config_dcb_rx = 0;
uint8_t config_dcb_tx = 0;
uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
@@ -3004,16 +3257,17 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
*get dcb and VT rx configuration parameters
*from rte_eth_conf
*/
- ixgbe_vmdq_dcb_rx_config(dev,dcb_config);
+ ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
/*Configure general VMDQ and DCB RX parameters*/
ixgbe_vmdq_dcb_configure(dev);
}
break;
case ETH_MQ_RX_DCB:
+ case ETH_MQ_RX_DCB_RSS:
dcb_config->vt_mode = false;
config_dcb_rx = DCB_RX_CONFIG;
/* Get dcb TX configuration parameters from rte_eth_conf */
- ixgbe_dcb_rx_config(dev,dcb_config);
+ ixgbe_dcb_rx_config(dev, dcb_config);
/*Configure general DCB RX parameters*/
ixgbe_dcb_rx_hw_config(hw, dcb_config);
break;
@@ -3035,7 +3289,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
dcb_config->vt_mode = false;
config_dcb_tx = DCB_TX_CONFIG;
/*get DCB TX configuration parameters from rte_eth_conf*/
- ixgbe_dcb_tx_config(dev,dcb_config);
+ ixgbe_dcb_tx_config(dev, dcb_config);
/*Configure general DCB TX parameters*/
ixgbe_dcb_tx_hw_config(hw, dcb_config);
break;
@@ -3073,9 +3327,19 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
}
}
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ rx_buffer_size = X550_RX_BUFFER_SIZE;
+ break;
+ default:
+ rx_buffer_size = NIC_RX_BUFFER_SIZE;
+ break;
+ }
+
if(config_dcb_rx) {
/* Set RX buffer size */
- pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+ pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
for (i = 0 ; i < nb_tcs; i++) {
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
@@ -3131,7 +3395,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/* Check if the PFC is supported */
if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
- pbsize = (uint16_t) (NIC_RX_BUFFER_SIZE / nb_tcs);
+ pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
for (i = 0; i < nb_tcs; i++) {
/*
* If the TC count is 8,and the default high_water is 48,
@@ -3166,14 +3430,15 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
/* check support mq_mode for DCB */
if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
- (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
+ (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
+ (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
return;
if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
return;
/** Configure DCB hardware **/
- ixgbe_dcb_hw_configure(dev,dcb_cfg);
+ ixgbe_dcb_hw_configure(dev, dcb_cfg);
return;
}
@@ -3305,10 +3570,10 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
return;
}
-static int
-ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+static int __attribute__((cold))
+ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
{
- struct igb_rx_entry *rxe = rxq->sw_ring;
+ struct ixgbe_rx_entry *rxe = rxq->sw_ring;
uint64_t dma_addr;
unsigned i;
@@ -3331,7 +3596,7 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
rxd = &rxq->rx_ring[i];
- rxd->read.hdr_addr = dma_addr;
+ rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
rxe[i].mbuf = mbuf;
}
@@ -3340,6 +3605,67 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
}
static int
+ixgbe_config_vf_rss(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ uint32_t mrqc;
+
+ ixgbe_rss_configure(dev);
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* MRQC: enable VF RSS */
+ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc &= ~IXGBE_MRQC_MRQE_MASK;
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+ case ETH_64_POOLS:
+ mrqc |= IXGBE_MRQC_VMDQRSS64EN;
+ break;
+
+ case ETH_32_POOLS:
+ mrqc |= IXGBE_MRQC_VMDQRSS32EN;
+ break;
+
+ default:
+ PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
+ return -EINVAL;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+ return 0;
+}
+
+static int
+ixgbe_config_vf_default(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+ case ETH_64_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+ IXGBE_MRQC_VMDQEN);
+ break;
+
+ case ETH_32_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+ IXGBE_MRQC_VMDQRT4TCEN);
+ break;
+
+ case ETH_16_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+ IXGBE_MRQC_VMDQRT8TCEN);
+ break;
+ default:
+ PMD_INIT_LOG(ERR,
+ "invalid pool number in IOV mode");
+ break;
+ }
+ return 0;
+}
+
+static int
ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
@@ -3354,41 +3680,46 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
* any DCB/RSS w/o VMDq multi-queue setting
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
- ixgbe_rss_configure(dev);
- break;
+ case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_DCB_RSS:
+ case ETH_MQ_RX_VMDQ_RSS:
+ ixgbe_rss_configure(dev);
+ break;
- case ETH_MQ_RX_VMDQ_DCB:
- ixgbe_vmdq_dcb_configure(dev);
- break;
+ case ETH_MQ_RX_VMDQ_DCB:
+ ixgbe_vmdq_dcb_configure(dev);
+ break;
- case ETH_MQ_RX_VMDQ_ONLY:
- ixgbe_vmdq_rx_hw_configure(dev);
- break;
+ case ETH_MQ_RX_VMDQ_ONLY:
+ ixgbe_vmdq_rx_hw_configure(dev);
+ break;
- case ETH_MQ_RX_NONE:
- /* if mq_mode is none, disable rss mode.*/
- default: ixgbe_rss_disable(dev);
+ case ETH_MQ_RX_NONE:
+ default:
+ /* if mq_mode is none, disable rss mode.*/
+ ixgbe_rss_disable(dev);
+ break;
}
} else {
- switch (RTE_ETH_DEV_SRIOV(dev).active) {
/*
* SRIOV active scheme
- * FIXME if support DCB/RSS together with VMDq & SRIOV
+ * Support RSS together with VMDq & SRIOV
*/
- case ETH_64_POOLS:
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQEN);
- break;
-
- case ETH_32_POOLS:
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT4TCEN);
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_VMDQ_RSS:
+ ixgbe_config_vf_rss(dev);
break;
- case ETH_16_POOLS:
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT8TCEN);
- break;
+ /* FIXME if support DCB/RSS together with VMDq & SRIOV */
+ case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_VMDQ_DCB_RSS:
+ PMD_INIT_LOG(ERR,
+ "Could not support DCB with VMDq & SRIOV");
+ return -1;
default:
- PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
+ ixgbe_config_vf_default(dev);
+ break;
}
}
@@ -3453,15 +3784,346 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
return 0;
}
+/**
+ * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
+ *
+ * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
+ * spec rev. 3.0 chapter 8.2.3.8.13.
+ *
+ * @pool Memory pool of the Rx queue
+ */
+static inline uint32_t
+ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
+{
+ struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
+
+ /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
+ uint16_t maxdesc =
+ IPV4_MAX_PKT_LEN /
+ (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
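+
+ /*
+ * Worked example (illustrative; assumes IPV4_MAX_PKT_LEN == 65535 and
+ * the default 128 B RTE_PKTMBUF_HEADROOM): with a 2176 B data room,
+ * maxdesc = 65535 / 2048 = 31, which is capped to MAXDESC_16 below.
+ */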
+
+ if (maxdesc >= 16)
+ return IXGBE_RSCCTL_MAXDESC_16;
+ else if (maxdesc >= 8)
+ return IXGBE_RSCCTL_MAXDESC_8;
+ else if (maxdesc >= 4)
+ return IXGBE_RSCCTL_MAXDESC_4;
+ else
+ return IXGBE_RSCCTL_MAXDESC_1;
+}
+
+/**
+ * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
+ * interrupt
+ *
+ * (Taken from FreeBSD tree)
+ * (yes this is all very magic and confusing :)
+ *
+ * @dev port handle
+ * @entry the register array entry
+ * @vector the MSIX vector for this queue
+ * @type RX/TX/MISC
+ */
+static void
+ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 ivar, index;
+
+ vector |= IXGBE_IVAR_ALLOC_VAL;
+
+ switch (hw->mac.type) {
+
+ case ixgbe_mac_82598EB:
+ if (type == -1)
+ entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
+ else
+ entry += (type * 64);
+ index = (entry >> 2) & 0x1F;
+ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
+ ivar &= ~(0xFF << (8 * (entry & 0x3)));
+ ivar |= (vector << (8 * (entry & 0x3)));
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
+ break;
+
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (type == -1) { /* MISC IVAR */
+ index = (entry & 1) * 8;
+ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
+ ivar &= ~(0xFF << index);
+ ivar |= (vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
+ } else { /* RX/TX IVARS */
+ index = (16 * (entry & 1)) + (8 * type);
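+ /* e.g. entry 5, type 0 (RX): index = 16*1 + 8*0 = 16,
+ * so the vector lands in bits 23:16 of IVAR(2). */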
+ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
+ }
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+void __attribute__((cold))
+ixgbe_set_rx_function(struct rte_eth_dev *dev)
+{
+ uint16_t i, rx_using_sse;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+
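+ /* Selection order of the branches below: LRO callbacks first,
+ * then scattered Rx, then vector Rx, bulk allocation and
+ * finally the single-buffer ixgbe_recv_pkts fallback.
+ */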
+ /*
+ * In order to allow Vector Rx, a few configuration
+ * conditions must be met and Rx Bulk Allocation should be allowed.
+ */
+ if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
+ !adapter->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
+ "preconditions or RTE_IXGBE_INC_VECTOR is "
+ "not enabled",
+ dev->data->port_id);
+
+ adapter->rx_vec_allowed = false;
+ }
+
+ /*
+ * Initialize the appropriate LRO callback.
+ *
+ * If all queues satisfy the bulk allocation preconditions
+ * (hw->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
+ * Otherwise use a single allocation version.
+ */
+ if (dev->data->lro) {
+ if (adapter->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
+ "allocation version");
+ dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
+ "allocation version");
+ dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+ }
+ } else if (dev->data->scattered_rx) {
+ /*
+ * Set the non-LRO scattered callback: there are Vector and
+ * single allocation versions.
+ */
+ if (adapter->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+ "callback (port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+ } else if (adapter->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
+ "allocation callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
+ "single allocation) "
+ "Scattered Rx callback "
+ "(port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+ }
+ /*
+ * Below we set "simple" callbacks according to port/queues parameters.
+ * If parameters allow we are going to choose between the following
+ * callbacks:
+ * - Vector
+ * - Bulk Allocation
+ * - Single buffer allocation (the simplest one)
+ */
+ } else if (adapter->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
+ "burst size no less than %d (port=%d).",
+ RTE_IXGBE_DESCS_PER_LOOP,
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
+ } else if (adapter->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function "
+ "will be used on port=%d.",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+ "satisfied, or Scattered Rx is requested "
+ "(port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = ixgbe_recv_pkts;
+ }
+
+ /* Propagate information about RX function choice through all queues. */
+
+ rx_using_sse =
+ (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+ rxq->rx_using_sse = rx_using_sse;
+ }
+}
+
+/**
+ * ixgbe_set_rsc - configure RSC related port HW registers
+ *
+ * Configures the port's RSC-related registers according to chapter 4.6.7.2
+ * of the 82599 Spec (the x540 configuration is virtually the same).
+ *
+ * @dev port handle
+ *
+ * Returns 0 in case of success or a non-zero error code
+ */
+static int
+ixgbe_set_rsc(struct rte_eth_dev *dev)
+{
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_dev_info dev_info = { 0 };
+ bool rsc_capable = false;
+ uint16_t i;
+ uint32_t rdrxctl;
+
+ /* Sanity check */
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+ rsc_capable = true;
+
+ if (!rsc_capable && rx_conf->enable_lro) {
+ PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
+ "support it");
+ return -EINVAL;
+ }
+
+ /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
+
+ if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
+ /*
+ * According to chapter 4.6.7.2.1 of the Spec Rev.
+ * 3.0, RSC configuration requires HW CRC stripping to be
+ * enabled. If the user requested both HW CRC stripping off
+ * and RSC on - return an error.
+ */
+ PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
+ "is disabled");
+ return -EINVAL;
+ }
+
+ /* RFCTL configuration */
+ if (rsc_capable) {
+ uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+ if (rx_conf->enable_lro)
+ /*
+ * Since NFS packet coalescing is not supported - clear
+ * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
+ * enabled.
+ */
+ rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
+ IXGBE_RFCTL_NFSR_DIS);
+ else
+ rfctl |= IXGBE_RFCTL_RSC_DIS;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
+ }
+
+ /* If LRO hasn't been requested - we are done here. */
+ if (!rx_conf->enable_lro)
+ return 0;
+
+ /* Set RDRXCTL.RSCACKC bit */
+ rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+
+ /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+ uint32_t srrctl =
+ IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
+ uint32_t rscctl =
+ IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
+ uint32_t psrtype =
+ IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
+ uint32_t eitr =
+ IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
+
+ /*
+ * ixgbe PMD doesn't support header-split at the moment.
+ *
+ * Following chapter 4.6.7.2.1 of the 82599/x540
+ * Spec, if RSC is enabled the SRRCTL[n].BSIZEHEADER
+ * should be configured even if header split is not
+ * enabled. We configure it to 128 bytes, following the
+ * recommendation in the spec.
+ */
+ srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+ srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK;
+
+ /*
+ * TODO: Consider setting the Receive Descriptor Minimum
+ * Threshold Size for the RSC case. This is not an obviously
+ * beneficial option, but one worth considering...
+ */
+
+ rscctl |= IXGBE_RSCCTL_RSCEN;
+ rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
+ psrtype |= IXGBE_PSRTYPE_TCPHDR;
+
+ /*
+ * RSC: Set ITR interval corresponding to 2K ints/s.
+ *
+ * Full-sized RSC aggregations for a 10Gb/s link will
+ * arrive at about 20K aggregation/s rate.
+ *
+ * A 2K ints/s rate will make only 10% of the
+ * aggregations to be closed due to the interrupt timer
+ * expiration for a streaming at wire-speed case.
+ *
+ * For a sparse streaming case this setting will yield
+ * at most 500us latency for a single RSC aggregation.
+ */
+ eitr &= ~IXGBE_EITR_ITR_INT_MASK;
+ eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
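+ /* IXGBE_EITR_INTERVAL_US(500) programs a 500 us minimum
+ * inter-interrupt gap, i.e. the 2K ints/s rate noted above. */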
+
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
+ IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
+
+ /*
+ * RSC requires the mapping of the queue to the
+ * interrupt vector.
+ */
+ ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
+ }
+
+ dev->data->lro = 1;
+
+ PMD_INIT_LOG(DEBUG, "enabling LRO mode");
+
+ return 0;
+}
+
/*
* Initializes Receive Unit.
*/
-int
+int __attribute__((cold))
ixgbe_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
- struct rte_pktmbuf_pool_private *mbp_priv;
+ struct ixgbe_rx_queue *rxq;
uint64_t bus_addr;
uint32_t rxctrl;
uint32_t fctrl;
@@ -3472,6 +4134,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
uint32_t rxcsum;
uint16_t buf_size;
uint16_t i;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ int rc;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -3494,7 +4158,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Configure CRC stripping, if any.
*/
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (dev->data->dev_conf.rxmode.hw_strip_crc)
+ if (rx_conf->hw_strip_crc)
hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
else
hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
@@ -3502,11 +4166,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
/*
* Configure jumbo frame support, if any.
*/
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (rx_conf->jumbo_frame == 1) {
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
maxfrs &= 0x0000FFFF;
- maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+ maxfrs |= (rx_conf->max_rx_pkt_len << 16);
IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
} else
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
@@ -3530,9 +4194,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure.
*/
- rxq->crc_len = (uint8_t)
- ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
- ETHER_CRC_LEN);
+ rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
@@ -3550,7 +4212,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
/*
* Configure Header Split
*/
- if (dev->data->dev_conf.rxmode.header_split) {
+ if (rx_conf->header_split) {
if (hw->mac.type == ixgbe_mac_82599EB) {
/* Must setup the PSRTYPE register */
uint32_t psrtype;
@@ -3560,10 +4222,10 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
IXGBE_PSRTYPE_IPV6HDR;
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
}
- srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ srrctl = ((rx_conf->split_hdr_size <<
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK);
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
} else
#endif
srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -3578,40 +4240,24 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* The value is in 1 KB resolution. Valid values can be from
* 1 KB to 16 KB.
*/
- mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
- buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM);
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM);
srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
IXGBE_SRRCTL_BSIZEPKT_MASK);
+
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
/* It adds dual VLAN length for supporting dual VLAN */
- if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
- 2 * IXGBE_VLAN_TAG_SIZE) > buf_size){
- if (!dev->data->scattered_rx)
- PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
dev->data->scattered_rx = 1;
-#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
-#endif
- }
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
- if (!dev->data->scattered_rx)
- PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
-#endif
+ if (rx_conf->enable_scatter)
dev->data->scattered_rx = 1;
- }
/*
* Device configured with multiple RX queues.
@@ -3625,16 +4271,17 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
*/
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
rxcsum |= IXGBE_RXCSUM_PCSD;
- if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+ if (rx_conf->hw_ip_checksum)
rxcsum |= IXGBE_RXCSUM_IPPCSE;
else
rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
- if (hw->mac.type == ixgbe_mac_82599EB) {
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- if (dev->data->dev_conf.rxmode.hw_strip_crc)
+ if (rx_conf->hw_strip_crc)
rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
else
rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
@@ -3642,17 +4289,23 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
+ rc = ixgbe_set_rsc(dev);
+ if (rc)
+ return rc;
+
+ ixgbe_set_rx_function(dev);
+
return 0;
}
/*
* Initializes Transmit Unit.
*/
-void
+void __attribute__((cold))
ixgbe_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint64_t bus_addr;
uint32_t hlreg0;
uint32_t txctrl;
@@ -3716,7 +4369,7 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
/*
* Set up link for 82599 loopback mode Tx->Rx.
*/
-static inline void
+static inline void __attribute__((cold))
ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
{
PMD_INIT_FUNC_TRACE();
@@ -3744,16 +4397,17 @@ ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
/*
* Start Transmit and Receive Units.
*/
-void
+int __attribute__((cold))
ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
- struct igb_rx_queue *rxq;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_rx_queue *rxq;
uint32_t txdctl;
uint32_t dmatxctl;
uint32_t rxctrl;
uint16_t i;
+ int ret = 0;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -3776,14 +4430,20 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- if (!txq->tx_deferred_start)
- ixgbe_dev_tx_queue_start(dev, i);
+ if (!txq->tx_deferred_start) {
+ ret = ixgbe_dev_tx_queue_start(dev, i);
+ if (ret < 0)
+ return ret;
+ }
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- if (!rxq->rx_deferred_start)
- ixgbe_dev_rx_queue_start(dev, i);
+ if (!rxq->rx_deferred_start) {
+ ret = ixgbe_dev_rx_queue_start(dev, i);
+ if (ret < 0)
+ return ret;
+ }
}
/* Enable Receive engine */
@@ -3798,16 +4458,17 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
ixgbe_setup_loopback_link_82599(hw);
+ return 0;
}
/*
* Start Receive Units for specified queue.
*/
-int
+int __attribute__((cold))
ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
uint32_t rxdctl;
int poll_ms;
@@ -3839,6 +4500,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rte_wmb();
IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
} else
return -1;
@@ -3848,11 +4510,13 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
/*
* Stop Receive Units for specified queue.
*/
-int
+int __attribute__((cold))
ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+ struct ixgbe_rx_queue *rxq;
uint32_t rxdctl;
int poll_ms;
@@ -3879,7 +4543,8 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rte_delay_us(RTE_IXGBE_WAIT_100_US);
ixgbe_rx_queue_release_mbufs(rxq);
- ixgbe_reset_rx_queue(rxq);
+ ixgbe_reset_rx_queue(adapter, rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
} else
return -1;
@@ -3890,11 +4555,11 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
/*
* Start Transmit Units for specified queue.
*/
-int
+int __attribute__((cold))
ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint32_t txdctl;
int poll_ms;
@@ -3922,6 +4587,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
rte_wmb();
IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
} else
return -1;
@@ -3931,11 +4597,11 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
/*
* Stop Transmit Units for specified queue.
*/
-int
+int __attribute__((cold))
ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint32_t txdctl;
uint32_t txtdh, txtdt;
int poll_ms;
@@ -3982,23 +4648,60 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
txq->ops->release_mbufs(txq);
txq->ops->reset(txq);
}
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
} else
return -1;
return 0;
}
+void
+ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct ixgbe_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct ixgbe_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
/*
* [VF] Initializes Receive Unit.
*/
-int
+int __attribute__((cold))
ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
- struct rte_pktmbuf_pool_private *mbp_priv;
+ struct ixgbe_rx_queue *rxq;
uint64_t bus_addr;
- uint32_t srrctl;
+ uint32_t srrctl, psrtype = 0;
uint16_t buf_size;
uint16_t i;
int ret;
@@ -4006,6 +4709,19 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
+ PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
+ "it should be power of 2");
+ return -1;
+ }
+
+ if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
+ PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
+ "it should be equal to or less than %d",
+ hw->mac.max_rx_queues);
+ return -1;
+ }
+
/*
* When the VF driver issues a IXGBE_VF_RESET request, the PF driver
* disables the VF receipt of packets if the PF MTU is > 1500.
@@ -4024,7 +4740,6 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
/* Setup RX queues */
- dev->rx_pkt_burst = ixgbe_recv_pkts;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
@@ -4052,20 +4767,10 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
* Configure Header Split
*/
if (dev->data->dev_conf.rxmode.header_split) {
-
- /* Must setup the PSRTYPE register */
- uint32_t psrtype;
- psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR;
-
- IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE(i), psrtype);
-
srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK);
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
} else
#endif
srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -4080,9 +4785,8 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
* The value is in 1 KB resolution. Valid values can be from
* 1 KB to 16 KB.
*/
- mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
- buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM);
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM);
srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
IXGBE_SRRCTL_BSIZEPKT_MASK);
@@ -4094,30 +4798,31 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- /* It adds dual VLAN length for supporting dual VLAN */
- if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ if (dev->data->dev_conf.rxmode.enable_scatter ||
+ /* It adds dual VLAN length for supporting dual VLAN */
+ (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
-#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
-#endif
}
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
- if (!dev->data->scattered_rx)
- PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+#ifdef RTE_HEADER_SPLIT_ENABLE
+ if (dev->data->dev_conf.rxmode.header_split)
+ /* Must setup the PSRTYPE register */
+ psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR;
#endif
- dev->data->scattered_rx = 1;
- }
+
+ /* Set RQPL for VF RSS according to max Rx queue */
+ psrtype |= (dev->data->nb_rx_queues >> 1) <<
+ IXGBE_PSRTYPE_RQPL_SHIFT;
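+ /* e.g. four Rx queues yield an RQPL field value of 2 (4 >> 1). */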
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+
+ ixgbe_set_rx_function(dev);
return 0;
}
@@ -4125,11 +4830,11 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
/*
* [VF] Initializes Transmit Unit.
*/
-void
+void __attribute__((cold))
ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint64_t bus_addr;
uint32_t txctrl;
uint16_t i;
@@ -4166,12 +4871,12 @@ ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
/*
* [VF] Start Transmit and Receive Units.
*/
-void
+void __attribute__((cold))
ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
- struct igb_rx_queue *rxq;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_rx_queue *rxq;
uint32_t txdctl;
uint32_t rxdctl;
uint16_t i;
@@ -4226,3 +4931,34 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
}
}
+
+/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+ return -1;
+}
+
+uint16_t __attribute__((weak))
+ixgbe_recv_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+ixgbe_recv_scattered_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+int __attribute__((weak))
+ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
+{
+ return -1;
+}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx.h b/src/dpdk22/drivers/net/ixgbe/ixgbe_rxtx.h
index 329007cb..475a8005 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx.h
+++ b/src/dpdk22/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -34,17 +34,32 @@
#ifndef _IXGBE_RXTX_H_
#define _IXGBE_RXTX_H_
+/*
+ * Rings setup and release.
+ *
+ * TDBA/RDBA need only be aligned on a 16-byte boundary, but TDLEN/RDLEN must
+ * be a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary.
+ * This also optimizes the cache line size effect; H/W supports cache line sizes up to 128.
+ */
+#define IXGBE_ALIGN 128
-#define RTE_PMD_IXGBE_TX_MAX_BURST 32
+#define IXGBE_RXD_ALIGN (IXGBE_ALIGN / sizeof(union ixgbe_adv_rx_desc))
+#define IXGBE_TXD_ALIGN (IXGBE_ALIGN / sizeof(union ixgbe_adv_tx_desc))
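+/* Both advanced descriptors are 16 bytes, so each of the above evaluates to 8. */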
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
+ * descriptors must meet the following condition:
+ * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
+ */
+#define IXGBE_MIN_RING_DESC 32
+#define IXGBE_MAX_RING_DESC 4096
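+/* e.g. 32 * 16 B = 512 B and 4096 * 16 B = 64 KB, both multiples of 128. */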
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+#define RTE_PMD_IXGBE_TX_MAX_BURST 32
#define RTE_PMD_IXGBE_RX_MAX_BURST 32
-#define RTE_IXGBE_DESCS_PER_LOOP 4
-#elif defined(RTE_IXGBE_INC_VECTOR)
-#define RTE_IXGBE_DESCS_PER_LOOP 4
-#else
-#define RTE_IXGBE_DESCS_PER_LOOP 1
-#endif
+
+#define RTE_IXGBE_DESCS_PER_LOOP 4
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
@@ -53,9 +68,8 @@
(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
#ifdef RTE_IXGBE_INC_VECTOR
-#define RTE_IXGBE_VPMD_RX_BURST 32
-#define RTE_IXGBE_VPMD_TX_BURST 32
-#define RTE_IXGBE_RXQ_REARM_THRESH RTE_IXGBE_VPMD_RX_BURST
+#define RTE_IXGBE_RXQ_REARM_THRESH 32
+#define RTE_IXGBE_MAX_RX_BURST RTE_IXGBE_RXQ_REARM_THRESH
#define RTE_IXGBE_TX_MAX_FREE_BUF_SZ 64
#endif
@@ -75,14 +89,18 @@
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
-struct igb_rx_entry {
+struct ixgbe_rx_entry {
struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};
+struct ixgbe_scattered_rx_entry {
+ struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
+};
+
/**
* Structure associated with each descriptor of the TX ring of a TX queue.
*/
-struct igb_tx_entry {
+struct ixgbe_tx_entry {
struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
uint16_t next_id; /**< Index of next descriptor in ring. */
uint16_t last_id; /**< Index of last scattered descriptor. */
@@ -91,34 +109,35 @@ struct igb_tx_entry {
/**
* Structure associated with each descriptor of the TX ring of a TX queue.
*/
-struct igb_tx_entry_v {
+struct ixgbe_tx_entry_v {
struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
};
/**
* Structure associated with each RX queue.
*/
-struct igb_rx_queue {
+struct ixgbe_rx_queue {
struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
- struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
+ struct ixgbe_rx_entry *sw_ring; /**< address of RX software ring. */
+ struct ixgbe_scattered_rx_entry *sw_sc_ring; /**< address of scattered Rx software ring. */
struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
uint64_t mbuf_initializer; /**< value to init mbufs */
uint16_t nb_rx_desc; /**< number of RX descriptors. */
uint16_t rx_tail; /**< current value of RDT register. */
uint16_t nb_rx_hold; /**< number of held free RX desc. */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
-#endif
+ uint16_t rx_using_sse; /**< indicates that vector RX is in use */
#ifdef RTE_IXGBE_INC_VECTOR
- uint16_t rxrearm_nb; /**< the idx we start the re-arming from */
- uint16_t rxrearm_start; /**< number of remaining to be re-armed */
+ uint16_t rxrearm_nb; /**< number of remaining to be re-armed */
+ uint16_t rxrearm_start; /**< the idx we start the re-arming from */
#endif
uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
@@ -127,12 +146,10 @@ struct igb_rx_queue {
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
uint8_t rx_deferred_start; /**< not in global dev start. */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
struct rte_mbuf fake_mbuf;
/** hold packets to return to application */
struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2];
-#endif
};
/**
@@ -182,15 +199,20 @@ struct ixgbe_advctx_info {
/**
* Structure associated with each TX queue.
*/
-struct igb_tx_queue {
+struct ixgbe_tx_queue {
/** TX ring virtual address. */
volatile union ixgbe_adv_tx_desc *tx_ring;
uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
- struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
+ union {
+ struct ixgbe_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
+ struct ixgbe_tx_entry_v *sw_ring_v; /**< address of SW ring for vector PMD */
+ };
volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
uint16_t nb_tx_desc; /**< number of TX descriptors. */
uint16_t tx_tail; /**< current value of TDT reg. */
- uint16_t tx_free_thresh;/**< minimum TX before freeing. */
+ /**< Start freeing TX buffers if there are less free descriptors than
+ this value. */
+ uint16_t tx_free_thresh;
/** Number of TX descriptors to use before RS bit is set. */
uint16_t tx_rs_thresh;
/** Number of TX descriptors used since RS bit was set. */
@@ -211,14 +233,14 @@ struct igb_tx_queue {
uint32_t ctx_curr; /**< Hardware context states. */
/** Hardware context0 history. */
struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
- struct ixgbe_txq_ops *ops; /**< txq ops */
+ const struct ixgbe_txq_ops *ops; /**< txq ops */
uint8_t tx_deferred_start; /**< not in global dev start. */
};
struct ixgbe_txq_ops {
- void (*release_mbufs)(struct igb_tx_queue *txq);
- void (*free_swring)(struct igb_tx_queue *txq);
- void (*reset)(struct igb_tx_queue *txq);
+ void (*release_mbufs)(struct ixgbe_tx_queue *txq);
+ void (*free_swring)(struct ixgbe_tx_queue *txq);
+ void (*reset)(struct ixgbe_tx_queue *txq);
};
/*
@@ -253,18 +275,37 @@ struct ixgbe_txq_ops {
* the queue parameters. Used in tx_queue_setup by primary process and then
* in dev_init by secondary process when attaching to an existing ethdev.
*/
-void set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq);
+void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);
+
+/**
+ * Sets the rx_pkt_burst callback in the ixgbe rte_eth_dev instance.
+ *
+ * Sets the callback based on the device parameters:
+ * - ixgbe_hw.rx_bulk_alloc_allowed
+ * - rte_eth_dev_data.scattered_rx
+ * - rte_eth_dev_data.lro
+ * - conditions checked in ixgbe_rx_vec_dev_conf_condition_check()
+ *
+ * This means that the parameters above have to be configured prior to
+ * calling this function.
+ *
+ * @dev rte_eth_dev handle
+ */
+void ixgbe_set_rx_function(struct rte_eth_dev *dev);
-#ifdef RTE_IXGBE_INC_VECTOR
uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
+int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
+void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
+
+#ifdef RTE_IXGBE_INC_VECTOR
+
uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
-int ixgbe_txq_vec_setup(struct igb_tx_queue *txq);
-int ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq);
-int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev);
-#endif
+int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
-#endif
+#endif /* RTE_IXGBE_INC_VECTOR */
+#endif /* _IXGBE_RXTX_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx_vec.c b/src/dpdk22/drivers/net/ixgbe/ixgbe_rxtx_vec.c
index b54cb191..ccd93c7b 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
+++ b/src/dpdk22/drivers/net/ixgbe/ixgbe_rxtx_vec.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,17 +45,19 @@
#endif
static inline void
-ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
+ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
{
int i;
uint16_t rx_id;
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
struct rte_mbuf *mb0, *mb1;
__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
RTE_PKTMBUF_HEADROOM);
__m128i dma_addr0, dma_addr1;
+ const __m128i hba_msk = _mm_set_epi64x(0, UINT64_MAX);
+
rxdp = rxq->rx_ring + rxq->rxrearm_start;
/* Pull 'n' more MBUFs into the software ring */
@@ -108,6 +110,10 @@ ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+ /* set Header Buffer Address to zero */
+ dma_addr0 = _mm_and_si128(dma_addr0, hba_msk);
+ dma_addr1 = _mm_and_si128(dma_addr1, hba_msk);
+
/* flush desc with pa dma_addr */
_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
@@ -134,14 +140,6 @@ ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
*/
#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
-#define OLFLAGS_MASK ((uint16_t)(PKT_RX_VLAN_PKT | PKT_RX_IPV4_HDR |\
- PKT_RX_IPV4_HDR_EXT | PKT_RX_IPV6_HDR |\
- PKT_RX_IPV6_HDR_EXT))
-#define OLFLAGS_MASK_V (((uint64_t)OLFLAGS_MASK << 48) | \
- ((uint64_t)OLFLAGS_MASK << 32) | \
- ((uint64_t)OLFLAGS_MASK << 16) | \
- ((uint64_t)OLFLAGS_MASK))
-#define PTYPE_SHIFT (1)
#define VTAG_SHIFT (3)
static inline void
@@ -153,19 +151,37 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
uint64_t dword;
} vol;
+ /* pkt type + vlan olflags mask */
+ const __m128i pkttype_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);
+
+ /* mask everything except rss type */
+ const __m128i rsstype_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x000F, 0x000F, 0x000F, 0x000F);
+
+ /* map rss type to rss hash flag */
+ const __m128i rss_flags = _mm_set_epi8(PKT_RX_FDIR, 0, 0, 0,
+ 0, 0, 0, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
+ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
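+ /* The low RSS-type nibble of each descriptor indexes this table
+ * via the PSHUFB (_mm_shuffle_epi8) lookup below. */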
+
ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);
- ptype1 = _mm_unpacklo_epi32(ptype0, ptype1);
- vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
+ ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
+ ptype0 = _mm_and_si128(ptype0, rsstype_msk);
+ ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);
- ptype1 = _mm_slli_epi16(ptype1, PTYPE_SHIFT);
+ vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
vtag1 = _mm_srli_epi16(vtag1, VTAG_SHIFT);
+ vtag1 = _mm_and_si128(vtag1, pkttype_msk);
- ptype1 = _mm_or_si128(ptype1, vtag1);
- vol.dword = _mm_cvtsi128_si64(ptype1) & OLFLAGS_MASK_V;
+ vtag1 = _mm_or_si128(ptype0, vtag1);
+ vol.dword = _mm_cvtsi128_si64(vtag1);
rx_pkts[0]->ol_flags = vol.e[0];
rx_pkts[1]->ol_flags = vol.e[1];
@@ -177,36 +193,41 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
#endif
/*
- * vPMD receive routine, now only accept (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
- * in one loop
+ * vPMD raw receive routine, only accepts (nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
*
* Notice:
- * - nb_pkts < RTE_IXGBE_VPMD_RX_BURST, just return no packet
- * - nb_pkts > RTE_IXGBE_VPMD_RX_BURST, only scan RTE_IXGBE_VPMD_RX_BURST
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
* numbers of DD bit
+ * - floor-align nb_pkts to a multiple of RTE_IXGBE_DESCS_PER_LOOP
* - don't support ol_flags for rss and csum err
*/
static inline uint16_t
-_recv_raw_pkts_vec(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts, uint8_t *split_packet)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *sw_ring;
+ struct ixgbe_rx_entry *sw_ring;
uint16_t nb_pkts_recd;
int pos;
uint64_t var;
__m128i shuf_msk;
__m128i crc_adjust = _mm_set_epi16(
- 0, 0, 0, 0, /* ignore non-length fields */
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
0, /* ignore high-16bits of pkt_len */
-rxq->crc_len, /* sub crc on pkt_len */
- -rxq->crc_len, /* sub crc on data_len */
- 0 /* ignore pkt_type field */
+ 0, 0 /* ignore pkt_type field */
);
__m128i dd_check, eop_check;
+ __m128i desc_mask = _mm_set_epi32(0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFF07F0);
- if (unlikely(nb_pkts < RTE_IXGBE_VPMD_RX_BURST))
- return 0;
+ /* nb_pkts shall be less than or equal to RTE_IXGBE_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
/* Just the act of getting into the function from the application is
* going to cost about 7 cycles */
@@ -234,46 +255,41 @@ _recv_raw_pkts_vec(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* mask to shuffle from desc. to mbuf */
shuf_msk = _mm_set_epi8(
7, 6, 5, 4, /* octet 4~7, 32bits rss */
- 0xFF, 0xFF, /* skip high 16 bits vlan_macip, zero out */
15, 14, /* octet 14~15, low 16 bits vlan_macip */
+ 13, 12, /* octet 12~13, 16 bits data_len */
0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
13, 12, /* octet 12~13, low 16 bits pkt_len */
- 13, 12, /* octet 12~13, 16 bits data_len */
- 0xFF, 0xFF /* skip pkt_type field */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_type */
+ 1, /* octet 1, 8 bits pkt_type field */
+ 0 /* octet 0, 4 bits offset 4 pkt_type field */
);
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache */
sw_ring = &rxq->sw_ring[rxq->rx_tail];
- /*
- * A. load 4 packet in one loop
+ /* A. load 4 packet in one loop
+ * [A*. mask out 4 unused dirty field in desc]
* B. copy 4 mbuf point from swring to rx_pkts
* C. calc the number of DD bits among the 4 packets
* [C*. extract the end-of-packet bit, if requested]
* D. fill info. from desc to mbuf
*/
- for (pos = 0, nb_pkts_recd = 0; pos < RTE_IXGBE_VPMD_RX_BURST;
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
pos += RTE_IXGBE_DESCS_PER_LOOP,
rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
+ __m128i descs0[RTE_IXGBE_DESCS_PER_LOOP];
__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
__m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
- if (split_packet) {
- rte_prefetch0(&rx_pkts[pos]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
- }
-
/* B.1 load 1 mbuf point */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
/* A.1 load 4 pkts desc */
- descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+ descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
@@ -281,14 +297,29 @@ _recv_raw_pkts_vec(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* B.1 load 1 mbuf point */
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
- descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
/* B.1 load 2 mbuf point */
- descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
- descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+ descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+ if (split_packet) {
+ rte_prefetch0(&rx_pkts[pos]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+ }
+
+ /* A* mask out 0~3 bits RSS type */
+ descs[3] = _mm_and_si128(descs0[3], desc_mask);
+ descs[2] = _mm_and_si128(descs0[2], desc_mask);
+
+ /* A* mask out 0~3 bits RSS type */
+ descs[1] = _mm_and_si128(descs0[1], desc_mask);
+ descs[0] = _mm_and_si128(descs0[0], desc_mask);
+
/* avoid compiler reorder optimization */
rte_compiler_barrier();
@@ -301,8 +332,8 @@ _recv_raw_pkts_vec(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* C.1 4=>2 filter staterr info only */
sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
- /* set ol_flags with packet type and vlan tag */
- desc_to_olflags_v(descs, &rx_pkts[pos]);
+ /* set ol_flags with vlan packet type */
+ desc_to_olflags_v(descs0, &rx_pkts[pos]);
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
@@ -379,13 +410,13 @@ _recv_raw_pkts_vec(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
}
/*
- * vPMD receive routine, now only accept (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
- * in one loop
+ * vPMD receive routine, only accepts (nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
*
* Notice:
- * - nb_pkts < RTE_IXGBE_VPMD_RX_BURST, just return no packet
- * - nb_pkts > RTE_IXGBE_VPMD_RX_BURST, only scan RTE_IXGBE_VPMD_RX_BURST
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
* numbers of DD bit
+ * - floor-align nb_pkts to a multiple of RTE_IXGBE_DESCS_PER_LOOP
* - don't support ol_flags for rss and csum err
*/
uint16_t
@@ -396,16 +427,15 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
}
static inline uint16_t
-reassemble_packets(struct igb_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
uint16_t nb_bufs, uint8_t *split_flags)
{
- struct rte_mbuf *pkts[RTE_IXGBE_VPMD_RX_BURST]; /*finished pkts*/
+ struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/
struct rte_mbuf *start = rxq->pkt_first_seg;
struct rte_mbuf *end = rxq->pkt_last_seg;
- unsigned pkt_idx = 0, buf_idx = 0;
-
+ unsigned pkt_idx, buf_idx;
- while (buf_idx < nb_bufs) {
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
if (end != NULL) {
/* processing a split packet */
end->next = rx_bufs[buf_idx];
@@ -426,6 +456,8 @@ reassemble_packets(struct igb_rx_queue *rxq, struct rte_mbuf **rx_bufs,
else {
/* free up last mbuf */
struct rte_mbuf *secondlast = start;
+
+ start->nb_segs--;
while (secondlast->next != end)
secondlast = secondlast->next;
secondlast->data_len -= (rxq->crc_len -
@@ -448,7 +480,6 @@ reassemble_packets(struct igb_rx_queue *rxq, struct rte_mbuf **rx_bufs,
rx_bufs[buf_idx]->data_len += rxq->crc_len;
rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
}
- buf_idx++;
}
/* save the partial packet for next time */
@@ -463,14 +494,17 @@ reassemble_packets(struct igb_rx_queue *rxq, struct rte_mbuf **rx_bufs,
*
* Notice:
* - don't support ol_flags for rss and csum err
- * - now only accept (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
+ * numbers of DD bit
+ * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP (a power of two)
*/
uint16_t
ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq = rx_queue;
- uint8_t split_flags[RTE_IXGBE_VPMD_RX_BURST] = {0};
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};
/* get some new buffers */
uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
@@ -479,10 +513,10 @@ ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
return 0;
/* happy day case, full burst + no packets to be joined */
- const uint32_t *split_fl32 = (uint32_t *)split_flags;
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
if (rxq->pkt_first_seg == NULL &&
- split_fl32[0] == 0 && split_fl32[1] == 0 &&
- split_fl32[2] == 0 && split_fl32[3] == 0)
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
return nb_bufs;
/* reassemble any packets that need reassembly */
@@ -518,9 +552,9 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,
}
static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
- struct igb_tx_entry_v *txep;
+ struct ixgbe_tx_entry_v *txep;
uint32_t status;
uint32_t n;
uint32_t i;
@@ -538,22 +572,13 @@ ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
* first buffer to free from S/W ring is at index
* tx_next_dd - (tx_rs_thresh-1)
*/
- txep = &((struct igb_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
- (n - 1)];
-#ifdef RTE_MBUF_REFCNT
+ txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
-#else
- m = txep[0].mbuf;
-#endif
if (likely(m != NULL)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < n; i++) {
-#ifdef RTE_MBUF_REFCNT
m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
-#else
- m = txep[i].mbuf;
-#endif
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool))
free[nb_free++] = m;
@@ -584,7 +609,7 @@ ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
}
static inline void __attribute__((always_inline))
-tx_backlog_entry(struct igb_tx_entry_v *txep,
+tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
@@ -596,16 +621,16 @@ uint16_t
ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *txdp;
- struct igb_tx_entry_v *txep;
+ struct ixgbe_tx_entry_v *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = DCMD_DTYP_FLAGS;
uint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS;
int i;
- if (unlikely(nb_pkts > RTE_IXGBE_VPMD_TX_BURST))
- nb_pkts = RTE_IXGBE_VPMD_TX_BURST;
+ /* cross rx_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
if (txq->nb_tx_free < txq->tx_free_thresh)
ixgbe_tx_free_bufs(txq);
@@ -616,7 +641,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = txq->tx_tail;
txdp = &txq->tx_ring[tx_id];
- txep = &((struct igb_tx_entry_v *)txq->sw_ring)[tx_id];
+ txep = &txq->sw_ring_v[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -637,7 +662,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
/* avoid reaching the end of the ring */
txdp = &(txq->tx_ring[tx_id]);
- txep = &(((struct igb_tx_entry_v *)txq->sw_ring)[tx_id]);
+ txep = &txq->sw_ring_v[tx_id];
}
tx_backlog_entry(txep, tx_pkts, nb_commit);
@@ -659,50 +684,67 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_pkts;
}
-static void
-ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+static void __attribute__((cold))
+ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
unsigned i;
- struct igb_tx_entry_v *txe;
- uint16_t nb_free, max_desc;
+ struct ixgbe_tx_entry_v *txe;
+ const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
- if (txq->sw_ring != NULL) {
- /* release the used mbufs in sw_ring */
- nb_free = txq->nb_tx_free;
- max_desc = (uint16_t)(txq->nb_tx_desc - 1);
- for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
- nb_free < max_desc && i != txq->tx_tail;
- i = (i + 1) & max_desc) {
- txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
- if (txe->mbuf != NULL)
- rte_pktmbuf_free_seg(txe->mbuf);
- }
- /* reset tx_entry */
- for (i = 0; i < txq->nb_tx_desc; i++) {
- txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
- txe->mbuf = NULL;
- }
+ if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
+ return;
+
+ /* release the used mbufs in sw_ring */
+ for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
+ i != txq->tx_tail;
+ i = (i + 1) & max_desc) {
+ txe = &txq->sw_ring_v[i];
+ rte_pktmbuf_free_seg(txe->mbuf);
}
+ txq->nb_tx_free = max_desc;
+
+ /* reset tx_entry */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txe = &txq->sw_ring_v[i];
+ txe->mbuf = NULL;
+ }
+}
+
+void __attribute__((cold))
+ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
+{
+ const unsigned mask = rxq->nb_rx_desc - 1;
+ unsigned i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}
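Both release routines above walk their software ring with i = (i + 1) & mask rather than a modulo, which relies on the descriptor count being a power of two (mask = nb_desc - 1). A standalone illustration, with a hypothetical helper name:

    /* Advance a ring index with wrap-around; nb_desc must be a power
     * of two for the mask trick to equal (i + 1) % nb_desc. */
    static unsigned
    ring_next(unsigned i, unsigned nb_desc)
    {
            return (i + 1) & (nb_desc - 1);
    }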
-static void
-ixgbe_tx_free_swring(struct igb_tx_queue *txq)
+static void __attribute__((cold))
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
if (txq == NULL)
return;
if (txq->sw_ring != NULL) {
- rte_free((struct igb_rx_entry *)txq->sw_ring - 1);
- txq->sw_ring = NULL;
+ rte_free(txq->sw_ring_v - 1);
+ txq->sw_ring_v = NULL;
}
}
-static void
-ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
+static void __attribute__((cold))
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
- static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
- .buffer_addr = 0} };
- struct igb_tx_entry_v *txe = (struct igb_tx_entry_v *)txq->sw_ring;
+ static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
+ struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
uint16_t i;
/* Zero out HW ring memory */
@@ -732,14 +774,14 @@ ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}
-static struct ixgbe_txq_ops vec_txq_ops = {
- .release_mbufs = ixgbe_tx_queue_release_mbufs,
+static const struct ixgbe_txq_ops vec_txq_ops = {
+ .release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
.free_swring = ixgbe_tx_free_swring,
.reset = ixgbe_reset_tx_queue,
};
-int
-ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq)
+int __attribute__((cold))
+ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
uintptr_t p;
struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
@@ -756,20 +798,21 @@ ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq)
return 0;
}
-int ixgbe_txq_vec_setup(struct igb_tx_queue *txq)
+int __attribute__((cold))
+ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
- if (txq->sw_ring == NULL)
+ if (txq->sw_ring_v == NULL)
return -1;
/* leave the first one for overflow */
- txq->sw_ring = (struct igb_tx_entry *)
- ((struct igb_tx_entry_v *)txq->sw_ring + 1);
+ txq->sw_ring_v = txq->sw_ring_v + 1;
txq->ops = &vec_txq_ops;
return 0;
}
-int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev)
+int __attribute__((cold))
+ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
diff --git a/src/dpdk22/drivers/net/mlx4/mlx4.h b/src/dpdk22/drivers/net/mlx4/mlx4.h
new file mode 100644
index 00000000..d0c7bc29
--- /dev/null
+++ b/src/dpdk22/drivers/net/mlx4/mlx4.h
@@ -0,0 +1,163 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2012-2015 6WIND S.A.
+ * Copyright 2012 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_PMD_MLX4_H_
+#define RTE_PMD_MLX4_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <limits.h>
+
+/*
+ * Maximum number of simultaneous MAC addresses supported.
+ *
+ * According to ConnectX's Programmer Reference Manual:
+ * The L2 Address Match is implemented by comparing a MAC/VLAN combination
+ * of 128 MAC addresses and 127 VLAN values, comprising 128x127 possible
+ * L2 addresses.
+ */
+#define MLX4_MAX_MAC_ADDRESSES 128
+
+/* Maximum number of simultaneous VLAN filters supported. See above. */
+#define MLX4_MAX_VLAN_IDS 127
+
+/* Request send completion once in every 64 sends, might be less. */
+#define MLX4_PMD_TX_PER_COMP_REQ 64
+
+/* Maximum number of Scatter/Gather Elements per Work Request. */
+#ifndef MLX4_PMD_SGE_WR_N
+#define MLX4_PMD_SGE_WR_N 4
+#endif
+
+/* Maximum size for inline data. */
+#ifndef MLX4_PMD_MAX_INLINE
+#define MLX4_PMD_MAX_INLINE 0
+#endif
+
+/*
+ * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
+ * from which buffers are to be transmitted will have to be mapped by this
+ * driver to their own Memory Region (MR). This is a slow operation.
+ *
+ * This value is always 1 for RX queues.
+ */
+#ifndef MLX4_PMD_TX_MP_CACHE
+#define MLX4_PMD_TX_MP_CACHE 8
+#endif
+
+/*
+ * If defined, only use software counters. The PMD will never ask the hardware
+ * for these, and many of them won't be available.
+ */
+#ifndef MLX4_PMD_SOFT_COUNTERS
+#define MLX4_PMD_SOFT_COUNTERS 1
+#endif
+
+/* Alarm timeout. */
+#define MLX4_ALARM_TIMEOUT_US 100000
+
+enum {
+ PCI_VENDOR_ID_MELLANOX = 0x15b3,
+};
+
+enum {
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3 = 0x1003,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3VF = 0x1004,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO = 0x1007,
+};
+
+#define MLX4_DRIVER_NAME "librte_pmd_mlx4"
+
+/* Bit-field manipulation. */
+#define BITFIELD_DECLARE(bf, type, size) \
+ type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \
+ !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))]
+#define BITFIELD_DEFINE(bf, type, size) \
+ BITFIELD_DECLARE((bf), type, (size)) = { 0 }
+#define BITFIELD_SET(bf, b) \
+ (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
+ (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
+ ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
+#define BITFIELD_RESET(bf, b) \
+ (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
+ (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
+ ~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
+#define BITFIELD_ISSET(bf, b) \
+ (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
+ !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
+ ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))))
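The BITFIELD_* macros implement a fixed-capacity bitmap over an array of any unsigned integer type, rounding the element count up to cover "size" bits. A hypothetical standalone usage, mirroring how the PMD tracks configured MAC addresses:

    /* 128-bit bitmap backed by uint32_t words (4 elements, zeroed). */
    BITFIELD_DEFINE(mac_configured, uint32_t, 128);

    BITFIELD_SET(mac_configured, 5);           /* mark entry 5 in use */
    if (BITFIELD_ISSET(mac_configured, 5))
            BITFIELD_RESET(mac_configured, 5); /* release it again */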
+
+/* Number of elements in array. */
+#define elemof(a) (sizeof(a) / sizeof((a)[0]))
+
+/* Cast pointer p to structure member m to its parent structure of type t. */
+#define containerof(p, t, m) ((t *)((uint8_t *)(p) - offsetof(t, m)))
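containerof() recovers a pointer to an enclosing structure from a pointer to one of its members, the same pattern as the Linux kernel's container_of(). A hypothetical example:

    struct parent {
            int tag;
            int member;
    };

    static struct parent *
    parent_of(int *m) /* m points at some parent's .member field */
    {
            return containerof(m, struct parent, member);
    }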
+
+/* Branch prediction helpers. */
+#ifndef likely
+#define likely(c) __builtin_expect(!!(c), 1)
+#endif
+#ifndef unlikely
+#define unlikely(c) __builtin_expect(!!(c), 0)
+#endif
+
+/* Debugging */
+#ifndef NDEBUG
+#include <stdio.h>
+#define DEBUG__(m, ...) \
+ (fprintf(stderr, "%s:%d: %s(): " m "%c", \
+ __FILE__, __LINE__, __func__, __VA_ARGS__), \
+ fflush(stderr), \
+ (void)0)
+/*
+ * Save/restore errno around DEBUG__().
+ * XXX somewhat undefined behavior, but works.
+ */
+#define DEBUG_(...) \
+ (errno = ((int []){ \
+ *(volatile int *)&errno, \
+ (DEBUG__(__VA_ARGS__), 0) \
+ })[0])
+#define DEBUG(...) DEBUG_(__VA_ARGS__, '\n')
+#define claim_zero(...) assert((__VA_ARGS__) == 0)
+#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
+#define claim_positive(...) assert((__VA_ARGS__) >= 0)
+#else /* NDEBUG */
+/* No-ops. */
+#define DEBUG(...) (void)0
+#define claim_zero(...) (__VA_ARGS__)
+#define claim_nonzero(...) (__VA_ARGS__)
+#define claim_positive(...) (__VA_ARGS__)
+#endif /* NDEBUG */
+
+#endif /* RTE_PMD_MLX4_H_ */
diff --git a/src/dpdk22/drivers/net/mlx5/mlx5.h b/src/dpdk22/drivers/net/mlx5/mlx5.h
new file mode 100644
index 00000000..b84d31d5
--- /dev/null
+++ b/src/dpdk22/drivers/net/mlx5/mlx5.h
@@ -0,0 +1,220 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_PMD_MLX5_H_
+#define RTE_PMD_MLX5_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <limits.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <linux/if.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-pedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-pedantic"
+#endif
+
+/* DPDK headers don't like -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-pedantic"
+#endif
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_spinlock.h>
+#include <rte_interrupts.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-pedantic"
+#endif
+
+#include "mlx5_utils.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_defs.h"
+
+enum {
+ PCI_VENDOR_ID_MELLANOX = 0x15b3,
+};
+
+enum {
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4 = 0x1013,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4VF = 0x1014,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LX = 0x1015,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF = 0x1016,
+};
+
+struct priv {
+ struct rte_eth_dev *dev; /* Ethernet device. */
+ struct ibv_context *ctx; /* Verbs context. */
+ struct ibv_device_attr device_attr; /* Device properties. */
+ struct ibv_pd *pd; /* Protection Domain. */
+ /*
+ * MAC addresses array and configuration bit-field.
+ * An extra entry that cannot be modified by the DPDK is reserved
+ * for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff).
+ */
+ struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES];
+ BITFIELD_DECLARE(mac_configured, uint32_t, MLX5_MAX_MAC_ADDRESSES);
+ uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */
+ unsigned int vlan_filter_n; /* Number of configured VLAN filters. */
+ /* Device properties. */
+ uint16_t mtu; /* Configured MTU. */
+ uint8_t port; /* Physical port number. */
+ unsigned int started:1; /* Device started, flows enabled. */
+ unsigned int promisc_req:1; /* Promiscuous mode requested. */
+ unsigned int allmulti_req:1; /* All multicast mode requested. */
+ unsigned int hw_csum:1; /* Checksum offload is supported. */
+ unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
+ unsigned int vf:1; /* This is a VF device. */
+ unsigned int pending_alarm:1; /* An alarm is pending. */
+ /* RX/TX queues. */
+ unsigned int rxqs_n; /* RX queues array size. */
+ unsigned int txqs_n; /* TX queues array size. */
+ struct rxq *(*rxqs)[]; /* RX queues. */
+ struct txq *(*txqs)[]; /* TX queues. */
+ /* Indirection tables referencing all RX WQs. */
+ struct ibv_exp_rwq_ind_table *(*ind_tables)[];
+ unsigned int ind_tables_n; /* Number of indirection tables. */
+ unsigned int ind_table_max_size; /* Maximum indirection table size. */
+ /* Hash RX QPs feeding the indirection table. */
+ struct hash_rxq (*hash_rxqs)[];
+ unsigned int hash_rxqs_n; /* Hash RX QPs array size. */
+ /* RSS configuration array indexed by hash RX queue type. */
+ struct rte_eth_rss_conf *(*rss_conf)[];
+ struct rte_intr_handle intr_handle; /* Interrupt handler. */
+ unsigned int (*reta_idx)[]; /* RETA index table. */
+ unsigned int reta_idx_n; /* RETA index size. */
+ rte_spinlock_t lock; /* Lock for control functions. */
+};
+
+/**
+ * Lock private structure to protect it from concurrent access in the
+ * control path.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static inline void
+priv_lock(struct priv *priv)
+{
+ rte_spinlock_lock(&priv->lock);
+}
+
+/**
+ * Unlock private structure.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static inline void
+priv_unlock(struct priv *priv)
+{
+ rte_spinlock_unlock(&priv->lock);
+}
+
+/* mlx5_ethdev.c */
+
+int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]);
+int priv_ifreq(const struct priv *, int req, struct ifreq *);
+int priv_get_mtu(struct priv *, uint16_t *);
+int priv_set_flags(struct priv *, unsigned int, unsigned int);
+int mlx5_dev_configure(struct rte_eth_dev *);
+void mlx5_dev_infos_get(struct rte_eth_dev *, struct rte_eth_dev_info *);
+int mlx5_link_update(struct rte_eth_dev *, int);
+int mlx5_dev_set_mtu(struct rte_eth_dev *, uint16_t);
+int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *);
+int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *);
+int mlx5_ibv_device_to_pci_addr(const struct ibv_device *,
+ struct rte_pci_addr *);
+void mlx5_dev_link_status_handler(void *);
+void mlx5_dev_interrupt_handler(struct rte_intr_handle *, void *);
+void priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
+void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
+
+/* mlx5_mac.c */
+
+int priv_get_mac(struct priv *, uint8_t (*)[ETHER_ADDR_LEN]);
+void hash_rxq_mac_addrs_del(struct hash_rxq *);
+void priv_mac_addrs_disable(struct priv *);
+void mlx5_mac_addr_remove(struct rte_eth_dev *, uint32_t);
+int hash_rxq_mac_addrs_add(struct hash_rxq *);
+int priv_mac_addr_add(struct priv *, unsigned int,
+ const uint8_t (*)[ETHER_ADDR_LEN]);
+int priv_mac_addrs_enable(struct priv *);
+void mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t,
+ uint32_t);
+
+/* mlx5_rss.c */
+
+int rss_hash_rss_conf_new_key(struct priv *, const uint8_t *, unsigned int,
+ uint64_t);
+int mlx5_rss_hash_update(struct rte_eth_dev *, struct rte_eth_rss_conf *);
+int mlx5_rss_hash_conf_get(struct rte_eth_dev *, struct rte_eth_rss_conf *);
+int priv_rss_reta_index_resize(struct priv *, unsigned int);
+int mlx5_dev_rss_reta_query(struct rte_eth_dev *,
+ struct rte_eth_rss_reta_entry64 *, uint16_t);
+int mlx5_dev_rss_reta_update(struct rte_eth_dev *,
+ struct rte_eth_rss_reta_entry64 *, uint16_t);
+
+/* mlx5_rxmode.c */
+
+int priv_promiscuous_enable(struct priv *);
+void mlx5_promiscuous_enable(struct rte_eth_dev *);
+void priv_promiscuous_disable(struct priv *);
+void mlx5_promiscuous_disable(struct rte_eth_dev *);
+int priv_allmulticast_enable(struct priv *);
+void mlx5_allmulticast_enable(struct rte_eth_dev *);
+void priv_allmulticast_disable(struct priv *);
+void mlx5_allmulticast_disable(struct rte_eth_dev *);
+
+/* mlx5_stats.c */
+
+void mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *);
+void mlx5_stats_reset(struct rte_eth_dev *);
+
+/* mlx5_vlan.c */
+
+int mlx5_vlan_filter_set(struct rte_eth_dev *, uint16_t, int);
+
+/* mlx5_trigger.c */
+
+int mlx5_dev_start(struct rte_eth_dev *);
+void mlx5_dev_stop(struct rte_eth_dev *);
+
+#endif /* RTE_PMD_MLX5_H_ */
diff --git a/src/dpdk22/drivers/net/mlx5/mlx5_defs.h b/src/dpdk22/drivers/net/mlx5/mlx5_defs.h
new file mode 100644
index 00000000..bb82c9a9
--- /dev/null
+++ b/src/dpdk22/drivers/net/mlx5/mlx5_defs.h
@@ -0,0 +1,84 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_PMD_MLX5_DEFS_H_
+#define RTE_PMD_MLX5_DEFS_H_
+
+/* Reported driver name. */
+#define MLX5_DRIVER_NAME "librte_pmd_mlx5"
+
+/* Maximum number of simultaneous MAC addresses. */
+#define MLX5_MAX_MAC_ADDRESSES 128
+
+/* Maximum number of simultaneous VLAN filters. */
+#define MLX5_MAX_VLAN_IDS 128
+
+/* Request send completion once in every 64 sends, might be less. */
+#define MLX5_PMD_TX_PER_COMP_REQ 64
+
+/* RSS Indirection table size. */
+#define RSS_INDIRECTION_TABLE_SIZE 128
+
+/* Maximum number of Scatter/Gather Elements per Work Request. */
+#ifndef MLX5_PMD_SGE_WR_N
+#define MLX5_PMD_SGE_WR_N 4
+#endif
+
+/* Maximum size for inline data. */
+#ifndef MLX5_PMD_MAX_INLINE
+#define MLX5_PMD_MAX_INLINE 0
+#endif
+
+/*
+ * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
+ * from which buffers are to be transmitted will have to be mapped by this
+ * driver to their own Memory Region (MR). This is a slow operation.
+ *
+ * This value is always 1 for RX queues.
+ */
+#ifndef MLX5_PMD_TX_MP_CACHE
+#define MLX5_PMD_TX_MP_CACHE 8
+#endif
+
+/*
+ * If defined, only use software counters. The PMD will never ask the hardware
+ * for these, and many of them won't be available.
+ */
+#ifndef MLX5_PMD_SOFT_COUNTERS
+#define MLX5_PMD_SOFT_COUNTERS 1
+#endif
+
+/* Alarm timeout. */
+#define MLX5_ALARM_TIMEOUT_US 100000
+
+#endif /* RTE_PMD_MLX5_DEFS_H_ */
diff --git a/src/dpdk22/drivers/net/mlx5/mlx5_rxtx.h b/src/dpdk22/drivers/net/mlx5/mlx5_rxtx.h
new file mode 100644
index 00000000..e1e19251
--- /dev/null
+++ b/src/dpdk22/drivers/net/mlx5/mlx5_rxtx.h
@@ -0,0 +1,274 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_PMD_MLX5_RXTX_H_
+#define RTE_PMD_MLX5_RXTX_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-pedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-pedantic"
+#endif
+
+/* DPDK headers don't like -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-pedantic"
+#endif
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-pedantic"
+#endif
+
+#include "mlx5_utils.h"
+#include "mlx5.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_defs.h"
+
+struct mlx5_rxq_stats {
+ unsigned int idx; /**< Mapping index. */
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint64_t ipackets; /**< Total of successfully received packets. */
+ uint64_t ibytes; /**< Total of successfully received bytes. */
+#endif
+ uint64_t idropped; /**< Total of packets dropped when RX ring full. */
+ uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
+};
+
+struct mlx5_txq_stats {
+ unsigned int idx; /**< Mapping index. */
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint64_t opackets; /**< Total of successfully sent packets. */
+ uint64_t obytes; /**< Total of successfully sent bytes. */
+#endif
+ uint64_t odropped; /**< Total of packets not sent when TX ring full. */
+};
+
+/* RX element (scattered packets). */
+struct rxq_elt_sp {
+ struct ibv_sge sges[MLX5_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */
+ struct rte_mbuf *bufs[MLX5_PMD_SGE_WR_N]; /* SGEs buffers. */
+};
+
+/* RX element. */
+struct rxq_elt {
+ struct ibv_sge sge; /* Scatter/Gather Element. */
+ struct rte_mbuf *buf; /* SGE buffer. */
+};
+
+struct priv;
+
+/* RX queue descriptor. */
+struct rxq {
+ struct priv *priv; /* Back pointer to private data. */
+ struct rte_mempool *mp; /* Memory Pool for allocations. */
+ struct ibv_mr *mr; /* Memory Region (for mp). */
+ struct ibv_cq *cq; /* Completion Queue. */
+ struct ibv_exp_wq *wq; /* Work Queue. */
+ struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */
+ struct ibv_exp_cq_family *if_cq; /* CQ interface. */
+ unsigned int port_id; /* Port ID for incoming packets. */
+ unsigned int elts_n; /* (*elts)[] length. */
+ unsigned int elts_head; /* Current index in (*elts)[]. */
+ union {
+ struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */
+ struct rxq_elt (*no_sp)[]; /* RX elements. */
+ } elts;
+ unsigned int sp:1; /* Use scattered RX elements. */
+ unsigned int csum:1; /* Enable checksum offloading. */
+ unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
+ uint32_t mb_len; /* Length of a mp-issued mbuf. */
+ struct mlx5_rxq_stats stats; /* RX queue counters. */
+ unsigned int socket; /* CPU socket ID for allocations. */
+ struct ibv_exp_res_domain *rd; /* Resource Domain. */
+};
+
+/* Hash RX queue types. */
+enum hash_rxq_type {
+ HASH_RXQ_TCPV4,
+ HASH_RXQ_UDPV4,
+ HASH_RXQ_IPV4,
+#ifdef HAVE_FLOW_SPEC_IPV6
+ HASH_RXQ_TCPV6,
+ HASH_RXQ_UDPV6,
+ HASH_RXQ_IPV6,
+#endif /* HAVE_FLOW_SPEC_IPV6 */
+ HASH_RXQ_ETH,
+};
+
+/* Flow structure with Ethernet specification. It is packed to prevent padding
+ * between attr and spec as this layout is expected by libibverbs. */
+struct flow_attr_spec_eth {
+ struct ibv_exp_flow_attr attr;
+ struct ibv_exp_flow_spec_eth spec;
+} __attribute__((packed));
+
+/* Define a struct flow_attr_spec_eth object as an array of at least
+ * "size" bytes. Room after the first index is normally used to store
+ * extra flow specifications. */
+#define FLOW_ATTR_SPEC_ETH(name, size) \
+ struct flow_attr_spec_eth name \
+ [((size) / sizeof(struct flow_attr_spec_eth)) + \
+ !!((size) % sizeof(struct flow_attr_spec_eth))]
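FLOW_ATTR_SPEC_ETH sizes the array so that at least "size" bytes are available, rounding up to a whole number of elements; extra flow specifications are then written into the tail room after data[0]. A hedged sketch of how such an object is typically placed on the stack:

    /* Stack object with room for the base attr + eth spec and one
     * extra specification appended after it (illustrative sizing). */
    FLOW_ATTR_SPEC_ETH(data, sizeof(struct flow_attr_spec_eth) +
                             sizeof(struct ibv_exp_flow_spec_eth));
    struct ibv_exp_flow_attr *attr = &data->attr;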
+
+/* Initialization data for hash RX queue. */
+struct hash_rxq_init {
+ uint64_t hash_fields; /* Fields that participate in the hash. */
+ uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */
+ unsigned int flow_priority; /* Flow priority to use. */
+ union {
+ struct {
+ enum ibv_exp_flow_spec_type type;
+ uint16_t size;
+ } hdr;
+ struct ibv_exp_flow_spec_tcp_udp tcp_udp;
+ struct ibv_exp_flow_spec_ipv4 ipv4;
+#ifdef HAVE_FLOW_SPEC_IPV6
+ struct ibv_exp_flow_spec_ipv6 ipv6;
+#endif /* HAVE_FLOW_SPEC_IPV6 */
+ struct ibv_exp_flow_spec_eth eth;
+ } flow_spec; /* Flow specification template. */
+ const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */
+};
+
+/* Initialization data for indirection table. */
+struct ind_table_init {
+ unsigned int max_size; /* Maximum number of WQs. */
+ /* Hash RX queues using this table. */
+ unsigned int hash_types;
+ unsigned int hash_types_n;
+};
+
+enum hash_rxq_flow_type {
+ HASH_RXQ_FLOW_TYPE_MAC,
+ HASH_RXQ_FLOW_TYPE_PROMISC,
+ HASH_RXQ_FLOW_TYPE_ALLMULTI,
+};
+
+struct hash_rxq {
+ struct priv *priv; /* Back pointer to private data. */
+ struct ibv_qp *qp; /* Hash RX QP. */
+ enum hash_rxq_type type; /* Hash RX queue type. */
+ /* MAC flow steering rules, one per VLAN ID. */
+ struct ibv_exp_flow *mac_flow[MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS];
+ struct ibv_exp_flow *promisc_flow; /* Promiscuous flow. */
+ struct ibv_exp_flow *allmulti_flow; /* Multicast flow. */
+};
+
+/* TX element. */
+struct txq_elt {
+ struct rte_mbuf *buf;
+};
+
+/* Linear buffer type. It is used when transmitting buffers with too many
+ * segments that do not fit the hardware queue (see max_send_sge).
+ * Extra segments are copied (linearized) in such buffers, replacing the
+ * last SGE during TX.
+ * The size is arbitrary but large enough to hold a jumbo frame with
+ * 8 segments considering mbuf.buf_len is about 2048 bytes. */
+typedef uint8_t linear_t[16384];
+
+/* TX queue descriptor. */
+struct txq {
+ struct priv *priv; /* Back pointer to private data. */
+ struct {
+ const struct rte_mempool *mp; /* Cached Memory Pool. */
+ struct ibv_mr *mr; /* Memory Region (for mp). */
+ uint32_t lkey; /* mr->lkey */
+ } mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
+ struct ibv_cq *cq; /* Completion Queue. */
+ struct ibv_qp *qp; /* Queue Pair. */
+ struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
+ struct ibv_exp_cq_family *if_cq; /* CQ interface. */
+#if MLX5_PMD_MAX_INLINE > 0
+ uint32_t max_inline; /* Max inline send size <= MLX5_PMD_MAX_INLINE. */
+#endif
+ unsigned int elts_n; /* (*elts)[] length. */
+ struct txq_elt (*elts)[]; /* TX elements. */
+ unsigned int elts_head; /* Current index in (*elts)[]. */
+ unsigned int elts_tail; /* First element awaiting completion. */
+ unsigned int elts_comp; /* Number of completion requests. */
+ unsigned int elts_comp_cd; /* Countdown for next completion request. */
+ unsigned int elts_comp_cd_init; /* Initial value for countdown. */
+ struct mlx5_txq_stats stats; /* TX queue counters. */
+ linear_t (*elts_linear)[]; /* Linearized buffers. */
+ struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
+ unsigned int socket; /* CPU socket ID for allocations. */
+ struct ibv_exp_res_domain *rd; /* Resource Domain. */
+};
+
+/* mlx5_rxq.c */
+
+extern const struct hash_rxq_init hash_rxq_init[];
+extern const unsigned int hash_rxq_init_n;
+
+extern uint8_t rss_hash_default_key[];
+extern const size_t rss_hash_default_key_len;
+
+size_t hash_rxq_flow_attr(const struct hash_rxq *, struct ibv_exp_flow_attr *,
+ size_t);
+int priv_create_hash_rxqs(struct priv *);
+void priv_destroy_hash_rxqs(struct priv *);
+int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
+void rxq_cleanup(struct rxq *);
+int rxq_rehash(struct rte_eth_dev *, struct rxq *);
+int rxq_setup(struct rte_eth_dev *, struct rxq *, uint16_t, unsigned int,
+ const struct rte_eth_rxconf *, struct rte_mempool *);
+int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
+ const struct rte_eth_rxconf *, struct rte_mempool *);
+void mlx5_rx_queue_release(void *);
+
+/* mlx5_txq.c */
+
+void txq_cleanup(struct txq *);
+int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
+ const struct rte_eth_txconf *);
+void mlx5_tx_queue_release(void *);
+
+/* mlx5_rxtx.c */
+
+void txq_mp2mr_iter(const struct rte_mempool *, void *);
+uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
+uint16_t mlx5_rx_burst_sp(void *, struct rte_mbuf **, uint16_t);
+uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t);
+uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
+uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
+
+#endif /* RTE_PMD_MLX5_RXTX_H_ */
diff --git a/src/dpdk22/drivers/net/mlx5/mlx5_utils.h b/src/dpdk22/drivers/net/mlx5/mlx5_utils.h
new file mode 100644
index 00000000..9b5e86a8
--- /dev/null
+++ b/src/dpdk22/drivers/net/mlx5/mlx5_utils.h
@@ -0,0 +1,184 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_PMD_MLX5_UTILS_H_
+#define RTE_PMD_MLX5_UTILS_H_
+
+#include <stddef.h>
+#include <stdio.h>
+#include <limits.h>
+#include <assert.h>
+#include <errno.h>
+
+#include "mlx5_defs.h"
+
+/* Bit-field manipulation. */
+#define BITFIELD_DECLARE(bf, type, size) \
+ type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \
+ !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))]
+#define BITFIELD_DEFINE(bf, type, size) \
+ BITFIELD_DECLARE((bf), type, (size)) = { 0 }
+#define BITFIELD_SET(bf, b) \
+ (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
+ (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
+ ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
+#define BITFIELD_RESET(bf, b) \
+ (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
+ (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
+ ~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
+#define BITFIELD_ISSET(bf, b) \
+ (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
+ !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
+ ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))))
+
+/* Save and restore errno around argument evaluation. */
+#define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))
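ERRNO_SAFE() evaluates x for its side effects while preserving errno: the compound literal captures the current errno in element [0], x runs as part of element [1]'s initializer, and the saved value is assigned back. The longhand equivalent (in spirit; the initializer trick keeps it a single expression):

    int saved = errno; /* captured before x runs */
    (void)(x);         /* may clobber errno, e.g. via RTE_LOG() */
    errno = saved;     /* restored afterwards */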
+
+/*
+ * Helper macros to work around __VA_ARGS__ limitations in a C99 compliant
+ * manner.
+ */
+#define PMD_DRV_LOG_STRIP(a, b) a
+#define PMD_DRV_LOG_OPAREN (
+#define PMD_DRV_LOG_CPAREN )
+#define PMD_DRV_LOG_COMMA ,
+
+/* Return the file name part of a path. */
+static inline const char *
+pmd_drv_log_basename(const char *s)
+{
+ const char *n = s;
+
+ while (*n)
+ if (*(n++) == '/')
+ s = n;
+ return s;
+}
+
+/*
+ * When debugging is enabled (NDEBUG not defined), file, line and function
+ * information replace the driver name (MLX5_DRIVER_NAME) in log messages.
+ */
+#ifndef NDEBUG
+
+#define PMD_DRV_LOG___(level, ...) \
+ ERRNO_SAFE(RTE_LOG(level, PMD, __VA_ARGS__))
+#define PMD_DRV_LOG__(level, ...) \
+ PMD_DRV_LOG___(level, "%s:%u: %s(): " __VA_ARGS__)
+#define PMD_DRV_LOG_(level, s, ...) \
+ PMD_DRV_LOG__(level, \
+ s "\n" PMD_DRV_LOG_COMMA \
+ pmd_drv_log_basename(__FILE__) PMD_DRV_LOG_COMMA \
+ __LINE__ PMD_DRV_LOG_COMMA \
+ __func__, \
+ __VA_ARGS__)
+
+#else /* NDEBUG */
+
+#define PMD_DRV_LOG___(level, ...) \
+ ERRNO_SAFE(RTE_LOG(level, PMD, MLX5_DRIVER_NAME ": " __VA_ARGS__))
+#define PMD_DRV_LOG__(level, ...) \
+ PMD_DRV_LOG___(level, __VA_ARGS__)
+#define PMD_DRV_LOG_(level, s, ...) \
+ PMD_DRV_LOG__(level, s "\n", __VA_ARGS__)
+
+#endif /* NDEBUG */
+
+/* Generic printf()-like logging macro with automatic line feed. */
+#define PMD_DRV_LOG(level, ...) \
+ PMD_DRV_LOG_(level, \
+ __VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
+ PMD_DRV_LOG_CPAREN)
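The OPAREN/CPAREN/COMMA tokens defer parenthesis and comma recognition until after argument substitution, which lets PMD_DRV_LOG_STRIP() swallow the leftover tokens: the macro can append "\n" (and, with debugging on, file/line/function) whether or not the caller passes anything after the format string, a portable C99 stand-in for GNU ##__VA_ARGS__. Hypothetical calls, both valid:

    PMD_DRV_LOG(DEBUG, "port %u started", port_id); /* with arguments */
    PMD_DRV_LOG(WARNING, "no interrupt support");   /* without */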
+
+/*
+ * Like assert(), DEBUG() becomes a no-op and claim_zero() does not perform
+ * any check when debugging is disabled.
+ */
+#ifndef NDEBUG
+
+#define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__)
+#define claim_zero(...) assert((__VA_ARGS__) == 0)
+
+#else /* NDEBUG */
+
+#define DEBUG(...) (void)0
+#define claim_zero(...) (__VA_ARGS__)
+
+#endif /* NDEBUG */
+
+#define INFO(...) PMD_DRV_LOG(INFO, __VA_ARGS__)
+#define WARN(...) PMD_DRV_LOG(WARNING, __VA_ARGS__)
+#define ERROR(...) PMD_DRV_LOG(ERR, __VA_ARGS__)
+
+/* Convenience macros for accessing mbuf fields. */
+#define NEXT(m) ((m)->next)
+#define DATA_LEN(m) ((m)->data_len)
+#define PKT_LEN(m) ((m)->pkt_len)
+#define DATA_OFF(m) ((m)->data_off)
+#define SET_DATA_OFF(m, o) ((m)->data_off = (o))
+#define NB_SEGS(m) ((m)->nb_segs)
+#define PORT(m) ((m)->port)
+
+/* Transpose flags. Useful to convert IBV to DPDK flags. */
+#define TRANSPOSE(val, from, to) \
+ (((from) >= (to)) ? \
+ (((val) & (from)) / ((from) / (to))) : \
+ (((val) & (from)) * ((to) / (from))))
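TRANSPOSE() relocates a masked flag by multiplying or dividing with the ratio of the two single-bit masks, avoiding a conditional shift direction. A worked example with illustrative values:

    unsigned int in = 0x10;                       /* flag at bit 4 */
    unsigned int out = TRANSPOSE(in, 0x10, 0x02); /* moved to bit 1 */
    /* out == 0x02: 0x10 >= 0x02, so (in & 0x10) / (0x10 / 0x02) */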
+
+/* Allocate a buffer on the stack and fill it with a printf format string. */
+#define MKSTR(name, ...) \
+ char name[snprintf(NULL, 0, __VA_ARGS__) + 1]; \
+ \
+ snprintf(name, sizeof(name), __VA_ARGS__)
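MKSTR() runs snprintf() twice: first with a NULL buffer to measure the formatted length, then into a stack array declared exactly that size (a C99 variable-length array). A hypothetical use:

    MKSTR(path, "/sys/class/net/%s/mtu", ifname);
    /* 'path' is a char[] exactly large enough for the result */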
+
+/**
+ * Return the ceiling of log2(v), i.e. the exponent of the nearest power
+ * of two greater than or equal to the input value.
+ *
+ * @param v
+ *   Input value.
+ *
+ * @return
+ *   ceil(log2(v)); 0 when v is 0 or 1.
+ */
+static inline unsigned int
+log2above(unsigned int v)
+{
+ unsigned int l;
+ unsigned int r;
+
+ for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
+ r |= (v & 1);
+ return (l + r);
+}
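The loop counts how many times v can be halved (the bit length, l) while OR-ing any discarded low bits into r, so the result rounds up unless v is an exact power of two. Worked values:

    log2above(1); /* == 0 */
    log2above(4); /* == 2: exact power of two, no rounding */
    log2above(5); /* == 3: between 2^2 and 2^3, rounds up */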
+
+#endif /* RTE_PMD_MLX5_UTILS_H_ */
diff --git a/src/dpdk22/drivers/net/null/rte_eth_null.c b/src/dpdk22/drivers/net/null/rte_eth_null.c
new file mode 100644
index 00000000..77fc9883
--- /dev/null
+++ b/src/dpdk22/drivers/net/null/rte_eth_null.c
@@ -0,0 +1,703 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (C) IGEL Co.,Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of IGEL Co.,Ltd. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_dev.h>
+#include <rte_kvargs.h>
+#include <rte_spinlock.h>
+
+#include "rte_eth_null.h"
+
+#define ETH_NULL_PACKET_SIZE_ARG "size"
+#define ETH_NULL_PACKET_COPY_ARG "copy"
+
+static unsigned default_packet_size = 64;
+static unsigned default_packet_copy;
+
+static const char *valid_arguments[] = {
+ ETH_NULL_PACKET_SIZE_ARG,
+ ETH_NULL_PACKET_COPY_ARG,
+ NULL
+};
+
+struct pmd_internals;
+
+struct null_queue {
+ struct pmd_internals *internals;
+
+ struct rte_mempool *mb_pool;
+ struct rte_mbuf *dummy_packet;
+
+ rte_atomic64_t rx_pkts;
+ rte_atomic64_t tx_pkts;
+ rte_atomic64_t err_pkts;
+};
+
+struct pmd_internals {
+ unsigned packet_size;
+ unsigned packet_copy;
+ unsigned numa_node;
+
+ unsigned nb_rx_queues;
+ unsigned nb_tx_queues;
+
+ struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
+ struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];
+
+ /** Bit mask of RSS offloads, the bit offset also means flow type */
+ uint64_t flow_type_rss_offloads;
+
+ rte_spinlock_t rss_lock;
+
+ uint16_t reta_size;
+ struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
+ RTE_RETA_GROUP_SIZE];
+
+ uint8_t rss_key[40]; /**< 40-byte hash key. */
+};
+
+
+static struct ether_addr eth_addr = { .addr_bytes = {0} };
+static const char *drivername = "Null PMD";
+static struct rte_eth_link pmd_link = {
+ .link_speed = 10000,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = 0
+};
+
+static uint16_t
+eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ int i;
+ struct null_queue *h = q;
+ unsigned packet_size;
+
+ if ((q == NULL) || (bufs == NULL))
+ return 0;
+
+ packet_size = h->internals->packet_size;
+ for (i = 0; i < nb_bufs; i++) {
+ bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
+ if (!bufs[i])
+ break;
+ bufs[i]->data_len = (uint16_t)packet_size;
+ bufs[i]->pkt_len = packet_size;
+ bufs[i]->nb_segs = 1;
+ bufs[i]->next = NULL;
+ }
+
+ rte_atomic64_add(&(h->rx_pkts), i);
+
+ return i;
+}
+
+static uint16_t
+eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ int i;
+ struct null_queue *h = q;
+ unsigned packet_size;
+
+ if ((q == NULL) || (bufs == NULL))
+ return 0;
+
+ packet_size = h->internals->packet_size;
+ for (i = 0; i < nb_bufs; i++) {
+ bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
+ if (!bufs[i])
+ break;
+ rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
+ packet_size);
+ bufs[i]->data_len = (uint16_t)packet_size;
+ bufs[i]->pkt_len = packet_size;
+ bufs[i]->nb_segs = 1;
+ bufs[i]->next = NULL;
+ }
+
+ rte_atomic64_add(&(h->rx_pkts), i);
+
+ return i;
+}
+
+static uint16_t
+eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ int i;
+ struct null_queue *h = q;
+
+ if ((q == NULL) || (bufs == NULL))
+ return 0;
+
+ for (i = 0; i < nb_bufs; i++)
+ rte_pktmbuf_free(bufs[i]);
+
+ rte_atomic64_add(&(h->tx_pkts), i);
+
+ return i;
+}
+
+static uint16_t
+eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ int i;
+ struct null_queue *h = q;
+ unsigned packet_size;
+
+ if ((q == NULL) || (bufs == NULL))
+ return 0;
+
+ packet_size = h->internals->packet_size;
+ for (i = 0; i < nb_bufs; i++) {
+ rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
+ packet_size);
+ rte_pktmbuf_free(bufs[i]);
+ }
+
+ rte_atomic64_add(&(h->tx_pkts), i);
+
+ return i;
+}
+
+static int
+eth_dev_configure(struct rte_eth_dev *dev) {
+ struct pmd_internals *internals;
+
+ internals = dev->data->dev_private;
+ internals->nb_rx_queues = dev->data->nb_rx_queues;
+ internals->nb_tx_queues = dev->data->nb_tx_queues;
+
+ return 0;
+}
+
+static int
+eth_dev_start(struct rte_eth_dev *dev)
+{
+ if (dev == NULL)
+ return -EINVAL;
+
+ dev->data->dev_link.link_status = 1;
+ return 0;
+}
+
+static void
+eth_dev_stop(struct rte_eth_dev *dev)
+{
+ if (dev == NULL)
+ return;
+
+ dev->data->dev_link.link_status = 0;
+}
+
+static int
+eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct rte_mbuf *dummy_packet;
+ struct pmd_internals *internals;
+ unsigned packet_size;
+
+ if ((dev == NULL) || (mb_pool == NULL))
+ return -EINVAL;
+
+ internals = dev->data->dev_private;
+
+ if (rx_queue_id >= internals->nb_rx_queues)
+ return -ENODEV;
+
+ packet_size = internals->packet_size;
+
+ internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
+ dev->data->rx_queues[rx_queue_id] =
+ &internals->rx_null_queues[rx_queue_id];
+ dummy_packet = rte_zmalloc_socket(NULL,
+ packet_size, 0, internals->numa_node);
+ if (dummy_packet == NULL)
+ return -ENOMEM;
+
+ internals->rx_null_queues[rx_queue_id].internals = internals;
+ internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;
+
+ return 0;
+}
+
+static int
+eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct rte_mbuf *dummy_packet;
+ struct pmd_internals *internals;
+ unsigned packet_size;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ internals = dev->data->dev_private;
+
+ if (tx_queue_id >= internals->nb_tx_queues)
+ return -ENODEV;
+
+ packet_size = internals->packet_size;
+
+ dev->data->tx_queues[tx_queue_id] =
+ &internals->tx_null_queues[tx_queue_id];
+ dummy_packet = rte_zmalloc_socket(NULL,
+ packet_size, 0, internals->numa_node);
+ if (dummy_packet == NULL)
+ return -ENOMEM;
+
+ internals->tx_null_queues[tx_queue_id].internals = internals;
+ internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;
+
+ return 0;
+}
+
+
+static void
+eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct pmd_internals *internals;
+
+ if ((dev == NULL) || (dev_info == NULL))
+ return;
+
+ internals = dev->data->dev_private;
+ dev_info->driver_name = drivername;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = (uint32_t)-1;
+ dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
+ dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
+ dev_info->min_rx_bufsize = 0;
+ dev_info->pci_dev = NULL;
+ dev_info->reta_size = internals->reta_size;
+ dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
+}
+
+static void
+eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
+{
+ unsigned i, num_stats;
+ unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
+ const struct pmd_internals *internal;
+
+ if ((dev == NULL) || (igb_stats == NULL))
+ return;
+
+ internal = dev->data->dev_private;
+ num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
+ RTE_MIN(internal->nb_rx_queues,
+ RTE_DIM(internal->rx_null_queues)));
+ for (i = 0; i < num_stats; i++) {
+ igb_stats->q_ipackets[i] =
+ internal->rx_null_queues[i].rx_pkts.cnt;
+ rx_total += igb_stats->q_ipackets[i];
+ }
+
+ num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
+ RTE_MIN(internal->nb_tx_queues,
+ RTE_DIM(internal->tx_null_queues)));
+ for (i = 0; i < num_stats; i++) {
+ igb_stats->q_opackets[i] =
+ internal->tx_null_queues[i].tx_pkts.cnt;
+ igb_stats->q_errors[i] =
+ internal->tx_null_queues[i].err_pkts.cnt;
+ tx_total += igb_stats->q_opackets[i];
+ tx_err_total += igb_stats->q_errors[i];
+ }
+
+ igb_stats->ipackets = rx_total;
+ igb_stats->opackets = tx_total;
+ igb_stats->oerrors = tx_err_total;
+}
+
+static void
+eth_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned i;
+ struct pmd_internals *internal;
+
+ if (dev == NULL)
+ return;
+
+ internal = dev->data->dev_private;
+ for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
+ internal->rx_null_queues[i].rx_pkts.cnt = 0;
+ for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
+ internal->tx_null_queues[i].tx_pkts.cnt = 0;
+ internal->tx_null_queues[i].err_pkts.cnt = 0;
+ }
+}
+
+static void
+eth_queue_release(void *q)
+{
+ struct null_queue *nq;
+
+ if (q == NULL)
+ return;
+
+ nq = q;
+ rte_free(nq->dummy_packet);
+}
+
+static int
+eth_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused) { return 0; }
+
+static int
+eth_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
+{
+ int i, j;
+ struct pmd_internals *internal = dev->data->dev_private;
+
+ if (reta_size != internal->reta_size)
+ return -EINVAL;
+
+ rte_spinlock_lock(&internal->rss_lock);
+
+ /* Copy RETA table */
+ for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
+ internal->reta_conf[i].mask = reta_conf[i].mask;
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ if ((reta_conf[i].mask >> j) & 0x01)
+ internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
+ }
+
+ rte_spinlock_unlock(&internal->rss_lock);
+
+ return 0;
+}
+
+static int
+eth_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
+{
+ int i, j;
+ struct pmd_internals *internal = dev->data->dev_private;
+
+ if (reta_size != internal->reta_size)
+ return -EINVAL;
+
+ rte_spinlock_lock(&internal->rss_lock);
+
+ /* Copy RETA table */
+ for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ if ((reta_conf[i].mask >> j) & 0x01)
+ reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
+ }
+
+ rte_spinlock_unlock(&internal->rss_lock);
+
+ return 0;
+}
+
+static int
+eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
+{
+ struct pmd_internals *internal = dev->data->dev_private;
+
+ rte_spinlock_lock(&internal->rss_lock);
+
+ if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
+ rss_conf->rss_hf & internal->flow_type_rss_offloads;
+
+ if (rss_conf->rss_key)
+ rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);
+
+ rte_spinlock_unlock(&internal->rss_lock);
+
+ return 0;
+}
+
+static int
+eth_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct pmd_internals *internal = dev->data->dev_private;
+
+ rte_spinlock_lock(&internal->rss_lock);
+
+ rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+ if (rss_conf->rss_key)
+ rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);
+
+ rte_spinlock_unlock(&internal->rss_lock);
+
+ return 0;
+}
+
+static const struct eth_dev_ops ops = {
+ .dev_start = eth_dev_start,
+ .dev_stop = eth_dev_stop,
+ .dev_configure = eth_dev_configure,
+ .dev_infos_get = eth_dev_info,
+ .rx_queue_setup = eth_rx_queue_setup,
+ .tx_queue_setup = eth_tx_queue_setup,
+ .rx_queue_release = eth_queue_release,
+ .tx_queue_release = eth_queue_release,
+ .link_update = eth_link_update,
+ .stats_get = eth_stats_get,
+ .stats_reset = eth_stats_reset,
+ .reta_update = eth_rss_reta_update,
+ .reta_query = eth_rss_reta_query,
+ .rss_hash_update = eth_rss_hash_update,
+ .rss_hash_conf_get = eth_rss_hash_conf_get
+};
+
+int
+eth_dev_null_create(const char *name,
+ const unsigned numa_node,
+ unsigned packet_size,
+ unsigned packet_copy)
+{
+ const unsigned nb_rx_queues = 1;
+ const unsigned nb_tx_queues = 1;
+ struct rte_eth_dev_data *data = NULL;
+ struct pmd_internals *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+
+ static const uint8_t default_rss_key[40] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
+ 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
+ 0xBE, 0xAC, 0x01, 0xFA
+ };
+
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
+ numa_node);
+
+ /* now do all data allocation - for eth_dev structure, dummy pci driver
+ * and internal (private) data
+ */
+ data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+ if (data == NULL)
+ goto error;
+
+ internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
+ if (internals == NULL)
+ goto error;
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+ if (eth_dev == NULL)
+ goto error;
+
+ /* now put it all together
+ * - store queue data in internals,
+ * - store numa_node info in ethdev data
+ * - point eth_dev_data to internals
+ * - and point eth_dev structure to new eth_dev_data structure
+ */
+ /* NOTE: we'll replace the data element of the originally allocated
+ * eth_dev so the nulls are local per-process */
+
+ internals->nb_rx_queues = nb_rx_queues;
+ internals->nb_tx_queues = nb_tx_queues;
+ internals->packet_size = packet_size;
+ internals->packet_copy = packet_copy;
+ internals->numa_node = numa_node;
+
+ internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+ internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
+
+ rte_memcpy(internals->rss_key, default_rss_key, 40);
+
+ data->dev_private = internals;
+ data->port_id = eth_dev->data->port_id;
+ data->nb_rx_queues = (uint16_t)nb_rx_queues;
+ data->nb_tx_queues = (uint16_t)nb_tx_queues;
+ data->dev_link = pmd_link;
+ data->mac_addrs = &eth_addr;
+ strncpy(data->name, eth_dev->data->name, strlen(eth_dev->data->name));
+
+ eth_dev->data = data;
+ eth_dev->dev_ops = &ops;
+
+ TAILQ_INIT(&eth_dev->link_intr_cbs);
+
+ eth_dev->driver = NULL;
+ eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
+ eth_dev->data->kdrv = RTE_KDRV_NONE;
+ eth_dev->data->drv_name = drivername;
+ eth_dev->data->numa_node = numa_node;
+
+ /* finally assign rx and tx ops */
+ if (packet_copy) {
+ eth_dev->rx_pkt_burst = eth_null_copy_rx;
+ eth_dev->tx_pkt_burst = eth_null_copy_tx;
+ } else {
+ eth_dev->rx_pkt_burst = eth_null_rx;
+ eth_dev->tx_pkt_burst = eth_null_tx;
+ }
+
+ return 0;
+
+error:
+ rte_free(data);
+ rte_free(internals);
+
+ return -1;
+}
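+
+/* Minimal call sketch (illustrative; the port name is an example): create
+ * a null port carrying 64-byte packets on the caller's socket, with rx/tx
+ * buffer copies enabled:
+ *
+ *	if (eth_dev_null_create("eth_null0", rte_socket_id(), 64, 1) < 0)
+ *		rte_exit(EXIT_FAILURE, "cannot create null port\n");
+ */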
+
+static inline int
+get_packet_size_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ const char *a = value;
+ unsigned *packet_size = extra_args;
+
+ if ((value == NULL) || (extra_args == NULL))
+ return -EINVAL;
+
+ *packet_size = (unsigned)strtoul(a, NULL, 0);
+ if (*packet_size == UINT_MAX)
+ return -1;
+
+ return 0;
+}
+
+static inline int
+get_packet_copy_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ const char *a = value;
+ unsigned *packet_copy = extra_args;
+
+ if ((value == NULL) || (extra_args == NULL))
+ return -EINVAL;
+
+ *packet_copy = (unsigned)strtoul(a, NULL, 0);
+ if (*packet_copy == UINT_MAX)
+ return -1;
+
+ return 0;
+}
+
+static int
+rte_pmd_null_devinit(const char *name, const char *params)
+{
+ unsigned numa_node;
+ unsigned packet_size = default_packet_size;
+ unsigned packet_copy = default_packet_copy;
+ struct rte_kvargs *kvlist = NULL;
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);
+
+ numa_node = rte_socket_id();
+
+ if (params != NULL) {
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist == NULL)
+ return -1;
+
+ if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
+
+ ret = rte_kvargs_process(kvlist,
+ ETH_NULL_PACKET_SIZE_ARG,
+ &get_packet_size_arg, &packet_size);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
+
+ ret = rte_kvargs_process(kvlist,
+ ETH_NULL_PACKET_COPY_ARG,
+ &get_packet_copy_arg, &packet_copy);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+ }
+
+ RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
+ "packet copy is %s\n", packet_size,
+ packet_copy ? "enabled" : "disabled");
+
+ ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);
+
+free_kvlist:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
+rte_pmd_null_devuninit(const char *name)
+{
+ struct rte_eth_dev *eth_dev = NULL;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
+ rte_socket_id());
+
+ /* find the ethdev entry */
+ eth_dev = rte_eth_dev_allocated(name);
+ if (eth_dev == NULL)
+ return -1;
+
+ rte_free(eth_dev->data->dev_private);
+ rte_free(eth_dev->data);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+static struct rte_driver pmd_null_drv = {
+ .name = "eth_null",
+ .type = PMD_VDEV,
+ .init = rte_pmd_null_devinit,
+ .uninit = rte_pmd_null_devuninit,
+};
+
+PMD_REGISTER_DRIVER(pmd_null_drv);
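+
+/* The registered vdev can also be requested from the EAL command line; this
+ * assumes the "size"/"copy" kvarg names behind ETH_NULL_PACKET_SIZE_ARG and
+ * ETH_NULL_PACKET_COPY_ARG defined earlier in this file, e.g.:
+ *
+ *	./app -c 0x3 -n 4 --vdev 'eth_null0,size=64,copy=1'
+ */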
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_feature_defs.h b/src/dpdk22/drivers/net/null/rte_eth_null.h
index 6316b6dd..abada8c2 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_feature_defs.h
+++ b/src/dpdk22/drivers/net/null/rte_eth_null.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,15 +31,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _RTE_PCI_DEV_DEFS_H_
-#define _RTE_PCI_DEV_DEFS_H_
+#ifndef RTE_ETH_NULL_H_
+#define RTE_ETH_NULL_H_
-/* interrupt mode */
-enum rte_intr_mode {
- RTE_INTR_MODE_NONE = 0,
- RTE_INTR_MODE_LEGACY,
- RTE_INTR_MODE_MSI,
- RTE_INTR_MODE_MSIX
-};
+int eth_dev_null_create(const char *name, const unsigned numa_node,
+ unsigned packet_size, unsigned packet_copy);
-#endif /* _RTE_PCI_DEV_DEFS_H_ */
+#endif /* RTE_ETH_NULL_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_ring/rte_eth_ring.c b/src/dpdk22/drivers/net/ring/rte_eth_ring.c
index 4f1b6ed8..d92b0887 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ring/rte_eth_ring.c
+++ b/src/dpdk22/drivers/net/ring/rte_eth_ring.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,9 +36,11 @@
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
+#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
+#include <rte_errno.h>
#define ETH_RING_NUMA_NODE_ACTION_ARG "nodeaction"
#define ETH_RING_ACTION_CREATE "CREATE"
@@ -62,10 +64,11 @@ struct pmd_internals {
struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];
+
+ struct ether_addr address;
};
-static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Rings PMD";
static struct rte_eth_link pmd_link = {
.link_speed = 10000,
@@ -121,6 +124,20 @@ eth_dev_stop(struct rte_eth_dev *dev)
}
static int
+eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = 0;
+ return 0;
+}
+
+static int
+eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = 1;
+ return 0;
+}
+
+static int
eth_rx_queue_setup(struct rte_eth_dev *dev,uint16_t rx_queue_id,
uint16_t nb_rx_desc __rte_unused,
unsigned int socket_id __rte_unused,
@@ -165,7 +182,6 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
const struct pmd_internals *internal = dev->data->dev_private;
- memset(igb_stats, 0, sizeof(*igb_stats));
for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
i < internal->nb_rx_queues; i++) {
igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
@@ -199,23 +215,41 @@ eth_stats_reset(struct rte_eth_dev *dev)
}
static void
+eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
+ uint32_t index __rte_unused)
+{
+}
+
+static void
+eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
+ struct ether_addr *mac_addr __rte_unused,
+ uint32_t index __rte_unused,
+ uint32_t vmdq __rte_unused)
+{
+}
+
+static void
eth_queue_release(void *q __rte_unused) { ; }
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
int wait_to_complete __rte_unused) { return 0; }
-static struct eth_dev_ops ops = {
- .dev_start = eth_dev_start,
- .dev_stop = eth_dev_stop,
- .dev_configure = eth_dev_configure,
- .dev_infos_get = eth_dev_info,
- .rx_queue_setup = eth_rx_queue_setup,
- .tx_queue_setup = eth_tx_queue_setup,
- .rx_queue_release = eth_queue_release,
- .tx_queue_release = eth_queue_release,
- .link_update = eth_link_update,
- .stats_get = eth_stats_get,
- .stats_reset = eth_stats_reset,
+static const struct eth_dev_ops ops = {
+ .dev_start = eth_dev_start,
+ .dev_stop = eth_dev_stop,
+ .dev_set_link_up = eth_dev_set_link_up,
+ .dev_set_link_down = eth_dev_set_link_down,
+ .dev_configure = eth_dev_configure,
+ .dev_infos_get = eth_dev_info,
+ .rx_queue_setup = eth_rx_queue_setup,
+ .tx_queue_setup = eth_tx_queue_setup,
+ .rx_queue_release = eth_queue_release,
+ .tx_queue_release = eth_queue_release,
+ .link_update = eth_link_update,
+ .stats_get = eth_stats_get,
+ .stats_reset = eth_stats_reset,
+ .mac_addr_remove = eth_mac_addr_remove,
+ .mac_addr_add = eth_mac_addr_add,
};
int
@@ -226,16 +260,24 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
const unsigned numa_node)
{
struct rte_eth_dev_data *data = NULL;
- struct rte_pci_device *pci_dev = NULL;
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
+
unsigned i;
/* do some parameter checking */
- if (rx_queues == NULL && nb_rx_queues > 0)
+ if (rx_queues == NULL && nb_rx_queues > 0) {
+ rte_errno = EINVAL;
goto error;
- if (tx_queues == NULL && nb_tx_queues > 0)
+ }
+ if (tx_queues == NULL && nb_tx_queues > 0) {
+ rte_errno = EINVAL;
goto error;
+ }
+ if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
+ rte_errno = EINVAL;
+ goto error;
+ }
RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
numa_node);
@@ -244,26 +286,42 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
* and internal (private) data
*/
data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL)
+ if (data == NULL) {
+ rte_errno = ENOMEM;
+ goto error;
+ }
+
+ data->rx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_rx_queues,
+ 0, numa_node);
+ if (data->rx_queues == NULL) {
+ rte_errno = ENOMEM;
goto error;
+ }
- pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
- if (pci_dev == NULL)
+ data->tx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_tx_queues,
+ 0, numa_node);
+ if (data->tx_queues == NULL) {
+ rte_errno = ENOMEM;
goto error;
+ }
internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
- if (internals == NULL)
+ if (internals == NULL) {
+ rte_errno = ENOMEM;
goto error;
+ }
/* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(name);
- if (eth_dev == NULL)
+ eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+ if (eth_dev == NULL) {
+ rte_errno = ENOSPC;
goto error;
+ }
/* now put it all together
* - store queue data in internals,
- * - store numa_node info in pci_driver
- * - point eth_dev_data to internals and pci_driver
+ * - store numa_node info in eth_dev_data
+ * - point eth_dev_data to internals
* - and point eth_dev structure to new eth_dev_data structure
*/
/* NOTE: we'll replace the data element, of originally allocated eth_dev
@@ -273,40 +331,55 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
internals->nb_tx_queues = nb_tx_queues;
for (i = 0; i < nb_rx_queues; i++) {
internals->rx_ring_queues[i].rng = rx_queues[i];
+ data->rx_queues[i] = &internals->rx_ring_queues[i];
}
for (i = 0; i < nb_tx_queues; i++) {
internals->tx_ring_queues[i].rng = tx_queues[i];
+ data->tx_queues[i] = &internals->tx_ring_queues[i];
}
- pci_dev->numa_node = numa_node;
-
data->dev_private = internals;
data->port_id = eth_dev->data->port_id;
+ memmove(data->name, eth_dev->data->name, sizeof(data->name));
data->nb_rx_queues = (uint16_t)nb_rx_queues;
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
- data->mac_addrs = &eth_addr;
+ data->mac_addrs = &internals->address;
+
+ eth_dev->data = data;
+ eth_dev->driver = NULL;
+ eth_dev->dev_ops = &ops;
+ eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
+ eth_dev->data->kdrv = RTE_KDRV_NONE;
+ eth_dev->data->drv_name = drivername;
+ eth_dev->data->numa_node = numa_node;
- eth_dev ->data = data;
- eth_dev ->dev_ops = &ops;
- eth_dev ->pci_dev = pci_dev;
+ TAILQ_INIT(&(eth_dev->link_intr_cbs));
/* finally assign rx and tx ops */
eth_dev->rx_pkt_burst = eth_ring_rx;
eth_dev->tx_pkt_burst = eth_ring_tx;
- return 0;
+ return data->port_id;
error:
- if (data)
- rte_free(data);
- if (pci_dev)
- rte_free(pci_dev);
- if (internals)
- rte_free(internals);
+ if (data) {
+ rte_free(data->rx_queues);
+ rte_free(data->tx_queues);
+ }
+ rte_free(data);
+ rte_free(internals);
+
return -1;
}
+int
+rte_eth_from_ring(struct rte_ring *r)
+{
+ return rte_eth_from_rings(r->name, &r, 1, &r, 1,
+ r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
+}
+
enum dev_action{
DEV_CREATE,
DEV_ATTACH
@@ -335,67 +408,12 @@ eth_dev_ring_create(const char *name, const unsigned numa_node,
return -1;
}
- if (rte_eth_from_rings(name, rxtx, num_rings, rxtx, num_rings, numa_node))
- return -1;
-
- return 0;
-}
-
-
-static int
-eth_dev_ring_pair_create(const char *name, const unsigned numa_node,
- enum dev_action action)
-{
- /* rx and tx are so-called from point of view of first port.
- * They are inverted from the point of view of second port
- */
- struct rte_ring *rx[RTE_PMD_RING_MAX_RX_RINGS];
- struct rte_ring *tx[RTE_PMD_RING_MAX_TX_RINGS];
- unsigned i;
- char rx_rng_name[RTE_RING_NAMESIZE];
- char tx_rng_name[RTE_RING_NAMESIZE];
- unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
- RTE_PMD_RING_MAX_TX_RINGS);
-
- for (i = 0; i < num_rings; i++) {
- snprintf(rx_rng_name, sizeof(rx_rng_name), "ETH_RX%u_%s", i, name);
- rx[i] = (action == DEV_CREATE) ?
- rte_ring_create(rx_rng_name, 1024, numa_node,
- RING_F_SP_ENQ|RING_F_SC_DEQ) :
- rte_ring_lookup(rx_rng_name);
- if (rx[i] == NULL)
- return -1;
- snprintf(tx_rng_name, sizeof(tx_rng_name), "ETH_TX%u_%s", i, name);
- tx[i] = (action == DEV_CREATE) ?
- rte_ring_create(tx_rng_name, 1024, numa_node,
- RING_F_SP_ENQ|RING_F_SC_DEQ):
- rte_ring_lookup(tx_rng_name);
- if (tx[i] == NULL)
- return -1;
- }
-
- if (rte_eth_from_rings(rx_rng_name, rx, num_rings, tx, num_rings,
- numa_node) || rte_eth_from_rings(tx_rng_name, tx, num_rings, rx,
- num_rings, numa_node))
+ if (rte_eth_from_rings(name, rxtx, num_rings, rxtx, num_rings, numa_node) < 0)
return -1;
return 0;
}
-int
-rte_eth_ring_pair_create(const char *name, const unsigned numa_node)
-{
- RTE_LOG(WARNING, PMD, "rte_eth_ring_pair_create is deprecated\n");
- return eth_dev_ring_pair_create(name, numa_node, DEV_CREATE);
-}
-
-int
-rte_eth_ring_pair_attach(const char *name, const unsigned numa_node)
-{
- RTE_LOG(WARNING, PMD, "rte_eth_ring_pair_attach is deprecated\n");
- return eth_dev_ring_pair_create(name, numa_node, DEV_ATTACH);
-}
-
struct node_action_pair {
char name[PATH_MAX];
unsigned node;
@@ -473,31 +491,48 @@ out:
return ret;
}
-int
+static int
rte_pmd_ring_devinit(const char *name, const char *params)
{
- struct rte_kvargs *kvlist;
+ struct rte_kvargs *kvlist = NULL;
int ret = 0;
struct node_action_list *info = NULL;
RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);
- if (params == NULL || params[0] == '\0')
- eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
+ if (params == NULL || params[0] == '\0') {
+ ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
+ if (ret == -1) {
+ RTE_LOG(INFO, PMD,
+ "Attach to pmd_ring for %s\n", name);
+ ret = eth_dev_ring_create(name, rte_socket_id(),
+ DEV_ATTACH);
+ }
+ }
else {
kvlist = rte_kvargs_parse(params, valid_arguments);
if (!kvlist) {
RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
" rings-backed ethernet device\n");
- eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
- return 0;
+ ret = eth_dev_ring_create(name, rte_socket_id(),
+ DEV_CREATE);
+ if (ret == -1) {
+ RTE_LOG(INFO, PMD,
+ "Attach to pmd_ring for %s\n",
+ name);
+ ret = eth_dev_ring_create(name, rte_socket_id(),
+ DEV_ATTACH);
+ }
+ return ret;
} else {
ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
- info = rte_zmalloc("struct node_action_list", sizeof(struct node_action_list) +
- (sizeof(struct node_action_pair) * ret), 0);
+ info = rte_zmalloc("struct node_action_list",
+ sizeof(struct node_action_list) +
+ (sizeof(struct node_action_pair) * ret),
+ 0);
if (!info)
- goto out;
+ goto out_free;
info->total = ret;
info->list = (struct node_action_pair*)(info + 1);
@@ -509,22 +544,61 @@ rte_pmd_ring_devinit(const char *name, const char *params)
goto out_free;
for (info->count = 0; info->count < info->total; info->count++) {
- eth_dev_ring_create(name, info->list[info->count].node,
- info->list[info->count].action);
+ ret = eth_dev_ring_create(name,
+ info->list[info->count].node,
+ info->list[info->count].action);
+ if ((ret == -1) &&
+ (info->list[info->count].action == DEV_CREATE)) {
+ RTE_LOG(INFO, PMD,
+ "Attach to pmd_ring for %s\n",
+ name);
+ ret = eth_dev_ring_create(name,
+ info->list[info->count].node,
+ DEV_ATTACH);
+ }
}
}
}
out_free:
+ rte_kvargs_free(kvlist);
rte_free(info);
-out:
return ret;
}
+static int
+rte_pmd_ring_devuninit(const char *name)
+{
+ struct rte_eth_dev *eth_dev = NULL;
+
+ RTE_LOG(INFO, PMD, "Un-Initializing pmd_ring for %s\n", name);
+
+ if (name == NULL)
+ return -EINVAL;
+
+ /* find an ethdev entry */
+ eth_dev = rte_eth_dev_allocated(name);
+ if (eth_dev == NULL)
+ return -ENODEV;
+
+ eth_dev_stop(eth_dev);
+
+ if (eth_dev->data) {
+ rte_free(eth_dev->data->rx_queues);
+ rte_free(eth_dev->data->tx_queues);
+ rte_free(eth_dev->data->dev_private);
+ }
+ rte_free(eth_dev->data);
+
+ rte_eth_dev_release_port(eth_dev);
+ return 0;
+}
+
static struct rte_driver pmd_ring_drv = {
.name = "eth_ring",
.type = PMD_VDEV,
.init = rte_pmd_ring_devinit,
+ .uninit = rte_pmd_ring_devuninit,
};
PMD_REGISTER_DRIVER(pmd_ring_drv);
diff --git a/src/dpdk_lib18/librte_power/guest_channel.h b/src/dpdk22/drivers/net/ring/rte_eth_ring.h
index 9e18af52..4ff83eca 100755..100644
--- a/src/dpdk_lib18/librte_power/guest_channel.h
+++ b/src/dpdk22/drivers/net/ring/rte_eth_ring.h
@@ -30,57 +30,54 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _GUEST_CHANNEL_H
-#define _GUEST_CHANNEL_H
+
+#ifndef _RTE_ETH_RING_H_
+#define _RTE_ETH_RING_H_
#ifdef __cplusplus
extern "C" {
#endif
-#include <channel_commands.h>
+#include <rte_ring.h>
/**
- * Connect to the Virtio-Serial VM end-point located in path. It is
- * thread safe for unique lcore_ids. This function must be only called once from
- * each lcore.
- *
- * @param path
- * The path to the serial device on the filesystem
- * @param lcore_id
- * lcore_id.
+ * Create a new ethdev port from a set of rings
*
+ * @param name
+ * name to be given to the new ethdev port
+ * @param rx_queues
+ * pointer to array of rte_rings to be used as RX queues
+ * @param nb_rx_queues
+ * number of elements in the rx_queues array
+ * @param tx_queues
+ * pointer to array of rte_rings to be used as TX queues
+ * @param nb_tx_queues
+ * number of elements in the tx_queues array
+ * @param numa_node
+ * the numa node on which the memory for this port is to be allocated
* @return
- * - 0 on success.
- * - Negative on error.
- */
-int guest_channel_host_connect(const char *path, unsigned lcore_id);
-
-/**
- * Disconnect from an already connected Virtio-Serial Endpoint.
- *
- *
- * @param lcore_id
- * lcore_id.
- *
+ * the port number of the newly created ethdev, or -1 on error.
*/
-void guest_channel_host_disconnect(unsigned lcore_id);
+int rte_eth_from_rings(const char *name,
+ struct rte_ring * const rx_queues[],
+ const unsigned nb_rx_queues,
+ struct rte_ring *const tx_queues[],
+ const unsigned nb_tx_queues,
+ const unsigned numa_node);
/**
- * Send a message contained in pkt over the Virtio-Serial to the host endpoint.
+ * Create a new ethdev port from a ring
*
- * @param pkt
- * Pointer to a populated struct guest_agent_pkt
- *
- * @param lcore_id
- * lcore_id.
+ * This function is a shortcut call for rte_eth_from_rings for the
+ * case where one wants to take a single rte_ring and use it as though
+ * it were an ethdev.
*
+ * @param ring
+ * the ring to be used as an ethdev
* @return
- * - 0 on success.
- * - Negative on channel not connected.
- * - errno on write to channel error.
+ * the port number of the newly created ethdev, or -1 on error
*/
-int guest_channel_send_msg(struct channel_packet *pkt, unsigned lcore_id);
-
+int rte_eth_from_ring(struct rte_ring *r);
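+
+/* Usage sketch (illustrative): with a single ring, rte_eth_tx_burst()
+ * enqueues mbufs to the ring and rte_eth_rx_burst() dequeues them again:
+ *
+ *	struct rte_ring *r = rte_ring_create("R0", 1024, rte_socket_id(),
+ *			RING_F_SP_ENQ | RING_F_SC_DEQ);
+ *	int port = rte_eth_from_ring(r);
+ *	if (port < 0)
+ *		rte_exit(EXIT_FAILURE, "cannot create ethdev from ring\n");
+ */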
#ifdef __cplusplus
}
diff --git a/src/dpdk22/drivers/net/szedata2/rte_eth_szedata2.h b/src/dpdk22/drivers/net/szedata2/rte_eth_szedata2.h
new file mode 100644
index 00000000..4d09a98d
--- /dev/null
+++ b/src/dpdk22/drivers/net/szedata2/rte_eth_szedata2.h
@@ -0,0 +1,102 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015 CESNET
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of CESNET nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_PMD_SZEDATA2_H_
+#define RTE_PMD_SZEDATA2_H_
+
+/* szedata2_packet header length == 4 bytes == 2B segment size + 2B hw size */
+#define RTE_SZE2_PACKET_HEADER_SIZE 4
+
+#define RTE_SZE2_MMIO_MAX 10
+
+/*!
+ * Round 'what' up to the nearest multiple of 8, or keep it unchanged if it
+ * already is one (szedata2 packets are aligned to 8 bytes)
+ */
+#define RTE_SZE2_ALIGN8(what) (((what) + ((8) - 1)) & (~((8) - 1)))
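+/* e.g. RTE_SZE2_ALIGN8(5) == 8, RTE_SZE2_ALIGN8(8) == 8,
+ * RTE_SZE2_ALIGN8(13) == 16 */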
+
+/*! main handle structure */
+struct szedata {
+ int fd;
+ struct sze2_instance_info *info;
+ uint32_t *write_size;
+ void *space[RTE_SZE2_MMIO_MAX];
+ struct szedata_lock lock[2][2];
+
+ __u32 *rx_asize, *tx_asize;
+
+ /* szedata_read_next variables - to keep context (ct) */
+
+ /*
+ * rx
+ */
+ /** initial sze lock ptr */
+ const struct szedata_lock *ct_rx_lck_orig;
+ /** current sze lock ptr (initial or next) */
+ const struct szedata_lock *ct_rx_lck;
+ /** remaining bytes (not read) within current lock */
+ unsigned int ct_rx_rem_bytes;
+ /** current pointer to locked memory */
+ unsigned char *ct_rx_cur_ptr;
+ /**
+ * allocated buffer to store RX packet if it was split
+ * into 2 buffers
+ */
+ unsigned char *ct_rx_buffer;
+ /** registered function to provide filtering based on hwdata */
+ int (*ct_rx_filter)(u_int16_t hwdata_len, u_char *hwdata);
+
+ /*
+ * tx
+ */
+ /**
+ * buffer for tx - packet is prepared here
+ * (in future for burst write)
+ */
+ unsigned char *ct_tx_buffer;
+ /** initial sze TX lock ptrs - number according to TX interfaces */
+ const struct szedata_lock **ct_tx_lck_orig;
+ /** current sze TX lock ptrs - number according to TX interfaces */
+ const struct szedata_lock **ct_tx_lck;
+ /** already written bytes in both locks */
+ unsigned int *ct_tx_written_bytes;
+ /** remaining bytes (not written) within current lock */
+ unsigned int *ct_tx_rem_bytes;
+ /** current pointers to locked memory */
+ unsigned char **ct_tx_cur_ptr;
+ /** NUMA node closest to PCIe device, or -1 */
+ int numa_node;
+};
+
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtio_ethdev.c b/src/dpdk22/drivers/net/virtio/virtio_ethdev.c
index b3b5bb6a..d9283390 100755..100644
--- a/src/dpdk_lib18/librte_pmd_virtio/virtio_ethdev.c
+++ b/src/dpdk22/drivers/net/virtio/virtio_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,6 +38,7 @@
#include <unistd.h>
#ifdef RTE_EXEC_ENV_LINUXAPP
#include <dirent.h>
+#include <fcntl.h>
#endif
#include <rte_ethdev.h>
@@ -50,6 +51,7 @@
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
+#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_eal.h>
@@ -59,10 +61,11 @@
#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"
+#include "virtio_rxtx.h"
-static int eth_virtio_dev_init(struct eth_driver *eth_drv,
- struct rte_eth_dev *eth_dev);
+static int eth_virtio_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
static int virtio_dev_configure(struct rte_eth_dev *dev);
static int virtio_dev_start(struct rte_eth_dev *dev);
static void virtio_dev_stop(struct rte_eth_dev *dev);
@@ -78,12 +81,20 @@ static int virtio_dev_link_update(struct rte_eth_dev *dev,
static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);
-static void virtio_dev_rx_queue_release(__rte_unused void *rxq);
-static void virtio_dev_tx_queue_release(__rte_unused void *txq);
-
-static void virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+static void virtio_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstats *xstats, unsigned n);
static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
+static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static void virtio_mac_addr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t vmdq __rte_unused);
+static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
+static void virtio_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
static int virtio_dev_queue_stats_mapping_set(
__rte_unused struct rte_eth_dev *eth_dev,
@@ -94,7 +105,7 @@ static int virtio_dev_queue_stats_mapping_set(
/*
* The set of PCI devices this driver supports
*/
-static struct rte_pci_id pci_id_virtio_map[] = {
+static const struct rte_pci_id pci_id_virtio_map[] = {
#define RTE_PCI_DEV_ID_DECL_VIRTIO(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
@@ -102,23 +113,49 @@ static struct rte_pci_id pci_id_virtio_map[] = {
{ .vendor_id = 0, /* sentinel */ },
};
+struct rte_virtio_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+/* [rt]x_qX_ is prepended to the name string here */
+static const struct rte_virtio_xstats_name_off rte_virtio_q_stat_strings[] = {
+ {"good_packets", offsetof(struct virtqueue, packets)},
+ {"good_bytes", offsetof(struct virtqueue, bytes)},
+ {"errors", offsetof(struct virtqueue, errors)},
+ {"multicast_packets", offsetof(struct virtqueue, multicast)},
+ {"broadcast_packets", offsetof(struct virtqueue, broadcast)},
+ {"undersize_packets", offsetof(struct virtqueue, size_bins[0])},
+ {"size_64_packets", offsetof(struct virtqueue, size_bins[1])},
+ {"size_65_127_packets", offsetof(struct virtqueue, size_bins[2])},
+ {"size_128_255_packets", offsetof(struct virtqueue, size_bins[3])},
+ {"size_256_511_packets", offsetof(struct virtqueue, size_bins[4])},
+ {"size_512_1023_packets", offsetof(struct virtqueue, size_bins[5])},
+ {"size_1024_1517_packets", offsetof(struct virtqueue, size_bins[6])},
+ {"size_1518_max_packets", offsetof(struct virtqueue, size_bins[7])},
+};
+
+#define VIRTIO_NB_Q_XSTATS (sizeof(rte_virtio_q_stat_strings) / \
+ sizeof(rte_virtio_q_stat_strings[0]))
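+
+/* With the per-queue prefix applied in virtio_dev_xstats_get() below,
+ * rx queue 0 exposes e.g. "rx_q0_good_packets" and "rx_q0_size_64_packets". */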
+
static int
virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
int *dlen, int pkt_num)
{
- uint32_t head = vq->vq_desc_head_idx, i;
+ uint32_t head, i;
int k, sum = 0;
virtio_net_ctrl_ack status = ~0;
struct virtio_pmd_ctrl result;
ctrl->status = status;
- if (!vq->hw->cvq) {
+ if (!(vq && vq->hw->cvq)) {
PMD_INIT_LOG(ERR,
"%s(): Control queue is not supported.",
__func__);
return -1;
}
+ head = vq->vq_desc_head_idx;
PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
"vq->hw->cvq = %p vq = %p",
@@ -168,15 +205,16 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
virtqueue_notify(vq);
- while (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
+ rte_rmb();
+ while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+ rte_rmb();
usleep(100);
+ }
while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
uint32_t idx, desc_idx, used_idx;
struct vring_used_elem *uep;
- rmb();
-
used_idx = (uint32_t)(vq->vq_used_cons_idx
& (vq->vq_nentries - 1));
uep = &vq->vq_ring.used->ring[used_idx];
@@ -207,8 +245,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
static int
virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
{
- struct virtio_hw *hw
- = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_hw *hw = dev->data->dev_private;
struct virtio_pmd_ctrl ctrl;
int dlen[1];
int ret;
@@ -220,7 +257,6 @@ virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
dlen[0] = sizeof(uint16_t);
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
-
if (ret) {
PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
"failed, this is too late now...");
@@ -230,44 +266,53 @@ virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
return 0;
}
+void
+virtio_dev_queue_release(struct virtqueue *vq) {
+ struct virtio_hw *hw;
+
+ if (vq) {
+ hw = vq->hw;
+ /* Select and deactivate the queue */
+ VIRTIO_WRITE_REG_2(hw, VIRTIO_PCI_QUEUE_SEL, vq->vq_queue_index);
+ VIRTIO_WRITE_REG_4(hw, VIRTIO_PCI_QUEUE_PFN, 0);
+
+ rte_free(vq->sw_ring);
+ rte_free(vq);
+ }
+}
+
int virtio_dev_queue_setup(struct rte_eth_dev *dev,
int queue_type,
uint16_t queue_idx,
- uint8_t vtpci_queue_idx,
+ uint16_t vtpci_queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
struct virtqueue **pvq)
{
char vq_name[VIRTQUEUE_MAX_NAME_SZ];
const struct rte_memzone *mz;
- uint16_t vq_size;
- int size;
- struct virtio_hw *hw =
- VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct virtqueue *vq = NULL;
+ unsigned int vq_size, size;
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = NULL;
/* Write the virtqueue index to the Queue Select Field */
VIRTIO_WRITE_REG_2(hw, VIRTIO_PCI_QUEUE_SEL, vtpci_queue_idx);
- PMD_INIT_LOG(DEBUG, "selecting queue: %d", vtpci_queue_idx);
+ PMD_INIT_LOG(DEBUG, "selecting queue: %u", vtpci_queue_idx);
/*
* Read the virtqueue size from the Queue Size field
* Always power of 2 and if 0 virtqueue does not exist
*/
vq_size = VIRTIO_READ_REG_2(hw, VIRTIO_PCI_QUEUE_NUM);
- PMD_INIT_LOG(DEBUG, "vq_size: %d nb_desc:%d", vq_size, nb_desc);
- if (nb_desc == 0)
- nb_desc = vq_size;
+ PMD_INIT_LOG(DEBUG, "vq_size: %u nb_desc:%u", vq_size, nb_desc);
if (vq_size == 0) {
PMD_INIT_LOG(ERR, "%s: virtqueue does not exist", __func__);
return -EINVAL;
- } else if (!rte_is_power_of_2(vq_size)) {
+ }
+
+ if (!rte_is_power_of_2(vq_size)) {
PMD_INIT_LOG(ERR, "%s: virtqueue size is not powerof 2", __func__);
return -EINVAL;
- } else if (nb_desc != vq_size) {
- PMD_INIT_LOG(ERR, "Warning: nb_desc(%d) is not equal to vq size (%d), fall to vq size",
- nb_desc, vq_size);
- nb_desc = vq_size;
}
if (queue_type == VTNET_RQ) {
@@ -275,6 +320,9 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
dev->data->port_id, queue_idx);
vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
+ vq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
+ (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
+ sizeof(vq->sw_ring[0]), RTE_CACHE_LINE_SIZE, socket_id);
} else if (queue_type == VTNET_TQ) {
snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d",
dev->data->port_id, queue_idx);
@@ -291,14 +339,22 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_LOG(ERR, "%s: Can not allocate virtqueue", __func__);
return (-ENOMEM);
}
+ if (queue_type == VTNET_RQ && vq->sw_ring == NULL) {
+ PMD_INIT_LOG(ERR, "%s: Can not allocate RX soft ring",
+ __func__);
+ rte_free(vq);
+ return -ENOMEM;
+ }
vq->hw = hw;
vq->port_id = dev->data->port_id;
vq->queue_id = queue_idx;
vq->vq_queue_index = vtpci_queue_idx;
- vq->vq_alignment = VIRTIO_PCI_VRING_ALIGN;
vq->vq_nentries = vq_size;
- vq->vq_free_cnt = vq_size;
+
+ if (nb_desc == 0 || nb_desc > vq_size)
+ nb_desc = vq_size;
+ vq->vq_free_cnt = nb_desc;
/*
* Reserve a memzone for vring elements
@@ -310,8 +366,12 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
if (mz == NULL) {
- rte_free(vq);
- return -ENOMEM;
+ if (rte_errno == EEXIST)
+ mz = rte_memzone_lookup(vq_name);
+ if (mz == NULL) {
+ rte_free(vq);
+ return -ENOMEM;
+ }
}
/*
@@ -330,7 +390,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
vq->vq_ring_mem = mz->phys_addr;
vq->vq_ring_virt_mem = mz->addr;
PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%"PRIx64, (uint64_t)mz->phys_addr);
- PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%"PRIx64, (uint64_t)mz->addr);
+ PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%"PRIx64, (uint64_t)(uintptr_t)mz->addr);
vq->virtio_net_hdr_mz = NULL;
vq->virtio_net_hdr_mem = 0;
@@ -344,8 +404,13 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
vq_size * hw->vtnet_hdr_size,
socket_id, 0, RTE_CACHE_LINE_SIZE);
if (vq->virtio_net_hdr_mz == NULL) {
- rte_free(vq);
- return -ENOMEM;
+ if (rte_errno == EEXIST)
+ vq->virtio_net_hdr_mz =
+ rte_memzone_lookup(vq_name);
+ if (vq->virtio_net_hdr_mz == NULL) {
+ rte_free(vq);
+ return -ENOMEM;
+ }
}
vq->virtio_net_hdr_mem =
vq->virtio_net_hdr_mz->phys_addr;
@@ -358,8 +423,13 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
PAGE_SIZE, socket_id, 0, RTE_CACHE_LINE_SIZE);
if (vq->virtio_net_hdr_mz == NULL) {
- rte_free(vq);
- return -ENOMEM;
+ if (rte_errno == EEXIST)
+ vq->virtio_net_hdr_mz =
+ rte_memzone_lookup(vq_name);
+ if (vq->virtio_net_hdr_mz == NULL) {
+ rte_free(vq);
+ return -ENOMEM;
+ }
}
vq->virtio_net_hdr_mem =
vq->virtio_net_hdr_mz->phys_addr;
@@ -381,15 +451,12 @@ virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx,
uint32_t socket_id)
{
struct virtqueue *vq;
- uint16_t nb_desc = 0;
int ret;
- struct virtio_hw *hw =
- VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_hw *hw = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
ret = virtio_dev_queue_setup(dev, VTNET_CQ, VTNET_SQ_CQ_QUEUE_IDX,
- vtpci_queue_idx, nb_desc, socket_id, &vq);
-
+ vtpci_queue_idx, 0, socket_id, &vq);
if (ret < 0) {
PMD_INIT_LOG(ERR, "control vq initialization failed");
return ret;
@@ -400,29 +467,57 @@ virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx,
}
static void
+virtio_free_queues(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ virtio_dev_rx_queue_release(dev->data->rx_queues[i]);
+
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ virtio_dev_tx_queue_release(dev->data->tx_queues[i]);
+
+ dev->data->nb_tx_queues = 0;
+}
+
+static void
virtio_dev_close(struct rte_eth_dev *dev)
{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct rte_pci_device *pci_dev = dev->pci_dev;
+
PMD_INIT_LOG(DEBUG, "virtio_dev_close");
- virtio_dev_stop(dev);
+ /* reset the NIC */
+ if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
+ vtpci_irq_config(hw, VIRTIO_MSI_NO_VECTOR);
+ vtpci_reset(hw);
+ hw->started = 0;
+ virtio_dev_free_mbufs(dev);
+ virtio_free_queues(dev);
}
static void
virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
- struct virtio_hw *hw
- = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_hw *hw = dev->data->dev_private;
struct virtio_pmd_ctrl ctrl;
int dlen[1];
int ret;
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
+ PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ return;
+ }
+
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
ctrl.data[0] = 1;
dlen[0] = 1;
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
-
if (ret)
PMD_INIT_LOG(ERR, "Failed to enable promisc");
}
@@ -430,19 +525,22 @@ virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
static void
virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
- struct virtio_hw *hw
- = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_hw *hw = dev->data->dev_private;
struct virtio_pmd_ctrl ctrl;
int dlen[1];
int ret;
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
+ PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ return;
+ }
+
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
ctrl.data[0] = 0;
dlen[0] = 1;
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
-
if (ret)
PMD_INIT_LOG(ERR, "Failed to disable promisc");
}
@@ -450,19 +548,22 @@ virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
static void
virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
- struct virtio_hw *hw
- = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_hw *hw = dev->data->dev_private;
struct virtio_pmd_ctrl ctrl;
int dlen[1];
int ret;
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
+ PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ return;
+ }
+
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
ctrl.data[0] = 1;
dlen[0] = 1;
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
-
if (ret)
PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
}
@@ -470,19 +571,22 @@ virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
static void
virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
- struct virtio_hw *hw
- = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_hw *hw = dev->data->dev_private;
struct virtio_pmd_ctrl ctrl;
int dlen[1];
int ret;
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
+ PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ return;
+ }
+
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
ctrl.data[0] = 0;
dlen[0] = 1;
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
-
if (ret)
PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
}
@@ -490,7 +594,7 @@ virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
/*
* dev_ops for virtio, bare necessities for basic operation
*/
-static struct eth_dev_ops virtio_eth_dev_ops = {
+static const struct eth_dev_ops virtio_eth_dev_ops = {
.dev_configure = virtio_dev_configure,
.dev_start = virtio_dev_start,
.dev_stop = virtio_dev_stop,
@@ -502,18 +606,20 @@ static struct eth_dev_ops virtio_eth_dev_ops = {
.dev_infos_get = virtio_dev_info_get,
.stats_get = virtio_dev_stats_get,
+ .xstats_get = virtio_dev_xstats_get,
.stats_reset = virtio_dev_stats_reset,
+ .xstats_reset = virtio_dev_stats_reset,
.link_update = virtio_dev_link_update,
- .mac_addr_add = NULL,
- .mac_addr_remove = NULL,
.rx_queue_setup = virtio_dev_rx_queue_setup,
- /* meaningfull only to multiple queue */
.rx_queue_release = virtio_dev_rx_queue_release,
.tx_queue_setup = virtio_dev_tx_queue_setup,
- /* meaningfull only to multiple queue */
.tx_queue_release = virtio_dev_tx_queue_release,
/* collect stats per queue */
.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
+ .vlan_filter_set = virtio_vlan_filter_set,
+ .mac_addr_add = virtio_mac_addr_add,
+ .mac_addr_remove = virtio_mac_addr_remove,
+ .mac_addr_set = virtio_mac_addr_set,
};
static inline int
@@ -557,7 +663,7 @@ virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
}
static void
-virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
unsigned i;
@@ -594,6 +700,64 @@ virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
}
+static int
+virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ unsigned n)
+{
+ unsigned i;
+ unsigned count = 0;
+
+ unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_Q_XSTATS +
+ dev->data->nb_rx_queues * VIRTIO_NB_Q_XSTATS;
+
+ if (n < nstats)
+ return nstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct virtqueue *rxvq = dev->data->rx_queues[i];
+
+ if (rxvq == NULL)
+ continue;
+
+ unsigned t;
+
+ for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) {
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "rx_q%u_%s", i,
+ rte_virtio_q_stat_strings[t].name);
+ xstats[count].value = *(uint64_t *)(((char *)rxvq) +
+ rte_virtio_q_stat_strings[t].offset);
+ count++;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct virtqueue *txvq = dev->data->tx_queues[i];
+
+ if (txvq == NULL)
+ continue;
+
+ unsigned t;
+
+ for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) {
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "tx_q%u_%s", i,
+ rte_virtio_q_stat_strings[t].name);
+ xstats[count].value = *(uint64_t *)(((char *)txvq) +
+ rte_virtio_q_stat_strings[t].offset);
+ count++;
+ }
+ }
+
+ return count;
+}
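+
+/* Caller-side sketch (illustrative): the function returns the required
+ * count when the supplied array is too small, so callers typically size
+ * the buffer first:
+ *
+ *	int n = rte_eth_xstats_get(port_id, NULL, 0);
+ *	struct rte_eth_xstats *xs = malloc(n * sizeof(*xs));
+ *	n = rte_eth_xstats_get(port_id, xs, n);
+ */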
+
+static void
+virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ virtio_update_stats(dev, stats);
+}
+
static void
virtio_dev_stats_reset(struct rte_eth_dev *dev)
{
@@ -607,6 +771,9 @@ virtio_dev_stats_reset(struct rte_eth_dev *dev)
txvq->packets = 0;
txvq->bytes = 0;
txvq->errors = 0;
+ txvq->multicast = 0;
+ txvq->broadcast = 0;
+ memset(txvq->size_bins, 0, sizeof(txvq->size_bins[0]) * 8);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -617,9 +784,10 @@ virtio_dev_stats_reset(struct rte_eth_dev *dev)
rxvq->packets = 0;
rxvq->bytes = 0;
rxvq->errors = 0;
+ rxvq->multicast = 0;
+ rxvq->broadcast = 0;
+ memset(rxvq->size_bins, 0, sizeof(rxvq->size_bins[0]) * 8);
}
-
- dev->data->rx_mbuf_alloc_failed = 0;
}
static void
@@ -643,27 +811,141 @@ virtio_get_hwaddr(struct virtio_hw *hw)
}
}
+static void
+virtio_mac_table_set(struct virtio_hw *hw,
+ const struct virtio_net_ctrl_mac *uc,
+ const struct virtio_net_ctrl_mac *mc)
+{
+ struct virtio_pmd_ctrl ctrl;
+ int err, len[2];
+
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ PMD_DRV_LOG(INFO, "host does not support mac table\n");
+ return;
+ }
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
+
+ len[0] = uc->entries * ETHER_ADDR_LEN + sizeof(uc->entries);
+ memcpy(ctrl.data, uc, len[0]);
+
+ len[1] = mc->entries * ETHER_ADDR_LEN + sizeof(mc->entries);
+ memcpy(ctrl.data + len[0], mc, len[1]);
+
+ err = virtio_send_command(hw->cvq, &ctrl, len, 2);
+ if (err != 0)
+ PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
+}
+
+static void
+virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t vmdq __rte_unused)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ const struct ether_addr *addrs = dev->data->mac_addrs;
+ unsigned int i;
+ struct virtio_net_ctrl_mac *uc, *mc;
+
+ if (index >= VIRTIO_MAX_MAC_ADDRS) {
+ PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
+ return;
+ }
+
+ uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
+ uc->entries = 0;
+ mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
+ mc->entries = 0;
+
+ for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
+ const struct ether_addr *addr
+ = (i == index) ? mac_addr : addrs + i;
+ struct virtio_net_ctrl_mac *tbl
+ = is_multicast_ether_addr(addr) ? mc : uc;
+
+ memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
+ }
+
+ virtio_mac_table_set(hw, uc, mc);
+}
static void
-virtio_negotiate_features(struct virtio_hw *hw)
+virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
- uint32_t host_features, mask;
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct ether_addr *addrs = dev->data->mac_addrs;
+ struct virtio_net_ctrl_mac *uc, *mc;
+ unsigned int i;
- mask = VIRTIO_NET_F_CTRL_VLAN;
- mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
+ if (index >= VIRTIO_MAX_MAC_ADDRS) {
+ PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
+ return;
+ }
- /* TSO and LRO are only available when their corresponding
- * checksum offload feature is also negotiated.
- */
- mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_ECN;
- mask |= VIRTIO_NET_F_GUEST_TSO4 | VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN;
- mask |= VTNET_LRO_FEATURES;
+ uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
+ uc->entries = 0;
+ mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
+ mc->entries = 0;
+
+ for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
+ struct virtio_net_ctrl_mac *tbl;
+
+ if (i == index || is_zero_ether_addr(addrs + i))
+ continue;
- /* not negotiating INDIRECT descriptor table support */
- mask |= VIRTIO_RING_F_INDIRECT_DESC;
+ tbl = is_multicast_ether_addr(addrs + i) ? mc : uc;
+ memcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN);
+ }
+
+ virtio_mac_table_set(hw, uc, mc);
+}
+
+static void
+virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);
+
+ /* Use atomic update if available */
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ struct virtio_pmd_ctrl ctrl;
+ int len = ETHER_ADDR_LEN;
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
+
+ memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
+ virtio_send_command(hw->cvq, &ctrl, &len, 1);
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
+ virtio_set_hwaddr(hw);
+}
+
+static int
+virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtio_pmd_ctrl ctrl;
+ int len;
+
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
+ return -ENOTSUP;
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
+ ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
+ memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
+ len = sizeof(vlan_id);
+
+ return virtio_send_command(hw->cvq, &ctrl, &len, 1);
+}
+
+static void
+virtio_negotiate_features(struct virtio_hw *hw)
+{
+ uint32_t host_features;
/* Prepare guest_features: feature that driver wants to support */
- hw->guest_features = VTNET_FEATURES & ~mask;
+ hw->guest_features = VIRTIO_PMD_GUEST_FEATURES;
PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %x",
hw->guest_features);
@@ -713,9 +995,9 @@ parse_sysfs_value(const char *filename, unsigned long *val)
return 0;
}
-static int get_uio_dev(struct rte_pci_addr *loc, char *buf, unsigned int buflen)
+static int get_uio_dev(struct rte_pci_addr *loc, char *buf, unsigned int buflen,
+ unsigned int *uio_num)
{
- unsigned int uio_num;
struct dirent *e;
DIR *dir;
char dirname[PATH_MAX];
@@ -752,18 +1034,18 @@ static int get_uio_dev(struct rte_pci_addr *loc, char *buf, unsigned int buflen)
/* first try uio%d */
errno = 0;
- uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+ *uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
- snprintf(buf, buflen, "%s/uio%u", dirname, uio_num);
+ snprintf(buf, buflen, "%s/uio%u", dirname, *uio_num);
break;
}
/* then try uio:uio%d */
errno = 0;
- uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+ *uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
snprintf(buf, buflen, "%s/uio:uio%u", dirname,
- uio_num);
+ *uio_num);
break;
}
}
@@ -794,6 +1076,132 @@ virtio_has_msix(const struct rte_pci_addr *loc)
return (d != NULL);
}
+
+/* Extract I/O port numbers from sysfs */
+static int virtio_resource_init_by_uio(struct rte_pci_device *pci_dev)
+{
+ char dirname[PATH_MAX];
+ char filename[PATH_MAX];
+ unsigned long start, size;
+ unsigned int uio_num;
+
+ if (get_uio_dev(&pci_dev->addr, dirname, sizeof(dirname), &uio_num) < 0)
+ return -1;
+
+ /* get portio size */
+ snprintf(filename, sizeof(filename),
+ "%s/portio/port0/size", dirname);
+ if (parse_sysfs_value(filename, &size) < 0) {
+ PMD_INIT_LOG(ERR, "%s(): cannot parse size",
+ __func__);
+ return -1;
+ }
+
+ /* get portio start */
+ snprintf(filename, sizeof(filename),
+ "%s/portio/port0/start", dirname);
+ if (parse_sysfs_value(filename, &start) < 0) {
+ PMD_INIT_LOG(ERR, "%s(): cannot parse portio start",
+ __func__);
+ return -1;
+ }
+ pci_dev->mem_resource[0].addr = (void *)(uintptr_t)start;
+ pci_dev->mem_resource[0].len = (uint64_t)size;
+ PMD_INIT_LOG(DEBUG,
+ "PCI Port IO found start=0x%lx with size=0x%lx",
+ start, size);
+
+ /* save fd */
+ memset(dirname, 0, sizeof(dirname));
+ snprintf(dirname, sizeof(dirname), "/dev/uio%u", uio_num);
+ pci_dev->intr_handle.fd = open(dirname, O_RDWR);
+ if (pci_dev->intr_handle.fd < 0) {
+ PMD_INIT_LOG(ERR, "Cannot open %s: %s\n",
+ dirname, strerror(errno));
+ return -1;
+ }
+
+ pci_dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
+ pci_dev->driver->drv_flags |= RTE_PCI_DRV_INTR_LSC;
+
+ return 0;
+}
+
+/* Extract port I/O numbers from proc/ioports */
+static int virtio_resource_init_by_ioports(struct rte_pci_device *pci_dev)
+{
+ uint16_t start, end;
+ int size;
+ FILE *fp;
+ char *line = NULL;
+ char pci_id[16];
+ int found = 0;
+ size_t linesz;
+
+ snprintf(pci_id, sizeof(pci_id), PCI_PRI_FMT,
+ pci_dev->addr.domain,
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ fp = fopen("/proc/ioports", "r");
+ if (fp == NULL) {
+ PMD_INIT_LOG(ERR, "%s(): can't open ioports", __func__);
+ return -1;
+ }
+
+ while (getdelim(&line, &linesz, '\n', fp) > 0) {
+ char *ptr = line;
+ char *left;
+ int n;
+
+ n = strcspn(ptr, ":");
+ ptr[n] = 0;
+ left = &ptr[n+1];
+
+ while (*left && isspace(*left))
+ left++;
+
+ if (!strncmp(left, pci_id, strlen(pci_id))) {
+ found = 1;
+
+ while (*ptr && isspace(*ptr))
+ ptr++;
+
+ sscanf(ptr, "%04hx-%04hx", &start, &end);
+ size = end - start + 1;
+
+ break;
+ }
+ }
+
+ free(line);
+ fclose(fp);
+
+ if (!found)
+ return -1;
+
+ pci_dev->mem_resource[0].addr = (void *)(uintptr_t)(uint32_t)start;
+ pci_dev->mem_resource[0].len = (uint64_t)size;
+ PMD_INIT_LOG(DEBUG,
+ "PCI Port IO found start=0x%x with size=0x%x",
+ start, size);
+
+ /* can't support lsc interrupt without uio */
+ pci_dev->driver->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;
+
+ return 0;
+}
+
+/* Extract I/O port numbers, trying sysfs first and /proc/ioports as fallback */
+static int virtio_resource_init(struct rte_pci_device *pci_dev)
+{
+ if (virtio_resource_init_by_uio(pci_dev) == 0)
+ return 0;
+ else
+ return virtio_resource_init_by_ioports(pci_dev);
+}
+
#else
static int
virtio_has_msix(const struct rte_pci_addr *loc __rte_unused)
@@ -801,70 +1209,87 @@ virtio_has_msix(const struct rte_pci_addr *loc __rte_unused)
/* nic_uio does not enable interrupts, return 0 (false). */
return 0;
}
+
+static int virtio_resource_init(struct rte_pci_device *pci_dev __rte_unused)
+{
+ /* no setup required */
+ return 0;
+}
#endif
/*
+ * Process Virtio Config changed interrupt and call the callback
+ * if link state changed.
+ */
+static void
+virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ void *param)
+{
+ struct rte_eth_dev *dev = param;
+ struct virtio_hw *hw = dev->data->dev_private;
+ uint8_t isr;
+
+ /* Read interrupt status which clears interrupt */
+ isr = vtpci_isr(hw);
+ PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
+
+ if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0)
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+
+ if (isr & VIRTIO_PCI_ISR_CONFIG) {
+ if (virtio_dev_link_update(dev, 0) == 0)
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC);
+ }
+
+}
+
+static void
+rx_func_get(struct rte_eth_dev *eth_dev)
+{
+ struct virtio_hw *hw = eth_dev->data->dev_private;
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+ else
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+}
+
+/*
* This function is based on probe() function in virtio_pci.c
* It returns 0 on success.
*/
static int
-eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
- struct rte_eth_dev *eth_dev)
+eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
+ struct virtio_hw *hw = eth_dev->data->dev_private;
struct virtio_net_config *config;
struct virtio_net_config local_config;
- uint32_t offset_conf = sizeof(config->mac);
struct rte_pci_device *pci_dev;
- struct virtio_hw *hw =
- VIRTIO_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- if (RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr)) {
- PMD_INIT_LOG(ERR,
- "MBUF HEADROOM should be enough to hold virtio net hdr\n");
- return -1;
- }
+ RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr));
eth_dev->dev_ops = &virtio_eth_dev_ops;
eth_dev->tx_pkt_burst = &virtio_xmit_pkts;
- if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ rx_func_get(eth_dev);
return 0;
+ }
- pci_dev = eth_dev->pci_dev;
-
-#ifdef RTE_EXEC_ENV_LINUXAPP
- {
- char dirname[PATH_MAX];
- char filename[PATH_MAX];
- unsigned long start, size;
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("virtio", VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);
+ return -ENOMEM;
+ }
- if (get_uio_dev(&pci_dev->addr, dirname, sizeof(dirname)) < 0)
- return -1;
+ pci_dev = eth_dev->pci_dev;
- /* get portio size */
- snprintf(filename, sizeof(filename),
- "%s/portio/port0/size", dirname);
- if (parse_sysfs_value(filename, &size) < 0) {
- PMD_INIT_LOG(ERR, "%s(): cannot parse size",
- __func__);
- return -1;
- }
+ if (virtio_resource_init(pci_dev) < 0)
+ return -1;
- /* get portio start */
- snprintf(filename, sizeof(filename),
- "%s/portio/port0/start", dirname);
- if (parse_sysfs_value(filename, &start) < 0) {
- PMD_INIT_LOG(ERR, "%s(): cannot parse portio start",
- __func__);
- return -1;
- }
- pci_dev->mem_resource[0].addr = (void *)(uintptr_t)start;
- pci_dev->mem_resource[0].len = (uint64_t)size;
- PMD_INIT_LOG(DEBUG,
- "PCI Port IO found start=0x%lx with size=0x%lx",
- start, size);
- }
-#endif
hw->use_msix = virtio_has_msix(&pci_dev->addr);
hw->io_base = (uint32_t)(uintptr_t)pci_dev->mem_resource[0].addr;
@@ -878,23 +1303,19 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
virtio_negotiate_features(hw);
+ /* If host does not support status then disable LSC */
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
+ pci_dev->driver->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ rx_func_get(eth_dev);
+
/* Setting up rx_header size for the device */
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
- eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
- } else {
- eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ else
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
- }
-
- /* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("virtio", ETHER_ADDR_LEN, 0);
- if (eth_dev->data->mac_addrs == NULL) {
- PMD_INIT_LOG(ERR,
- "Failed to allocate %d bytes needed to store MAC addresses",
- ETHER_ADDR_LEN);
- return -ENOMEM;
- }
/* Copy the permanent MAC address to: virtio_hw */
virtio_get_hwaddr(hw);
@@ -908,8 +1329,14 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
config = &local_config;
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, mac),
+ &config->mac, sizeof(config->mac));
+
if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
- offset_conf += sizeof(config->status);
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, status),
+ &config->status, sizeof(config->status));
} else {
PMD_INIT_LOG(DEBUG,
"VIRTIO_NET_F_STATUS is not supported");
@@ -917,15 +1344,16 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
}
if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
- offset_conf += sizeof(config->max_virtqueue_pairs);
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, max_virtqueue_pairs),
+ &config->max_virtqueue_pairs,
+ sizeof(config->max_virtqueue_pairs));
} else {
PMD_INIT_LOG(DEBUG,
"VIRTIO_NET_F_MQ is not supported");
config->max_virtqueue_pairs = 1;
}
- vtpci_read_dev_config(hw, 0, (uint8_t *)config, offset_conf);
-
hw->max_rx_queues =
(VIRTIO_MAX_RX_QUEUES < config->max_virtqueue_pairs) ?
VIRTIO_MAX_RX_QUEUES : config->max_virtqueue_pairs;
@@ -958,17 +1386,63 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
+
+ /* Setup interrupt callback */
+ if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ virtio_interrupt_handler, eth_dev);
+
+ virtio_dev_cq_start(eth_dev);
+
+ return 0;
+}
+
+static int
+eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct virtio_hw *hw = eth_dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ return -EPERM;
+
+ if (hw->started == 1) {
+ virtio_dev_stop(eth_dev);
+ virtio_dev_close(eth_dev);
+ }
+ pci_dev = eth_dev->pci_dev;
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+
+ virtio_dev_queue_release(hw->cvq);
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ /* reset interrupt callback */
+ if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ virtio_interrupt_handler,
+ eth_dev);
+
+ PMD_INIT_LOG(DEBUG, "dev_uninit completed");
+
return 0;
}
static struct eth_driver rte_virtio_pmd = {
- {
+ .pci_drv = {
.name = "rte_virtio_pmd",
.id_table = pci_id_virtio_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_DETACHABLE,
},
.eth_dev_init = eth_virtio_dev_init,
- .dev_private_size = sizeof(struct virtio_adapter),
+ .eth_dev_uninit = eth_virtio_dev_uninit,
+ .dev_private_size = sizeof(struct virtio_hw),
};
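
With RTE_PCI_DRV_DETACHABLE set, the ethdev layer may detach the port at
runtime, so eth_dev_init and eth_dev_uninit must mirror each other. A minimal
sketch of that pairing (names hypothetical, error handling trimmed):

	static int
	example_dev_init(struct rte_eth_dev *eth_dev)
	{
		/* acquire: dev_ops, burst functions, MAC array, IRQ callback */
		return 0;
	}

	static int
	example_dev_uninit(struct rte_eth_dev *eth_dev)
	{
		/* secondary processes share the primary's state; refuse */
		if (rte_eal_process_type() == RTE_PROC_SECONDARY)
			return -EPERM;
		/* release everything example_dev_init() acquired, in reverse */
		return 0;
	}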
/*
@@ -991,19 +1465,6 @@ rte_virtio_pmd_init(const char *name __rte_unused,
}
/*
- * Only 1 queue is supported, no queue release related operation
- */
-static void
-virtio_dev_rx_queue_release(__rte_unused void *rxq)
-{
-}
-
-static void
-virtio_dev_tx_queue_release(__rte_unused void *txq)
-{
-}
-
-/*
* Configure virtio device
* It returns 0 on success.
*/
@@ -1011,6 +1472,8 @@ static int
virtio_dev_configure(struct rte_eth_dev *dev)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct rte_pci_device *pci_dev = dev->pci_dev;
PMD_INIT_LOG(DEBUG, "configure");
@@ -1019,6 +1482,21 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return (-EINVAL);
}
+ hw->vlan_strip = rxmode->hw_vlan_strip;
+
+ if (rxmode->hw_vlan_filter
+ && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
+ PMD_DRV_LOG(NOTICE,
+ "vlan filtering not available on this host");
+ return -ENOTSUP;
+ }
+
+ if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
+ if (vtpci_irq_config(hw, 0) == VIRTIO_MSI_NO_VECTOR) {
+ PMD_DRV_LOG(ERR, "failed to set config vector");
+ return -EBUSY;
+ }
+
return 0;
}
@@ -1027,37 +1505,35 @@ static int
virtio_dev_start(struct rte_eth_dev *dev)
{
uint16_t nb_queues, i;
- uint16_t status;
- struct virtio_hw *hw =
- VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct rte_pci_device *pci_dev = dev->pci_dev;
+
+ /* check if lsc interrupt feature is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ if (!(pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
+ PMD_DRV_LOG(ERR, "link status not supported by host");
+ return -ENOTSUP;
+ }
- /* Tell the host we've noticed this device. */
- vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
+ if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+ return -EIO;
+ }
+ }
- /* Tell the host we've known how to drive the device. */
- vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+ /* Initialize Link state */
+ virtio_dev_link_update(dev, 0);
- virtio_dev_cq_start(dev);
+ /* On restart after stop do not touch queues */
+ if (hw->started)
+ return 0;
/* Do final configuration before rx/tx engine starts */
virtio_dev_rxtx_start(dev);
-
- /* Check VIRTIO_NET_F_STATUS for link status*/
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
- vtpci_read_dev_config(hw,
- offsetof(struct virtio_net_config, status),
- &status, sizeof(status));
- if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
- PMD_INIT_LOG(ERR, "Port: %d Link is DOWN",
- dev->data->port_id);
- return -EIO;
- } else {
- PMD_INIT_LOG(DEBUG, "Port: %d Link is UP",
- dev->data->port_id);
- }
- }
vtpci_reinit_complete(hw);
+ hw->started = 1;
+
	/* Notify the backend.
	 * Otherwise the tap backend might already stop its queue due to fullness.
	 * The vhost backend would then have no chance to be woken up.
@@ -1094,6 +1570,8 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
"Before freeing rxq[%d] used and unused buf", i);
VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
+ PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p",
+ i, dev->data->rx_queues[i]);
while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
dev->data->rx_queues[i])) != NULL) {
rte_pktmbuf_free(buf);
@@ -1128,17 +1606,20 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
}
/*
- * Stop device: disable rx and tx functions to allow for reconfiguring.
+ * Stop device: disable interrupt and mark link down
*/
static void
virtio_dev_stop(struct rte_eth_dev *dev)
{
- struct virtio_hw *hw =
- VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link;
- /* reset the NIC */
- vtpci_reset(hw);
- virtio_dev_free_mbufs(dev);
+ PMD_INIT_LOG(DEBUG, "stop");
+
+ if (dev->data->dev_conf.intr_conf.lsc)
+ rte_intr_disable(&dev->pci_dev->intr_handle);
+
+ memset(&link, 0, sizeof(link));
+ virtio_dev_atomic_write_link_status(dev, &link);
}
static int
@@ -1146,13 +1627,13 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
{
struct rte_eth_link link, old;
uint16_t status;
- struct virtio_hw *hw =
- VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_hw *hw = dev->data->dev_private;
memset(&link, 0, sizeof(link));
virtio_dev_atomic_read_link_status(dev, &link);
old = link;
link.link_duplex = FULL_DUPLEX;
link.link_speed = SPEED_10G;
+
if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
PMD_INIT_LOG(DEBUG, "Get link status from hw");
vtpci_read_dev_config(hw,
@@ -1171,16 +1652,14 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
link.link_status = 1; /* Link up */
}
virtio_dev_atomic_write_link_status(dev, &link);
- if (old.link_status == link.link_status)
- return -1;
- /*changed*/
- return 0;
+
+ return (old.link_status == link.link_status) ? -1 : 0;
}
static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- struct virtio_hw *hw = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_hw *hw = dev->data->dev_private;
dev_info->driver_name = dev->driver->pci_drv.name;
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
@@ -1188,6 +1667,9 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
+ };
}
/*
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtio_ethdev.h b/src/dpdk22/drivers/net/virtio/virtio_ethdev.h
index 1da3c625..ae2d47de 100755..100644
--- a/src/dpdk_lib18/librte_pmd_virtio/virtio_ethdev.h
+++ b/src/dpdk22/drivers/net/virtio/virtio_ethdev.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,28 +51,20 @@
#define VIRTIO_MAX_RX_QUEUES 128
#define VIRTIO_MAX_TX_QUEUES 128
-#define VIRTIO_MAX_MAC_ADDRS 1
+#define VIRTIO_MAX_MAC_ADDRS 64
#define VIRTIO_MIN_RX_BUFSIZE 64
#define VIRTIO_MAX_RX_PKTLEN 9728
/* Features desired/implemented by this driver. */
-#define VTNET_FEATURES \
- (VIRTIO_NET_F_MAC | \
- VIRTIO_NET_F_STATUS | \
- VIRTIO_NET_F_MQ | \
- VIRTIO_NET_F_CTRL_VQ | \
- VIRTIO_NET_F_CTRL_RX | \
- VIRTIO_NET_F_CTRL_VLAN | \
- VIRTIO_NET_F_CSUM | \
- VIRTIO_NET_F_HOST_TSO4 | \
- VIRTIO_NET_F_HOST_TSO6 | \
- VIRTIO_NET_F_HOST_ECN | \
- VIRTIO_NET_F_GUEST_CSUM | \
- VIRTIO_NET_F_GUEST_TSO4 | \
- VIRTIO_NET_F_GUEST_TSO6 | \
- VIRTIO_NET_F_GUEST_ECN | \
- VIRTIO_NET_F_MRG_RXBUF | \
- VIRTIO_RING_F_INDIRECT_DESC)
+#define VIRTIO_PMD_GUEST_FEATURES \
+ (1u << VIRTIO_NET_F_MAC | \
+ 1u << VIRTIO_NET_F_STATUS | \
+ 1u << VIRTIO_NET_F_MQ | \
+ 1u << VIRTIO_NET_F_CTRL_MAC_ADDR | \
+ 1u << VIRTIO_NET_F_CTRL_VQ | \
+ 1u << VIRTIO_NET_F_CTRL_RX | \
+ 1u << VIRTIO_NET_F_CTRL_VLAN | \
+ 1u << VIRTIO_NET_F_MRG_RXBUF)
/*
* CQ function prototype
@@ -87,20 +79,26 @@ void virtio_dev_rxtx_start(struct rte_eth_dev *dev);
int virtio_dev_queue_setup(struct rte_eth_dev *dev,
int queue_type,
uint16_t queue_idx,
- uint8_t vtpci_queue_idx,
+ uint16_t vtpci_queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
struct virtqueue **pvq);
+void virtio_dev_queue_release(struct virtqueue *vq);
+
int virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
+void virtio_dev_rx_queue_release(void *rxq);
+
int virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
+void virtio_dev_tx_queue_release(void *txq);
+
uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
@@ -110,15 +108,11 @@ uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
-/*
- * Structure to store private data for each driver instance (for each port).
- */
-struct virtio_adapter {
- struct virtio_hw hw;
-};
+uint16_t virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
-#define VIRTIO_DEV_PRIVATE_TO_HW(adapter)\
- (&((struct virtio_adapter *)adapter)->hw)
+uint16_t virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
/*
* The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtio_logs.h b/src/dpdk22/drivers/net/virtio/virtio_logs.h
index d6c33f7b..d6c33f7b 100755..100644
--- a/src/dpdk_lib18/librte_pmd_virtio/virtio_logs.h
+++ b/src/dpdk22/drivers/net/virtio/virtio_logs.h
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtio_pci.c b/src/dpdk22/drivers/net/virtio/virtio_pci.c
index ca9c7482..2245beca 100755..100644
--- a/src/dpdk_lib18/librte_pmd_virtio/virtio_pci.c
+++ b/src/dpdk22/drivers/net/virtio/virtio_pci.c
@@ -35,6 +35,8 @@
#include "virtio_pci.h"
#include "virtio_logs.h"
+static uint8_t vtpci_get_status(struct virtio_hw *);
+
void
vtpci_read_dev_config(struct virtio_hw *hw, uint64_t offset,
void *dst, int length)
@@ -113,7 +115,7 @@ vtpci_reinit_complete(struct virtio_hw *hw)
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}
-uint8_t
+static uint8_t
vtpci_get_status(struct virtio_hw *hw)
{
return VIRTIO_READ_REG_1(hw, VIRTIO_PCI_STATUS);
@@ -127,3 +129,19 @@ vtpci_set_status(struct virtio_hw *hw, uint8_t status)
VIRTIO_WRITE_REG_1(hw, VIRTIO_PCI_STATUS, status);
}
+
+uint8_t
+vtpci_isr(struct virtio_hw *hw)
+{
+
+ return VIRTIO_READ_REG_1(hw, VIRTIO_PCI_ISR);
+}
+
+
+/* Enable one vector (0) for Link State Interrupt */
+uint16_t
+vtpci_irq_config(struct virtio_hw *hw, uint16_t vec)
+{
+ VIRTIO_WRITE_REG_2(hw, VIRTIO_MSI_CONFIG_VECTOR, vec);
+ return VIRTIO_READ_REG_2(hw, VIRTIO_MSI_CONFIG_VECTOR);
+}
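
The write/read-back pair above is the legacy virtio handshake for MSI-X vector
assignment: the device echoes the vector it accepted, or VIRTIO_MSI_NO_VECTOR
if it could not bind one. A usage sketch, mirroring the check that
virtio_dev_configure() performs:

	/* request vector 0 for config-change (link state) interrupts */
	if (vtpci_irq_config(hw, 0) == VIRTIO_MSI_NO_VECTOR) {
		PMD_DRV_LOG(ERR, "failed to set config vector");
		return -EBUSY;
	}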
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtio_pci.h b/src/dpdk22/drivers/net/virtio/virtio_pci.h
index 373f9dcb..47f722a0 100755..100644
--- a/src/dpdk_lib18/librte_pmd_virtio/virtio_pci.h
+++ b/src/dpdk22/drivers/net/virtio/virtio_pci.h
@@ -96,26 +96,6 @@ struct virtqueue;
#define VIRTIO_CONFIG_STATUS_FAILED 0x80
/*
- * Generate interrupt when the virtqueue ring is
- * completely used, even if we've suppressed them.
- */
-#define VIRTIO_F_NOTIFY_ON_EMPTY (1 << 24)
-
-/*
- * The guest should never negotiate this feature; it
- * is used to detect faulty drivers.
- */
-#define VIRTIO_F_BAD_FEATURE (1 << 30)
-
-/*
- * Some VirtIO feature bits (currently bits 28 through 31) are
- * reserved for the transport being used (eg. virtio_ring), the
- * rest are per-device feature bits.
- */
-#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 32
-
-/*
* Each virtqueue indirect descriptor list must be physically contiguous.
* To allow us to malloc(9) each list individually, limit the number
* supported to what will fit in one page. With 4KB pages, this is a limit
@@ -128,33 +108,55 @@ struct virtqueue;
#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
/* The feature bitmap for virtio net */
-#define VIRTIO_NET_F_CSUM 0x00001 /* Host handles pkts w/ partial csum */
-#define VIRTIO_NET_F_GUEST_CSUM 0x00002 /* Guest handles pkts w/ partial csum*/
-#define VIRTIO_NET_F_MAC 0x00020 /* Host has given MAC address. */
-#define VIRTIO_NET_F_GSO 0x00040 /* Host handles pkts w/ any GSO type */
-#define VIRTIO_NET_F_GUEST_TSO4 0x00080 /* Guest can handle TSOv4 in. */
-#define VIRTIO_NET_F_GUEST_TSO6 0x00100 /* Guest can handle TSOv6 in. */
-#define VIRTIO_NET_F_GUEST_ECN 0x00200 /* Guest can handle TSO[6] w/ ECN in.*/
-#define VIRTIO_NET_F_GUEST_UFO 0x00400 /* Guest can handle UFO in. */
-#define VIRTIO_NET_F_HOST_TSO4 0x00800 /* Host can handle TSOv4 in. */
-#define VIRTIO_NET_F_HOST_TSO6 0x01000 /* Host can handle TSOv6 in. */
-#define VIRTIO_NET_F_HOST_ECN 0x02000 /* Host can handle TSO[6] w/ ECN in. */
-#define VIRTIO_NET_F_HOST_UFO 0x04000 /* Host can handle UFO in. */
-#define VIRTIO_NET_F_MRG_RXBUF 0x08000 /* Host can merge receive buffers. */
-#define VIRTIO_NET_F_STATUS 0x10000 /* virtio_net_config.status available*/
-#define VIRTIO_NET_F_CTRL_VQ 0x20000 /* Control channel available */
-#define VIRTIO_NET_F_CTRL_RX 0x40000 /* Control channel RX mode support */
-#define VIRTIO_NET_F_CTRL_VLAN 0x80000 /* Control channel VLAN filtering */
-#define VIRTIO_NET_F_CTRL_RX_EXTRA 0x100000 /* Extra RX mode control support */
-#define VIRTIO_RING_F_INDIRECT_DESC 0x10000000 /* Support for indirect buffer descriptors. */
-/* The guest publishes the used index for which it expects an interrupt
- * at the end of the avail ring. Host should ignore the avail->flags field.
- * The host publishes the avail index for which it expects a kick
- * at the end of the used ring. Guest should ignore the used->flags field.
+#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
+#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
+#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
+#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
+#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
+#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */
+#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */
+#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */
+#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */
+#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */
+#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
+#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
+#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */
+#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */
+#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
+#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
+#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */
+#define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the
+ * network */
+#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow
+ * Steering */
+#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
+
+/* Do we get callbacks when the ring is completely used, even if we've
+ * suppressed them? */
+#define VIRTIO_F_NOTIFY_ON_EMPTY 24
+
+/* Can the device handle any descriptor layout? */
+#define VIRTIO_F_ANY_LAYOUT 27
+
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
+/*
+ * Some VirtIO feature bits (currently bits 28 through 31) are
+ * reserved for the transport being used (eg. virtio_ring), the
+ * rest are per-device feature bits.
*/
-#define VIRTIO_RING_F_EVENT_IDX 0x20000000
+#define VIRTIO_TRANSPORT_F_START 28
+#define VIRTIO_TRANSPORT_F_END 32
+
+/* The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field. */
+/* The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field. */
+#define VIRTIO_RING_F_EVENT_IDX 29
-#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
+#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
+#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */
/*
* Maximum number of virtqueues per device.
@@ -168,7 +170,9 @@ struct virtio_hw {
uint32_t max_tx_queues;
uint32_t max_rx_queues;
uint16_t vtnet_hdr_size;
+ uint8_t vlan_strip;
uint8_t use_msix;
+ uint8_t started;
uint8_t mac_addr[ETHER_ADDR_LEN];
};
@@ -241,9 +245,9 @@ outl_p(unsigned int data, unsigned int port)
outl_p((unsigned int)(value), (VIRTIO_PCI_REG_ADDR((hw), (reg))))
static inline int
-vtpci_with_feature(struct virtio_hw *hw, uint32_t feature)
+vtpci_with_feature(struct virtio_hw *hw, uint32_t bit)
{
- return (hw->guest_features & feature) != 0;
+ return (hw->guest_features & (1u << bit)) != 0;
}
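
Since the feature macros are now bit positions instead of pre-shifted masks,
every caller shifts at the test site. For example, the header-size selection
in eth_virtio_dev_init() becomes:

	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);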
/*
@@ -253,8 +257,6 @@ void vtpci_reset(struct virtio_hw *);
void vtpci_reinit_complete(struct virtio_hw *);
-uint8_t vtpci_get_status(struct virtio_hw *);
-
void vtpci_set_status(struct virtio_hw *, uint8_t);
uint32_t vtpci_negotiate_features(struct virtio_hw *, uint32_t);
@@ -263,4 +265,8 @@ void vtpci_write_dev_config(struct virtio_hw *, uint64_t, void *, int);
void vtpci_read_dev_config(struct virtio_hw *, uint64_t, void *, int);
+uint8_t vtpci_isr(struct virtio_hw *);
+
+uint16_t vtpci_irq_config(struct virtio_hw *, uint16_t);
+
#endif /* _VIRTIO_PCI_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtio_ring.h b/src/dpdk22/drivers/net/virtio/virtio_ring.h
index a16c4991..447760a8 100755..100644
--- a/src/dpdk_lib18/librte_pmd_virtio/virtio_ring.h
+++ b/src/dpdk22/drivers/net/virtio/virtio_ring.h
@@ -123,10 +123,10 @@ struct vring {
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
-static inline int
+static inline size_t
vring_size(unsigned int num, unsigned long align)
{
- int size;
+ size_t size;
size = num * sizeof(struct vring_desc);
size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtio_rxtx.c b/src/dpdk22/drivers/net/virtio/virtio_rxtx.c
index c013f976..74b39ef7 100755..100644
--- a/src/dpdk_lib18/librte_pmd_virtio/virtio_rxtx.c
+++ b/src/dpdk22/drivers/net/virtio/virtio_rxtx.c
@@ -49,10 +49,13 @@
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
+#include <rte_byteorder.h>
#include "virtio_logs.h"
#include "virtio_ethdev.h"
+#include "virtio_pci.h"
#include "virtqueue.h"
+#include "virtio_rxtx.h"
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
@@ -60,6 +63,12 @@
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif
+
+#define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
+ ETH_TXQ_FLAGS_NOOFFLOADS)
+
+static int use_simple_rxtx;
+
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
@@ -128,17 +137,32 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
return i;
}
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif
+
+/* Cleanup from completed transmits. */
static void
-virtqueue_dequeue_pkt_tx(struct virtqueue *vq)
+virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
{
- struct vring_used_elem *uep;
- uint16_t used_idx, desc_idx;
+ uint16_t i, used_idx, desc_idx;
+ for (i = 0; i < num; i++) {
+ struct vring_used_elem *uep;
+ struct vq_desc_extra *dxp;
+
+ used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+
+ desc_idx = (uint16_t) uep->id;
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
- used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
- uep = &vq->vq_ring.used->ring[used_idx];
- desc_idx = (uint16_t) uep->id;
- vq->vq_used_cons_idx++;
- vq_ring_free_chain(vq, desc_idx);
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ }
}
@@ -190,7 +214,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
uint16_t seg_num = cookie->nb_segs;
uint16_t needed = 1 + seg_num;
uint16_t head_idx, idx;
- uint16_t head_size = txvq->hw->vtnet_hdr_size;
+ size_t head_size = txvq->hw->vtnet_hdr_size;
if (unlikely(txvq->vq_free_cnt == 0))
return -ENOSPC;
@@ -202,15 +226,13 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
idx = head_idx;
dxp = &txvq->vq_descx[idx];
- if (dxp->cookie != NULL)
- rte_pktmbuf_free(dxp->cookie);
dxp->cookie = (void *)cookie;
dxp->ndescs = needed;
start_dp = txvq->vq_ring.desc;
start_dp[idx].addr =
txvq->virtio_net_hdr_mem + idx * head_size;
- start_dp[idx].len = (uint32_t)head_size;
+ start_dp[idx].len = head_size;
start_dp[idx].flags = VRING_DESC_F_NEXT;
for (; ((seg_num > 0) && (cookie != NULL)); seg_num--) {
@@ -257,7 +279,7 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
* Reinitialise since virtio port might have been stopped and restarted
*/
memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
- vring_init(vr, size, ring_mem, vq->vq_alignment);
+ vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
vq->vq_used_cons_idx = 0;
vq->vq_desc_head_idx = 0;
vq->vq_avail_idx = 0;
@@ -284,6 +306,17 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
/* Allocate blank mbufs for the each rx descriptor */
nbufs = 0;
error = ENOSPC;
+
+ if (use_simple_rxtx)
+ for (i = 0; i < vq->vq_nentries; i++) {
+ vq->vq_ring.avail->ring[i] = i;
+ vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
+ }
+
+ memset(&vq->fake_mbuf, 0, sizeof(vq->fake_mbuf));
+ for (i = 0; i < RTE_PMD_VIRTIO_RX_MAX_BURST; i++)
+ vq->sw_ring[vq->vq_nentries + i] = &vq->fake_mbuf;
+
while (!virtqueue_full(vq)) {
m = rte_rxmbuf_alloc(vq->mpool);
if (m == NULL)
@@ -292,8 +325,10 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
/******************************************
* Enqueue allocated buffers *
*******************************************/
- error = virtqueue_enqueue_recv_refill(vq, m);
-
+ if (use_simple_rxtx)
+ error = virtqueue_enqueue_recv_refill_simple(vq, m);
+ else
+ error = virtqueue_enqueue_recv_refill(vq, m);
if (error) {
rte_pktmbuf_free(m);
break;
@@ -310,6 +345,24 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN,
vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
} else if (queue_type == VTNET_TQ) {
+ if (use_simple_rxtx) {
+ int mid_idx = vq->vq_nentries >> 1;
+ for (i = 0; i < mid_idx; i++) {
+ vq->vq_ring.avail->ring[i] = i + mid_idx;
+ vq->vq_ring.desc[i + mid_idx].next = i;
+ vq->vq_ring.desc[i + mid_idx].addr =
+ vq->virtio_net_hdr_mem +
+ mid_idx * vq->hw->vtnet_hdr_size;
+ vq->vq_ring.desc[i + mid_idx].len =
+ vq->hw->vtnet_hdr_size;
+ vq->vq_ring.desc[i + mid_idx].flags =
+ VRING_DESC_F_NEXT;
+ vq->vq_ring.desc[i].flags = 0;
+ }
+ for (i = mid_idx; i < vq->vq_nentries; i++)
+ vq->vq_ring.avail->ring[i] = i;
+ }
+
VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL,
vq->vq_queue_index);
VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN,
@@ -325,8 +378,7 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
void
virtio_dev_cq_start(struct rte_eth_dev *dev)
{
- struct virtio_hw *hw
- = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_hw *hw = dev->data->dev_private;
if (hw->cvq) {
virtio_dev_vring_start(hw->cvq, VTNET_CQ);
@@ -369,7 +421,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
__rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+ uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
struct virtqueue *vq;
int ret;
@@ -377,7 +429,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx,
nb_desc, socket_id, &vq);
if (ret < 0) {
- PMD_INIT_LOG(ERR, "tvq initialization failed");
+ PMD_INIT_LOG(ERR, "rvq initialization failed");
return ret;
}
@@ -385,9 +437,18 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
vq->mpool = mp;
dev->data->rx_queues[queue_idx] = vq;
+
+ virtio_rxq_vec_setup(vq);
+
return 0;
}
+void
+virtio_dev_rx_queue_release(void *rxq)
+{
+ virtio_dev_queue_release(rxq);
+}
+
/*
* struct rte_eth_dev *dev: Used to update dev
* uint16_t nb_desc: Defaults to values read from config space
@@ -403,17 +464,28 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
const struct rte_eth_txconf *tx_conf)
{
uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+ struct virtio_hw *hw = dev->data->dev_private;
struct virtqueue *vq;
+ uint16_t tx_free_thresh;
int ret;
PMD_INIT_FUNC_TRACE();
- if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOOFFLOADS)
- != ETH_TXQ_FLAGS_NOOFFLOADS) {
+ if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMS)
+ != ETH_TXQ_FLAGS_NOXSUMS) {
PMD_INIT_LOG(ERR, "TX checksum offload not supported\n");
return -EINVAL;
}
+ /* Use simple rx/tx func if single segment and no offloads */
+ if ((tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) == VIRTIO_SIMPLE_FLAGS &&
+ !vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO, "Using simple rx/tx path");
+ dev->tx_pkt_burst = virtio_xmit_pkts_simple;
+ dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ use_simple_rxtx = 1;
+ }
+
ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx,
nb_desc, socket_id, &vq);
if (ret < 0) {
@@ -421,10 +493,32 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
return ret;
}
+ tx_free_thresh = tx_conf->tx_free_thresh;
+ if (tx_free_thresh == 0)
+ tx_free_thresh =
+ RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
+
+ if (tx_free_thresh >= (vq->vq_nentries - 3)) {
+ RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
+ "number of TX entries minus 3 (%u)."
+ " (tx_free_thresh=%u port=%u queue=%u)\n",
+ vq->vq_nentries - 3,
+ tx_free_thresh, dev->data->port_id, queue_idx);
+ return -EINVAL;
+ }
+
+ vq->vq_free_thresh = tx_free_thresh;
+
dev->data->tx_queues[queue_idx] = vq;
return 0;
}
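
For a typical 256-entry TX vring with tx_conf->tx_free_thresh left at 0, the
threshold logic above works out as follows:

	/* tx_free_thresh = RTE_MIN(256 / 4, DEFAULT_TX_FREE_THRESH)
	 *                = RTE_MIN(64, 32) = 32
	 * and any user-supplied value >= 253 (256 - 3) is rejected. */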
+void
+virtio_dev_tx_queue_release(void *txq)
+{
+ virtio_dev_queue_release(txq);
+}
+
static void
virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
{
@@ -440,23 +534,52 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
}
}
+static void
+virtio_update_packet_stats(struct virtqueue *vq, struct rte_mbuf *mbuf)
+{
+ uint32_t s = mbuf->pkt_len;
+ struct ether_addr *ea;
+
+ if (s == 64) {
+ vq->size_bins[1]++;
+ } else if (s > 64 && s < 1024) {
+ uint32_t bin;
+
+ /* count zeros, and offset into correct bin */
+ bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
+ vq->size_bins[bin]++;
+ } else {
+ if (s < 64)
+ vq->size_bins[0]++;
+ else if (s < 1519)
+ vq->size_bins[6]++;
+ else if (s >= 1519)
+ vq->size_bins[7]++;
+ }
+
+ ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
+ vq->multicast += is_multicast_ether_addr(ea);
+ vq->broadcast += is_broadcast_ether_addr(ea);
+}
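
The bin index for 64 < s < 1024 falls out of the position of the highest set
bit: (sizeof(s) * 8) - __builtin_clz(s) equals 1 + floor(log2(s)), so
subtracting 5 maps 65..127 to bin 2, 128..255 to bin 3, and so on up to
512..1023 in bin 5. A worked example:

	uint32_t s = 200;	/* binary 11001000, highest bit at position 7 */
	uint32_t bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;	/* 32 - 24 - 5 = 3 */
	vq->size_bins[bin]++;	/* the 128..255 bucket */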
+
#define VIRTIO_MBUF_BURST_SZ 64
#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
uint16_t
virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct virtqueue *rxvq = rx_queue;
+ struct virtio_hw *hw;
struct rte_mbuf *rxm, *new_mbuf;
- uint16_t nb_used, num, nb_rx = 0;
+ uint16_t nb_used, num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
int error;
- uint32_t i, nb_enqueued = 0;
+ uint32_t i, nb_enqueued;
const uint32_t hdr_size = sizeof(struct virtio_net_hdr);
nb_used = VIRTQUEUE_NUSED(rxvq);
- rmb();
+ virtio_rmb();
num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
@@ -468,6 +591,11 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num);
PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
+
+ hw = rxvq->hw;
+ nb_rx = 0;
+ nb_enqueued = 0;
+
for (i = 0; i < num ; i++) {
rxm = rcv_pkts[i];
@@ -483,16 +611,23 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->port = rxvq->port_id;
rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
rxm->nb_segs = 1;
rxm->next = NULL;
rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
rxm->data_len = (uint16_t)(len[i] - hdr_size);
+ if (hw->vlan_strip)
+ rte_vlan_strip(rxm);
+
VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
rx_pkts[nb_rx++] = rxm;
+
rxvq->bytes += rx_pkts[nb_rx - 1]->pkt_len;
+ virtio_update_packet_stats(rxvq, rxm);
}
rxvq->packets += nb_rx;
@@ -516,14 +651,14 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
if (likely(nb_enqueued)) {
+ vq_update_avail_idx(rxvq);
+
if (unlikely(virtqueue_kick_prepare(rxvq))) {
virtqueue_notify(rxvq);
PMD_RX_LOG(DEBUG, "Notified\n");
}
}
- vq_update_avail_idx(rxvq);
-
return nb_rx;
}
@@ -533,27 +668,36 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint16_t nb_pkts)
{
struct virtqueue *rxvq = rx_queue;
+ struct virtio_hw *hw;
struct rte_mbuf *rxm, *new_mbuf;
- uint16_t nb_used, num, nb_rx = 0;
+ uint16_t nb_used, num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *prev;
int error;
- uint32_t i = 0, nb_enqueued = 0;
- uint32_t seg_num = 0;
- uint16_t extra_idx = 0;
- uint32_t seg_res = 0;
+ uint32_t i, nb_enqueued;
+ uint32_t seg_num;
+ uint16_t extra_idx;
+ uint32_t seg_res;
const uint32_t hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
nb_used = VIRTQUEUE_NUSED(rxvq);
- rmb();
+ virtio_rmb();
if (nb_used == 0)
return 0;
PMD_RX_LOG(DEBUG, "used:%d\n", nb_used);
+ hw = rxvq->hw;
+ nb_rx = 0;
+ i = 0;
+ nb_enqueued = 0;
+ seg_num = 0;
+ extra_idx = 0;
+ seg_res = 0;
+
while (i < nb_used) {
struct virtio_net_hdr_mrg_rxbuf *header;
@@ -589,6 +733,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rxm->data_off = RTE_PKTMBUF_HEADROOM;
rxm->nb_segs = seg_num;
rxm->next = NULL;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
rxm->data_len = (uint16_t)(len[0] - hdr_size);
@@ -602,7 +748,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
/*
* Get extra segments for current uncompleted packet.
*/
- uint32_t rcv_cnt =
+ uint16_t rcv_cnt =
RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
if (likely(VIRTQUEUE_NUSED(rxvq) >= rcv_cnt)) {
uint32_t rx_num =
@@ -639,10 +785,14 @@ virtio_recv_mergeable_pkts(void *rx_queue,
seg_res -= rcv_cnt;
}
+ if (hw->vlan_strip)
+ rte_vlan_strip(rx_pkts[nb_rx]);
+
VIRTIO_DUMP_PACKET(rx_pkts[nb_rx],
rx_pkts[nb_rx]->data_len);
rxvq->bytes += rx_pkts[nb_rx]->pkt_len;
+ virtio_update_packet_stats(rxvq, rx_pkts[nb_rx]);
nb_rx++;
}
@@ -667,14 +817,14 @@ virtio_recv_mergeable_pkts(void *rx_queue,
}
if (likely(nb_enqueued)) {
+ vq_update_avail_idx(rxvq);
+
if (unlikely(virtqueue_kick_prepare(rxvq))) {
virtqueue_notify(rxvq);
PMD_RX_LOG(DEBUG, "Notified");
}
}
- vq_update_avail_idx(rxvq);
-
return nb_rx;
}
@@ -682,65 +832,73 @@ uint16_t
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct virtqueue *txvq = tx_queue;
- struct rte_mbuf *txm;
- uint16_t nb_used, nb_tx, num;
+ uint16_t nb_used, nb_tx;
int error;
- nb_tx = 0;
-
if (unlikely(nb_pkts < 1))
return nb_pkts;
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
nb_used = VIRTQUEUE_NUSED(txvq);
- rmb();
+ virtio_rmb();
+ if (likely(nb_used > txvq->vq_nentries - txvq->vq_free_thresh))
+ virtio_xmit_cleanup(txvq, nb_used);
- num = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
-
- while (nb_tx < nb_pkts) {
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx];
/* Need one more descriptor for virtio header. */
- int need = tx_pkts[nb_tx]->nb_segs - txvq->vq_free_cnt + 1;
- int deq_cnt = RTE_MIN(need, (int)num);
-
- num -= (deq_cnt > 0) ? deq_cnt : 0;
- while (deq_cnt > 0) {
- virtqueue_dequeue_pkt_tx(txvq);
- deq_cnt--;
+ int need = txm->nb_segs - txvq->vq_free_cnt + 1;
+
+ /* Positive value indicates it needs free vring descriptors */
+ if (unlikely(need > 0)) {
+ nb_used = VIRTQUEUE_NUSED(txvq);
+ virtio_rmb();
+ need = RTE_MIN(need, (int)nb_used);
+
+ virtio_xmit_cleanup(txvq, need);
+ need = txm->nb_segs - txvq->vq_free_cnt + 1;
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to transmit");
+ break;
+ }
}
- need = (int)tx_pkts[nb_tx]->nb_segs - txvq->vq_free_cnt + 1;
- /*
- * Zero or negative value indicates it has enough free
- * descriptors to use for transmitting.
- */
- if (likely(need <= 0)) {
- txm = tx_pkts[nb_tx];
- /* Enqueue Packet buffers */
- error = virtqueue_enqueue_xmit(txvq, txm);
+ /* Do VLAN tag insertion */
+ if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
+ error = rte_vlan_insert(&txm);
if (unlikely(error)) {
- if (error == ENOSPC)
- PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0");
- else if (error == EMSGSIZE)
- PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1");
- else
- PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d", error);
- break;
+ rte_pktmbuf_free(txm);
+ continue;
}
- nb_tx++;
- txvq->bytes += txm->pkt_len;
- } else {
- PMD_TX_LOG(ERR, "No free tx descriptors to transmit");
+ }
+
+ /* Enqueue Packet buffers */
+ error = virtqueue_enqueue_xmit(txvq, txm);
+ if (unlikely(error)) {
+ if (error == ENOSPC)
+ PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0");
+ else if (error == EMSGSIZE)
+ PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1");
+ else
+ PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d", error);
break;
}
+
+ txvq->bytes += txm->pkt_len;
+ virtio_update_packet_stats(txvq, txm);
}
- vq_update_avail_idx(txvq);
txvq->packets += nb_tx;
- if (unlikely(virtqueue_kick_prepare(txvq))) {
- virtqueue_notify(txvq);
- PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+ if (likely(nb_tx)) {
+ vq_update_avail_idx(txvq);
+
+ if (unlikely(virtqueue_kick_prepare(txvq))) {
+ virtqueue_notify(txvq);
+ PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+ }
}
return nb_tx;
diff --git a/src/dpdk_lib18/librte_power/rte_power_common.h b/src/dpdk22/drivers/net/virtio/virtio_rxtx.h
index 64bd168f..831e4926 100755..100644
--- a/src/dpdk_lib18/librte_power/rte_power_common.h
+++ b/src/dpdk22/drivers/net/virtio/virtio_rxtx.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,9 +31,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef RTE_POWER_COMMON_H_
-#define RTE_POWER_COMMON_H_
+#define RTE_PMD_VIRTIO_RX_MAX_BURST 64
-#define RTE_POWER_INVALID_FREQ_INDEX (~0)
+int virtio_rxq_vec_setup(struct virtqueue *rxq);
-#endif /* RTE_POWER_COMMON_H_ */
+int virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
+ struct rte_mbuf *m);
diff --git a/src/dpdk22/drivers/net/virtio/virtio_rxtx_simple.c b/src/dpdk22/drivers/net/virtio/virtio_rxtx_simple.c
new file mode 100644
index 00000000..ff3c11a4
--- /dev/null
+++ b/src/dpdk22/drivers/net/virtio/virtio_rxtx_simple.c
@@ -0,0 +1,418 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <tmmintrin.h>
+
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+#include <rte_byteorder.h>
+
+#include "virtio_logs.h"
+#include "virtio_ethdev.h"
+#include "virtqueue.h"
+#include "virtio_rxtx.h"
+
+#define RTE_VIRTIO_VPMD_RX_BURST 32
+#define RTE_VIRTIO_DESC_PER_LOOP 8
+#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+int __attribute__((cold))
+virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
+ struct rte_mbuf *cookie)
+{
+ struct vq_desc_extra *dxp;
+ struct vring_desc *start_dp;
+ uint16_t desc_idx;
+
+ desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
+ dxp = &vq->vq_descx[desc_idx];
+ dxp->cookie = (void *)cookie;
+ vq->sw_ring[desc_idx] = cookie;
+
+ start_dp = vq->vq_ring.desc;
+ start_dp[desc_idx].addr = (uint64_t)((uintptr_t)cookie->buf_physaddr +
+ RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
+ start_dp[desc_idx].len = cookie->buf_len -
+ RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);
+
+ vq->vq_free_cnt--;
+ vq->vq_avail_idx++;
+
+ return 0;
+}
+
+static inline void
+virtio_rxq_rearm_vec(struct virtqueue *rxvq)
+{
+ int i;
+ uint16_t desc_idx;
+ struct rte_mbuf **sw_ring;
+ struct vring_desc *start_dp;
+ int ret;
+
+ desc_idx = rxvq->vq_avail_idx & (rxvq->vq_nentries - 1);
+ sw_ring = &rxvq->sw_ring[desc_idx];
+ start_dp = &rxvq->vq_ring.desc[desc_idx];
+
+ ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
+ RTE_VIRTIO_VPMD_RX_REARM_THRESH);
+ if (unlikely(ret)) {
+ rte_eth_devices[rxvq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ return;
+ }
+
+ for (i = 0; i < RTE_VIRTIO_VPMD_RX_REARM_THRESH; i++) {
+ uintptr_t p;
+
+ p = (uintptr_t)&sw_ring[i]->rearm_data;
+ *(uint64_t *)p = rxvq->mbuf_initializer;
+
+ start_dp[i].addr =
+ (uint64_t)((uintptr_t)sw_ring[i]->buf_physaddr +
+ RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
+ start_dp[i].len = sw_ring[i]->buf_len -
+ RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);
+ }
+
+ rxvq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ rxvq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ vq_update_avail_idx(rxvq);
+}
+
+/* virtio vPMD receive routine, which only accepts nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP
+ *
+ * This routine is for non-mergeable RX, one desc for each guest buffer.
+ * This routine is based on the RX ring layout optimization. Each entry in the
+ * avail ring points to the desc with the same index in the desc ring and this
+ * will never be changed in the driver.
+ *
+ * - if nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, no packets are returned
+ */
+uint16_t
+virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtqueue *rxvq = rx_queue;
+ uint16_t nb_used;
+ uint16_t desc_idx;
+ struct vring_used_elem *rused;
+ struct rte_mbuf **sw_ring;
+ struct rte_mbuf **sw_ring_end;
+ uint16_t nb_pkts_received;
+ __m128i shuf_msk1, shuf_msk2, len_adjust;
+
+ shuf_msk1 = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, /* vlan tci */
+ 5, 4, /* dat len */
+ 0xFF, 0xFF, 5, 4, /* pkt len */
+ 0xFF, 0xFF, 0xFF, 0xFF /* packet type */
+
+ );
+
+ shuf_msk2 = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, /* vlan tci */
+ 13, 12, /* dat len */
+ 0xFF, 0xFF, 13, 12, /* pkt len */
+ 0xFF, 0xFF, 0xFF, 0xFF /* packet type */
+ );
+
+ /* Subtract the header length.
+ * In which case do we need the header length in used->len?
+ */
+ len_adjust = _mm_set_epi16(
+ 0, 0,
+ 0,
+ (uint16_t) -sizeof(struct virtio_net_hdr),
+ 0, (uint16_t) -sizeof(struct virtio_net_hdr),
+ 0, 0);
+
+ if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
+ return 0;
+
+ nb_used = *(volatile uint16_t *)&rxvq->vq_ring.used->idx -
+ rxvq->vq_used_cons_idx;
+
+ rte_compiler_barrier();
+
+ if (unlikely(nb_used == 0))
+ return 0;
+
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
+ nb_used = RTE_MIN(nb_used, nb_pkts);
+
+ desc_idx = (uint16_t)(rxvq->vq_used_cons_idx & (rxvq->vq_nentries - 1));
+ rused = &rxvq->vq_ring.used->ring[desc_idx];
+ sw_ring = &rxvq->sw_ring[desc_idx];
+ sw_ring_end = &rxvq->sw_ring[rxvq->vq_nentries];
+
+ _mm_prefetch((const void *)rused, _MM_HINT_T0);
+
+ if (rxvq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ virtio_rxq_rearm_vec(rxvq);
+ if (unlikely(virtqueue_kick_prepare(rxvq)))
+ virtqueue_notify(rxvq);
+ }
+
+ for (nb_pkts_received = 0;
+ nb_pkts_received < nb_used;) {
+ __m128i desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
+ __m128i mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
+ __m128i pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
+
+ mbp[0] = _mm_loadu_si128((__m128i *)(sw_ring + 0));
+ desc[0] = _mm_loadu_si128((__m128i *)(rused + 0));
+ _mm_storeu_si128((__m128i *)&rx_pkts[0], mbp[0]);
+
+ mbp[1] = _mm_loadu_si128((__m128i *)(sw_ring + 2));
+ desc[1] = _mm_loadu_si128((__m128i *)(rused + 2));
+ _mm_storeu_si128((__m128i *)&rx_pkts[2], mbp[1]);
+
+ mbp[2] = _mm_loadu_si128((__m128i *)(sw_ring + 4));
+ desc[2] = _mm_loadu_si128((__m128i *)(rused + 4));
+ _mm_storeu_si128((__m128i *)&rx_pkts[4], mbp[2]);
+
+ mbp[3] = _mm_loadu_si128((__m128i *)(sw_ring + 6));
+ desc[3] = _mm_loadu_si128((__m128i *)(rused + 6));
+ _mm_storeu_si128((__m128i *)&rx_pkts[6], mbp[3]);
+
+ pkt_mb[1] = _mm_shuffle_epi8(desc[0], shuf_msk2);
+ pkt_mb[0] = _mm_shuffle_epi8(desc[0], shuf_msk1);
+ pkt_mb[1] = _mm_add_epi16(pkt_mb[1], len_adjust);
+ pkt_mb[0] = _mm_add_epi16(pkt_mb[0], len_adjust);
+ _mm_storeu_si128((void *)&rx_pkts[1]->rx_descriptor_fields1,
+ pkt_mb[1]);
+ _mm_storeu_si128((void *)&rx_pkts[0]->rx_descriptor_fields1,
+ pkt_mb[0]);
+
+ pkt_mb[3] = _mm_shuffle_epi8(desc[1], shuf_msk2);
+ pkt_mb[2] = _mm_shuffle_epi8(desc[1], shuf_msk1);
+ pkt_mb[3] = _mm_add_epi16(pkt_mb[3], len_adjust);
+ pkt_mb[2] = _mm_add_epi16(pkt_mb[2], len_adjust);
+ _mm_storeu_si128((void *)&rx_pkts[3]->rx_descriptor_fields1,
+ pkt_mb[3]);
+ _mm_storeu_si128((void *)&rx_pkts[2]->rx_descriptor_fields1,
+ pkt_mb[2]);
+
+ pkt_mb[5] = _mm_shuffle_epi8(desc[2], shuf_msk2);
+ pkt_mb[4] = _mm_shuffle_epi8(desc[2], shuf_msk1);
+ pkt_mb[5] = _mm_add_epi16(pkt_mb[5], len_adjust);
+ pkt_mb[4] = _mm_add_epi16(pkt_mb[4], len_adjust);
+ _mm_storeu_si128((void *)&rx_pkts[5]->rx_descriptor_fields1,
+ pkt_mb[5]);
+ _mm_storeu_si128((void *)&rx_pkts[4]->rx_descriptor_fields1,
+ pkt_mb[4]);
+
+ pkt_mb[7] = _mm_shuffle_epi8(desc[3], shuf_msk2);
+ pkt_mb[6] = _mm_shuffle_epi8(desc[3], shuf_msk1);
+ pkt_mb[7] = _mm_add_epi16(pkt_mb[7], len_adjust);
+ pkt_mb[6] = _mm_add_epi16(pkt_mb[6], len_adjust);
+ _mm_storeu_si128((void *)&rx_pkts[7]->rx_descriptor_fields1,
+ pkt_mb[7]);
+ _mm_storeu_si128((void *)&rx_pkts[6]->rx_descriptor_fields1,
+ pkt_mb[6]);
+
+ if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
+ if (sw_ring + nb_used <= sw_ring_end)
+ nb_pkts_received += nb_used;
+ else
+ nb_pkts_received += sw_ring_end - sw_ring;
+ break;
+ } else {
+ if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
+ sw_ring_end)) {
+ nb_pkts_received += sw_ring_end - sw_ring;
+ break;
+ } else {
+ nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;
+
+ rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
+ sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
+ rused += RTE_VIRTIO_DESC_PER_LOOP;
+ nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
+ }
+ }
+ }
+
+ rxvq->vq_used_cons_idx += nb_pkts_received;
+ rxvq->vq_free_cnt += nb_pkts_received;
+ rxvq->packets += nb_pkts_received;
+ return nb_pkts_received;
+}
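
Each vring_used_elem is 8 bytes ({uint32_t id; uint32_t len}), so one 128-bit
load covers two used entries. The shuffle masks pick each entry's low len
bytes into the mbuf's pkt_len/data_len lanes, and len_adjust then subtracts
the virtio header from both:

	/* desc[0] = {id0, len0, id1, len1} after one _mm_loadu_si128():
	 *   shuf_msk1 -> pkt_len/data_len <- len0 (bytes 4..5)
	 *   shuf_msk2 -> pkt_len/data_len <- len1 (bytes 12..13)
	 * adding len_adjust subtracts sizeof(struct virtio_net_hdr) from both. */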
+
+#define VIRTIO_TX_FREE_THRESH 32
+#define VIRTIO_TX_MAX_FREE_BUF_SZ 32
+#define VIRTIO_TX_FREE_NR 32
+/* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
+static inline void
+virtio_xmit_cleanup(struct virtqueue *vq)
+{
+ uint16_t i, desc_idx;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ];
+
+ desc_idx = (uint16_t)(vq->vq_used_cons_idx &
+ ((vq->vq_nentries >> 1) - 1));
+ m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
+ m = __rte_pktmbuf_prefree_seg(m);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
+ m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
+ m = __rte_pktmbuf_prefree_seg(m);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool))
+ free[nb_free++] = m;
+ else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void **)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
+ m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
+ m = __rte_pktmbuf_prefree_seg(m);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ vq->vq_used_cons_idx += VIRTIO_TX_FREE_NR;
+ vq->vq_free_cnt += (VIRTIO_TX_FREE_NR << 1);
+}
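
The >> 1 and << 1 pairing reflects the simple TX layout prepared in
virtio_dev_vring_start(): data descriptors occupy the lower half of the ring,
each chained to a fixed header descriptor in the upper half, so retiring one
used element returns two descriptors. Assuming a 256-entry vring:

	/* slots 0..127 carry packet data, slots 128..255 the virtio headers;
	 * one cleanup pass retires VIRTIO_TX_FREE_NR (32) used elements and
	 * returns 32 data + 32 header descriptors: 32 << 1 == 64. */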
+
+uint16_t
+virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtqueue *txvq = tx_queue;
+ uint16_t nb_used;
+ uint16_t desc_idx;
+ struct vring_desc *start_dp;
+ uint16_t nb_tail, nb_commit;
+ int i;
+ uint16_t desc_idx_max = (txvq->vq_nentries >> 1) - 1;
+
+ nb_used = VIRTQUEUE_NUSED(txvq);
+ rte_compiler_barrier();
+
+ if (nb_used >= VIRTIO_TX_FREE_THRESH)
+ virtio_xmit_cleanup(tx_queue);
+
+ nb_commit = nb_pkts = RTE_MIN((txvq->vq_free_cnt >> 1), nb_pkts);
+ desc_idx = (uint16_t) (txvq->vq_avail_idx & desc_idx_max);
+ start_dp = txvq->vq_ring.desc;
+ nb_tail = (uint16_t) (desc_idx_max + 1 - desc_idx);
+
+ if (nb_commit >= nb_tail) {
+ for (i = 0; i < nb_tail; i++)
+ txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
+ for (i = 0; i < nb_tail; i++) {
+ start_dp[desc_idx].addr =
+ RTE_MBUF_DATA_DMA_ADDR(*tx_pkts);
+ start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
+ tx_pkts++;
+ desc_idx++;
+ }
+ nb_commit -= nb_tail;
+ desc_idx = 0;
+ }
+ for (i = 0; i < nb_commit; i++)
+ txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
+ for (i = 0; i < nb_commit; i++) {
+ start_dp[desc_idx].addr = RTE_MBUF_DATA_DMA_ADDR(*tx_pkts);
+ start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
+ tx_pkts++;
+ desc_idx++;
+ }
+
+ rte_compiler_barrier();
+
+ txvq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);
+ txvq->vq_avail_idx += nb_pkts;
+ txvq->vq_ring.avail->idx = txvq->vq_avail_idx;
+ txvq->packets += nb_pkts;
+
+ if (likely(nb_pkts)) {
+ if (unlikely(virtqueue_kick_prepare(txvq)))
+ virtqueue_notify(txvq);
+ }
+
+ return nb_pkts;
+}
+
+int __attribute__((cold))
+virtio_rxq_vec_setup(struct virtqueue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+
+ return 0;
+}
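
mbuf_initializer captures the 64-bit rearm_data window of a template mbuf
(refcnt, nb_segs, port, data_off) so the hot rearm path can reinitialize a
fresh mbuf with a single store, as virtio_rxq_rearm_vec() does:

	uintptr_t p = (uintptr_t)&m->rearm_data;
	*(uint64_t *)p = rxvq->mbuf_initializer;	/* one write instead of four */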
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtqueue.c b/src/dpdk22/drivers/net/virtio/virtqueue.c
index 8a3005fb..7f60e3ef 100755..100644
--- a/src/dpdk_lib18/librte_pmd_virtio/virtqueue.c
+++ b/src/dpdk22/drivers/net/virtio/virtqueue.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -60,11 +60,13 @@ virtqueue_detatch_unused(struct virtqueue *vq)
struct rte_mbuf *cookie;
int idx;
- for (idx = 0; idx < vq->vq_nentries; idx++) {
- if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
- vq->vq_descx[idx].cookie = NULL;
- return cookie;
+ if (vq != NULL)
+ for (idx = 0; idx < vq->vq_nentries; idx++) {
+ cookie = vq->vq_descx[idx].cookie;
+ if (cookie != NULL) {
+ vq->vq_descx[idx].cookie = NULL;
+ return cookie;
+ }
}
- }
return NULL;
}
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtqueue.h b/src/dpdk22/drivers/net/virtio/virtqueue.h
index fdee0547..61b31370 100755..100644
--- a/src/dpdk_lib18/librte_pmd_virtio/virtqueue.h
+++ b/src/dpdk22/drivers/net/virtio/virtqueue.h
@@ -37,7 +37,6 @@
#include <stdint.h>
#include <rte_atomic.h>
-#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
@@ -46,9 +45,18 @@
#include "virtio_ring.h"
#include "virtio_logs.h"
-#define mb() rte_mb()
-#define wmb() rte_wmb()
-#define rmb() rte_rmb()
+struct rte_mbuf;
+
+/*
+ * Per virtio_config.h in Linux.
+ * For virtio_pci on SMP, we don't need to order with respect to MMIO
+ * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * sufficient.
+ *
+ */
+#define virtio_mb() rte_smp_mb()
+#define virtio_rmb() rte_smp_rmb()
+#define virtio_wmb() rte_smp_wmb()
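
These map to DPDK's SMP barriers because the device observes guest memory
coherently, so no full rte_mb() around MMIO is needed. The producer-side
pattern they guard (descriptor publication before index update) is the one
behind vq_update_avail_idx():

	vq->vq_ring.avail->ring[idx] = desc_idx;	/* 1: publish descriptor */
	virtio_wmb();					/* 2: order stores */
	vq->vq_ring.avail->idx = vq->vq_avail_idx;	/* 3: host may consume */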
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
@@ -90,6 +98,34 @@ enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
#define VIRTIO_NET_CTRL_RX_NOBCAST 5
/**
+ * Control the MAC
+ *
+ * The MAC filter table is managed by the hypervisor; the guest should
+ * assume the size is infinite.  Filtering should be considered
+ * non-perfect, i.e. based on hypervisor resources, the guest may
+ * receive packets from sources not specified in the filter list.
+ *
+ * In addition to the class/cmd header, the TABLE_SET command requires
+ * two out scatterlists. Each contains a 4 byte count of entries followed
+ * by a concatenated byte stream of the ETH_ALEN MAC addresses. The
+ * first sg list contains unicast addresses, the second is for multicast.
+ * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature
+ * is available.
+ *
+ * The ADDR_SET command requires one out scatterlist, which contains a
+ * 6-byte MAC address.  This functionality is present if the
+ * VIRTIO_NET_F_CTRL_MAC_ADDR feature is available.
+ */
+struct virtio_net_ctrl_mac {
+ uint32_t entries;
+ uint8_t macs[][ETHER_ADDR_LEN];
+} __attribute__((__packed__));
+
+#define VIRTIO_NET_CTRL_MAC 1
+ #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
+ #define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
+
+/**
* Control VLAN filtering
*
* The VLAN filter table is controlled via a simple ADD/DEL interface.
@@ -112,7 +148,7 @@ typedef uint8_t virtio_net_ctrl_ack;
#define VIRTIO_NET_OK 0
#define VIRTIO_NET_ERR 1
-#define VIRTIO_MAX_CTRL_DATA 128
+#define VIRTIO_MAX_CTRL_DATA 2048
struct virtio_pmd_ctrl {
struct virtio_net_ctrl_hdr hdr;
@@ -127,16 +163,16 @@ struct virtqueue {
struct rte_mempool *mpool; /**< mempool for mbuf allocation */
uint16_t queue_id; /**< DPDK queue index. */
uint8_t port_id; /**< Device port identifier. */
+ uint16_t vq_queue_index; /**< PCI queue index */
void *vq_ring_virt_mem; /**< linear address of vring*/
- int vq_alignment;
- int vq_ring_size;
+ unsigned int vq_ring_size;
phys_addr_t vq_ring_mem; /**< physical address of vring */
struct vring vq_ring; /**< vring keeping desc, used and avail */
uint16_t vq_free_cnt; /**< num of desc available */
uint16_t vq_nentries; /**< vring desc numbers */
- uint16_t vq_queue_index; /**< PCI queue index */
+ uint16_t vq_free_thresh; /**< free threshold */
/**
* Head of the free chain in the descriptor table. If
* there are no free descriptors, this will be set to
@@ -150,12 +186,21 @@ struct virtqueue {
*/
uint16_t vq_used_cons_idx;
uint16_t vq_avail_idx;
+ uint64_t mbuf_initializer; /**< value to init mbufs. */
phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+ struct rte_mbuf **sw_ring; /**< RX software ring. */
+ /* dummy mbuf, for wraparound when processing RX ring. */
+ struct rte_mbuf fake_mbuf;
+
/* Statistics */
uint64_t packets;
uint64_t bytes;
uint64_t errors;
+ uint64_t multicast;
+ uint64_t broadcast;
+ /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
+ uint64_t size_bins[8];
struct vq_desc_extra {
void *cookie;
@@ -164,14 +209,12 @@ struct virtqueue {
};
/* If multiqueue is provided by the host, then we support it. */
-#ifndef VIRTIO_NET_F_MQ
-/* Device supports Receive Flow Steering */
-#define VIRTIO_NET_F_MQ 0x400000
#define VIRTIO_NET_CTRL_MQ 4
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
-#endif
+
+#define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
/**
* This is the first element of the scatter-gather list. If you don't
@@ -225,7 +268,7 @@ virtqueue_full(const struct virtqueue *vq)
static inline void
vq_update_avail_idx(struct virtqueue *vq)
{
- rte_compiler_barrier();
+ virtio_wmb();
vq->vq_ring.avail->idx = vq->vq_avail_idx;
}
@@ -255,7 +298,7 @@ static inline void
virtqueue_notify(struct virtqueue *vq)
{
/*
- * Ensure updated avail->idx is visible to host. mb() necessary?
+ * Ensure updated avail->idx is visible to host.
* For virtio on IA, the notification is through an I/O port operation
* which is a serialization instruction itself.
*/
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/README b/src/dpdk22/drivers/net/vmxnet3/base/README
index f13fec72..599a3661 100755..100644
--- a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/README
+++ b/src/dpdk22/drivers/net/vmxnet3/base/README
@@ -38,13 +38,10 @@ In which, upt1_defs.h and vmxnet3_defs.h are introduced without any change.
The other 4 files: includeCheck.h, vmware_pack_begin.h, vmware_pack_end.h and vmxnet3_osdep.h
are created to adapt to the needs of the above 2 files.
-Updating driver
-===============
-
-The following modifications have been made to this code to integrate it with the
-Intel DPDK:
-
-
--------------
+Updating the driver
+===================
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+ vmxnet3_osdep.h
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/includeCheck.h b/src/dpdk22/drivers/net/vmxnet3/base/includeCheck.h
index 16308d28..310cebe7 100755..100644
--- a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/includeCheck.h
+++ b/src/dpdk22/drivers/net/vmxnet3/base/includeCheck.h
@@ -37,4 +37,3 @@
#include "vmxnet3_osdep.h"
#endif /* _INCLUDECHECK_H */
-
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/upt1_defs.h b/src/dpdk22/drivers/net/vmxnet3/base/upt1_defs.h
index d9144e32..d9144e32 100755..100644
--- a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/upt1_defs.h
+++ b/src/dpdk22/drivers/net/vmxnet3/base/upt1_defs.h
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmware_pack_begin.h b/src/dpdk22/drivers/net/vmxnet3/base/vmware_pack_begin.h
index 860ec4c3..860ec4c3 100755..100644
--- a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmware_pack_begin.h
+++ b/src/dpdk22/drivers/net/vmxnet3/base/vmware_pack_begin.h
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmware_pack_end.h b/src/dpdk22/drivers/net/vmxnet3/base/vmware_pack_end.h
index 860ec4c3..860ec4c3 100755..100644
--- a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmware_pack_end.h
+++ b/src/dpdk22/drivers/net/vmxnet3/base/vmware_pack_end.h
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmxnet3_defs.h b/src/dpdk22/drivers/net/vmxnet3/base/vmxnet3_defs.h
index 2b56574f..2b56574f 100755..100644
--- a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmxnet3_defs.h
+++ b/src/dpdk22/drivers/net/vmxnet3/base/vmxnet3_defs.h
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmxnet3_osdep.h b/src/dpdk22/drivers/net/vmxnet3/base/vmxnet3_osdep.h
index b6e3469c..b6e3469c 100755..100644
--- a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmxnet3_osdep.h
+++ b/src/dpdk22/drivers/net/vmxnet3/base/vmxnet3_osdep.h
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ethdev.c b/src/dpdk22/drivers/net/vmxnet3/vmxnet3_ethdev.c
index ef0af16a..c363bf60 100755..100644
--- a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ethdev.c
+++ b/src/dpdk22/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -52,7 +52,6 @@
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
@@ -62,7 +61,7 @@
#include <rte_malloc.h>
#include <rte_dev.h>
-#include "vmxnet3/vmxnet3_defs.h"
+#include "base/vmxnet3_defs.h"
#include "vmxnet3_ring.h"
#include "vmxnet3_logs.h"
@@ -70,8 +69,8 @@
#define PROCESS_SYS_EVENTS 0
-static int eth_vmxnet3_dev_init(struct eth_driver *eth_drv,
- struct rte_eth_dev *eth_dev);
+static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
static int vmxnet3_dev_start(struct rte_eth_dev *dev);
static void vmxnet3_dev_stop(struct rte_eth_dev *dev);
@@ -87,13 +86,19 @@ static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
+static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vid, int on);
+static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static void vmxnet3_dev_vlan_offload_set_clear(struct rte_eth_dev *dev,
+ int mask, int clear);
+
#if PROCESS_SYS_EVENTS == 1
static void vmxnet3_process_events(struct vmxnet3_hw *);
#endif
/*
* The set of PCI devices this driver supports
*/
-static struct rte_pci_id pci_id_vmxnet3_map[] = {
+static const struct rte_pci_id pci_id_vmxnet3_map[] = {
#define RTE_PCI_DEV_ID_DECL_VMXNET3(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
@@ -101,7 +106,7 @@ static struct rte_pci_id pci_id_vmxnet3_map[] = {
{ .vendor_id = 0, /* sentinel */ },
};
-static struct eth_dev_ops vmxnet3_eth_dev_ops = {
+static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
.dev_configure = vmxnet3_dev_configure,
.dev_start = vmxnet3_dev_start,
.dev_stop = vmxnet3_dev_stop,
@@ -113,6 +118,8 @@ static struct eth_dev_ops vmxnet3_eth_dev_ops = {
.link_update = vmxnet3_dev_link_update,
.stats_get = vmxnet3_dev_stats_get,
.dev_infos_get = vmxnet3_dev_info_get,
+ .vlan_filter_set = vmxnet3_dev_vlan_filter_set,
+ .vlan_offload_set = vmxnet3_dev_vlan_offload_set,
.rx_queue_setup = vmxnet3_dev_rx_queue_setup,
.rx_queue_release = vmxnet3_dev_rx_queue_release,
.tx_queue_setup = vmxnet3_dev_tx_queue_setup,
@@ -149,9 +156,36 @@ gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
* - On success, zero.
* - On failure, negative value.
*/
-static inline int
-rte_vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
+
+static int
+vmxnet3_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &(dev->data->dev_link);
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
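
The read helper above leans on rte_atomic64_cmpset() for an atomic 64-bit copy:
it compares dst against its own current value and stores src, so the call fails
only if a concurrent writer changed dst mid-flight. A standalone sketch of the
same idiom (helper name hypothetical; it assumes the copied struct is exactly
64 bits wide, as the driver does):

    #include <stdint.h>
    #include <rte_atomic.h>

    /* Atomically copy a 64-bit-sized object.
     * Returns 0 on success, -1 if a concurrent writer won the race. */
    static int
    atomic_copy_u64(volatile uint64_t *dst, const uint64_t *src)
    {
            uint64_t old = *dst;

            return rte_atomic64_cmpset(dst, old, *src) ? 0 : -1;
    }
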
+
+/**
+ * Atomically writes the link status information into the global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ * - Pointer to the structure rte_eth_dev to write to.
+ * @param link
+ * - Pointer to the buffer holding the link status to be saved.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static int
+vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
{
struct rte_eth_link *dst = &(dev->data->dev_link);
struct rte_eth_link *src = link;
@@ -182,8 +216,7 @@ vmxnet3_disable_intr(struct vmxnet3_hw *hw)
* It returns 0 on success.
*/
static int
-eth_vmxnet3_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
- struct rte_eth_dev *eth_dev)
+eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
struct vmxnet3_hw *hw = eth_dev->data->dev_private;
@@ -197,12 +230,14 @@ eth_vmxnet3_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
pci_dev = eth_dev->pci_dev;
/*
- * for secondary processes, we don't initialise any further as primary
+ * for secondary processes, we don't initialize any further as primary
* has already done this work.
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
@@ -211,26 +246,25 @@ eth_vmxnet3_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
hw->num_rx_queues = 1;
hw->num_tx_queues = 1;
- hw->cur_mtu = ETHER_MTU;
hw->bufs_per_pkt = 1;
/* Check h/w version compatibility with driver. */
ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
- PMD_INIT_LOG(DEBUG, "Harware version : %d", ver);
+ PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
if (ver & 0x1)
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1);
else {
- PMD_INIT_LOG(ERR, "Uncompatiable h/w version, should be 0x1");
+ PMD_INIT_LOG(ERR, "Incompatible h/w version, should be 0x1");
return -EIO;
}
/* Check UPT version compatibility with driver. */
ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
- PMD_INIT_LOG(DEBUG, "UPT harware version : %d", ver);
+ PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
if (ver & 0x1)
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
else {
- PMD_INIT_LOG(ERR, "Incompatiable UPT version.");
+ PMD_INIT_LOG(ERR, "Incompatible UPT version.");
return -EIO;
}
@@ -263,13 +297,37 @@ eth_vmxnet3_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
return 0;
}
+static int
+eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct vmxnet3_hw *hw = eth_dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (hw->adapter_stopped == 0)
+ vmxnet3_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
static struct eth_driver rte_vmxnet3_pmd = {
- {
+ .pci_drv = {
.name = "rte_vmxnet3_pmd",
.id_table = pci_id_vmxnet3_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
},
.eth_dev_init = eth_vmxnet3_dev_init,
+ .eth_dev_uninit = eth_vmxnet3_dev_uninit,
.dev_private_size = sizeof(struct vmxnet3_hw),
};
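
Adding RTE_PCI_DRV_DETACHABLE together with the eth_dev_uninit hook is what
makes the port hot-unpluggable under DPDK 2.2. A hedged usage sketch from the
application side (rte_eth_dev_detach() is the 2.x hotplug API; the helper name
is hypothetical):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Detach a running vmxnet3 port; only possible because the driver
     * now sets RTE_PCI_DRV_DETACHABLE and provides eth_dev_uninit. */
    static void
    detach_port(uint8_t port_id)
    {
            char devname[64]; /* receives the detached device's name */

            rte_eth_dev_stop(port_id);
            rte_eth_dev_close(port_id);
            if (rte_eth_dev_detach(port_id, devname) == 0)
                    printf("detached %s\n", devname);
    }
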
@@ -371,7 +429,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
Vmxnet3_DSDevRead *devRead = &shared->devRead;
uint32_t *mac_ptr;
uint32_t val, i;
- int ret;
+ int ret, mask;
shared->magic = VMXNET3_REV1_MAGIC;
devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
@@ -384,9 +442,9 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
devRead->misc.driverInfo.vmxnet3RevSpt = 1;
devRead->misc.driverInfo.uptVerSpt = 1;
+ devRead->misc.mtu = rte_le_to_cpu_32(dev->data->mtu);
devRead->misc.queueDescPA = hw->queueDescPA;
devRead->misc.queueDescLen = hw->queue_desc_len;
- devRead->misc.mtu = hw->cur_mtu;
devRead->misc.numTxQueues = hw->num_tx_queues;
devRead->misc.numRxQueues = hw->num_rx_queues;
@@ -442,9 +500,6 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.hw_ip_checksum)
devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
- devRead->misc.uptFeatures |= VMXNET3_F_RXVLAN;
-
if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
ret = vmxnet3_rss_configure(dev);
if (ret != VMXNET3_SUCCESS)
@@ -456,11 +511,14 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
devRead->rssConfDesc.confPA = hw->rss_confPA;
}
- if (dev->data->dev_conf.rxmode.hw_vlan_filter) {
- ret = vmxnet3_vlan_configure(dev);
- if (ret != VMXNET3_SUCCESS)
- return ret;
- }
+ mask = 0;
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ mask |= ETH_VLAN_STRIP_MASK;
+
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ mask |= ETH_VLAN_FILTER_MASK;
+
+ vmxnet3_dev_vlan_offload_set_clear(dev, mask, 1);
PMD_INIT_LOG(DEBUG,
"Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
@@ -548,7 +606,7 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- if (hw->adapter_stopped == TRUE) {
+ if (hw->adapter_stopped == 1) {
PMD_INIT_LOG(DEBUG, "Device already closed.");
return;
}
@@ -564,13 +622,13 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
/* reset the device */
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
PMD_INIT_LOG(DEBUG, "Device reset.");
- hw->adapter_stopped = FALSE;
+ hw->adapter_stopped = 0;
vmxnet3_dev_clear_queues(dev);
/* Clear recorded link status */
memset(&link, 0, sizeof(link));
- rte_vmxnet3_dev_atomic_write_link_status(dev, &link);
+ vmxnet3_dev_atomic_write_link_status(dev, &link);
}
/*
@@ -584,7 +642,7 @@ vmxnet3_dev_close(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
vmxnet3_dev_stop(dev);
- hw->adapter_stopped = TRUE;
+ hw->adapter_stopped = 1;
}
static void
@@ -645,6 +703,19 @@ vmxnet3_dev_info_get(__attribute__((unused))struct rte_eth_dev *dev, struct rte_
dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS;
+ dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = VMXNET3_RX_RING_MAX_SIZE,
+ .nb_min = VMXNET3_DEF_RX_RING_SIZE,
+ .nb_align = 1,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = VMXNET3_TX_RING_MAX_SIZE,
+ .nb_min = VMXNET3_DEF_TX_RING_SIZE,
+ .nb_align = 1,
+ };
}
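
With rx_desc_lim and tx_desc_lim now reported, applications can validate a ring
size before calling queue setup. A small sketch (helper name hypothetical):

    #include <rte_ethdev.h>

    /* Clamp a requested RX ring size to the limits the PMD reports. */
    static uint16_t
    clamp_rx_ring_size(const struct rte_eth_dev_info *info, uint16_t want)
    {
            const struct rte_eth_desc_lim *lim = &info->rx_desc_lim;

            if (want > lim->nb_max)
                    want = lim->nb_max;
            if (want < lim->nb_min)
                    want = lim->nb_min;
            return want - (want % lim->nb_align);
    }
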
/* return 0 means link status changed, -1 means not changed */
@@ -652,28 +723,27 @@ static int
vmxnet3_dev_link_update(struct rte_eth_dev *dev, __attribute__((unused)) int wait_to_complete)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
- struct rte_eth_link link;
+ struct rte_eth_link old, link;
uint32_t ret;
+ if (dev->data->dev_started == 0)
+ return -1; /* Link status doesn't change for stopped dev */
+
+ memset(&link, 0, sizeof(link));
+ vmxnet3_dev_atomic_read_link_status(dev, &old);
+
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
- if (!ret) {
- PMD_INIT_LOG(ERR, "Link Status Negative : %s()", __func__);
- return -1;
- }
-
if (ret & 0x1) {
link.link_status = 1;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_speed = ETH_LINK_SPEED_10000;
-
- rte_vmxnet3_dev_atomic_write_link_status(dev, &link);
-
- return 0;
}
- return -1;
+ vmxnet3_dev_atomic_write_link_status(dev, &link);
+
+ return (old.link_status == link.link_status) ? -1 : 0;
}
/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
@@ -695,8 +765,13 @@ static void
vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
+ uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
+ memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}
/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
@@ -704,8 +779,12 @@ static void
vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
+ uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
+ memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}
/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
@@ -726,6 +805,76 @@ vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
}
+/* Enable/disable filter on vlan */
+static int
+vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
+ uint32_t *vf_table = rxConf->vfTable;
+
+ /* save state for restore */
+ if (on)
+ VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
+ else
+ VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);
+
+ /* don't change the active filter if in promiscuous mode */
+ if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
+ return 0;
+
+ /* set in hardware */
+ if (on)
+ VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
+ else
+ VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ return 0;
+}
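
The VLAN filter table is a 4096-bit bitmap (VMXNET3_VFT_SIZE 32-bit words), and
the SET/CLEAR macros above index it bit by bit. A standalone model of that
layout, assuming the conventional word = vid >> 5, bit = vid & 31 split:

    #include <stdint.h>

    #define VFT_WORDS 128 /* 4096 VLAN ids / 32 bits per word */

    static inline void vft_set(uint32_t *vft, uint16_t vid)
    { vft[vid >> 5] |= 1u << (vid & 31); }

    static inline void vft_clear(uint32_t *vft, uint16_t vid)
    { vft[vid >> 5] &= ~(1u << (vid & 31)); }

    static inline int vft_test(const uint32_t *vft, uint16_t vid)
    { return (vft[vid >> 5] >> (vid & 31)) & 1; }
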
+
+static void
+vmxnet3_dev_vlan_offload_set_clear(struct rte_eth_dev *dev,
+ int mask, int clear)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
+ uint32_t *vf_table = devRead->rxFilterConf.vfTable;
+
+ if (mask & ETH_VLAN_STRIP_MASK)
+ devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+ else
+ devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_FEATURE);
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (clear) {
+ memset(hw->shadow_vfta, 0,
+ VMXNET3_VFT_TABLE_SIZE);
+ /* allow untagged pkts */
+ VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);
+ }
+ memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
+ } else {
+ /* allow any pkts -- no filtering */
+ if (clear)
+ memset(hw->shadow_vfta, 0xff, VMXNET3_VFT_TABLE_SIZE);
+ memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
+ }
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+}
+
+static void
+vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ vmxnet3_dev_vlan_offload_set_clear(dev, mask, 0);
+}
+
#if PROCESS_SYS_EVENTS == 1
static void
vmxnet3_process_events(struct vmxnet3_hw *hw)
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ethdev.h b/src/dpdk22/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 0941cfc6..4f9d0bd2 100755..100644
--- a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ethdev.h
+++ b/src/dpdk22/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -35,9 +35,11 @@
#define _VMXNET3_ETHDEV_H_
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-#define VMXNET3_ASSERT(x) do { \
- if (!(x)) rte_panic("VMXNET3: x"); \
+#define VMXNET3_ASSERT(x) do { \
+ if (!(x)) rte_panic("VMXNET3: %s\n", #x); \
} while(0)
+#else
+#define VMXNET3_ASSERT(x) do { (void)(x); } while (0)
#endif
#define VMXNET3_MAX_MAC_ADDRS 1
@@ -61,6 +63,12 @@
#define VMXNET3_RSS_MAX_KEY_SIZE 40
#define VMXNET3_RSS_MAX_IND_TABLE_SIZE 128
+#define VMXNET3_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP)
+
/* RSS configuration structure - shared with device through GPA */
typedef
struct VMXNET3_RSSConf {
@@ -101,7 +109,6 @@ struct vmxnet3_hw {
uint8_t num_tx_queues;
uint8_t num_rx_queues;
uint8_t bufs_per_pkt;
- uint16_t cur_mtu;
Vmxnet3_TxQueueDesc *tqd_start; /* start address of all tx queue desc */
Vmxnet3_RxQueueDesc *rqd_start; /* start address of all rx queue desc */
@@ -115,6 +122,8 @@ struct vmxnet3_hw {
VMXNET3_RSSConf *rss_conf;
uint64_t rss_confPA;
vmxnet3_mf_table_t *mf_table;
+ uint32_t shadow_vfta[VMXNET3_VFT_SIZE];
+#define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t))
};
#define VMXNET3_GET_ADDR_LO(reg) ((uint32_t)(reg))
@@ -167,7 +176,6 @@ int vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
int vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev);
int vmxnet3_rss_configure(struct rte_eth_dev *dev);
-int vmxnet3_vlan_configure(struct rte_eth_dev *dev);
uint16_t vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_logs.h b/src/dpdk22/drivers/net/vmxnet3/vmxnet3_logs.h
index 82639a08..82639a08 100755..100644
--- a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_logs.h
+++ b/src/dpdk22/drivers/net/vmxnet3/vmxnet3_logs.h
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ring.h b/src/dpdk22/drivers/net/vmxnet3/vmxnet3_ring.h
index c5abdb69..612487e1 100755..100644
--- a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ring.h
+++ b/src/dpdk22/drivers/net/vmxnet3/vmxnet3_ring.h
@@ -121,16 +121,13 @@ vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
}
struct vmxnet3_txq_stats {
- uint64_t drop_total; /* # of pkts dropped by the driver, the
- * counters below track droppings due to
- * different reasons
- */
- uint64_t drop_oversized;
- uint64_t drop_hdr_inspect_err;
- uint64_t drop_tso;
- uint64_t deferred;
- uint64_t tx_ring_full;
- uint64_t linearized; /* # of pkts linearized */
+ uint64_t drop_total; /* # of pkts dropped by the driver,
+ * the counters below track droppings due to
+ * different reasons
+ */
+ uint64_t drop_too_many_segs;
+ uint64_t drop_tso;
+ uint64_t tx_ring_full;
};
typedef struct vmxnet3_tx_ctx {
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_rxtx.c b/src/dpdk22/drivers/net/vmxnet3/vmxnet3_rxtx.c
index de0c11a0..4de5d896 100755..100644
--- a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_rxtx.c
+++ b/src/dpdk22/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -52,7 +52,6 @@
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
@@ -72,7 +71,7 @@
#include <rte_string_fns.h>
#include <rte_errno.h>
-#include "vmxnet3/vmxnet3_defs.h"
+#include "base/vmxnet3_defs.h"
#include "vmxnet3_ring.h"
#include "vmxnet3_logs.h"
@@ -84,16 +83,16 @@
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-static uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
+static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
-static inline int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t* , uint8_t);
-static inline void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);
+static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
+static void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
#endif
-static inline struct rte_mbuf *
+static struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
struct rte_mbuf *m;
@@ -157,7 +156,7 @@ vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
}
#endif
-static inline void
+static void
vmxnet3_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
{
while (ring->next2comp != ring->next2fill) {
@@ -296,7 +295,7 @@ vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
}
}
-static inline void
+static void
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
{
int completed = 0;
@@ -306,54 +305,33 @@ vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
(comp_ring->base + comp_ring->next2proc);
while (tcd->gen == comp_ring->gen) {
-
/* Release cmd_ring descriptor and free mbuf */
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
-#endif
- mbuf = txq->cmd_ring.buf_info[tcd->txdIdx].m;
- if (unlikely(mbuf == NULL))
- rte_panic("EOP desc does not point to a valid mbuf");
- else
- rte_pktmbuf_free(mbuf);
-
-
- txq->cmd_ring.buf_info[tcd->txdIdx].m = NULL;
- /* Mark the txd for which tcd was generated as completed */
- vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
+ while (txq->cmd_ring.next2comp != tcd->txdIdx) {
+ mbuf = txq->cmd_ring.buf_info[txq->cmd_ring.next2comp].m;
+ txq->cmd_ring.buf_info[txq->cmd_ring.next2comp].m = NULL;
+ rte_pktmbuf_free_seg(mbuf);
+
+ /* Mark the txd for which tcd was generated as completed */
+ vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
+ completed++;
+ }
vmxnet3_comp_ring_adv_next2proc(comp_ring);
tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
comp_ring->next2proc);
- completed++;
}
PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
}
-/* patch to convert multi-seg mbuf to one seg mbuf to support vmxnet3 driver for TRex
-*/
-typedef struct rte_mbuf * (*rte_mbuf_convert_to_one_seg_t)(struct rte_mbuf *m);
-
-rte_mbuf_convert_to_one_seg_t vmxnet3_xmit_convert_callback;
-
-int vmxnet3_xmit_set_callback(rte_mbuf_convert_to_one_seg_t cb){
- vmxnet3_xmit_convert_callback=cb;
-}
-
-
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
uint16_t nb_tx;
- Vmxnet3_TxDesc *txd = NULL;
- vmxnet3_buf_info_t *tbi = NULL;
- struct vmxnet3_hw *hw;
- struct rte_mbuf *txm;
vmxnet3_tx_queue_t *txq = tx_queue;
-
- hw = txq->hw;
+ struct vmxnet3_hw *hw = txq->hw;
if (unlikely(txq->stopped)) {
PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
@@ -365,92 +343,69 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
nb_tx = 0;
while (nb_tx < nb_pkts) {
+ Vmxnet3_GenericDesc *gdesc;
+ vmxnet3_buf_info_t *tbi;
+ uint32_t first2fill, avail, dw2;
+ struct rte_mbuf *txm = tx_pkts[nb_tx];
+ struct rte_mbuf *m_seg = txm;
+
+ /* If this packet is excessively fragmented, then drop it */
+ if (unlikely(txm->nb_segs > VMXNET3_MAX_TXD_PER_PKT)) {
+ ++txq->stats.drop_too_many_segs;
+ ++txq->stats.drop_total;
+ rte_pktmbuf_free(txm);
+ ++nb_tx;
+ continue;
+ }
- if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {
- int copy_size = 0;
-
- txm = tx_pkts[nb_tx];
- /* Don't support scatter packets yet, free them if met */
- if (txm->nb_segs != 1) {
- if (vmxnet3_xmit_convert_callback ){
- txm=vmxnet3_xmit_convert_callback(txm);
- }else{
- txq->stats.drop_total++;
- nb_tx++;
- rte_pktmbuf_free(txm);
- continue;
- }
- }
+ /* Is command ring full? */
+ avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
+ if (txm->nb_segs > avail) {
+ ++txq->stats.tx_ring_full;
+ break;
+ }
- if (!txm) {
- txq->stats.drop_total++;
- nb_tx++;
- continue;
- }
-
- /* Needs to minus ether header len */
- if (txm->data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
- PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
- rte_pktmbuf_free(txm);
- txq->stats.drop_total++;
- nb_tx++;
- continue;
- }
+ /* use the previous gen bit for the SOP desc */
+ dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
+ first2fill = txq->cmd_ring.next2fill;
+ do {
+ /* Remember the transmit buffer for cleanup */
+ tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
+ tbi->m = m_seg;
- txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base + txq->cmd_ring.next2fill);
- if (rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
- struct Vmxnet3_TxDataDesc *tdd;
+ /* NB: the following assumes that the VMXNET3 maximum
+ transmit buffer size (16K) is greater than the
+ maximum mbuf segment size. */
+ gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
+ gdesc->txd.addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+ gdesc->dword[2] = dw2 | m_seg->data_len;
+ gdesc->dword[3] = 0;
- tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
- copy_size = rte_pktmbuf_pkt_len(txm);
- rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
- }
+ /* move to the next2fill descriptor */
+ vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
- /* Fill the tx descriptor */
- tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
- tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
- if (copy_size)
- txd->addr = rte_cpu_to_le_64(txq->data_ring.basePA +
- txq->cmd_ring.next2fill *
- sizeof(struct Vmxnet3_TxDataDesc));
- else
- txd->addr = tbi->bufPA;
- txd->len = txm->data_len;
-
- /* Mark the last descriptor as End of Packet. */
- txd->cq = 1;
- txd->eop = 1;
-
- /* Add VLAN tag if requested */
- if (txm->ol_flags & PKT_TX_VLAN_PKT) {
- txd->ti = 1;
- txd->tci = rte_cpu_to_le_16(txm->vlan_tci);
- }
+ /* use the right gen for non-SOP desc */
+ dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+ } while ((m_seg = m_seg->next) != NULL);
- /* Record current mbuf for freeing it later in tx complete */
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
- VMXNET3_ASSERT(txm);
-#endif
- tbi->m = txm;
+ /* Update the EOP descriptor */
+ gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;
- /* Set the offloading mode to default */
- txd->hlen = 0;
- txd->om = VMXNET3_OM_NONE;
- txd->msscof = 0;
+ /* Add VLAN tag if present */
+ gdesc = txq->cmd_ring.base + first2fill;
+ if (txm->ol_flags & PKT_TX_VLAN_PKT) {
+ gdesc->txd.ti = 1;
+ gdesc->txd.tci = txm->vlan_tci;
+ }
- /* finally flip the GEN bit of the SOP desc */
- txd->gen = txq->cmd_ring.gen;
- txq->shared->ctrl.txNumDeferred++;
+ /* TODO: Add transmit checksum offload here */
- /* move to the next2fill descriptor */
- vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
- nb_tx++;
+ /* flip the GEN bit on the SOP */
+ rte_compiler_barrier();
+ gdesc->dword[2] ^= VMXNET3_TXD_GEN;
- } else {
- PMD_TX_LOG(DEBUG, "No free tx cmd desc(s)");
- txq->stats.drop_total += (nb_pkts - nb_tx);
- break;
- }
+ txq->shared->ctrl.txNumDeferred++;
+ nb_tx++;
}
PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", txq->shared->ctrl.txThreshold);
@@ -477,7 +432,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* only for LRO.
*
*/
-static inline int
+static int
vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
{
int err = 0;
@@ -542,6 +497,43 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
return i;
}
+
+/* Receive side checksum and other offloads */
+static void
+vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
+{
+ /* Check for hardware stripped VLAN tag */
+ if (rcd->ts) {
+ rxm->ol_flags |= PKT_RX_VLAN_PKT;
+ rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
+ }
+
+ /* Check for RSS */
+ if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
+ rxm->ol_flags |= PKT_RX_RSS_HASH;
+ rxm->hash.rss = rcd->rssHash;
+ }
+
+ /* Check packet type, checksum errors, etc. Only IPv4 is supported for now. */
+ if (rcd->v4) {
+ struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
+ struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);
+
+ if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
+ rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT;
+ else
+ rxm->packet_type = RTE_PTYPE_L3_IPV4;
+
+ if (!rcd->cnc) {
+ if (!rcd->ipc)
+ rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if ((rcd->tcp || rcd->udp) && !rcd->tuc)
+ rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+}
+
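
The new vmxnet3_rx_offload() helper centralizes flag setting; consumers then
just test ol_flags on the returned mbufs. A hedged usage sketch with the
DPDK 2.2 flag names used above (helper name hypothetical):

    #include <rte_mbuf.h>

    /* Count packets whose IP or L4 checksum the device flagged as bad. */
    static uint64_t
    count_bad_csum(struct rte_mbuf *const *pkts, uint16_t n)
    {
            uint64_t bad = 0;
            uint16_t i;

            for (i = 0; i < n; i++)
                    if (pkts[i]->ol_flags &
                        (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))
                            bad++;
            return bad;
    }
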
/*
* Process the Rx Completion Ring of given vmxnet3_rx_queue
* for nb_pkts burst and return the number of packets received
@@ -591,16 +583,13 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
VMXNET3_ASSERT(rcd->len <= rxd->len);
VMXNET3_ASSERT(rbi->m);
-#endif
+
if (unlikely(rcd->len == 0)) {
PMD_RX_LOG(DEBUG, "Rx buf was skipped. rxring[%d][%d]\n)",
ring_idx, idx);
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
VMXNET3_ASSERT(rcd->sop && rcd->eop);
-#endif
rte_pktmbuf_free_seg(rbi->m);
goto rcd_done;
}
@@ -613,9 +602,8 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rte_pktmbuf_free_seg(rbi->m);
goto rcd_done;
}
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
-#endif
+
/* Get the packet buffer pointer from buf_info */
rxm = rbi->m;
@@ -642,17 +630,6 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
goto rcd_done;
}
- /* Check for hardware stripped VLAN tag */
- if (rcd->ts) {
- PMD_RX_LOG(DEBUG, "Received packet with vlan ID: %d.",
- rcd->tci);
- rxm->ol_flags = PKT_RX_VLAN_PKT;
- /* Copy vlan tag in packet buffer */
- rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
- } else {
- rxm->ol_flags = 0;
- rxm->vlan_tci = 0;
- }
/* Initialize newly received packet buffer */
rxm->port = rxq->port_id;
@@ -661,25 +638,10 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->pkt_len = (uint16_t)rcd->len;
rxm->data_len = (uint16_t)rcd->len;
rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
- /* Check packet type, checksum errors, etc. Only support IPv4 for now. */
- if (rcd->v4) {
- struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
- struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);
-
- if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
- rxm->ol_flags |= PKT_RX_IPV4_HDR_EXT;
- else
- rxm->ol_flags |= PKT_RX_IPV4_HDR;
-
- if (!rcd->cnc) {
- if (!rcd->ipc)
- rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-
- if ((rcd->tcp || rcd->udp) && !rcd->tuc)
- rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
- }
- }
+ vmxnet3_rx_offload(rcd, rxm);
rx_pkts[nb_rx++] = rxm;
rcd_done:
@@ -750,15 +712,9 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) !=
- ETH_TXQ_FLAGS_NOMULTSEGS) {
- PMD_INIT_LOG(ERR, "TX Multi segment not support yet");
- return -EINVAL;
- }
-
- if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOOFFLOADS) !=
- ETH_TXQ_FLAGS_NOOFFLOADS) {
- PMD_INIT_LOG(ERR, "TX not support offload function yet");
+ if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMS) !=
+ ETH_TXQ_FLAGS_NOXSUMS) {
+ PMD_INIT_LOG(ERR, "TX no support for checksum offload yet");
return -EINVAL;
}
@@ -857,14 +813,11 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint8_t i;
char mem_name[32];
uint16_t buf_size;
- struct rte_pktmbuf_pool_private *mbp_priv;
PMD_INIT_FUNC_TRACE();
- mbp_priv = (struct rte_pktmbuf_pool_private *)
- rte_mempool_get_priv(mp);
- buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM);
+ buf_size = rte_pktmbuf_data_room_size(mp) -
+ RTE_PKTMBUF_HEADROOM;
if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) {
PMD_INIT_LOG(ERR, "buf_size = %u, max_pkt_len = %u, "
@@ -1016,12 +969,6 @@ static uint8_t rss_intel_key[40] = {
int
vmxnet3_rss_configure(struct rte_eth_dev *dev)
{
-#define VMXNET3_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_IPV4_TCP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_IPV6_TCP)
-
struct vmxnet3_hw *hw = dev->data->dev_private;
struct VMXNET3_RSSConf *dev_rss_conf;
struct rte_eth_rss_conf *port_rss_conf;
@@ -1060,37 +1007,12 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
if (rss_hf & ETH_RSS_IPV4)
dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
- if (rss_hf & ETH_RSS_IPV4_TCP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
if (rss_hf & ETH_RSS_IPV6)
dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
- if (rss_hf & ETH_RSS_IPV6_TCP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
return VMXNET3_SUCCESS;
}
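
On the application side, requesting RSS that this PMD can honour means limiting
rss_hf to the four hash types mapped above. A hedged configuration sketch
(DPDK 2.2 names; the variable name is hypothetical):

    #include <stddef.h>
    #include <rte_ethdev.h>

    static const struct rte_eth_conf vmxnet3_rss_conf = {
            .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
            .rx_adv_conf = {
                    .rss_conf = {
                            .rss_key = NULL, /* keep the driver default key */
                            .rss_hf = ETH_RSS_IPV4 |
                                      ETH_RSS_NONFRAG_IPV4_TCP |
                                      ETH_RSS_IPV6 |
                                      ETH_RSS_NONFRAG_IPV6_TCP,
                    },
            },
    };
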
-
-/*
- * Configure VLAN Filter feature
- */
-int
-vmxnet3_vlan_configure(struct rte_eth_dev *dev)
-{
- uint8_t i;
- struct vmxnet3_hw *hw = dev->data->dev_private;
- uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
-
- PMD_INIT_FUNC_TRACE();
-
- /* Verify if this tag is already set */
- for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
- /* Filter all vlan tags out by default */
- vf_table[i] = 0;
- /* To-Do: Provide another routine in dev_ops for user config */
-
- PMD_INIT_LOG(DEBUG, "Registering VLAN portid: %"PRIu8" tag %u",
- dev->data->port_id, vf_table[i]);
- }
-
- return VMXNET3_SUCCESS;
-}
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/rte_eth_xenvirt.h b/src/dpdk22/drivers/net/xenvirt/rte_eth_xenvirt.h
index fc15a636..fc15a636 100755..100644
--- a/src/dpdk_lib18/librte_pmd_xenvirt/rte_eth_xenvirt.h
+++ b/src/dpdk22/drivers/net/xenvirt/rte_eth_xenvirt.h
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/rte_xen_lib.h b/src/dpdk22/drivers/net/xenvirt/rte_xen_lib.h
index 0ba7148a..d973eacb 100755..100644
--- a/src/dpdk_lib18/librte_pmd_xenvirt/rte_xen_lib.h
+++ b/src/dpdk22/drivers/net/xenvirt/rte_xen_lib.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -93,6 +93,9 @@ int
xenstore_init(void);
int
+xenstore_uninit(void);
+
+int
xenstore_write(const char *key_str, const char *val_str);
int
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/virtio_logs.h b/src/dpdk22/drivers/net/xenvirt/virtio_logs.h
index d6c33f7b..d6c33f7b 100755..100644
--- a/src/dpdk_lib18/librte_pmd_xenvirt/virtio_logs.h
+++ b/src/dpdk22/drivers/net/xenvirt/virtio_logs.h
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/virtqueue.h b/src/dpdk22/drivers/net/xenvirt/virtqueue.h
index 34a24fc5..53123472 100755..100644
--- a/src/dpdk_lib18/librte_pmd_xenvirt/virtqueue.h
+++ b/src/dpdk22/drivers/net/xenvirt/virtqueue.h
@@ -42,10 +42,11 @@
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
-#include <rte_mbuf.h>
#include "virtio_logs.h"
+struct rte_mbuf;
+
/* The alignment to use between consumer and producer parts of vring. */
#define VIRTIO_PCI_VRING_ALIGN 4096
@@ -54,7 +55,7 @@
 * rather than gpa<->hva in the virtio spec.
*/
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- rte_pktmbuf_mtod(mb, uint64_t)
+ ((uint64_t)(uintptr_t)rte_pktmbuf_mtod(mb, void *))
enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
@@ -150,7 +151,7 @@ vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
*/
avail_idx = (uint16_t)(vq->vq_ring.avail->idx & (vq->vq_nentries - 1));
vq->vq_ring.avail->ring[avail_idx] = desc_idx;
- rte_compiler_barrier(); /* wmb , for IA memory model barrier is enough*/
+ rte_smp_wmb();
vq->vq_ring.avail->idx++;
}
@@ -197,7 +198,7 @@ virtqueue_enqueue_recv_refill(struct virtqueue *rxvq, struct rte_mbuf *cookie)
dxp->ndescs = needed;
start_dp[head_idx].addr =
- (uint64_t) ((uint64_t)cookie->buf_addr + RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
+ (uint64_t) ((uintptr_t)cookie->buf_addr + RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
start_dp[head_idx].len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);
start_dp[head_idx].flags = VRING_DESC_F_WRITE;
rxvq->vq_desc_head_idx = start_dp[head_idx].next;
diff --git a/src/dpdk_lib18/librte_acl/acl.h b/src/dpdk22/lib/librte_acl/acl.h
index 102fa51a..09d67841 100755..100644
--- a/src/dpdk_lib18/librte_acl/acl.h
+++ b/src/dpdk22/lib/librte_acl/acl.h
@@ -47,6 +47,11 @@ extern"C" {
#define RTE_ACL_DFA_MAX UINT8_MAX
#define RTE_ACL_DFA_SIZE (UINT8_MAX + 1)
+#define RTE_ACL_DFA_GR64_SIZE 64
+#define RTE_ACL_DFA_GR64_NUM (RTE_ACL_DFA_SIZE / RTE_ACL_DFA_GR64_SIZE)
+#define RTE_ACL_DFA_GR64_BIT \
+ (CHAR_BIT * sizeof(uint32_t) / RTE_ACL_DFA_GR64_NUM)
+
typedef int bits_t;
#define RTE_ACL_BIT_SET_SIZE ((UINT8_MAX + 1) / (sizeof(bits_t) * CHAR_BIT))
@@ -57,13 +62,50 @@ struct rte_acl_bitset {
#define RTE_ACL_NODE_DFA (0 << RTE_ACL_TYPE_SHIFT)
#define RTE_ACL_NODE_SINGLE (1U << RTE_ACL_TYPE_SHIFT)
-#define RTE_ACL_NODE_QEXACT (2U << RTE_ACL_TYPE_SHIFT)
#define RTE_ACL_NODE_QRANGE (3U << RTE_ACL_TYPE_SHIFT)
#define RTE_ACL_NODE_MATCH (4U << RTE_ACL_TYPE_SHIFT)
#define RTE_ACL_NODE_TYPE (7U << RTE_ACL_TYPE_SHIFT)
#define RTE_ACL_NODE_UNDEFINED UINT32_MAX
/*
+ * ACL RT structure is a set of multibit tries (with stride == 8)
+ * represented by an array of transitions. The next position is calculated
+ * based on the current position and the input byte.
+ * Each transition is a 64 bit value with the following format:
+ * | node_type_specific : 32 | node_type : 3 | node_addr : 29 |
+ * For all node types except RTE_ACL_NODE_MATCH, node_addr is an index
+ * to the start of the node in the transitions array.
+ * A few different node types are used:
+ * RTE_ACL_NODE_MATCH:
+ * node_addr value is an index into an array that contains the return value
+ * and its priority for each category.
+ * Upper 32 bits of the transition value are not used for that node type.
+ * RTE_ACL_NODE_QRANGE:
+ * that node consists of up to 5 transitions.
+ * Upper 32 bits are interpreted as 4 signed character values, which
+ * are ordered from smallest (INT8_MIN) to largest (INT8_MAX).
+ * These values define 5 ranges:
+ * INT8_MIN <= range[0] <= ((int8_t *)&transition)[4]
+ * ((int8_t *)&transition)[4] < range[1] <= ((int8_t *)&transition)[5]
+ * ((int8_t *)&transition)[5] < range[2] <= ((int8_t *)&transition)[6]
+ * ((int8_t *)&transition)[6] < range[3] <= ((int8_t *)&transition)[7]
+ * ((int8_t *)&transition)[7] < range[4] <= INT8_MAX
+ * So for an input byte value within range[i], the i-th transition within
+ * that node will be used.
+ * RTE_ACL_NODE_SINGLE:
+ * always transitions to the same node regardless of the input value.
+ * RTE_ACL_NODE_DFA:
+ * that node consists of up to 256 transitions.
+ * In an attempt to conserve space, all transitions are divided into 4
+ * consecutive groups, 64 transitions per group:
+ * group64[i] contains transitions[i * 64, .. i * 64 + 63].
+ * Upper 32 bits are interpreted as 4 unsigned character values, one per
+ * group, which contain the index of the start of the given group within
+ * the node.
+ * So the transition index within the node for a given input byte value is:
+ * input_byte - ((uint8_t *)&transition)[4 + input_byte / 64].
+ */
+
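
A scalar decode of the layout just described may help; this is a sketch written
from the comment above, not code taken from this library (the constants mirror
RTE_ACL_TYPE_SHIFT and friends, and little-endian byte order is assumed, as in
the comment's ((int8_t *)&transition)[4] indexing):

    #include <stdint.h>

    #define TYPE_SHIFT 29                      /* cf. RTE_ACL_TYPE_SHIFT */
    #define ADDR_MASK  ((1u << TYPE_SHIFT) - 1)
    #define NODE_DFA    0u
    #define NODE_SINGLE 1u
    #define NODE_QRANGE 3u

    /* Return the index of the next transition for one input byte. */
    static uint32_t
    next_index(uint64_t tr, uint8_t in)
    {
            uint32_t addr = (uint32_t)tr & ADDR_MASK;
            uint32_t type = ((uint32_t)tr >> TYPE_SHIFT) & 0x7;
            const int8_t *q = (const int8_t *)&tr;   /* bytes 4..7: boundaries */
            const uint8_t *g = (const uint8_t *)&tr; /* bytes 4..7: group bases */
            uint32_t i;

            switch (type) {
            case NODE_DFA:    /* input minus the base of its 64-entry group */
                    return addr + in - g[4 + in / 64];
            case NODE_QRANGE: /* count the boundaries below the input */
                    for (i = 0; i < 4 && (int8_t)in > q[4 + i]; i++)
                            ;
                    return addr + i;
            case NODE_SINGLE:
            default:          /* one target regardless of input */
                    return addr;
            }
    }
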
+/*
* Structure of a node is a set of ptrs and each ptr has a bit map
* of values associated with this transition.
*/
@@ -100,19 +142,15 @@ struct rte_acl_node {
/* number of ranges (transitions w/ consecutive bits) */
int32_t id;
struct rte_acl_match_results *mrt; /* only valid when match_flag != 0 */
- char transitions[RTE_ACL_QUAD_SIZE];
- /* boundaries for ranged node */
+ union {
+ char transitions[RTE_ACL_QUAD_SIZE];
+ /* boundaries for ranged node */
+ uint8_t dfa_gr64[RTE_ACL_DFA_GR64_NUM];
+ };
struct rte_acl_node *next;
/* free list link or pointer to duplicate node during merge */
struct rte_acl_node *prev;
/* points to node from which this node was duplicated */
-
- uint32_t subtree_id;
- uint32_t subtree_ref_count;
-
-};
-enum {
- RTE_ACL_SUBTREE_NODE = 0x80000000
};
/*
@@ -138,7 +176,6 @@ enum {
struct rte_acl_trie {
uint32_t type;
uint32_t count;
- int32_t smallest; /* smallest rule in this trie */
uint32_t root_index;
const uint32_t *data_index;
uint32_t num_data_indexes;
@@ -173,7 +210,7 @@ struct rte_acl_ctx {
int rte_acl_gen(struct rte_acl_ctx *ctx, struct rte_acl_trie *trie,
struct rte_acl_bld_trie *node_bld_trie, uint32_t num_tries,
- uint32_t num_categories, uint32_t data_index_sz, int match_num);
+ uint32_t num_categories, uint32_t data_index_sz, size_t max_size);
typedef int (*rte_acl_classify_t)
(const struct rte_acl_ctx *, const uint8_t **, uint32_t *, uint32_t, uint32_t);
@@ -189,6 +226,14 @@ int
rte_acl_classify_sse(const struct rte_acl_ctx *ctx, const uint8_t **data,
uint32_t *results, uint32_t num, uint32_t categories);
+int
+rte_acl_classify_avx2(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, uint32_t num, uint32_t categories);
+
+int
+rte_acl_classify_neon(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, uint32_t num, uint32_t categories);
+
#ifdef __cplusplus
}
#endif /* __cplusplus */
diff --git a/src/dpdk_lib18/librte_acl/acl_run.h b/src/dpdk22/lib/librte_acl/acl_run.h
index c191053c..b2fc42c6 100755..100644
--- a/src/dpdk_lib18/librte_acl/acl_run.h
+++ b/src/dpdk22/lib/librte_acl/acl_run.h
@@ -35,12 +35,11 @@
#define _ACL_RUN_H_
#include <rte_acl.h>
-#include "acl_vect.h"
#include "acl.h"
+#define MAX_SEARCHES_AVX16 16
#define MAX_SEARCHES_SSE8 8
#define MAX_SEARCHES_SSE4 4
-#define MAX_SEARCHES_SSE2 2
#define MAX_SEARCHES_SCALAR 2
#define GET_NEXT_4BYTES(prm, idx) \
@@ -256,10 +255,6 @@ acl_match_check(uint64_t transition, int slot,
/* Fill the slot with the next trie or idle trie */
transition = acl_start_next_trie(flows, parms, slot, ctx);
-
- } else if (transition == ctx->idle) {
- /* reset indirection table for idle slots */
- parms[slot].data_index = idle;
}
return transition;
diff --git a/src/dpdk22/lib/librte_acl/acl_run_avx2.h b/src/dpdk22/lib/librte_acl/acl_run_avx2.h
new file mode 100644
index 00000000..b01a46a5
--- /dev/null
+++ b/src/dpdk22/lib/librte_acl/acl_run_avx2.h
@@ -0,0 +1,284 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "acl_run_sse.h"
+
+static const rte_ymm_t ymm_match_mask = {
+ .u32 = {
+ RTE_ACL_NODE_MATCH,
+ RTE_ACL_NODE_MATCH,
+ RTE_ACL_NODE_MATCH,
+ RTE_ACL_NODE_MATCH,
+ RTE_ACL_NODE_MATCH,
+ RTE_ACL_NODE_MATCH,
+ RTE_ACL_NODE_MATCH,
+ RTE_ACL_NODE_MATCH,
+ },
+};
+
+static const rte_ymm_t ymm_index_mask = {
+ .u32 = {
+ RTE_ACL_NODE_INDEX,
+ RTE_ACL_NODE_INDEX,
+ RTE_ACL_NODE_INDEX,
+ RTE_ACL_NODE_INDEX,
+ RTE_ACL_NODE_INDEX,
+ RTE_ACL_NODE_INDEX,
+ RTE_ACL_NODE_INDEX,
+ RTE_ACL_NODE_INDEX,
+ },
+};
+
+static const rte_ymm_t ymm_shuffle_input = {
+ .u32 = {
+ 0x00000000, 0x04040404, 0x08080808, 0x0c0c0c0c,
+ 0x00000000, 0x04040404, 0x08080808, 0x0c0c0c0c,
+ },
+};
+
+static const rte_ymm_t ymm_ones_16 = {
+ .u16 = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ },
+};
+
+static const rte_ymm_t ymm_range_base = {
+ .u32 = {
+ 0xffffff00, 0xffffff04, 0xffffff08, 0xffffff0c,
+ 0xffffff00, 0xffffff04, 0xffffff08, 0xffffff0c,
+ },
+};
+
+/*
+ * Process 8 transitions in parallel.
+ * tr_lo contains the low 32 bits for 8 transitions.
+ * tr_hi contains the high 32 bits for 8 transitions.
+ * next_input contains up to 4 input bytes for 8 flows.
+ */
+static inline __attribute__((always_inline)) ymm_t
+transition8(ymm_t next_input, const uint64_t *trans, ymm_t *tr_lo, ymm_t *tr_hi)
+{
+ const int32_t *tr;
+ ymm_t addr;
+
+ tr = (const int32_t *)(uintptr_t)trans;
+
+ /* Calculate the address (array index) for all 8 transitions. */
+ ACL_TR_CALC_ADDR(mm256, 256, addr, ymm_index_mask.y, next_input,
+ ymm_shuffle_input.y, ymm_ones_16.y, ymm_range_base.y,
+ *tr_lo, *tr_hi);
+
+ /* load the lower 32 bits of 8 transitions at once. */
+ *tr_lo = _mm256_i32gather_epi32(tr, addr, sizeof(trans[0]));
+
+ next_input = _mm256_srli_epi32(next_input, CHAR_BIT);
+
+ /* load the high 32 bits of 8 transitions at once. */
+ *tr_hi = _mm256_i32gather_epi32(tr + 1, addr, sizeof(trans[0]));
+
+ return next_input;
+}
+
+/*
+ * Process matches for 8 flows.
+ * tr_lo contains the low 32 bits for 8 transitions.
+ * tr_hi contains the high 32 bits for 8 transitions.
+ */
+static inline void
+acl_process_matches_avx2x8(const struct rte_acl_ctx *ctx,
+ struct parms *parms, struct acl_flow_data *flows, uint32_t slot,
+ ymm_t matches, ymm_t *tr_lo, ymm_t *tr_hi)
+{
+ ymm_t t0, t1;
+ ymm_t lo, hi;
+ xmm_t l0, l1;
+ uint32_t i;
+ uint64_t tr[MAX_SEARCHES_SSE8];
+
+ l1 = _mm256_extracti128_si256(*tr_lo, 1);
+ l0 = _mm256_castsi256_si128(*tr_lo);
+
+ for (i = 0; i != RTE_DIM(tr) / 2; i++) {
+
+ /*
+ * Extract low 32bits of each transition.
+ * That's enough to process the match.
+ */
+ tr[i] = (uint32_t)_mm_cvtsi128_si32(l0);
+ tr[i + 4] = (uint32_t)_mm_cvtsi128_si32(l1);
+
+ l0 = _mm_srli_si128(l0, sizeof(uint32_t));
+ l1 = _mm_srli_si128(l1, sizeof(uint32_t));
+
+ tr[i] = acl_match_check(tr[i], slot + i,
+ ctx, parms, flows, resolve_priority_sse);
+ tr[i + 4] = acl_match_check(tr[i + 4], slot + i + 4,
+ ctx, parms, flows, resolve_priority_sse);
+ }
+
+ /* Collect new transitions into 2 YMM registers. */
+ t0 = _mm256_set_epi64x(tr[5], tr[4], tr[1], tr[0]);
+ t1 = _mm256_set_epi64x(tr[7], tr[6], tr[3], tr[2]);
+
+ /* For each transition: put low 32 into tr_lo and high 32 into tr_hi */
+ ACL_TR_HILO(mm256, __m256, t0, t1, lo, hi);
+
+ /* Keep transitions with NOMATCH intact. */
+ *tr_lo = _mm256_blendv_epi8(*tr_lo, lo, matches);
+ *tr_hi = _mm256_blendv_epi8(*tr_hi, hi, matches);
+}
+
+static inline void
+acl_match_check_avx2x8(const struct rte_acl_ctx *ctx, struct parms *parms,
+ struct acl_flow_data *flows, uint32_t slot,
+ ymm_t *tr_lo, ymm_t *tr_hi, ymm_t match_mask)
+{
+ uint32_t msk;
+ ymm_t matches, temp;
+
+ /* test for match node */
+ temp = _mm256_and_si256(match_mask, *tr_lo);
+ matches = _mm256_cmpeq_epi32(temp, match_mask);
+ msk = _mm256_movemask_epi8(matches);
+
+ while (msk != 0) {
+
+ acl_process_matches_avx2x8(ctx, parms, flows, slot,
+ matches, tr_lo, tr_hi);
+ temp = _mm256_and_si256(match_mask, *tr_lo);
+ matches = _mm256_cmpeq_epi32(temp, match_mask);
+ msk = _mm256_movemask_epi8(matches);
+ }
+}
+
+/*
+ * Execute trie traversal for up to 16 flows in parallel.
+ */
+static inline int
+search_avx2x16(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, uint32_t total_packets, uint32_t categories)
+{
+ uint32_t n;
+ struct acl_flow_data flows;
+ uint64_t index_array[MAX_SEARCHES_AVX16];
+ struct completion cmplt[MAX_SEARCHES_AVX16];
+ struct parms parms[MAX_SEARCHES_AVX16];
+ ymm_t input[2], tr_lo[2], tr_hi[2];
+ ymm_t t0, t1;
+
+ acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
+ total_packets, categories, ctx->trans_table);
+
+ for (n = 0; n < RTE_DIM(cmplt); n++) {
+ cmplt[n].count = 0;
+ index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
+ }
+
+ t0 = _mm256_set_epi64x(index_array[5], index_array[4],
+ index_array[1], index_array[0]);
+ t1 = _mm256_set_epi64x(index_array[7], index_array[6],
+ index_array[3], index_array[2]);
+
+ ACL_TR_HILO(mm256, __m256, t0, t1, tr_lo[0], tr_hi[0]);
+
+ t0 = _mm256_set_epi64x(index_array[13], index_array[12],
+ index_array[9], index_array[8]);
+ t1 = _mm256_set_epi64x(index_array[15], index_array[14],
+ index_array[11], index_array[10]);
+
+ ACL_TR_HILO(mm256, __m256, t0, t1, tr_lo[1], tr_hi[1]);
+
+ /* Check for any matches. */
+ acl_match_check_avx2x8(ctx, parms, &flows, 0, &tr_lo[0], &tr_hi[0],
+ ymm_match_mask.y);
+ acl_match_check_avx2x8(ctx, parms, &flows, 8, &tr_lo[1], &tr_hi[1],
+ ymm_match_mask.y);
+
+ while (flows.started > 0) {
+
+ uint32_t in[MAX_SEARCHES_SSE8];
+
+ /* Gather 4 bytes of input data for first 8 flows. */
+ in[0] = GET_NEXT_4BYTES(parms, 0);
+ in[4] = GET_NEXT_4BYTES(parms, 4);
+ in[1] = GET_NEXT_4BYTES(parms, 1);
+ in[5] = GET_NEXT_4BYTES(parms, 5);
+ in[2] = GET_NEXT_4BYTES(parms, 2);
+ in[6] = GET_NEXT_4BYTES(parms, 6);
+ in[3] = GET_NEXT_4BYTES(parms, 3);
+ in[7] = GET_NEXT_4BYTES(parms, 7);
+ input[0] = _mm256_set_epi32(in[7], in[6], in[5], in[4],
+ in[3], in[2], in[1], in[0]);
+
+ /* Gather 4 bytes of input data for last 8 flows. */
+ in[0] = GET_NEXT_4BYTES(parms, 8);
+ in[4] = GET_NEXT_4BYTES(parms, 12);
+ in[1] = GET_NEXT_4BYTES(parms, 9);
+ in[5] = GET_NEXT_4BYTES(parms, 13);
+ in[2] = GET_NEXT_4BYTES(parms, 10);
+ in[6] = GET_NEXT_4BYTES(parms, 14);
+ in[3] = GET_NEXT_4BYTES(parms, 11);
+ in[7] = GET_NEXT_4BYTES(parms, 15);
+ input[1] = _mm256_set_epi32(in[7], in[6], in[5], in[4],
+ in[3], in[2], in[1], in[0]);
+
+ input[0] = transition8(input[0], flows.trans,
+ &tr_lo[0], &tr_hi[0]);
+ input[1] = transition8(input[1], flows.trans,
+ &tr_lo[1], &tr_hi[1]);
+
+ input[0] = transition8(input[0], flows.trans,
+ &tr_lo[0], &tr_hi[0]);
+ input[1] = transition8(input[1], flows.trans,
+ &tr_lo[1], &tr_hi[1]);
+
+ input[0] = transition8(input[0], flows.trans,
+ &tr_lo[0], &tr_hi[0]);
+ input[1] = transition8(input[1], flows.trans,
+ &tr_lo[1], &tr_hi[1]);
+
+ input[0] = transition8(input[0], flows.trans,
+ &tr_lo[0], &tr_hi[0]);
+ input[1] = transition8(input[1], flows.trans,
+ &tr_lo[1], &tr_hi[1]);
+
+ /* Check for any matches. */
+ acl_match_check_avx2x8(ctx, parms, &flows, 0,
+ &tr_lo[0], &tr_hi[0], ymm_match_mask.y);
+ acl_match_check_avx2x8(ctx, parms, &flows, 8,
+ &tr_lo[1], &tr_hi[1], ymm_match_mask.y);
+ }
+
+ return 0;
+}
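
Note the fixed 4x unrolling in the loop above: each GET_NEXT_4BYTES() gather
supplies one 32-bit word per flow, and every transition8() call consumes
exactly one byte (shifting next_input right by CHAR_BIT), so four calls drain
the word. A scalar model of that consumption, with step_one_byte() as a
hypothetical stand-in for the real trie step:

    #include <limits.h>
    #include <stdint.h>

    /* Placeholder for one byte-wide trie step; not the real transition. */
    static uint32_t
    step_one_byte(uint32_t node, uint8_t in)
    {
            return node + in;
    }

    /* One gathered 32-bit word feeds exactly four trie steps per flow. */
    static uint32_t
    consume_4bytes(uint32_t node, uint32_t in4)
    {
            int i;

            for (i = 0; i < 4; i++) {
                    node = step_one_byte(node, (uint8_t)in4);
                    in4 >>= CHAR_BIT;
            }
            return node;
    }
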
diff --git a/src/dpdk22/lib/librte_acl/acl_run_neon.h b/src/dpdk22/lib/librte_acl/acl_run_neon.h
new file mode 100644
index 00000000..cf7c57fb
--- /dev/null
+++ b/src/dpdk22/lib/librte_acl/acl_run_neon.h
@@ -0,0 +1,289 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2015.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "acl_run.h"
+#include "acl_vect.h"
+
+struct _neon_acl_const {
+ rte_xmm_t xmm_shuffle_input;
+ rte_xmm_t xmm_index_mask;
+ rte_xmm_t range_base;
+} neon_acl_const __attribute__((aligned(RTE_CACHE_LINE_SIZE))) = {
+ {
+ .u32 = {0x00000000, 0x04040404, 0x08080808, 0x0c0c0c0c}
+ },
+ {
+ .u32 = {RTE_ACL_NODE_INDEX, RTE_ACL_NODE_INDEX,
+ RTE_ACL_NODE_INDEX, RTE_ACL_NODE_INDEX}
+ },
+ {
+ .u32 = {0xffffff00, 0xffffff04, 0xffffff08, 0xffffff0c}
+ },
+};
+
+/*
+ * Resolve priority for multiple results (neon version).
+ * This consists of comparing the priority of the current traversal with the
+ * running set of results for the packet.
+ * For each result, keep a running array of the result (rule number) and
+ * its priority for each category.
+ */
+static inline void
+resolve_priority_neon(uint64_t transition, int n, const struct rte_acl_ctx *ctx,
+ struct parms *parms,
+ const struct rte_acl_match_results *p,
+ uint32_t categories)
+{
+ uint32_t x;
+ int32x4_t results, priority, results1, priority1;
+ uint32x4_t selector;
+ int32_t *saved_results, *saved_priority;
+
+ for (x = 0; x < categories; x += RTE_ACL_RESULTS_MULTIPLIER) {
+ saved_results = (int32_t *)(&parms[n].cmplt->results[x]);
+ saved_priority = (int32_t *)(&parms[n].cmplt->priority[x]);
+
+ /* get results and priorities for completed trie */
+ results = vld1q_s32(
+ (const int32_t *)&p[transition].results[x]);
+ priority = vld1q_s32(
+ (const int32_t *)&p[transition].priority[x]);
+
+ /* if this is not the first completed trie */
+ if (parms[n].cmplt->count != ctx->num_tries) {
+ /* get running best results and their priorities */
+ results1 = vld1q_s32(saved_results);
+ priority1 = vld1q_s32(saved_priority);
+
+ /* select results that are highest priority */
+ selector = vcgtq_s32(priority1, priority);
+ results = vbslq_s32(selector, results1, results);
+ priority = vbslq_s32(selector, priority1, priority);
+ }
+
+ /* save running best results and their priorities */
+ vst1q_s32(saved_results, results);
+ vst1q_s32(saved_priority, priority);
+ }
+}
+
+/*
+ * Check for any match in 4 transitions
+ */
+static inline __attribute__((always_inline)) uint32_t
+check_any_match_x4(uint64_t val[])
+{
+ return ((val[0] | val[1] | val[2] | val[3]) & RTE_ACL_NODE_MATCH);
+}
+
+static inline __attribute__((always_inline)) void
+acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
+ struct acl_flow_data *flows, uint64_t transitions[])
+{
+ while (check_any_match_x4(transitions)) {
+ transitions[0] = acl_match_check(transitions[0], slot, ctx,
+ parms, flows, resolve_priority_neon);
+ transitions[1] = acl_match_check(transitions[1], slot + 1, ctx,
+ parms, flows, resolve_priority_neon);
+ transitions[2] = acl_match_check(transitions[2], slot + 2, ctx,
+ parms, flows, resolve_priority_neon);
+ transitions[3] = acl_match_check(transitions[3], slot + 3, ctx,
+ parms, flows, resolve_priority_neon);
+ }
+}
+
+/*
+ * Process 4 transitions (in 2 NEON Q registers) in parallel
+ */
+static inline __attribute__((always_inline)) int32x4_t
+transition4(int32x4_t next_input, const uint64_t *trans, uint64_t transitions[])
+{
+ int32x4x2_t tr_hi_lo;
+ int32x4_t t, in, r;
+ uint32x4_t index_msk, node_type, addr;
+ uint32x4_t dfa_msk, mask, quad_ofs, dfa_ofs;
+
+ /* Move low 32 into tr_hi_lo.val[0] and high 32 into tr_hi_lo.val[1] */
+ tr_hi_lo = vld2q_s32((const int32_t *)transitions);
+
+ /* Calculate the address (array index) for all 4 transitions. */
+
+ index_msk = vld1q_u32((const uint32_t *)&neon_acl_const.xmm_index_mask);
+
+ /* Calc node type and node addr */
+ node_type = vbicq_s32(tr_hi_lo.val[0], index_msk);
+ addr = vandq_s32(tr_hi_lo.val[0], index_msk);
+
+ /* t = 0 */
+ t = veorq_s32(node_type, node_type);
+
+ /* mask for DFA type(0) nodes */
+ dfa_msk = vceqq_u32(node_type, t);
+
+ mask = vld1q_s32((const int32_t *)&neon_acl_const.xmm_shuffle_input);
+ in = vqtbl1q_u8((uint8x16_t)next_input, (uint8x16_t)mask);
+
+ /* DFA calculations. */
+ r = vshrq_n_u32(in, 30); /* div by 64 */
+ mask = vld1q_s32((const int32_t *)&neon_acl_const.range_base);
+ r = vaddq_u8(r, mask);
+ t = vshrq_n_u32(in, 24);
+ r = vqtbl1q_u8((uint8x16_t)tr_hi_lo.val[1], (uint8x16_t)r);
+ dfa_ofs = vsubq_s32(t, r);
+
+ /* QUAD/SINGLE calculations. */
+ t = vcgtq_s8(in, tr_hi_lo.val[1]);
+ t = vabsq_s8(t);
+ t = vpaddlq_u8(t);
+ quad_ofs = vpaddlq_u16(t);
+
+ /* blend DFA and QUAD/SINGLE. */
+ t = vbslq_u8(dfa_msk, dfa_ofs, quad_ofs);
+
+ /* calculate address for next transitions */
+ addr = vaddq_u32(addr, t);
+
+ /* Fill next transitions */
+ transitions[0] = trans[vgetq_lane_u32(addr, 0)];
+ transitions[1] = trans[vgetq_lane_u32(addr, 1)];
+ transitions[2] = trans[vgetq_lane_u32(addr, 2)];
+ transitions[3] = trans[vgetq_lane_u32(addr, 3)];
+
+ return vshrq_n_u32(next_input, CHAR_BIT);
+}
+
+/*
+ * Execute trie traversal with 8 traversals in parallel
+ */
+static inline int
+search_neon_8(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, uint32_t total_packets, uint32_t categories)
+{
+ int n;
+ struct acl_flow_data flows;
+ uint64_t index_array[8];
+ struct completion cmplt[8];
+ struct parms parms[8];
+ int32x4_t input0, input1;
+
+ acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
+ total_packets, categories, ctx->trans_table);
+
+ for (n = 0; n < 8; n++) {
+ cmplt[n].count = 0;
+ index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
+ }
+
+ /* Check for any matches. */
+ acl_match_check_x4(0, ctx, parms, &flows, &index_array[0]);
+ acl_match_check_x4(4, ctx, parms, &flows, &index_array[4]);
+
+ while (flows.started > 0) {
+ /* Gather 4 bytes of input data for each stream. */
+ input0 = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 0), input0, 0);
+ input1 = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 4), input1, 0);
+
+ input0 = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 1), input0, 1);
+ input1 = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 5), input1, 1);
+
+ input0 = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 2), input0, 2);
+ input1 = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 6), input1, 2);
+
+ input0 = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 3), input0, 3);
+ input1 = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 7), input1, 3);
+
+ /* Process the 4 bytes of input on each stream. */
+
+ input0 = transition4(input0, flows.trans, &index_array[0]);
+ input1 = transition4(input1, flows.trans, &index_array[4]);
+
+ input0 = transition4(input0, flows.trans, &index_array[0]);
+ input1 = transition4(input1, flows.trans, &index_array[4]);
+
+ input0 = transition4(input0, flows.trans, &index_array[0]);
+ input1 = transition4(input1, flows.trans, &index_array[4]);
+
+ input0 = transition4(input0, flows.trans, &index_array[0]);
+ input1 = transition4(input1, flows.trans, &index_array[4]);
+
+ /* Check for any matches. */
+ acl_match_check_x4(0, ctx, parms, &flows, &index_array[0]);
+ acl_match_check_x4(4, ctx, parms, &flows, &index_array[4]);
+ }
+
+ return 0;
+}
+
+/*
+ * Execute trie traversal with 4 traversals in parallel
+ */
+static inline int
+search_neon_4(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, int total_packets, uint32_t categories)
+{
+ int n;
+ struct acl_flow_data flows;
+ uint64_t index_array[4];
+ struct completion cmplt[4];
+ struct parms parms[4];
+ int32x4_t input;
+
+ acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
+ total_packets, categories, ctx->trans_table);
+
+ for (n = 0; n < 4; n++) {
+ cmplt[n].count = 0;
+ index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
+ }
+
+ /* Check for any matches. */
+ acl_match_check_x4(0, ctx, parms, &flows, index_array);
+
+ while (flows.started > 0) {
+ /* Gather 4 bytes of input data for each stream. */
+ input = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 0), input, 0);
+ input = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 1), input, 1);
+ input = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 2), input, 2);
+ input = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 3), input, 3);
+
+ /* Process the 4 bytes of input on each stream. */
+ input = transition4(input, flows.trans, index_array);
+ input = transition4(input, flows.trans, index_array);
+ input = transition4(input, flows.trans, index_array);
+ input = transition4(input, flows.trans, index_array);
+
+ /* Check for any matches. */
+ acl_match_check_x4(0, ctx, parms, &flows, index_array);
+ }
+
+ return 0;
+}
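
The two search loops above advance every flow four trie levels per outer iteration: GET_NEXT_4BYTES() packs 4 input bytes into one 32-bit lane, and each of the four transition4() calls consumes the low byte before shifting the lane right by CHAR_BIT. A minimal scalar model of how one lane is consumed (hypothetical helper, not part of the patch):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* consume_4bytes() is a hypothetical scalar model of one lane:
     * four rounds per iteration, each using the low byte and then
     * shifting right by CHAR_BIT, as transition4() does. */
    static uint32_t
    consume_4bytes(uint32_t next_input)
    {
        int round;

        for (round = 0; round < 4; round++) {
            printf("round %d consumes byte 0x%02x\n",
                   round, next_input & 0xff);
            next_input >>= CHAR_BIT;
        }
        return next_input; /* 0 once all four bytes are consumed */
    }

    int main(void)
    {
        consume_4bytes(0x04030201);
        return 0;
    }
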
diff --git a/src/dpdk22/lib/librte_acl/acl_run_sse.h b/src/dpdk22/lib/librte_acl/acl_run_sse.h
new file mode 100644
index 00000000..ad40a674
--- /dev/null
+++ b/src/dpdk22/lib/librte_acl/acl_run_sse.h
@@ -0,0 +1,357 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "acl_run.h"
+#include "acl_vect.h"
+
+enum {
+ SHUFFLE32_SLOT1 = 0xe5,
+ SHUFFLE32_SLOT2 = 0xe6,
+ SHUFFLE32_SLOT3 = 0xe7,
+ SHUFFLE32_SWAP64 = 0x4e,
+};
+
+static const rte_xmm_t xmm_shuffle_input = {
+ .u32 = {0x00000000, 0x04040404, 0x08080808, 0x0c0c0c0c},
+};
+
+static const rte_xmm_t xmm_ones_16 = {
+ .u16 = {1, 1, 1, 1, 1, 1, 1, 1},
+};
+
+static const rte_xmm_t xmm_match_mask = {
+ .u32 = {
+ RTE_ACL_NODE_MATCH,
+ RTE_ACL_NODE_MATCH,
+ RTE_ACL_NODE_MATCH,
+ RTE_ACL_NODE_MATCH,
+ },
+};
+
+static const rte_xmm_t xmm_index_mask = {
+ .u32 = {
+ RTE_ACL_NODE_INDEX,
+ RTE_ACL_NODE_INDEX,
+ RTE_ACL_NODE_INDEX,
+ RTE_ACL_NODE_INDEX,
+ },
+};
+
+static const rte_xmm_t xmm_range_base = {
+ .u32 = {
+ 0xffffff00, 0xffffff04, 0xffffff08, 0xffffff0c,
+ },
+};
+
+/*
+ * Resolve priority for multiple results (sse version).
+ * This consists of comparing the priority of the current traversal with the
+ * running set of results for the packet.
+ * For each result, keep a running array of the result (rule number) and
+ * its priority for each category.
+ */
+static inline void
+resolve_priority_sse(uint64_t transition, int n, const struct rte_acl_ctx *ctx,
+ struct parms *parms, const struct rte_acl_match_results *p,
+ uint32_t categories)
+{
+ uint32_t x;
+ xmm_t results, priority, results1, priority1, selector;
+ xmm_t *saved_results, *saved_priority;
+
+ for (x = 0; x < categories; x += RTE_ACL_RESULTS_MULTIPLIER) {
+
+ saved_results = (xmm_t *)(&parms[n].cmplt->results[x]);
+ saved_priority =
+ (xmm_t *)(&parms[n].cmplt->priority[x]);
+
+ /* get results and priorities for completed trie */
+ results = _mm_loadu_si128(
+ (const xmm_t *)&p[transition].results[x]);
+ priority = _mm_loadu_si128(
+ (const xmm_t *)&p[transition].priority[x]);
+
+ /* if this is not the first completed trie */
+ if (parms[n].cmplt->count != ctx->num_tries) {
+
+ /* get running best results and their priorities */
+ results1 = _mm_loadu_si128(saved_results);
+ priority1 = _mm_loadu_si128(saved_priority);
+
+ /* select results that are highest priority */
+ selector = _mm_cmpgt_epi32(priority1, priority);
+ results = _mm_blendv_epi8(results, results1, selector);
+ priority = _mm_blendv_epi8(priority, priority1,
+ selector);
+ }
+
+ /* save running best results and their priorities */
+ _mm_storeu_si128(saved_results, results);
+ _mm_storeu_si128(saved_priority, priority);
+ }
+}
+
+/*
+ * Extract transitions from an XMM register and check for any matches
+ */
+static void
+acl_process_matches(xmm_t *indices, int slot, const struct rte_acl_ctx *ctx,
+ struct parms *parms, struct acl_flow_data *flows)
+{
+ uint64_t transition1, transition2;
+
+ /* extract transition from low 64 bits. */
+ transition1 = _mm_cvtsi128_si64(*indices);
+
+ /* extract transition from high 64 bits. */
+ *indices = _mm_shuffle_epi32(*indices, SHUFFLE32_SWAP64);
+ transition2 = _mm_cvtsi128_si64(*indices);
+
+ transition1 = acl_match_check(transition1, slot, ctx,
+ parms, flows, resolve_priority_sse);
+ transition2 = acl_match_check(transition2, slot + 1, ctx,
+ parms, flows, resolve_priority_sse);
+
+ /* update indices with new transitions. */
+ *indices = _mm_set_epi64x(transition2, transition1);
+}
+
+/*
+ * Check for any match in 4 transitions (contained in 2 SSE registers)
+ */
+static inline __attribute__((always_inline)) void
+acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
+ struct acl_flow_data *flows, xmm_t *indices1, xmm_t *indices2,
+ xmm_t match_mask)
+{
+ xmm_t temp;
+
+ /* put low 32 bits of each transition into one register */
+ temp = (xmm_t)_mm_shuffle_ps((__m128)*indices1, (__m128)*indices2,
+ 0x88);
+ /* test for match node */
+ temp = _mm_and_si128(match_mask, temp);
+
+ while (!_mm_testz_si128(temp, temp)) {
+ acl_process_matches(indices1, slot, ctx, parms, flows);
+ acl_process_matches(indices2, slot + 2, ctx, parms, flows);
+
+ temp = (xmm_t)_mm_shuffle_ps((__m128)*indices1,
+ (__m128)*indices2,
+ 0x88);
+ temp = _mm_and_si128(match_mask, temp);
+ }
+}
+
+/*
+ * Process 4 transitions (in 2 XMM registers) in parallel
+ */
+static inline __attribute__((always_inline)) xmm_t
+transition4(xmm_t next_input, const uint64_t *trans,
+ xmm_t *indices1, xmm_t *indices2)
+{
+ xmm_t addr, tr_lo, tr_hi;
+ uint64_t trans0, trans2;
+
+ /* Shuffle low 32 into tr_lo and high 32 into tr_hi */
+ ACL_TR_HILO(mm, __m128, *indices1, *indices2, tr_lo, tr_hi);
+
+ /* Calculate the address (array index) for all 4 transitions. */
+ ACL_TR_CALC_ADDR(mm, 128, addr, xmm_index_mask.x, next_input,
+ xmm_shuffle_input.x, xmm_ones_16.x, xmm_range_base.x,
+ tr_lo, tr_hi);
+
+ /* Gather 64 bit transitions and pack back into 2 registers. */
+
+ trans0 = trans[_mm_cvtsi128_si32(addr)];
+
+ /* get slot 2 */
+
+ /* {x0, x1, x2, x3} -> {x2, x1, x2, x3} */
+ addr = _mm_shuffle_epi32(addr, SHUFFLE32_SLOT2);
+ trans2 = trans[_mm_cvtsi128_si32(addr)];
+
+ /* get slot 1 */
+
+ /* {x2, x1, x2, x3} -> {x1, x1, x2, x3} */
+ addr = _mm_shuffle_epi32(addr, SHUFFLE32_SLOT1);
+ *indices1 = _mm_set_epi64x(trans[_mm_cvtsi128_si32(addr)], trans0);
+
+ /* get slot 3 */
+
+ /* {x1, x1, x2, x3} -> {x3, x1, x2, x3} */
+ addr = _mm_shuffle_epi32(addr, SHUFFLE32_SLOT3);
+ *indices2 = _mm_set_epi64x(trans[_mm_cvtsi128_si32(addr)], trans2);
+
+ return _mm_srli_epi32(next_input, CHAR_BIT);
+}
+
+/*
+ * Execute trie traversal with 8 traversals in parallel
+ */
+static inline int
+search_sse_8(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, uint32_t total_packets, uint32_t categories)
+{
+ int n;
+ struct acl_flow_data flows;
+ uint64_t index_array[MAX_SEARCHES_SSE8];
+ struct completion cmplt[MAX_SEARCHES_SSE8];
+ struct parms parms[MAX_SEARCHES_SSE8];
+ xmm_t input0, input1;
+ xmm_t indices1, indices2, indices3, indices4;
+
+ acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
+ total_packets, categories, ctx->trans_table);
+
+ for (n = 0; n < MAX_SEARCHES_SSE8; n++) {
+ cmplt[n].count = 0;
+ index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
+ }
+
+ /*
+ * indices1 contains index_array[0,1]
+ * indices2 contains index_array[2,3]
+ * indices3 contains index_array[4,5]
+ * indices4 contains index_array[6,7]
+ */
+
+ indices1 = _mm_loadu_si128((xmm_t *) &index_array[0]);
+ indices2 = _mm_loadu_si128((xmm_t *) &index_array[2]);
+
+ indices3 = _mm_loadu_si128((xmm_t *) &index_array[4]);
+ indices4 = _mm_loadu_si128((xmm_t *) &index_array[6]);
+
+ /* Check for any matches. */
+ acl_match_check_x4(0, ctx, parms, &flows,
+ &indices1, &indices2, xmm_match_mask.x);
+ acl_match_check_x4(4, ctx, parms, &flows,
+ &indices3, &indices4, xmm_match_mask.x);
+
+ while (flows.started > 0) {
+
+ /* Gather 4 bytes of input data for each stream. */
+ input0 = _mm_cvtsi32_si128(GET_NEXT_4BYTES(parms, 0));
+ input1 = _mm_cvtsi32_si128(GET_NEXT_4BYTES(parms, 4));
+
+ input0 = _mm_insert_epi32(input0, GET_NEXT_4BYTES(parms, 1), 1);
+ input1 = _mm_insert_epi32(input1, GET_NEXT_4BYTES(parms, 5), 1);
+
+ input0 = _mm_insert_epi32(input0, GET_NEXT_4BYTES(parms, 2), 2);
+ input1 = _mm_insert_epi32(input1, GET_NEXT_4BYTES(parms, 6), 2);
+
+ input0 = _mm_insert_epi32(input0, GET_NEXT_4BYTES(parms, 3), 3);
+ input1 = _mm_insert_epi32(input1, GET_NEXT_4BYTES(parms, 7), 3);
+
+ /* Process the 4 bytes of input on each stream. */
+
+ input0 = transition4(input0, flows.trans,
+ &indices1, &indices2);
+ input1 = transition4(input1, flows.trans,
+ &indices3, &indices4);
+
+ input0 = transition4(input0, flows.trans,
+ &indices1, &indices2);
+ input1 = transition4(input1, flows.trans,
+ &indices3, &indices4);
+
+ input0 = transition4(input0, flows.trans,
+ &indices1, &indices2);
+ input1 = transition4(input1, flows.trans,
+ &indices3, &indices4);
+
+ input0 = transition4(input0, flows.trans,
+ &indices1, &indices2);
+ input1 = transition4(input1, flows.trans,
+ &indices3, &indices4);
+
+ /* Check for any matches. */
+ acl_match_check_x4(0, ctx, parms, &flows,
+ &indices1, &indices2, xmm_match_mask.x);
+ acl_match_check_x4(4, ctx, parms, &flows,
+ &indices3, &indices4, xmm_match_mask.x);
+ }
+
+ return 0;
+}
+
+/*
+ * Execute trie traversal with 4 traversals in parallel
+ */
+static inline int
+search_sse_4(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, int total_packets, uint32_t categories)
+{
+ int n;
+ struct acl_flow_data flows;
+ uint64_t index_array[MAX_SEARCHES_SSE4];
+ struct completion cmplt[MAX_SEARCHES_SSE4];
+ struct parms parms[MAX_SEARCHES_SSE4];
+ xmm_t input, indices1, indices2;
+
+ acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
+ total_packets, categories, ctx->trans_table);
+
+ for (n = 0; n < MAX_SEARCHES_SSE4; n++) {
+ cmplt[n].count = 0;
+ index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
+ }
+
+ indices1 = _mm_loadu_si128((xmm_t *) &index_array[0]);
+ indices2 = _mm_loadu_si128((xmm_t *) &index_array[2]);
+
+ /* Check for any matches. */
+ acl_match_check_x4(0, ctx, parms, &flows,
+ &indices1, &indices2, xmm_match_mask.x);
+
+ while (flows.started > 0) {
+
+ /* Gather 4 bytes of input data for each stream. */
+ input = _mm_cvtsi32_si128(GET_NEXT_4BYTES(parms, 0));
+ input = _mm_insert_epi32(input, GET_NEXT_4BYTES(parms, 1), 1);
+ input = _mm_insert_epi32(input, GET_NEXT_4BYTES(parms, 2), 2);
+ input = _mm_insert_epi32(input, GET_NEXT_4BYTES(parms, 3), 3);
+
+ /* Process the 4 bytes of input on each stream. */
+ input = transition4(input, flows.trans, &indices1, &indices2);
+ input = transition4(input, flows.trans, &indices1, &indices2);
+ input = transition4(input, flows.trans, &indices1, &indices2);
+ input = transition4(input, flows.trans, &indices1, &indices2);
+
+ /* Check for any matches. */
+ acl_match_check_x4(0, ctx, parms, &flows,
+ &indices1, &indices2, xmm_match_mask.x);
+ }
+
+ return 0;
+}
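
Both the SSE and NEON resolve_priority routines implement the same per-category rule: once a trie completes, the running (result, priority) pair survives only when its priority is strictly higher than the new one, so the blendv lets the newer result win ties. A scalar sketch of that merge rule, with hypothetical names:

    #include <stdint.h>
    #include <stdio.h>

    /* merge_result() is a hypothetical scalar model of the blendv-based
     * merge: the saved pair survives only when strictly higher priority,
     * so the newer result wins ties. */
    struct best {
        uint32_t result;
        int32_t priority;
    };

    static void
    merge_result(struct best *running, uint32_t result, int32_t priority,
                 int first_trie)
    {
        if (first_trie || priority >= running->priority) {
            running->result = result;
            running->priority = priority;
        }
    }

    int main(void)
    {
        struct best b;

        merge_result(&b, 10, 5, 1); /* first completed trie always stored */
        merge_result(&b, 20, 3, 0); /* strictly lower priority: ignored */
        merge_result(&b, 30, 5, 0); /* tie: newer result wins */
        printf("rule %u, priority %d\n", b.result, b.priority); /* 30, 5 */
        return 0;
    }
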
diff --git a/src/dpdk22/lib/librte_acl/acl_vect.h b/src/dpdk22/lib/librte_acl/acl_vect.h
new file mode 100644
index 00000000..6cc19997
--- /dev/null
+++ b/src/dpdk22/lib/librte_acl/acl_vect.h
@@ -0,0 +1,116 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ACL_VECT_H_
+#define _RTE_ACL_VECT_H_
+
+/**
+ * @file
+ *
+ * RTE ACL SSE/AVX related header.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * Takes 2 SIMD registers containing N transitions each (tr0, tr1).
+ * Shuffles them into a different representation:
+ * lo - contains low 32 bits of given N transitions.
+ * hi - contains high 32 bits of given N transitions.
+ */
+#define ACL_TR_HILO(P, TC, tr0, tr1, lo, hi) do { \
+ lo = (typeof(lo))_##P##_shuffle_ps((TC)(tr0), (TC)(tr1), 0x88); \
+ hi = (typeof(hi))_##P##_shuffle_ps((TC)(tr0), (TC)(tr1), 0xdd); \
+} while (0)
+
+
+/*
+ * Calculate the address of the next transition for
+ * all types of nodes. Note that only DFA nodes and range
+ * nodes actually transition to another node. Match
+ * nodes are not supposed to be encountered here.
+ * For quad range nodes:
+ * Calculate the number of range boundaries that are less than the
+ * input value. Range boundaries for each node are in signed 8 bit,
+ * ordered from -128 to 127.
+ * This is effectively a popcnt of the boundary bytes that are less
+ * than the input byte.
+ * Single nodes are processed in the same way as quad range nodes.
+ */
+#define ACL_TR_CALC_ADDR(P, S, \
+ addr, index_mask, next_input, shuffle_input, \
+ ones_16, range_base, tr_lo, tr_hi) do { \
+ \
+ typeof(addr) in, node_type, r, t; \
+ typeof(addr) dfa_msk, dfa_ofs, quad_ofs; \
+ \
+ t = _##P##_xor_si##S(index_mask, index_mask); \
+ in = _##P##_shuffle_epi8(next_input, shuffle_input); \
+ \
+ /* Calc node type and node addr */ \
+ node_type = _##P##_andnot_si##S(index_mask, tr_lo); \
+ addr = _##P##_and_si##S(index_mask, tr_lo); \
+ \
+ /* mask for DFA type(0) nodes */ \
+ dfa_msk = _##P##_cmpeq_epi32(node_type, t); \
+ \
+ /* DFA calculations. */ \
+ r = _##P##_srli_epi32(in, 30); \
+ r = _##P##_add_epi8(r, range_base); \
+ t = _##P##_srli_epi32(in, 24); \
+ r = _##P##_shuffle_epi8(tr_hi, r); \
+ \
+ dfa_ofs = _##P##_sub_epi32(t, r); \
+ \
+ /* QUAD/SINGLE calculations. */ \
+ t = _##P##_cmpgt_epi8(in, tr_hi); \
+ t = _##P##_sign_epi8(t, t); \
+ t = _##P##_maddubs_epi16(t, t); \
+ quad_ofs = _##P##_madd_epi16(t, ones_16); \
+ \
+ /* blend DFA and QUAD/SINGLE. */ \
+ t = _##P##_blendv_epi8(quad_ofs, dfa_ofs, dfa_msk); \
+ \
+ /* calculate address for next transitions. */ \
+ addr = _##P##_add_epi32(addr, t); \
+} while (0)
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ACL_VECT_H_ */
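
The final blendv in ACL_TR_CALC_ADDR is a branchless select: dfa_msk is all-ones for DFA (type 0) nodes, so the DFA offset is chosen there and the QUAD/SINGLE offset everywhere else. A scalar sketch of that select, assuming per-lane masks are all-ones or all-zeros:

    #include <stdint.h>
    #include <stdio.h>

    /* blend_offset() models the per-lane select: pick dfa_ofs where the
     * mask is all-ones, quad_ofs where it is all-zeros. */
    static uint32_t
    blend_offset(uint32_t dfa_msk, uint32_t dfa_ofs, uint32_t quad_ofs)
    {
        return (dfa_msk & dfa_ofs) | (~dfa_msk & quad_ofs);
    }

    int main(void)
    {
        printf("%u\n", blend_offset(UINT32_MAX, 7, 2)); /* DFA node: 7 */
        printf("%u\n", blend_offset(0, 7, 2));          /* quad node: 2 */
        return 0;
    }
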
diff --git a/src/dpdk_lib18/librte_acl/rte_acl.h b/src/dpdk22/lib/librte_acl/rte_acl.h
index 0d913eed..0979a098 100755..100644
--- a/src/dpdk_lib18/librte_acl/rte_acl.h
+++ b/src/dpdk22/lib/librte_acl/rte_acl.h
@@ -94,6 +94,8 @@ struct rte_acl_config {
uint32_t num_fields; /**< Number of field definitions. */
struct rte_acl_field_def defs[RTE_ACL_MAX_FIELDS];
/**< array of field definitions. */
+ size_t max_size;
+ /**< max memory limit for internal run-time structures. */
};
/**
@@ -113,13 +115,16 @@ struct rte_acl_field {
enum {
RTE_ACL_TYPE_SHIFT = 29,
- RTE_ACL_MAX_INDEX = LEN2MASK(RTE_ACL_TYPE_SHIFT),
+ RTE_ACL_MAX_INDEX = RTE_LEN2MASK(RTE_ACL_TYPE_SHIFT, uint32_t),
RTE_ACL_MAX_PRIORITY = RTE_ACL_MAX_INDEX,
RTE_ACL_MIN_PRIORITY = 0,
};
#define RTE_ACL_INVALID_USERDATA 0
+#define RTE_ACL_MASKLEN_TO_BITMASK(v, s) \
+((v) == 0 ? (v) : (typeof(v))((uint64_t)-1 << ((s) * CHAR_BIT - (v))))
+
/**
* Miscellaneous data for ACL rule.
*/
@@ -168,7 +173,6 @@ struct rte_acl_param {
* Pointer to ACL context structure that is used in future ACL
* operations, or NULL on error, with error code set in rte_errno.
* Possible rte_errno errors include:
- * - E_RTE_NO_TAILQ - no tailq list could be got for the ACL context list
* - EINVAL - invalid parameter passed to function
*/
struct rte_acl_ctx *
@@ -265,6 +269,9 @@ enum rte_acl_classify_alg {
RTE_ACL_CLASSIFY_DEFAULT = 0,
RTE_ACL_CLASSIFY_SCALAR = 1, /**< generic implementation. */
RTE_ACL_CLASSIFY_SSE = 2, /**< requires SSE4.1 support. */
+ RTE_ACL_CLASSIFY_AVX2 = 3, /**< requires AVX2 support. */
+ RTE_ACL_CLASSIFY_NEON = 4, /**< requires NEON support. */
+ RTE_ACL_CLASSIFY_NUM /* should always be the last one. */
};
/**
@@ -374,110 +381,6 @@ rte_acl_dump(const struct rte_acl_ctx *ctx);
void
rte_acl_list_dump(void);
-/**
- * Legacy support for 7-tuple IPv4 and VLAN rule.
- * This structure and corresponding API is deprecated.
- */
-struct rte_acl_ipv4vlan_rule {
- struct rte_acl_rule_data data; /**< Miscellaneous data for the rule. */
- uint8_t proto; /**< IPv4 protocol ID. */
- uint8_t proto_mask; /**< IPv4 protocol ID mask. */
- uint16_t vlan; /**< VLAN ID. */
- uint16_t vlan_mask; /**< VLAN ID mask. */
- uint16_t domain; /**< VLAN domain. */
- uint16_t domain_mask; /**< VLAN domain mask. */
- uint32_t src_addr; /**< IPv4 source address. */
- uint32_t src_mask_len; /**< IPv4 source address mask. */
- uint32_t dst_addr; /**< IPv4 destination address. */
- uint32_t dst_mask_len; /**< IPv4 destination address mask. */
- uint16_t src_port_low; /**< L4 source port low. */
- uint16_t src_port_high; /**< L4 source port high. */
- uint16_t dst_port_low; /**< L4 destination port low. */
- uint16_t dst_port_high; /**< L4 destination port high. */
-};
-
-/**
- * Specifies fields layout inside rte_acl_rule for rte_acl_ipv4vlan_rule.
- */
-enum {
- RTE_ACL_IPV4VLAN_PROTO_FIELD,
- RTE_ACL_IPV4VLAN_VLAN1_FIELD,
- RTE_ACL_IPV4VLAN_VLAN2_FIELD,
- RTE_ACL_IPV4VLAN_SRC_FIELD,
- RTE_ACL_IPV4VLAN_DST_FIELD,
- RTE_ACL_IPV4VLAN_SRCP_FIELD,
- RTE_ACL_IPV4VLAN_DSTP_FIELD,
- RTE_ACL_IPV4VLAN_NUM_FIELDS
-};
-
-/**
- * Macro to define rule size for rte_acl_ipv4vlan_rule.
- */
-#define RTE_ACL_IPV4VLAN_RULE_SZ \
- RTE_ACL_RULE_SZ(RTE_ACL_IPV4VLAN_NUM_FIELDS)
-
-/*
- * That effectively defines order of IPV4VLAN classifications:
- * - PROTO
- * - VLAN (TAG and DOMAIN)
- * - SRC IP ADDRESS
- * - DST IP ADDRESS
- * - PORTS (SRC and DST)
- */
-enum {
- RTE_ACL_IPV4VLAN_PROTO,
- RTE_ACL_IPV4VLAN_VLAN,
- RTE_ACL_IPV4VLAN_SRC,
- RTE_ACL_IPV4VLAN_DST,
- RTE_ACL_IPV4VLAN_PORTS,
- RTE_ACL_IPV4VLAN_NUM
-};
-
-/**
- * Add ipv4vlan rules to an existing ACL context.
- * This function is not multi-thread safe.
- *
- * @param ctx
- * ACL context to add patterns to.
- * @param rules
- * Array of rules to add to the ACL context.
- * Note that all fields in rte_acl_ipv4vlan_rule structures are expected
- * to be in host byte order.
- * @param num
- * Number of elements in the input array of rules.
- * @return
- * - -ENOMEM if there is no space in the ACL context for these rules.
- * - -EINVAL if the parameters are invalid.
- * - Zero if operation completed successfully.
- */
-int
-rte_acl_ipv4vlan_add_rules(struct rte_acl_ctx *ctx,
- const struct rte_acl_ipv4vlan_rule *rules,
- uint32_t num);
-
-/**
- * Analyze set of ipv4vlan rules and build required internal
- * run-time structures.
- * This function is not multi-thread safe.
- *
- * @param ctx
- * ACL context to build.
- * @param layout
- * Layout of input data to search through.
- * @param num_categories
- * Maximum number of categories to use in that build.
- * @return
- * - -ENOMEM if couldn't allocate enough memory.
- * - -EINVAL if the parameters are invalid.
- * - Negative error code if operation failed.
- * - Zero if operation completed successfully.
- */
-int
-rte_acl_ipv4vlan_build(struct rte_acl_ctx *ctx,
- const uint32_t layout[RTE_ACL_IPV4VLAN_NUM],
- uint32_t num_categories);
-
-
#ifdef __cplusplus
}
#endif
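
The new RTE_ACL_MASKLEN_TO_BITMASK macro turns a prefix length into a left-aligned bitmask, with a special case for length 0 to avoid an undefined full-width shift. A minimal standalone demonstration (it relies on the GCC typeof extension, as the header does):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The macro as added above; typeof is a GCC extension. */
    #define RTE_ACL_MASKLEN_TO_BITMASK(v, s) \
    ((v) == 0 ? (v) : (typeof(v))((uint64_t)-1 << ((s) * CHAR_BIT - (v))))

    int main(void)
    {
        uint32_t mask = RTE_ACL_MASKLEN_TO_BITMASK((uint32_t)24,
                                                   sizeof(uint32_t));

        printf("/24 -> 0x%08x\n", mask); /* prints 0xffffff00 */
        return 0;
    }
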
diff --git a/src/dpdk_lib18/librte_acl/rte_acl_osdep.h b/src/dpdk22/lib/librte_acl/rte_acl_osdep.h
index 046b22db..41f7e3d4 100755..100644
--- a/src/dpdk_lib18/librte_acl/rte_acl_osdep.h
+++ b/src/dpdk22/lib/librte_acl/rte_acl_osdep.h
@@ -56,19 +56,10 @@
* Common defines.
*/
-#define LEN2MASK(ln) ((uint32_t)(((uint64_t)1 << (ln)) - 1))
-
#define DIM(x) RTE_DIM(x)
-/*
- * To build ACL standalone.
- */
-#ifdef RTE_LIBRTE_ACL_STANDALONE
-#include <rte_acl_osdep_alone.h>
-#else
-
#include <rte_common.h>
-#include <rte_common_vect.h>
+#include <rte_vect.h>
#include <rte_memory.h>
#include <rte_log.h>
#include <rte_memcpy.h>
@@ -77,7 +68,6 @@
#include <rte_branch_prediction.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
@@ -87,6 +77,4 @@
#include <rte_log.h>
#include <rte_debug.h>
-#endif /* RTE_LIBRTE_ACL_STANDALONE */
-
#endif /* _RTE_ACL_OSDEP_H_ */
diff --git a/src/dpdk_lib18/librte_acl/tb_mem.h b/src/dpdk22/lib/librte_acl/tb_mem.h
index a8dae94d..ca7af966 100755..100644
--- a/src/dpdk_lib18/librte_acl/tb_mem.h
+++ b/src/dpdk22/lib/librte_acl/tb_mem.h
@@ -48,6 +48,7 @@ extern "C" {
#endif
#include <rte_acl_osdep.h>
+#include <setjmp.h>
struct tb_mem_block {
struct tb_mem_block *next;
@@ -61,6 +62,8 @@ struct tb_mem_pool {
size_t alignment;
size_t min_alloc;
size_t alloc;
+ /* jump target in case of memory allocation failure. */
+ sigjmp_buf fail;
};
void *tb_alloc(struct tb_mem_pool *pool, size_t size);
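
The sigjmp_buf added to tb_mem_pool lets deeply nested allocation code bail out to a single recovery point instead of threading error returns through every caller. A minimal sketch of that pattern, independent of the tb_mem code itself:

    #include <setjmp.h>
    #include <stdio.h>

    /* A deep allocation failure jumps straight back to a recovery
     * point set by the caller with sigsetjmp(). */
    static sigjmp_buf fail;

    static void
    alloc_or_jump(int succeed)
    {
        if (!succeed)
            siglongjmp(fail, 1);
    }

    int main(void)
    {
        if (sigsetjmp(fail, 0) != 0) {
            puts("allocation failed, recovered at setjmp point");
            return 1;
        }
        alloc_or_jump(0);
        puts("never reached");
        return 0;
    }
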
diff --git a/src/dpdk_lib18/librte_cfgfile/rte_cfgfile.c b/src/dpdk22/lib/librte_cfgfile/rte_cfgfile.c
index b81c2738..a677dade 100755..100644
--- a/src/dpdk_lib18/librte_cfgfile/rte_cfgfile.c
+++ b/src/dpdk22/lib/librte_cfgfile/rte_cfgfile.c
@@ -92,7 +92,7 @@ rte_cfgfile_load(const char *filename, int flags)
int allocated_entries = 0;
int curr_section = -1;
int curr_entry = -1;
- char buffer[256];
+ char buffer[256] = {0};
int lineno = 0;
struct rte_cfgfile *cfg = NULL;
diff --git a/src/dpdk_lib18/librte_cfgfile/rte_cfgfile.h b/src/dpdk22/lib/librte_cfgfile/rte_cfgfile.h
index 7c9fc916..d4437826 100755..100644
--- a/src/dpdk_lib18/librte_cfgfile/rte_cfgfile.h
+++ b/src/dpdk22/lib/librte_cfgfile/rte_cfgfile.h
@@ -47,8 +47,13 @@ extern "C" {
*
***/
-#define CFG_NAME_LEN 32
-#define CFG_VALUE_LEN 64
+#ifndef CFG_NAME_LEN
+#define CFG_NAME_LEN 64
+#endif
+
+#ifndef CFG_VALUE_LEN
+#define CFG_VALUE_LEN 256
+#endif
/** Configuration file */
struct rte_cfgfile;
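
Wrapping the length macros in #ifndef makes them build-time tunables rather than fixed constants: any translation unit, or the compiler command line, may now set them first. A small sketch, assuming an override such as -DCFG_VALUE_LEN=512:

    #include <stdio.h>

    /* Mirrors the guarded defaults above; a build may override them,
     * e.g. cc -DCFG_VALUE_LEN=512 app.c */
    #ifndef CFG_NAME_LEN
    #define CFG_NAME_LEN 64
    #endif

    #ifndef CFG_VALUE_LEN
    #define CFG_VALUE_LEN 256
    #endif

    int main(void)
    {
        printf("name=%d value=%d\n", CFG_NAME_LEN, CFG_VALUE_LEN);
        return 0;
    }
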
diff --git a/src/dpdk22/lib/librte_compat/rte_compat.h b/src/dpdk22/lib/librte_compat/rte_compat.h
new file mode 100644
index 00000000..1c3c8d52
--- /dev/null
+++ b/src/dpdk22/lib/librte_compat/rte_compat.h
@@ -0,0 +1,105 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Neil Horman <nhorman@tuxdriver.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_COMPAT_H_
+#define _RTE_COMPAT_H_
+#include <rte_common.h>
+
+#ifdef RTE_BUILD_SHARED_LIB
+
+/*
+ * Provides backwards compatibility when updating exported functions.
+ * When a symol is exported from a library to provide an API, it also provides a
+ * calling convention (ABI) that is embodied in its name, return type,
+ * arguments, etc. On occasion that function may need to change to accommodate
+ * new functionality, behavior, etc. When that occurs, it is desireable to
+ * allow for backwards compatibility for a time with older binaries that are
+ * dynamically linked to the dpdk. To support that, the __vsym and
+ * VERSION_SYMBOL macros are created. They, in conjunction with the
+ * <library>_version.map file for a given library allow for multiple versions of
+ * a symbol to exist in a shared library so that older binaries need not be
+ * immediately recompiled.
+ *
+ * Refer to the guidelines document in the docs subdirectory for details on the
+ * use of these macros
+ */
+
+/*
+ * Macro Parameters:
+ * b - function base name
+ * e - function version extension, to be concatenated with base name
+ * n - function symbol version string to be applied
+ * f - function prototype
+ * p - full function symbol name
+ */
+
+/*
+ * VERSION_SYMBOL
+ * Creates a symbol version table entry binding symbol <b>@DPDK_<n> to the internal
+ * function name <b>_<e>
+ */
+#define VERSION_SYMBOL(b, e, n) __asm__(".symver " RTE_STR(b) RTE_STR(e) ", " RTE_STR(b) "@DPDK_" RTE_STR(n))
+
+/*
+ * BIND_DEFAULT_SYMBOL
+ * Creates a symbol version entry instructing the linker to bind references to
+ * symbol <b> to the internal symbol <b>_<e>
+ */
+#define BIND_DEFAULT_SYMBOL(b, e, n) __asm__(".symver " RTE_STR(b) RTE_STR(e) ", " RTE_STR(b) "@@DPDK_" RTE_STR(n))
+#define __vsym __attribute__((used))
+
+/*
+ * MAP_STATIC_SYMBOL
+ * If a function has been bifurcated into multiple versions, none of which
+ * are defined as the exported symbol name in the map file, this macro can be
+ * used to alias a specific version of the symbol to its exported name. For
+ * example, if you have 2 versions of a function foo_v1 and foo_v2, where the
+ * former is mapped to foo@DPDK_1 and the latter is mapped to foo@DPDK_2 when
+ * building a shared library, this macro can be used to map either foo_v1 or
+ * foo_v2 to the symbol foo when building a static library, e.g.:
+ * MAP_STATIC_SYMBOL(void foo(), foo_v2);
+ */
+#define MAP_STATIC_SYMBOL(f, p)
+
+#else
+/*
+ * No symbol versioning in use
+ */
+#define VERSION_SYMBOL(b, e, n)
+#define __vsym
+#define BIND_DEFAULT_SYMBOL(b, e, n)
+#define MAP_STATIC_SYMBOL(f, p) f __attribute__((alias(RTE_STR(p))))
+/*
+ * RTE_BUILD_SHARED_LIB=n
+ */
+#endif
+
+
+#endif /* _RTE_COMPAT_H_ */
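
In the static-library branch, MAP_STATIC_SYMBOL(f, p) expands to a GCC alias so the exported name binds directly to one chosen implementation. A hypothetical sketch with made-up functions foo/foo_v2 (GCC/ELF specific, not part of the patch):

    #include <stdio.h>

    void foo_v2(void)
    {
        puts("foo_v2 called through the alias");
    }

    /* what MAP_STATIC_SYMBOL(void foo(void), foo_v2) expands to */
    void foo(void) __attribute__((alias("foo_v2")));

    int main(void)
    {
        foo();
        return 0;
    }
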
diff --git a/src/dpdk_lib18/librte_distributor/rte_distributor.h b/src/dpdk22/lib/librte_distributor/rte_distributor.h
index cc1d5590..7d36bc8a 100755..100644
--- a/src/dpdk_lib18/librte_distributor/rte_distributor.h
+++ b/src/dpdk22/lib/librte_distributor/rte_distributor.h
@@ -46,11 +46,10 @@
extern "C" {
#endif
-#include <rte_mbuf.h>
-
#define RTE_DISTRIBUTOR_NAMESIZE 32 /**< Length of name for instance */
struct rte_distributor;
+struct rte_mbuf;
/**
* Function to create a new distributor instance
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_cpuflags.c b/src/dpdk22/lib/librte_eal/common/eal_common_cpuflags.c
index 6fd360cb..8ba7b30e 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_common_cpuflags.c
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_cpuflags.c
@@ -30,6 +30,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <rte_common.h>
#include <rte_cpuflags.h>
/*
@@ -62,10 +63,10 @@ rte_cpu_check_supported(void)
static const enum rte_cpu_flag_t compile_time_flags[] = {
RTE_COMPILE_TIME_CPUFLAGS
};
- unsigned i;
+ unsigned count = RTE_DIM(compile_time_flags), i;
int ret;
- for (i = 0; i < sizeof(compile_time_flags)/sizeof(compile_time_flags[0]); i++) {
+ for (i = 0; i < count; i++) {
ret = rte_cpu_get_flag_enabled(compile_time_flags[i]);
if (ret < 0) {
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_dev.c b/src/dpdk22/lib/librte_eal/common/eal_common_dev.c
index eae56565..a8a4146c 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_common_dev.c
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_dev.c
@@ -32,6 +32,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <sys/queue.h>
@@ -40,6 +41,7 @@
#include <rte_devargs.h>
#include <rte_debug.h>
#include <rte_devargs.h>
+#include <rte_log.h>
#include "eal_private.h"
@@ -62,6 +64,32 @@ rte_eal_driver_unregister(struct rte_driver *driver)
}
int
+rte_eal_vdev_init(const char *name, const char *args)
+{
+ struct rte_driver *driver;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ TAILQ_FOREACH(driver, &dev_driver_list, next) {
+ if (driver->type != PMD_VDEV)
+ continue;
+
+ /*
+ * search for a driver prefix in the virtual device name.
+ * For example, if the driver is pcap PMD, driver->name
+ * will be "eth_pcap", but "name" will be "eth_pcapN".
+ * So use strncmp to compare.
+ */
+ if (!strncmp(driver->name, name, strlen(driver->name)))
+ return driver->init(name, args);
+ }
+
+ RTE_LOG(ERR, EAL, "no driver found for %s\n", name);
+ return -EINVAL;
+}
+
+int
rte_eal_dev_init(void)
{
struct rte_devargs *devargs;
@@ -79,22 +107,11 @@ rte_eal_dev_init(void)
if (devargs->type != RTE_DEVTYPE_VIRTUAL)
continue;
- TAILQ_FOREACH(driver, &dev_driver_list, next) {
- if (driver->type != PMD_VDEV)
- continue;
-
- /* search a driver prefix in virtual device name */
- if (!strncmp(driver->name, devargs->virtual.drv_name,
- strlen(driver->name))) {
- driver->init(devargs->virtual.drv_name,
- devargs->args);
- break;
- }
- }
-
- if (driver == NULL) {
- rte_panic("no driver found for %s\n",
- devargs->virtual.drv_name);
+ if (rte_eal_vdev_init(devargs->virt.drv_name,
+ devargs->args)) {
+ RTE_LOG(ERR, EAL, "failed to initialize %s device\n",
+ devargs->virt.drv_name);
+ return -1;
}
}
@@ -107,3 +124,29 @@ rte_eal_dev_init(void)
}
return 0;
}
+
+int
+rte_eal_vdev_uninit(const char *name)
+{
+ struct rte_driver *driver;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ TAILQ_FOREACH(driver, &dev_driver_list, next) {
+ if (driver->type != PMD_VDEV)
+ continue;
+
+ /*
+ * search for a driver prefix in the virtual device name.
+ * For example, if the driver is pcap PMD, driver->name
+ * will be "eth_pcap", but "name" will be "eth_pcapN".
+ * So use strncmp to compare.
+ */
+ if (!strncmp(driver->name, name, strlen(driver->name)))
+ return driver->uninit(name);
+ }
+
+ RTE_LOG(ERR, EAL, "no driver found for %s\n", name);
+ return -EINVAL;
+}
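
Both new vdev helpers rely on the same prefix match: the registered driver name must be a leading substring of the device name, which is why "eth_pcap" matches "eth_pcap0". A standalone sketch of that check:

    #include <stdio.h>
    #include <string.h>

    /* The strncmp prefix match used by rte_eal_vdev_init() and
     * rte_eal_vdev_uninit(). */
    static int
    driver_matches(const char *drv_name, const char *dev_name)
    {
        return strncmp(drv_name, dev_name, strlen(drv_name)) == 0;
    }

    int main(void)
    {
        printf("%d\n", driver_matches("eth_pcap", "eth_pcap0")); /* 1 */
        printf("%d\n", driver_matches("eth_ring", "eth_pcap0")); /* 0 */
        return 0;
    }
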
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_devargs.c b/src/dpdk22/lib/librte_eal/common/eal_common_devargs.c
index 4c7d11af..5d075d04 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_common_devargs.c
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_devargs.c
@@ -31,11 +31,15 @@
*/
/* This file manages the list of devices and their arguments, as given
- * by the user at startup */
+ * by the user at startup
+ *
+ * Code here should not call rte_log since the EAL environment
+ * may not be initialized.
+ */
+#include <stdio.h>
#include <string.h>
-#include <rte_log.h>
#include <rte_pci.h>
#include <rte_devargs.h>
#include "eal_private.h"
@@ -44,65 +48,86 @@
struct rte_devargs_list devargs_list =
TAILQ_HEAD_INITIALIZER(devargs_list);
-/* store a whitelist parameter for later parsing */
int
-rte_eal_devargs_add(enum rte_devtype devtype, const char *devargs_str)
+rte_eal_parse_devargs_str(const char *devargs_str,
+ char **drvname, char **drvargs)
{
- struct rte_devargs *devargs;
- char buf[RTE_DEVARGS_LEN];
char *sep;
- int ret;
- ret = snprintf(buf, sizeof(buf), "%s", devargs_str);
- if (ret < 0 || ret >= (int)sizeof(buf)) {
- RTE_LOG(ERR, EAL, "user device args too large: <%s>\n",
- devargs_str);
+ if ((devargs_str) == NULL || (drvname) == NULL || (drvargs == NULL))
return -1;
- }
- /* use malloc instead of rte_malloc as it's called early at init */
- devargs = malloc(sizeof(*devargs));
- if (devargs == NULL) {
- RTE_LOG(ERR, EAL, "cannot allocate devargs\n");
+ *drvname = strdup(devargs_str);
+ if (*drvname == NULL)
return -1;
- }
- memset(devargs, 0, sizeof(*devargs));
- devargs->type = devtype;
/* set the first ',' to '\0' to split name and arguments */
- sep = strchr(buf, ',');
+ sep = strchr(*drvname, ',');
if (sep != NULL) {
sep[0] = '\0';
- snprintf(devargs->args, sizeof(devargs->args), "%s", sep + 1);
+ *drvargs = strdup(sep + 1);
+ } else {
+ *drvargs = strdup("");
}
+ if (*drvargs == NULL) {
+ free(*drvname);
+ return -1;
+ }
+ return 0;
+}
+
+/* store a whitelist parameter for later parsing */
+int
+rte_eal_devargs_add(enum rte_devtype devtype, const char *devargs_str)
+{
+ struct rte_devargs *devargs = NULL;
+ char *buf = NULL;
+ int ret;
+
+ /* use malloc instead of rte_malloc as it's called early at init */
+ devargs = malloc(sizeof(*devargs));
+ if (devargs == NULL)
+ goto fail;
+
+ memset(devargs, 0, sizeof(*devargs));
+ devargs->type = devtype;
+
+ if (rte_eal_parse_devargs_str(devargs_str, &buf, &devargs->args))
+ goto fail;
+
switch (devargs->type) {
case RTE_DEVTYPE_WHITELISTED_PCI:
case RTE_DEVTYPE_BLACKLISTED_PCI:
/* try to parse pci identifier */
if (eal_parse_pci_BDF(buf, &devargs->pci.addr) != 0 &&
- eal_parse_pci_DomBDF(buf, &devargs->pci.addr) != 0) {
- RTE_LOG(ERR, EAL,
- "invalid PCI identifier <%s>\n", buf);
- free(devargs);
- return -1;
- }
+ eal_parse_pci_DomBDF(buf, &devargs->pci.addr) != 0)
+ goto fail;
+
break;
case RTE_DEVTYPE_VIRTUAL:
/* save driver name */
- ret = snprintf(devargs->virtual.drv_name,
- sizeof(devargs->virtual.drv_name), "%s", buf);
- if (ret < 0 || ret >= (int)sizeof(devargs->virtual.drv_name)) {
- RTE_LOG(ERR, EAL,
- "driver name too large: <%s>\n", buf);
- free(devargs);
- return -1;
- }
+ ret = snprintf(devargs->virt.drv_name,
+ sizeof(devargs->virt.drv_name), "%s", buf);
+ if (ret < 0 || ret >= (int)sizeof(devargs->virt.drv_name))
+ goto fail;
+
break;
}
+ free(buf);
TAILQ_INSERT_TAIL(&devargs_list, devargs, next);
return 0;
+
+fail:
+ if (buf)
+ free(buf);
+ if (devargs) {
+ free(devargs->args);
+ free(devargs);
+ }
+
+ return -1;
}
/* count the number of devices of a specified type */
@@ -144,7 +169,7 @@ rte_eal_devargs_dump(FILE *f)
devargs->args);
else if (devargs->type == RTE_DEVTYPE_VIRTUAL)
fprintf(f, " VIRTUAL %s %s\n",
- devargs->virtual.drv_name,
+ devargs->virt.drv_name,
devargs->args);
else
fprintf(f, " UNKNOWN %s\n", devargs->args);
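
The reworked parsing path centralizes the name/arguments split in rte_eal_parse_devargs_str(): duplicate the string, cut it at the first ',', and strdup the remainder (or an empty string). A self-contained sketch of the same split, using hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* split_devargs() mimics the split: "eth_pcap0,iface=eth0"
     * becomes name "eth_pcap0" and arguments "iface=eth0". */
    static int
    split_devargs(const char *str, char **name, char **args)
    {
        char *sep;

        *name = strdup(str);
        if (*name == NULL)
            return -1;

        sep = strchr(*name, ',');
        if (sep != NULL) {
            *sep = '\0';
            *args = strdup(sep + 1);
        } else {
            *args = strdup("");
        }
        if (*args == NULL) {
            free(*name);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        char *name, *args;

        if (split_devargs("eth_pcap0,iface=eth0", &name, &args) == 0) {
            printf("name=%s args=%s\n", name, args);
            free(name);
            free(args);
        }
        return 0;
    }
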
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_errno.c b/src/dpdk22/lib/librte_eal/common/eal_common_errno.c
index 259f8958..de48d8e4 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_common_errno.c
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_errno.c
@@ -64,8 +64,6 @@ rte_strerror(int errnum)
return "Invalid call in secondary process";
case E_RTE_NO_CONFIG:
return "Missing rte_config structure";
- case E_RTE_NO_TAILQ:
- return "No TAILQ initialised";
default:
strerror_r(errnum, RTE_PER_LCORE(retval), RETVAL_SZ);
}
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_hexdump.c b/src/dpdk22/lib/librte_eal/common/eal_common_hexdump.c
index 6135133f..d5cbd703 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_common_hexdump.c
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_hexdump.c
@@ -103,7 +103,7 @@ rte_memdump(FILE *f, const char * title, const void * buf, unsigned int len)
line[0] = '\0';
for (i = 0, out = 0; i < len; i++) {
- // Make sure we do not overrun the line buffer length.
+ // Make sure we do not overrun the line buffer length.
if ( out >= (LINE_LEN - 4) ) {
fprintf(f, "%s", line);
out = 0;
@@ -118,4 +118,3 @@ rte_memdump(FILE *f, const char * title, const void * buf, unsigned int len)
fflush(f);
}
-
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_launch.c b/src/dpdk22/lib/librte_eal/common/eal_common_launch.c
index 599f83b5..229c3a03 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_common_launch.c
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_launch.c
@@ -39,7 +39,6 @@
#include <rte_launch.h>
#include <rte_memory.h>
#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_per_lcore.h>
@@ -117,4 +116,3 @@ rte_eal_mp_wait_lcore(void)
rte_eal_wait_lcore(lcore_id);
}
}
-
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_lcore.c b/src/dpdk22/lib/librte_eal/common/eal_common_lcore.c
index 662f0245..a4263ba5 100755..100644
--- a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_lcore.c
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_lcore.c
@@ -32,7 +32,9 @@
*/
#include <unistd.h>
-#include <sys/sysctl.h>
+#include <limits.h>
+#include <string.h>
+#include <dirent.h>
#include <rte_log.h>
#include <rte_eal.h>
@@ -41,26 +43,12 @@
#include <rte_debug.h>
#include "eal_private.h"
-
-/* No topology information available on FreeBSD including NUMA info */
-#define cpu_core_id(X) 0
-#define cpu_socket_id(X) 0
-
-static int
-get_ncpus(void)
-{
- int mib[2] = {CTL_HW, HW_NCPU};
- int ncpu;
- size_t len = sizeof(ncpu);
-
- sysctl(mib, 2, &ncpu, &len, NULL, 0);
- RTE_LOG(INFO, EAL, "Sysctl reports %d cpus\n", ncpu);
- return ncpu;
-}
+#include "eal_thread.h"
/*
- * fill the cpu_info structure with as much info as we can get.
- * code is similar to linux version, but sadly available info is less.
+ * Parse /sys/devices/system/cpu to get the number of physical and logical
+ * processors on the machine. The function will fill the cpu_info
+ * structure.
*/
int
rte_eal_cpu_init(void)
@@ -70,36 +58,51 @@ rte_eal_cpu_init(void)
unsigned lcore_id;
unsigned count = 0;
- const unsigned ncpus = get_ncpus();
/*
* Parse the maximum set of logical cores, detect the subset of running
* ones and enable them by default.
*/
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- lcore_config[lcore_id].detected = (lcore_id < ncpus);
+ lcore_config[lcore_id].core_index = count;
+
+ /* init cpuset for per lcore config */
+ CPU_ZERO(&lcore_config[lcore_id].cpuset);
+
+ /* in 1:1 mapping, record related cpu detected state */
+ lcore_config[lcore_id].detected = eal_cpu_detected(lcore_id);
if (lcore_config[lcore_id].detected == 0) {
config->lcore_role[lcore_id] = ROLE_OFF;
+ lcore_config[lcore_id].core_index = -1;
continue;
}
+
+ /* By default, lcore 1:1 map to cpu id */
+ CPU_SET(lcore_id, &lcore_config[lcore_id].cpuset);
+
/* By default, each detected core is enabled */
config->lcore_role[lcore_id] = ROLE_RTE;
- lcore_config[lcore_id].core_id = cpu_core_id(lcore_id);
- lcore_config[lcore_id].socket_id = cpu_socket_id(lcore_id);
+ lcore_config[lcore_id].core_id = eal_cpu_core_id(lcore_id);
+ lcore_config[lcore_id].socket_id = eal_cpu_socket_id(lcore_id);
if (lcore_config[lcore_id].socket_id >= RTE_MAX_NUMA_NODES)
#ifdef RTE_EAL_ALLOW_INV_SOCKET_ID
lcore_config[lcore_id].socket_id = 0;
#else
rte_panic("Socket ID (%u) is greater than "
"RTE_MAX_NUMA_NODES (%d)\n",
- lcore_config[lcore_id].socket_id, RTE_MAX_NUMA_NODES);
+ lcore_config[lcore_id].socket_id,
+ RTE_MAX_NUMA_NODES);
#endif
- RTE_LOG(DEBUG, EAL, "Detected lcore %u\n",
- lcore_id);
+
+ RTE_LOG(DEBUG, EAL, "Detected lcore %u as "
+ "core %u on socket %u\n",
+ lcore_id, lcore_config[lcore_id].core_id,
+ lcore_config[lcore_id].socket_id);
count++;
}
/* Set the count of enabled logical cores of the EAL configuration */
config->lcore_count = count;
- RTE_LOG(DEBUG, EAL, "Support maximum %u logical core(s) by configuration.\n",
+ RTE_LOG(DEBUG, EAL,
+ "Support maximum %u logical core(s) by configuration.\n",
RTE_MAX_LCORE);
RTE_LOG(DEBUG, EAL, "Detected %u lcore(s)\n", config->lcore_count);
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_log.c b/src/dpdk22/lib/librte_eal/common/eal_common_log.c
index cf576195..1ae8de70 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_common_log.c
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_log.c
@@ -48,7 +48,6 @@
#include <rte_launch.h>
#include <rte_common.h>
#include <rte_cycles.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
@@ -120,7 +119,10 @@ rte_log_add_in_history(const char *buf, size_t size)
/* get a buffer for adding in history */
if (log_history_size > RTE_LOG_HISTORY) {
hist_buf = STAILQ_FIRST(&log_history);
- STAILQ_REMOVE_HEAD(&log_history, next);
+ if (hist_buf) {
+ STAILQ_REMOVE_HEAD(&log_history, next);
+ log_history_size--;
+ }
}
else {
if (rte_mempool_mc_get(log_history_mp, &obj) < 0)
@@ -193,11 +195,20 @@ rte_set_log_type(uint32_t type, int enable)
rte_logs.type &= (~type);
}
+/* Get global log type */
+uint32_t
+rte_get_log_type(void)
+{
+ return rte_logs.type;
+}
+
/* get the current loglevel for the message being processed */
int rte_log_cur_msg_loglevel(void)
{
unsigned lcore_id;
lcore_id = rte_lcore_id();
+ if (lcore_id >= RTE_MAX_LCORE)
+ return rte_get_log_level();
return log_cur_msg[lcore_id].loglevel;
}
@@ -206,6 +217,8 @@ int rte_log_cur_msg_logtype(void)
{
unsigned lcore_id;
lcore_id = rte_lcore_id();
+ if (lcore_id >= RTE_MAX_LCORE)
+ return rte_get_log_type();
return log_cur_msg[lcore_id].logtype;
}
@@ -224,6 +237,7 @@ rte_log_dump_history(FILE *out)
rte_spinlock_lock(&log_list_lock);
tmp_log_history = log_history;
STAILQ_INIT(&log_history);
+ log_history_size = 0;
rte_spinlock_unlock(&log_list_lock);
for (i=0; i<RTE_LOG_HISTORY; i++) {
@@ -255,18 +269,21 @@ rte_log_dump_history(FILE *out)
* defined by the previous call to rte_openlog_stream().
*/
int
-rte_vlog(__attribute__((unused)) uint32_t level,
- __attribute__((unused)) uint32_t logtype,
- const char *format, va_list ap)
+rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap)
{
int ret;
FILE *f = rte_logs.file;
unsigned lcore_id;
+ if ((level > rte_logs.level) || !(logtype & rte_logs.type))
+ return 0;
+
/* save loglevel and logtype in a global per-lcore variable */
lcore_id = rte_lcore_id();
- log_cur_msg[lcore_id].loglevel = level;
- log_cur_msg[lcore_id].logtype = logtype;
+ if (lcore_id < RTE_MAX_LCORE) {
+ log_cur_msg[lcore_id].loglevel = level;
+ log_cur_msg[lcore_id].logtype = logtype;
+ }
ret = vfprintf(f, format, ap);
fflush(f);
@@ -276,6 +293,7 @@ rte_vlog(__attribute__((unused)) uint32_t level,
/*
 * Generates a log message. The message will be sent in the stream
* defined by the previous call to rte_openlog_stream().
+ * No need to check level here, done by rte_vlog().
*/
int
rte_log(uint32_t level, uint32_t logtype, const char *format, ...)
@@ -317,4 +335,3 @@ rte_eal_common_log_init(FILE *default_log)
rte_openlog_stream(default_log);
return 0;
}
-
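
The filter added at the top of rte_vlog() drops a message unless its level is within the global level and its type bit is enabled in the global type mask. A scalar sketch of that check with illustrative values (not DPDK's defaults):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t log_level = 5;
    static uint32_t log_type = 0x3; /* bits for two enabled subsystems */

    /* Mirrors: if ((level > rte_logs.level) || !(logtype & rte_logs.type))
     *              return 0; */
    static int
    log_enabled(uint32_t level, uint32_t logtype)
    {
        return !(level > log_level) && (logtype & log_type);
    }

    int main(void)
    {
        printf("%d\n", log_enabled(4, 0x1)); /* 1: passes both checks */
        printf("%d\n", log_enabled(7, 0x1)); /* 0: level too verbose */
        printf("%d\n", log_enabled(4, 0x4)); /* 0: type masked out */
        return 0;
    }
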
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_memory.c b/src/dpdk22/lib/librte_eal/common/eal_common_memory.c
index 77830f80..b6475737 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_common_memory.c
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_memory.c
@@ -40,12 +40,12 @@
#include <rte_memory.h>
#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_log.h>
#include "eal_private.h"
+#include "eal_internal_cfg.h"
/*
* Return a pointer to a read-only table of struct rte_physmem_desc
@@ -70,7 +70,7 @@ rte_eal_get_physmem_size(void)
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
- for (i=0; i<RTE_MAX_MEMSEG; i++) {
+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
if (mcfg->memseg[i].addr == NULL)
break;
@@ -90,7 +90,7 @@ rte_dump_physmem_layout(FILE *f)
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
- for (i=0; i<RTE_MAX_MEMSEG; i++) {
+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
if (mcfg->memseg[i].addr == NULL)
break;
@@ -119,3 +119,36 @@ unsigned rte_memory_get_nrank(void)
{
return rte_eal_get_configuration()->mem_config->nrank;
}
+
+static int
+rte_eal_memdevice_init(void)
+{
+ struct rte_config *config;
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ return 0;
+
+ config = rte_eal_get_configuration();
+ config->mem_config->nchannel = internal_config.force_nchannel;
+ config->mem_config->nrank = internal_config.force_nrank;
+
+ return 0;
+}
+
+/* init memory subsystem */
+int
+rte_eal_memory_init(void)
+{
+ RTE_LOG(INFO, EAL, "Setting up physically contiguous memory...\n");
+
+ const int retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
+ rte_eal_hugepage_init() :
+ rte_eal_hugepage_attach();
+ if (retval < 0)
+ return -1;
+
+ if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
+ return -1;
+
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_memzone.c b/src/dpdk22/lib/librte_eal/common/eal_common_memzone.c
index b5a5d727..febc56b0 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_common_memzone.c
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_memzone.c
@@ -43,7 +43,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
@@ -51,15 +50,15 @@
#include <rte_string_fns.h>
#include <rte_common.h>
+#include "malloc_heap.h"
+#include "malloc_elem.h"
#include "eal_private.h"
-/* internal copy of free memory segments */
-static struct rte_memseg *free_memseg = NULL;
-
static inline const struct rte_memzone *
memzone_lookup_thread_unsafe(const char *name)
{
const struct rte_mem_config *mcfg;
+ const struct rte_memzone *mz;
unsigned i = 0;
/* get pointer to global configuration */
@@ -69,59 +68,58 @@ memzone_lookup_thread_unsafe(const char *name)
* the algorithm is not optimal (linear), but there are few
* zones and this function should be called at init only
*/
- for (i = 0; i < RTE_MAX_MEMZONE && mcfg->memzone[i].addr != NULL; i++) {
- if (!strncmp(name, mcfg->memzone[i].name, RTE_MEMZONE_NAMESIZE))
+ for (i = 0; i < RTE_MAX_MEMZONE; i++) {
+ mz = &mcfg->memzone[i];
+ if (mz->addr != NULL && !strncmp(name, mz->name, RTE_MEMZONE_NAMESIZE))
return &mcfg->memzone[i];
}
return NULL;
}
-/*
- * Return a pointer to a correctly filled memzone descriptor. If the
- * allocation cannot be done, return NULL.
- */
-const struct rte_memzone *
-rte_memzone_reserve(const char *name, size_t len, int socket_id,
- unsigned flags)
+static inline struct rte_memzone *
+get_next_free_memzone(void)
{
- return rte_memzone_reserve_aligned(name,
- len, socket_id, flags, RTE_CACHE_LINE_SIZE);
-}
+ struct rte_mem_config *mcfg;
+ unsigned i = 0;
-/*
- * Helper function for memzone_reserve_aligned_thread_unsafe().
- * Calculate address offset from the start of the segment.
- * Align offset in that way that it satisfy istart alignmnet and
- * buffer of the requested length would not cross specified boundary.
- */
-static inline phys_addr_t
-align_phys_boundary(const struct rte_memseg *ms, size_t len, size_t align,
- size_t bound)
-{
- phys_addr_t addr_offset, bmask, end, start;
- size_t step;
+ /* get pointer to global configuration */
+ mcfg = rte_eal_get_configuration()->mem_config;
- step = RTE_MAX(align, bound);
- bmask = ~((phys_addr_t)bound - 1);
+ for (i = 0; i < RTE_MAX_MEMZONE; i++) {
+ if (mcfg->memzone[i].addr == NULL)
+ return &mcfg->memzone[i];
+ }
- /* calculate offset to closest alignment */
- start = RTE_ALIGN_CEIL(ms->phys_addr, align);
- addr_offset = start - ms->phys_addr;
+ return NULL;
+}
- while (addr_offset + len < ms->len) {
+/* Return the length of the greatest free block in the heap of the
+ * specified socket. If no socket is specified (SOCKET_ID_ANY), search
+ * all heaps and report the chosen socket back through *s. */
+static size_t
+find_heap_max_free_elem(int *s, unsigned align)
+{
+ struct rte_mem_config *mcfg;
+ struct rte_malloc_socket_stats stats;
+ int i, socket = *s;
+ size_t len = 0;
- /* check, do we meet boundary condition */
- end = start + len - (len != 0);
- if ((start & bmask) == (end & bmask))
- break;
+ /* get pointer to global configuration */
+ mcfg = rte_eal_get_configuration()->mem_config;
- /* calculate next offset */
- start = RTE_ALIGN_CEIL(start + 1, step);
- addr_offset = start - ms->phys_addr;
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
+ if ((socket != SOCKET_ID_ANY) && (socket != i))
+ continue;
+
+ malloc_heap_get_stats(&mcfg->malloc_heaps[i], &stats);
+ if (stats.greatest_free_size > len) {
+ len = stats.greatest_free_size;
+ *s = i;
+ }
}
- return (addr_offset);
+ return (len - MALLOC_ELEM_OVERHEAD - align);
}
static const struct rte_memzone *
@@ -129,19 +127,14 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
int socket_id, unsigned flags, unsigned align, unsigned bound)
{
struct rte_mem_config *mcfg;
- unsigned i = 0;
- int memseg_idx = -1;
- uint64_t addr_offset, seg_offset = 0;
size_t requested_len;
- size_t memseg_len = 0;
- phys_addr_t memseg_physaddr;
- void *memseg_addr;
+ int socket, i;
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
/* no more room in config */
- if (mcfg->memzone_idx >= RTE_MAX_MEMZONE) {
+ if (mcfg->memzone_cnt >= RTE_MAX_MEMZONE) {
RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
rte_errno = ENOSPC;
return NULL;
@@ -156,7 +149,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
}
/* if alignment is not a power of two */
- if (!rte_is_power_of_2(align)) {
+ if (align && !rte_is_power_of_2(align)) {
RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
align);
rte_errno = EINVAL;
@@ -167,7 +160,6 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
if (align < RTE_CACHE_LINE_SIZE)
align = RTE_CACHE_LINE_SIZE;
-
/* align length on cache boundary. Check for overflow before doing so */
if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) {
rte_errno = EINVAL; /* requested size too big */
@@ -181,158 +173,93 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len);
/* check that boundary condition is valid */
- if (bound != 0 &&
- (requested_len > bound || !rte_is_power_of_2(bound))) {
+ if (bound != 0 && (requested_len > bound || !rte_is_power_of_2(bound))) {
rte_errno = EINVAL;
return NULL;
}
- /* find the smallest segment matching requirements */
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- /* last segment */
- if (free_memseg[i].addr == NULL)
- break;
-
- /* empty segment, skip it */
- if (free_memseg[i].len == 0)
- continue;
-
- /* bad socket ID */
- if (socket_id != SOCKET_ID_ANY &&
- free_memseg[i].socket_id != SOCKET_ID_ANY &&
- socket_id != free_memseg[i].socket_id)
- continue;
-
- /*
- * calculate offset to closest alignment that
- * meets boundary conditions.
- */
- addr_offset = align_phys_boundary(free_memseg + i,
- requested_len, align, bound);
+ if ((socket_id != SOCKET_ID_ANY) && (socket_id >= RTE_MAX_NUMA_NODES)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
- /* check len */
- if ((requested_len + addr_offset) > free_memseg[i].len)
- continue;
+ if (!rte_eal_has_hugepages())
+ socket_id = SOCKET_ID_ANY;
- /* check flags for hugepage sizes */
- if ((flags & RTE_MEMZONE_2MB) &&
- free_memseg[i].hugepage_sz == RTE_PGSIZE_1G)
- continue;
- if ((flags & RTE_MEMZONE_1GB) &&
- free_memseg[i].hugepage_sz == RTE_PGSIZE_2M)
- continue;
- if ((flags & RTE_MEMZONE_16MB) &&
- free_memseg[i].hugepage_sz == RTE_PGSIZE_16G)
- continue;
- if ((flags & RTE_MEMZONE_16GB) &&
- free_memseg[i].hugepage_sz == RTE_PGSIZE_16M)
- continue;
+ if (len == 0) {
+ if (bound != 0)
+ requested_len = bound;
+ else
+ requested_len = find_heap_max_free_elem(&socket_id, align);
+ }
- /* this segment is the best until now */
- if (memseg_idx == -1) {
- memseg_idx = i;
- memseg_len = free_memseg[i].len;
- seg_offset = addr_offset;
- }
- /* find the biggest contiguous zone */
- else if (len == 0) {
- if (free_memseg[i].len > memseg_len) {
- memseg_idx = i;
- memseg_len = free_memseg[i].len;
- seg_offset = addr_offset;
- }
- }
- /*
- * find the smallest (we already checked that current
- * zone length is > len
- */
- else if (free_memseg[i].len + align < memseg_len ||
- (free_memseg[i].len <= memseg_len + align &&
- addr_offset < seg_offset)) {
- memseg_idx = i;
- memseg_len = free_memseg[i].len;
- seg_offset = addr_offset;
+ if (socket_id == SOCKET_ID_ANY)
+ socket = malloc_get_numa_socket();
+ else
+ socket = socket_id;
+
+ /* allocate memory on heap */
+ void *mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[socket], NULL,
+ requested_len, flags, align, bound);
+
+ if ((mz_addr == NULL) && (socket_id == SOCKET_ID_ANY)) {
+ /* try other heaps */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
+ if (socket == i)
+ continue;
+
+ mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[i],
+ NULL, requested_len, flags, align, bound);
+ if (mz_addr != NULL)
+ break;
}
}
- /* no segment found */
- if (memseg_idx == -1) {
- /*
- * If RTE_MEMZONE_SIZE_HINT_ONLY flag is specified,
- * try allocating again without the size parameter otherwise -fail.
- */
- if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY) &&
- ((flags & RTE_MEMZONE_1GB) || (flags & RTE_MEMZONE_2MB)
- || (flags & RTE_MEMZONE_16MB) || (flags & RTE_MEMZONE_16GB)))
- return memzone_reserve_aligned_thread_unsafe(name,
- len, socket_id, 0, align, bound);
-
+ if (mz_addr == NULL) {
rte_errno = ENOMEM;
return NULL;
}
- /* save aligned physical and virtual addresses */
- memseg_physaddr = free_memseg[memseg_idx].phys_addr + seg_offset;
- memseg_addr = RTE_PTR_ADD(free_memseg[memseg_idx].addr,
- (uintptr_t) seg_offset);
+ const struct malloc_elem *elem = malloc_elem_from_data(mz_addr);
- /* if we are looking for a biggest memzone */
- if (len == 0) {
- if (bound == 0)
- requested_len = memseg_len - seg_offset;
- else
- requested_len = RTE_ALIGN_CEIL(memseg_physaddr + 1,
- bound) - memseg_physaddr;
- }
-
- /* set length to correct value */
- len = (size_t)seg_offset + requested_len;
+ /* fill the zone in config */
+ struct rte_memzone *mz = get_next_free_memzone();
- /* update our internal state */
- free_memseg[memseg_idx].len -= len;
- free_memseg[memseg_idx].phys_addr += len;
- free_memseg[memseg_idx].addr =
- (char *)free_memseg[memseg_idx].addr + len;
+ if (mz == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot find free memzone but there is room "
+ "in config!\n", __func__);
+ rte_errno = ENOSPC;
+ return NULL;
+ }
- /* fill the zone in config */
- struct rte_memzone *mz = &mcfg->memzone[mcfg->memzone_idx++];
+ mcfg->memzone_cnt++;
snprintf(mz->name, sizeof(mz->name), "%s", name);
- mz->phys_addr = memseg_physaddr;
- mz->addr = memseg_addr;
- mz->len = requested_len;
- mz->hugepage_sz = free_memseg[memseg_idx].hugepage_sz;
- mz->socket_id = free_memseg[memseg_idx].socket_id;
+ mz->phys_addr = rte_malloc_virt2phy(mz_addr);
+ mz->addr = mz_addr;
+ mz->len = (requested_len == 0 ? elem->size : requested_len);
+ mz->hugepage_sz = elem->ms->hugepage_sz;
+ mz->socket_id = elem->ms->socket_id;
mz->flags = 0;
- mz->memseg_id = memseg_idx;
+ mz->memseg_id = elem->ms - rte_eal_get_configuration()->mem_config->memseg;
return mz;
}
-/*
- * Return a pointer to a correctly filled memzone descriptor (with a
- * specified alignment). If the allocation cannot be done, return NULL.
- */
-const struct rte_memzone *
-rte_memzone_reserve_aligned(const char *name, size_t len,
- int socket_id, unsigned flags, unsigned align)
+static const struct rte_memzone *
+rte_memzone_reserve_thread_safe(const char *name, size_t len,
+ int socket_id, unsigned flags, unsigned align,
+ unsigned bound)
{
struct rte_mem_config *mcfg;
const struct rte_memzone *mz = NULL;
- /* both sizes cannot be explicitly called for */
- if (((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB))
- || ((flags & RTE_MEMZONE_16MB) && (flags & RTE_MEMZONE_16GB))) {
- rte_errno = EINVAL;
- return NULL;
- }
-
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
rte_rwlock_write_lock(&mcfg->mlock);
mz = memzone_reserve_aligned_thread_unsafe(
- name, len, socket_id, flags, align, 0);
+ name, len, socket_id, flags, align, bound);
rte_rwlock_write_unlock(&mcfg->mlock);
@@ -341,36 +268,84 @@ rte_memzone_reserve_aligned(const char *name, size_t len,
/*
* Return a pointer to a correctly filled memzone descriptor (with a
- * specified alignment and boundary).
- * If the allocation cannot be done, return NULL.
+ * specified alignment and boundary). If the allocation cannot be done,
+ * return NULL.
*/
const struct rte_memzone *
-rte_memzone_reserve_bounded(const char *name, size_t len,
- int socket_id, unsigned flags, unsigned align, unsigned bound)
+rte_memzone_reserve_bounded(const char *name, size_t len, int socket_id,
+ unsigned flags, unsigned align, unsigned bound)
+{
+ return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
+ align, bound);
+}
+
+/*
+ * Return a pointer to a correctly filled memzone descriptor (with a
+ * specified alignment). If the allocation cannot be done, return NULL.
+ */
+const struct rte_memzone *
+rte_memzone_reserve_aligned(const char *name, size_t len, int socket_id,
+ unsigned flags, unsigned align)
+{
+ return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
+ align, 0);
+}
+
+/*
+ * Return a pointer to a correctly filled memzone descriptor. If the
+ * allocation cannot be done, return NULL.
+ */
+const struct rte_memzone *
+rte_memzone_reserve(const char *name, size_t len, int socket_id,
+ unsigned flags)
+{
+ return rte_memzone_reserve_thread_safe(name, len, socket_id,
+ flags, RTE_CACHE_LINE_SIZE, 0);
+}
+
+int
+rte_memzone_free(const struct rte_memzone *mz)
{
struct rte_mem_config *mcfg;
- const struct rte_memzone *mz = NULL;
+ int ret = 0;
+ void *addr;
+ unsigned idx;
- /* both sizes cannot be explicitly called for */
- if (((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB))
- || ((flags & RTE_MEMZONE_16MB) && (flags & RTE_MEMZONE_16GB))) {
- rte_errno = EINVAL;
- return NULL;
- }
+ if (mz == NULL)
+ return -EINVAL;
- /* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
rte_rwlock_write_lock(&mcfg->mlock);
- mz = memzone_reserve_aligned_thread_unsafe(
- name, len, socket_id, flags, align, bound);
+ idx = ((uintptr_t)mz - (uintptr_t)mcfg->memzone);
+ idx = idx / sizeof(struct rte_memzone);
+
+ addr = mcfg->memzone[idx].addr;
+#ifdef RTE_LIBRTE_IVSHMEM
+ /*
+ * If ioremap_addr is set, it's an IVSHMEM memzone and we cannot
+ * free it.
+ */
+ if (mcfg->memzone[idx].ioremap_addr != 0)
+ ret = -EINVAL;
+#endif
+ if (addr == NULL)
+ ret = -EINVAL;
+ else if (mcfg->memzone_cnt == 0) {
+ rte_panic("%s(): memzone address not NULL but memzone_cnt is 0!\n",
+ __func__);
+ } else {
+ memset(&mcfg->memzone[idx], 0, sizeof(mcfg->memzone[idx]));
+ mcfg->memzone_cnt--;
+ }
rte_rwlock_write_unlock(&mcfg->mlock);
- return mz;
-}
+ rte_free(addr);
+ return ret;
+}
/*
* Lookup for the memzone identified by the given name
@@ -420,45 +395,6 @@ rte_memzone_dump(FILE *f)
}
/*
- * called by init: modify the free memseg list to have cache-aligned
- * addresses and cache-aligned lengths
- */
-static int
-memseg_sanitize(struct rte_memseg *memseg)
-{
- unsigned phys_align;
- unsigned virt_align;
- unsigned off;
-
- phys_align = memseg->phys_addr & RTE_CACHE_LINE_MASK;
- virt_align = (unsigned long)memseg->addr & RTE_CACHE_LINE_MASK;
-
- /*
- * sanity check: phys_addr and addr must have the same
- * alignment
- */
- if (phys_align != virt_align)
- return -1;
-
- /* memseg is really too small, don't bother with it */
- if (memseg->len < (2 * RTE_CACHE_LINE_SIZE)) {
- memseg->len = 0;
- return 0;
- }
-
- /* align start address */
- off = (RTE_CACHE_LINE_SIZE - phys_align) & RTE_CACHE_LINE_MASK;
- memseg->phys_addr += off;
- memseg->addr = (char *)memseg->addr + off;
- memseg->len -= off;
-
- /* align end address */
- memseg->len &= ~((uint64_t)RTE_CACHE_LINE_MASK);
-
- return 0;
-}
-
-/*
* Init the memzone subsystem
*/
int
@@ -466,14 +402,10 @@ rte_eal_memzone_init(void)
{
struct rte_mem_config *mcfg;
const struct rte_memseg *memseg;
- unsigned i = 0;
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
- /* mirror the runtime memsegs from config */
- free_memseg = mcfg->free_memseg;
-
/* secondary processes don't need to initialise anything */
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
return 0;
@@ -486,33 +418,13 @@ rte_eal_memzone_init(void)
rte_rwlock_write_lock(&mcfg->mlock);
- /* fill in uninitialized free_memsegs */
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- if (memseg[i].addr == NULL)
- break;
- if (free_memseg[i].addr != NULL)
- continue;
- memcpy(&free_memseg[i], &memseg[i], sizeof(struct rte_memseg));
- }
-
- /* make all zones cache-aligned */
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- if (free_memseg[i].addr == NULL)
- break;
- if (memseg_sanitize(&free_memseg[i]) < 0) {
- RTE_LOG(ERR, EAL, "%s(): Sanity check failed\n", __func__);
- rte_rwlock_write_unlock(&mcfg->mlock);
- return -1;
- }
- }
-
/* delete all zones */
- mcfg->memzone_idx = 0;
+ mcfg->memzone_cnt = 0;
memset(mcfg->memzone, 0, sizeof(mcfg->memzone));
rte_rwlock_write_unlock(&mcfg->mlock);
- return 0;
+ return rte_eal_malloc_heap_init();
}
/* Walk all reserved memory zones */
diff --git a/src/dpdk22/lib/librte_eal/common/eal_common_options.c b/src/dpdk22/lib/librte_eal/common/eal_common_options.c
new file mode 100644
index 00000000..29942ea6
--- /dev/null
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_options.c
@@ -0,0 +1,1023 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <syslog.h>
+#include <ctype.h>
+#include <limits.h>
+#include <errno.h>
+#include <getopt.h>
+#include <dlfcn.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <dirent.h>
+
+#include <rte_eal.h>
+#include <rte_log.h>
+#include <rte_lcore.h>
+#include <rte_version.h>
+#include <rte_devargs.h>
+#include <rte_memcpy.h>
+
+#include "eal_internal_cfg.h"
+#include "eal_options.h"
+#include "eal_filesystem.h"
+
+#define BITS_PER_HEX 4
+
+const char
+eal_short_options[] =
+ "b:" /* pci-blacklist */
+ "c:" /* coremask */
+ "d:" /* driver */
+ "h" /* help */
+ "l:" /* corelist */
+ "m:" /* memory size */
+ "n:" /* memory channels */
+ "r:" /* memory ranks */
+ "v" /* version */
+ "w:" /* pci-whitelist */
+ ;
+
+const struct option
+eal_long_options[] = {
+ {OPT_BASE_VIRTADDR, 1, NULL, OPT_BASE_VIRTADDR_NUM },
+ {OPT_CREATE_UIO_DEV, 0, NULL, OPT_CREATE_UIO_DEV_NUM },
+ {OPT_FILE_PREFIX, 1, NULL, OPT_FILE_PREFIX_NUM },
+ {OPT_HELP, 0, NULL, OPT_HELP_NUM },
+ {OPT_HUGE_DIR, 1, NULL, OPT_HUGE_DIR_NUM },
+ {OPT_HUGE_UNLINK, 0, NULL, OPT_HUGE_UNLINK_NUM },
+ {OPT_LCORES, 1, NULL, OPT_LCORES_NUM },
+ {OPT_LOG_LEVEL, 1, NULL, OPT_LOG_LEVEL_NUM },
+ {OPT_MASTER_LCORE, 1, NULL, OPT_MASTER_LCORE_NUM },
+ {OPT_NO_HPET, 0, NULL, OPT_NO_HPET_NUM },
+ {OPT_NO_HUGE, 0, NULL, OPT_NO_HUGE_NUM },
+ {OPT_NO_PCI, 0, NULL, OPT_NO_PCI_NUM },
+ {OPT_NO_SHCONF, 0, NULL, OPT_NO_SHCONF_NUM },
+ {OPT_PCI_BLACKLIST, 1, NULL, OPT_PCI_BLACKLIST_NUM },
+ {OPT_PCI_WHITELIST, 1, NULL, OPT_PCI_WHITELIST_NUM },
+ {OPT_PROC_TYPE, 1, NULL, OPT_PROC_TYPE_NUM },
+ {OPT_SOCKET_MEM, 1, NULL, OPT_SOCKET_MEM_NUM },
+ {OPT_SYSLOG, 1, NULL, OPT_SYSLOG_NUM },
+ {OPT_VDEV, 1, NULL, OPT_VDEV_NUM },
+ {OPT_VFIO_INTR, 1, NULL, OPT_VFIO_INTR_NUM },
+ {OPT_VMWARE_TSC_MAP, 0, NULL, OPT_VMWARE_TSC_MAP_NUM },
+ {OPT_XEN_DOM0, 0, NULL, OPT_XEN_DOM0_NUM },
+ {0, 0, NULL, 0 }
+};
+
+TAILQ_HEAD(shared_driver_list, shared_driver);
+
+/* Definition for shared object drivers. */
+struct shared_driver {
+ TAILQ_ENTRY(shared_driver) next;
+
+ char name[PATH_MAX];
+ void* lib_handle;
+};
+
+/* List of external loadable drivers */
+static struct shared_driver_list solib_list =
+TAILQ_HEAD_INITIALIZER(solib_list);
+
+/* Default path of external loadable drivers */
+static const char *default_solib_dir = RTE_EAL_PMD_PATH;
+
+static int master_lcore_parsed;
+static int mem_parsed;
+
+void
+eal_reset_internal_config(struct internal_config *internal_cfg)
+{
+ int i;
+
+ internal_cfg->memory = 0;
+ internal_cfg->force_nrank = 0;
+ internal_cfg->force_nchannel = 0;
+ internal_cfg->hugefile_prefix = HUGEFILE_PREFIX_DEFAULT;
+ internal_cfg->hugepage_dir = NULL;
+ internal_cfg->force_sockets = 0;
+ /* zero out the NUMA config */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ internal_cfg->socket_mem[i] = 0;
+ /* zero out hugedir descriptors */
+ for (i = 0; i < MAX_HUGEPAGE_SIZES; i++)
+ internal_cfg->hugepage_info[i].lock_descriptor = -1;
+ internal_cfg->base_virtaddr = 0;
+
+ internal_cfg->syslog_facility = LOG_DAEMON;
+ /* default value from build option */
+ internal_cfg->log_level = RTE_LOG_LEVEL;
+
+ internal_cfg->xen_dom0_support = 0;
+
+ /* if set to NONE, interrupt mode is determined automatically */
+ internal_cfg->vfio_intr_mode = RTE_INTR_MODE_NONE;
+
+#ifdef RTE_LIBEAL_USE_HPET
+ internal_cfg->no_hpet = 0;
+#else
+ internal_cfg->no_hpet = 1;
+#endif
+ internal_cfg->vmware_tsc_map = 0;
+ internal_cfg->create_uio_dev = 0;
+}
+
+static int
+eal_plugin_add(const char *path)
+{
+ struct shared_driver *solib;
+
+ solib = malloc(sizeof(*solib));
+ if (solib == NULL) {
+ RTE_LOG(ERR, EAL, "malloc(solib) failed\n");
+ return -1;
+ }
+ memset(solib, 0, sizeof(*solib));
+ strncpy(solib->name, path, PATH_MAX-1);
+ solib->name[PATH_MAX-1] = 0;
+ TAILQ_INSERT_TAIL(&solib_list, solib, next);
+
+ return 0;
+}
+
+static int
+eal_plugindir_init(const char *path)
+{
+ DIR *d = NULL;
+ struct dirent *dent = NULL;
+ char sopath[PATH_MAX];
+
+ if (path == NULL || *path == '\0')
+ return 0;
+
+ d = opendir(path);
+ if (d == NULL) {
+ RTE_LOG(ERR, EAL, "failed to open directory %s: %s\n",
+ path, strerror(errno));
+ return -1;
+ }
+
+ while ((dent = readdir(d)) != NULL) {
+ struct stat sb;
+
+ snprintf(sopath, PATH_MAX-1, "%s/%s", path, dent->d_name);
+ sopath[PATH_MAX-1] = 0;
+
+ if (!(stat(sopath, &sb) == 0 && S_ISREG(sb.st_mode)))
+ continue;
+
+ if (eal_plugin_add(sopath) == -1)
+ break;
+ }
+
+ closedir(d);
+ /* XXX this ignores failures from readdir() itself */
+ return (dent == NULL) ? 0 : -1;
+}
+
+int
+eal_plugins_init(void)
+{
+ struct shared_driver *solib = NULL;
+
+ if (*default_solib_dir != '\0')
+ eal_plugin_add(default_solib_dir);
+
+ TAILQ_FOREACH(solib, &solib_list, next) {
+ struct stat sb;
+
+ if (stat(solib->name, &sb) == 0 && S_ISDIR(sb.st_mode)) {
+ if (eal_plugindir_init(solib->name) == -1) {
+ RTE_LOG(ERR, EAL,
+ "Cannot init plugin directory %s\n",
+ solib->name);
+ return -1;
+ }
+ } else {
+ RTE_LOG(DEBUG, EAL, "open shared lib %s\n",
+ solib->name);
+ solib->lib_handle = dlopen(solib->name, RTLD_NOW);
+ if (solib->lib_handle == NULL) {
+ RTE_LOG(ERR, EAL, "%s\n", dlerror());
+ return -1;
+ }
+ }
+
+ }
+ return 0;
+}
+
+/*
+ * Parse the coremask given as argument (hexadecimal string) and fill
+ * the global configuration (core role and core count) with the parsed
+ * value.
+ */
+static int xdigit2val(unsigned char c)
+{
+ int val;
+
+ if (isdigit(c))
+ val = c - '0';
+ else if (isupper(c))
+ val = c - 'A' + 10;
+ else
+ val = c - 'a' + 10;
+ return val;
+}
+
+static int
+eal_parse_coremask(const char *coremask)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+ int i, j, idx = 0;
+ unsigned count = 0;
+ char c;
+ int val;
+
+ if (coremask == NULL)
+ return -1;
+ /* Remove leading and trailing blank characters.
+ * Remove the 0x/0X prefix if present.
+ */
+ while (isblank(*coremask))
+ coremask++;
+ if (coremask[0] == '0' && ((coremask[1] == 'x')
+ || (coremask[1] == 'X')))
+ coremask += 2;
+ i = strlen(coremask);
+ while ((i > 0) && isblank(coremask[i - 1]))
+ i--;
+ if (i == 0)
+ return -1;
+
+ for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
+ c = coremask[i];
+ if (isxdigit(c) == 0) {
+ /* invalid characters */
+ return -1;
+ }
+ val = xdigit2val(c);
+ for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE; j++, idx++)
+ {
+ if ((1 << j) & val) {
+ if (!lcore_config[idx].detected) {
+ RTE_LOG(ERR, EAL, "lcore %u "
+ "unavailable\n", idx);
+ return -1;
+ }
+ cfg->lcore_role[idx] = ROLE_RTE;
+ lcore_config[idx].core_index = count;
+ count++;
+ } else {
+ cfg->lcore_role[idx] = ROLE_OFF;
+ lcore_config[idx].core_index = -1;
+ }
+ }
+ }
+ for (; i >= 0; i--)
+ if (coremask[i] != '0')
+ return -1;
+ for (; idx < RTE_MAX_LCORE; idx++) {
+ cfg->lcore_role[idx] = ROLE_OFF;
+ lcore_config[idx].core_index = -1;
+ }
+ if (count == 0)
+ return -1;
+ /* Update the count of enabled logical cores of the EAL configuration */
+ cfg->lcore_count = count;
+ return 0;
+}
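
To see the nibble walk above in action, a standalone sketch using only libc; the mask value is arbitrary:

#include <stdio.h>

int
main(void)
{
    const char *coremask = "a3"; /* arbitrary mask, 0x prefix removed */
    int idx = 0;

    for (int i = 1; i >= 0; i--) {
        char c = coremask[i];
        int val = (c <= '9') ? c - '0' : (c | 0x20) - 'a' + 10;

        /* BITS_PER_HEX == 4: each hex digit covers four lcores */
        for (int j = 0; j < 4; j++, idx++)
            if (val & (1 << j))
                printf("lcore %d enabled\n", idx);
    }
    return 0; /* 0xa3 = 0b10100011 -> lcores 0, 1, 5 and 7 */
}
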
+
+static int
+eal_parse_corelist(const char *corelist)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+ int i, idx = 0;
+ unsigned count = 0;
+ char *end = NULL;
+ int min, max;
+
+ if (corelist == NULL)
+ return -1;
+
+ /* Remove leading and trailing blank characters */
+ while (isblank(*corelist))
+ corelist++;
+ i = strlen(corelist);
+ while ((i > 0) && isblank(corelist[i - 1]))
+ i--;
+
+ /* Reset config */
+ for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
+ cfg->lcore_role[idx] = ROLE_OFF;
+ lcore_config[idx].core_index = -1;
+ }
+
+ /* Get list of cores */
+ min = RTE_MAX_LCORE;
+ do {
+ while (isblank(*corelist))
+ corelist++;
+ if (*corelist == '\0')
+ return -1;
+ errno = 0;
+ idx = strtoul(corelist, &end, 10);
+ if (errno || end == NULL)
+ return -1;
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ min = idx;
+ } else if ((*end == ',') || (*end == '\0')) {
+ max = idx;
+ if (min == RTE_MAX_LCORE)
+ min = idx;
+ for (idx = min; idx <= max; idx++) {
+ if (cfg->lcore_role[idx] != ROLE_RTE) {
+ cfg->lcore_role[idx] = ROLE_RTE;
+ lcore_config[idx].core_index = count;
+ count++;
+ }
+ }
+ min = RTE_MAX_LCORE;
+ } else
+ return -1;
+ corelist = end + 1;
+ } while (*end != '\0');
+
+ if (count == 0)
+ return -1;
+
+ /* Update the count of enabled logical cores of the EAL configuration */
+ cfg->lcore_count = count;
+
+ return 0;
+}
+
+/* Changes the lcore id of the master thread */
+static int
+eal_parse_master_lcore(const char *arg)
+{
+ char *parsing_end;
+ struct rte_config *cfg = rte_eal_get_configuration();
+
+ errno = 0;
+ cfg->master_lcore = (uint32_t) strtol(arg, &parsing_end, 0);
+ if (errno || parsing_end[0] != 0)
+ return -1;
+ if (cfg->master_lcore >= RTE_MAX_LCORE)
+ return -1;
+ master_lcore_parsed = 1;
+ return 0;
+}
+
+/*
+ * Parse elem; an elem can be a single number/range or a '(' ')' group:
+ * 1) A single number elem is just a simple digit, e.g. 9
+ * 2) A single range elem is two numbers with a '-' between them, e.g. 2-6
+ * 3) A group elem combines multiple 1) or 2) with '( )', e.g. (0,2-4,6)
+ * Within a group elem, '-' is used as the range separator and
+ * ',' as the single-number separator.
+ */
+static int
+eal_parse_set(const char *input, uint16_t set[], unsigned num)
+{
+ unsigned idx;
+ const char *str = input;
+ char *end = NULL;
+ unsigned min, max;
+
+ memset(set, 0, num * sizeof(uint16_t));
+
+ while (isblank(*str))
+ str++;
+
+ /* only a digit or a left bracket qualifies as a start point */
+ if ((!isdigit(*str) && *str != '(') || *str == '\0')
+ return -1;
+
+ /* process a single number or a single range of numbers */
+ if (*str != '(') {
+ errno = 0;
+ idx = strtoul(str, &end, 10);
+ if (errno || end == NULL || idx >= num)
+ return -1;
+ else {
+ while (isblank(*end))
+ end++;
+
+ min = idx;
+ max = idx;
+ if (*end == '-') {
+ /* process single <number>-<number> */
+ end++;
+ while (isblank(*end))
+ end++;
+ if (!isdigit(*end))
+ return -1;
+
+ errno = 0;
+ idx = strtoul(end, &end, 10);
+ if (errno || end == NULL || idx >= num)
+ return -1;
+ max = idx;
+ while (isblank(*end))
+ end++;
+ if (*end != ',' && *end != '\0')
+ return -1;
+ }
+
+ if (*end != ',' && *end != '\0' &&
+ *end != '@')
+ return -1;
+
+ for (idx = RTE_MIN(min, max);
+ idx <= RTE_MAX(min, max); idx++)
+ set[idx] = 1;
+
+ return end - input;
+ }
+ }
+
+ /* process set within bracket */
+ str++;
+ while (isblank(*str))
+ str++;
+ if (*str == '\0')
+ return -1;
+
+ min = RTE_MAX_LCORE;
+ do {
+
+ /* go ahead to the first digit */
+ while (isblank(*str))
+ str++;
+ if (!isdigit(*str))
+ return -1;
+
+ /* get the digit value */
+ errno = 0;
+ idx = strtoul(str, &end, 10);
+ if (errno || end == NULL || idx >= num)
+ return -1;
+
+ /* go ahead to separator '-',',' and ')' */
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ if (min == RTE_MAX_LCORE)
+ min = idx;
+ else /* avoid continuous '-' */
+ return -1;
+ } else if ((*end == ',') || (*end == ')')) {
+ max = idx;
+ if (min == RTE_MAX_LCORE)
+ min = idx;
+ for (idx = RTE_MIN(min, max);
+ idx <= RTE_MAX(min, max); idx++)
+ set[idx] = 1;
+
+ min = RTE_MAX_LCORE;
+ } else
+ return -1;
+
+ str = end + 1;
+ } while (*end != '\0' && *end != ')');
+
+ return str - input;
+}
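
A concrete trace of the set grammar above; this harness is hypothetical and would have to sit in this file, since eal_parse_set() is static:

#include <assert.h>

static void
example_parse_set_trace(void)
{
    static uint16_t set[RTE_MAX_LCORE];
    int off = eal_parse_set("(0,2-4,6)", set, RTE_DIM(set));

    /* nine characters consumed, i.e. the cursor sits past ')' */
    assert(off == 9);
    /* set[0], set[2], set[3], set[4] and set[6] are 1, the rest 0 */
    assert(set[0] && set[2] && set[3] && set[4] && set[6] && !set[1]);
}
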
+
+/* convert from set array to cpuset bitmap */
+static int
+convert_to_cpuset(rte_cpuset_t *cpusetp,
+ uint16_t *set, unsigned num)
+{
+ unsigned idx;
+
+ CPU_ZERO(cpusetp);
+
+ for (idx = 0; idx < num; idx++) {
+ if (!set[idx])
+ continue;
+
+ if (!lcore_config[idx].detected) {
+ RTE_LOG(ERR, EAL, "core %u "
+ "unavailable\n", idx);
+ return -1;
+ }
+
+ CPU_SET(idx, cpusetp);
+ }
+
+ return 0;
+}
+
+/*
+ * The format pattern: --lcores='<lcores[@cpus]>[<,lcores[@cpus]>...]'
+ * lcores and cpus can each be a single digit/range or a group.
+ * '(' and ')' are necessary if it's a group.
+ * If '@cpus' is not supplied, cpus defaults to the same value as lcores.
+ * e.g. '1,2@(5-7),(3-5)@(0,2),(0,6),7-8' means starting 9 EAL threads as below
+ * lcore 0 runs on cpuset 0x41 (cpu 0,6)
+ * lcore 1 runs on cpuset 0x2 (cpu 1)
+ * lcore 2 runs on cpuset 0xe0 (cpu 5,6,7)
+ * lcores 3,4,5 run on cpuset 0x5 (cpu 0,2)
+ * lcore 6 runs on cpuset 0x41 (cpu 0,6)
+ * lcore 7 runs on cpuset 0x80 (cpu 7)
+ * lcore 8 runs on cpuset 0x100 (cpu 8)
+ */
+static int
+eal_parse_lcores(const char *lcores)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+ static uint16_t set[RTE_MAX_LCORE];
+ unsigned idx = 0;
+ int i;
+ unsigned count = 0;
+ const char *lcore_start = NULL;
+ const char *end = NULL;
+ int offset;
+ rte_cpuset_t cpuset;
+ int lflags = 0;
+ int ret = -1;
+
+ if (lcores == NULL)
+ return -1;
+
+ /* Remove leading and trailing blank characters */
+ while (isblank(*lcores))
+ lcores++;
+ i = strlen(lcores);
+ while ((i > 0) && isblank(lcores[i - 1]))
+ i--;
+
+ CPU_ZERO(&cpuset);
+
+ /* Reset lcore config */
+ for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
+ cfg->lcore_role[idx] = ROLE_OFF;
+ lcore_config[idx].core_index = -1;
+ CPU_ZERO(&lcore_config[idx].cpuset);
+ }
+
+ /* Get list of cores */
+ do {
+ while (isblank(*lcores))
+ lcores++;
+ if (*lcores == '\0')
+ goto err;
+
+ /* record lcore_set start point */
+ lcore_start = lcores;
+
+ /* go across a complete bracket */
+ if (*lcore_start == '(') {
+ lcores += strcspn(lcores, ")");
+ if (*lcores++ == '\0')
+ goto err;
+ }
+
+ /* scan the separator '@', ','(next) or '\0'(finish) */
+ lcores += strcspn(lcores, "@,");
+
+ if (*lcores == '@') {
+ /* explicitly assign the cpu_set */
+ offset = eal_parse_set(lcores + 1, set, RTE_DIM(set));
+ if (offset < 0)
+ goto err;
+
+ /* prepare cpu_set and update the end cursor */
+ if (0 > convert_to_cpuset(&cpuset,
+ set, RTE_DIM(set)))
+ goto err;
+ end = lcores + 1 + offset;
+ } else { /* ',' or '\0' */
+ /* no cpu_set given, current iteration done */
+ end = lcores;
+
+ /* go back to check <number>-<number> */
+ offset = strcspn(lcore_start, "(-");
+ if (offset < (end - lcore_start) &&
+ *(lcore_start + offset) != '(')
+ lflags = 1;
+ }
+
+ if (*end != ',' && *end != '\0')
+ goto err;
+
+ /* parse lcore_set from start point */
+ if (0 > eal_parse_set(lcore_start, set, RTE_DIM(set)))
+ goto err;
+
+ /* without '@', by default using lcore_set as cpu_set */
+ if (*lcores != '@' &&
+ 0 > convert_to_cpuset(&cpuset, set, RTE_DIM(set)))
+ goto err;
+
+ /* start to update lcore_set */
+ for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
+ if (!set[idx])
+ continue;
+
+ if (cfg->lcore_role[idx] != ROLE_RTE) {
+ lcore_config[idx].core_index = count;
+ cfg->lcore_role[idx] = ROLE_RTE;
+ count++;
+ }
+
+ if (lflags) {
+ CPU_ZERO(&cpuset);
+ CPU_SET(idx, &cpuset);
+ }
+ rte_memcpy(&lcore_config[idx].cpuset, &cpuset,
+ sizeof(rte_cpuset_t));
+ }
+
+ lcores = end + 1;
+ } while (*end != '\0');
+
+ if (count == 0)
+ goto err;
+
+ cfg->lcore_count = count;
+ ret = 0;
+
+err:
+
+ return ret;
+}
+
+static int
+eal_parse_syslog(const char *facility, struct internal_config *conf)
+{
+ int i;
+ static struct {
+ const char *name;
+ int value;
+ } map[] = {
+ { "auth", LOG_AUTH },
+ { "cron", LOG_CRON },
+ { "daemon", LOG_DAEMON },
+ { "ftp", LOG_FTP },
+ { "kern", LOG_KERN },
+ { "lpr", LOG_LPR },
+ { "mail", LOG_MAIL },
+ { "news", LOG_NEWS },
+ { "syslog", LOG_SYSLOG },
+ { "user", LOG_USER },
+ { "uucp", LOG_UUCP },
+ { "local0", LOG_LOCAL0 },
+ { "local1", LOG_LOCAL1 },
+ { "local2", LOG_LOCAL2 },
+ { "local3", LOG_LOCAL3 },
+ { "local4", LOG_LOCAL4 },
+ { "local5", LOG_LOCAL5 },
+ { "local6", LOG_LOCAL6 },
+ { "local7", LOG_LOCAL7 },
+ { NULL, 0 }
+ };
+
+ for (i = 0; map[i].name; i++) {
+ if (!strcmp(facility, map[i].name)) {
+ conf->syslog_facility = map[i].value;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static int
+eal_parse_log_level(const char *level, uint32_t *log_level)
+{
+ char *end;
+ unsigned long tmp;
+
+ errno = 0;
+ tmp = strtoul(level, &end, 0);
+
+ /* check for errors */
+ if ((errno != 0) || (level[0] == '\0') ||
+ end == NULL || (*end != '\0'))
+ return -1;
+
+ /* log_level is a uint32_t */
+ if (tmp >= UINT32_MAX)
+ return -1;
+
+ *log_level = tmp;
+ return 0;
+}
+
+static enum rte_proc_type_t
+eal_parse_proc_type(const char *arg)
+{
+ if (strncasecmp(arg, "primary", sizeof("primary")) == 0)
+ return RTE_PROC_PRIMARY;
+ if (strncasecmp(arg, "secondary", sizeof("secondary")) == 0)
+ return RTE_PROC_SECONDARY;
+ if (strncasecmp(arg, "auto", sizeof("auto")) == 0)
+ return RTE_PROC_AUTO;
+
+ return RTE_PROC_INVALID;
+}
+
+int
+eal_parse_common_option(int opt, const char *optarg,
+ struct internal_config *conf)
+{
+ switch (opt) {
+ /* blacklist */
+ case 'b':
+ if (rte_eal_devargs_add(RTE_DEVTYPE_BLACKLISTED_PCI,
+ optarg) < 0) {
+ return -1;
+ }
+ break;
+ /* whitelist */
+ case 'w':
+ if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI,
+ optarg) < 0) {
+ return -1;
+ }
+ break;
+ /* coremask */
+ case 'c':
+ if (eal_parse_coremask(optarg) < 0) {
+ RTE_LOG(ERR, EAL, "invalid coremask\n");
+ return -1;
+ }
+ break;
+ /* corelist */
+ case 'l':
+ if (eal_parse_corelist(optarg) < 0) {
+ RTE_LOG(ERR, EAL, "invalid core list\n");
+ return -1;
+ }
+ break;
+ /* size of memory */
+ case 'm':
+ conf->memory = atoi(optarg);
+ conf->memory *= 1024ULL;
+ conf->memory *= 1024ULL;
+ mem_parsed = 1;
+ break;
+ /* force number of channels */
+ case 'n':
+ conf->force_nchannel = atoi(optarg);
+ if (conf->force_nchannel == 0 ||
+ conf->force_nchannel > 4) {
+ RTE_LOG(ERR, EAL, "invalid channel number\n");
+ return -1;
+ }
+ break;
+ /* force number of ranks */
+ case 'r':
+ conf->force_nrank = atoi(optarg);
+ if (conf->force_nrank == 0 ||
+ conf->force_nrank > 16) {
+ RTE_LOG(ERR, EAL, "invalid rank number\n");
+ return -1;
+ }
+ break;
+ /* force loading of external driver */
+ case 'd':
+ if (eal_plugin_add(optarg) == -1)
+ return -1;
+ break;
+ case 'v':
+ /* since the message is explicitly requested by the user,
+ * write it at the highest log level so it can always be
+ * seen, even if info or warning messages are disabled */
+ RTE_LOG(CRIT, EAL, "RTE Version: '%s'\n", rte_version());
+ break;
+
+ /* long options */
+ case OPT_HUGE_UNLINK_NUM:
+ conf->hugepage_unlink = 1;
+ break;
+
+ case OPT_NO_HUGE_NUM:
+ conf->no_hugetlbfs = 1;
+ break;
+
+ case OPT_NO_PCI_NUM:
+ conf->no_pci = 1;
+ break;
+
+ case OPT_NO_HPET_NUM:
+ conf->no_hpet = 1;
+ break;
+
+ case OPT_VMWARE_TSC_MAP_NUM:
+ conf->vmware_tsc_map = 1;
+ break;
+
+ case OPT_NO_SHCONF_NUM:
+ conf->no_shconf = 1;
+ break;
+
+ case OPT_PROC_TYPE_NUM:
+ conf->process_type = eal_parse_proc_type(optarg);
+ break;
+
+ case OPT_MASTER_LCORE_NUM:
+ if (eal_parse_master_lcore(optarg) < 0) {
+ RTE_LOG(ERR, EAL, "invalid parameter for --"
+ OPT_MASTER_LCORE "\n");
+ return -1;
+ }
+ break;
+
+ case OPT_VDEV_NUM:
+ if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL,
+ optarg) < 0) {
+ return -1;
+ }
+ break;
+
+ case OPT_SYSLOG_NUM:
+ if (eal_parse_syslog(optarg, conf) < 0) {
+ RTE_LOG(ERR, EAL, "invalid parameters for --"
+ OPT_SYSLOG "\n");
+ return -1;
+ }
+ break;
+
+ case OPT_LOG_LEVEL_NUM: {
+ uint32_t log;
+
+ if (eal_parse_log_level(optarg, &log) < 0) {
+ RTE_LOG(ERR, EAL,
+ "invalid parameters for --"
+ OPT_LOG_LEVEL "\n");
+ return -1;
+ }
+ conf->log_level = log;
+ break;
+ }
+ case OPT_LCORES_NUM:
+ if (eal_parse_lcores(optarg) < 0) {
+ RTE_LOG(ERR, EAL, "invalid parameter for --"
+ OPT_LCORES "\n");
+ return -1;
+ }
+ break;
+
+ /* don't know what to do, leave this to caller */
+ default:
+ return 1;
+
+ }
+
+ return 0;
+}
+
+int
+eal_adjust_config(struct internal_config *internal_cfg)
+{
+ int i;
+ struct rte_config *cfg = rte_eal_get_configuration();
+
+ if (internal_config.process_type == RTE_PROC_AUTO)
+ internal_config.process_type = eal_proc_type_detect();
+
+ /* default master lcore is the first one */
+ if (!master_lcore_parsed)
+ cfg->master_lcore = rte_get_next_lcore(-1, 0, 0);
+
+ /* if no memory amounts were requested, this will result in 0 and
+ * will be overridden later, right after eal_hugepage_info_init() */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ internal_cfg->memory += internal_cfg->socket_mem[i];
+
+ return 0;
+}
+
+int
+eal_check_common_options(struct internal_config *internal_cfg)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+
+ if (cfg->lcore_role[cfg->master_lcore] != ROLE_RTE) {
+ RTE_LOG(ERR, EAL, "Master lcore is not enabled for DPDK\n");
+ return -1;
+ }
+
+ if (internal_cfg->process_type == RTE_PROC_INVALID) {
+ RTE_LOG(ERR, EAL, "Invalid process type specified\n");
+ return -1;
+ }
+ if (index(internal_cfg->hugefile_prefix, '%') != NULL) {
+ RTE_LOG(ERR, EAL, "Invalid char, '%%', in --"OPT_FILE_PREFIX" "
+ "option\n");
+ return -1;
+ }
+ if (mem_parsed && internal_cfg->force_sockets == 1) {
+ RTE_LOG(ERR, EAL, "Options -m and --"OPT_SOCKET_MEM" cannot "
+ "be specified at the same time\n");
+ return -1;
+ }
+ if (internal_cfg->no_hugetlbfs && internal_cfg->force_sockets == 1) {
+ RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_MEM" cannot "
+ "be specified together with --"OPT_NO_HUGE"\n");
+ return -1;
+ }
+
+ if (internal_cfg->no_hugetlbfs && internal_cfg->hugepage_unlink) {
+ RTE_LOG(ERR, EAL, "Option --"OPT_HUGE_UNLINK" cannot "
+ "be specified together with --"OPT_NO_HUGE"\n");
+ return -1;
+ }
+
+ if (rte_eal_devargs_type_count(RTE_DEVTYPE_WHITELISTED_PCI) != 0 &&
+ rte_eal_devargs_type_count(RTE_DEVTYPE_BLACKLISTED_PCI) != 0) {
+ RTE_LOG(ERR, EAL, "Options blacklist (-b) and whitelist (-w) "
+ "cannot be used at the same time\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+void
+eal_common_usage(void)
+{
+ printf("[options]\n\n"
+ "EAL common options:\n"
+ " -c COREMASK Hexadecimal bitmask of cores to run on\n"
+ " -l CORELIST List of cores to run on\n"
+ " The argument format is <c1>[-c2][,c3[-c4],...]\n"
+ " where c1, c2, etc are core indexes between 0 and %d\n"
+ " --"OPT_LCORES" COREMAP Map lcore set to physical cpu set\n"
+ " The argument format is\n"
+ " '<lcores[@cpus]>[<,lcores[@cpus]>...]'\n"
+ " lcores and cpus list are grouped by '(' and ')'\n"
+ " Within the group, '-' is used for range separator,\n"
+ " ',' is used for single number separator.\n"
+ " '( )' can be omitted for single element group,\n"
+ " '@' can be omitted if cpus and lcores have the same value\n"
+ " --"OPT_MASTER_LCORE" ID Core ID that is used as master\n"
+ " -n CHANNELS Number of memory channels\n"
+ " -m MB Memory to allocate (see also --"OPT_SOCKET_MEM")\n"
+ " -r RANKS Force number of memory ranks (don't detect)\n"
+ " -b, --"OPT_PCI_BLACKLIST" Add a PCI device in black list.\n"
+ " Prevent EAL from using this PCI device. The argument\n"
+ " format is <domain:bus:devid.func>.\n"
+ " -w, --"OPT_PCI_WHITELIST" Add a PCI device in white list.\n"
+ " Only use the specified PCI devices. The argument format\n"
+ " is <[domain:]bus:devid.func>. This option can be present\n"
+ " several times (once per device).\n"
+ " [NOTE: PCI whitelist cannot be used with -b option]\n"
+ " --"OPT_VDEV" Add a virtual device.\n"
+ " The argument format is <driver><id>[,key=val,...]\n"
+ " (ex: --vdev=eth_pcap0,iface=eth2).\n"
+ " -d LIB.so|DIR Add a driver or driver directory\n"
+ " (can be used multiple times)\n"
+ " --"OPT_VMWARE_TSC_MAP" Use VMware TSC map instead of native RDTSC\n"
+ " --"OPT_PROC_TYPE" Type of this process (primary|secondary|auto)\n"
+ " --"OPT_SYSLOG" Set syslog facility\n"
+ " --"OPT_LOG_LEVEL" Set default log level\n"
+ " -v Display version information on startup\n"
+ " -h, --help This help\n"
+ "\nEAL options for DEBUG use only:\n"
+ " --"OPT_HUGE_UNLINK" Unlink hugepage files after init\n"
+ " --"OPT_NO_HUGE" Use malloc instead of hugetlbfs\n"
+ " --"OPT_NO_PCI" Disable PCI\n"
+ " --"OPT_NO_HPET" Disable HPET\n"
+ " --"OPT_NO_SHCONF" No shared config (mmap'd files)\n"
+ "\n", RTE_MAX_LCORE);
+}
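
Putting the options together, a plausible invocation could look like the line below; the application name, PCI address and memory amounts are made up:

    ./app -c 0xf -n 4 --socket-mem 1024,1024 -w 0000:02:00.0 --proc-type auto --log-level 7
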
diff --git a/src/dpdk22/lib/librte_eal/common/eal_common_pci.c b/src/dpdk22/lib/librte_eal/common/eal_common_pci.c
new file mode 100644
index 00000000..dcfe9478
--- /dev/null
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_pci.c
@@ -0,0 +1,464 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* BSD LICENSE
+ *
+ * Copyright 2013-2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_string_fns.h>
+#include <rte_common.h>
+#include <rte_devargs.h>
+
+#include "eal_private.h"
+
+struct pci_driver_list pci_driver_list;
+struct pci_device_list pci_device_list;
+
+static struct rte_devargs *pci_devargs_lookup(struct rte_pci_device *dev)
+{
+ struct rte_devargs *devargs;
+
+ TAILQ_FOREACH(devargs, &devargs_list, next) {
+ if (devargs->type != RTE_DEVTYPE_BLACKLISTED_PCI &&
+ devargs->type != RTE_DEVTYPE_WHITELISTED_PCI)
+ continue;
+ if (!rte_eal_compare_pci_addr(&dev->addr, &devargs->pci.addr))
+ return devargs;
+ }
+ return NULL;
+}
+
+/* map a particular resource from a file */
+void *
+pci_map_resource(void *requested_addr, int fd, off_t offset, size_t size,
+ int additional_flags)
+{
+ void *mapaddr;
+
+ /* Map the PCI memory resource of device */
+ mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | additional_flags, fd, offset);
+ if (mapaddr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "%s(): cannot mmap(%d, %p, 0x%lx, 0x%lx): %s (%p)\n",
+ __func__, fd, requested_addr,
+ (unsigned long)size, (unsigned long)offset,
+ strerror(errno), mapaddr);
+ } else
+ RTE_LOG(DEBUG, EAL, " PCI memory mapped at %p\n", mapaddr);
+
+ return mapaddr;
+}
+
+/* unmap a particular resource */
+void
+pci_unmap_resource(void *requested_addr, size_t size)
+{
+ if (requested_addr == NULL)
+ return;
+
+ /* Unmap the PCI memory resource of device */
+ if (munmap(requested_addr, size)) {
+ RTE_LOG(ERR, EAL, "%s(): cannot munmap(%p, 0x%lx): %s\n",
+ __func__, requested_addr, (unsigned long)size,
+ strerror(errno));
+ } else
+ RTE_LOG(DEBUG, EAL, " PCI memory unmapped at %p\n",
+ requested_addr);
+}
+
+/*
+ * If the vendor/device IDs match, call the devinit() function of
+ * the driver.
+ */
+static int
+rte_eal_pci_probe_one_driver(struct rte_pci_driver *dr, struct rte_pci_device *dev)
+{
+ int ret;
+ const struct rte_pci_id *id_table;
+
+ for (id_table = dr->id_table; id_table->vendor_id != 0; id_table++) {
+
+ /* check if device's identifiers match the driver's ones */
+ if (id_table->vendor_id != dev->id.vendor_id &&
+ id_table->vendor_id != PCI_ANY_ID)
+ continue;
+ if (id_table->device_id != dev->id.device_id &&
+ id_table->device_id != PCI_ANY_ID)
+ continue;
+ if (id_table->subsystem_vendor_id != dev->id.subsystem_vendor_id &&
+ id_table->subsystem_vendor_id != PCI_ANY_ID)
+ continue;
+ if (id_table->subsystem_device_id != dev->id.subsystem_device_id &&
+ id_table->subsystem_device_id != PCI_ANY_ID)
+ continue;
+
+ struct rte_pci_addr *loc = &dev->addr;
+
+ RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
+ loc->domain, loc->bus, loc->devid, loc->function,
+ dev->numa_node);
+
+ RTE_LOG(DEBUG, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
+ dev->id.device_id, dr->name);
+
+ /* no initialization when blacklisted, return without error */
+ if (dev->devargs != NULL &&
+ dev->devargs->type == RTE_DEVTYPE_BLACKLISTED_PCI) {
+ RTE_LOG(DEBUG, EAL, " Device is blacklisted, not initializing\n");
+ return 1;
+ }
+
+ if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
+#ifdef RTE_PCI_CONFIG
+ /*
+ * Set PCIe config space for high performance.
+ * Return value can be ignored.
+ */
+ pci_config_space_set(dev);
+#endif
+ /* map resources for devices that use igb_uio */
+ ret = pci_map_device(dev);
+ if (ret != 0)
+ return ret;
+ } else if (dr->drv_flags & RTE_PCI_DRV_FORCE_UNBIND &&
+ rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* unbind current driver */
+ if (pci_unbind_kernel_driver(dev) < 0)
+ return -1;
+ }
+
+ /* reference driver structure */
+ dev->driver = dr;
+
+ /* call the driver devinit() function */
+ return dr->devinit(dr, dev);
+ }
+ /* return positive value if driver is not found */
+ return 1;
+}
+
+/*
+ * If the vendor/device IDs match, call the devuninit() function of
+ * the driver.
+ */
+static int
+rte_eal_pci_detach_dev(struct rte_pci_driver *dr,
+ struct rte_pci_device *dev)
+{
+ const struct rte_pci_id *id_table;
+
+ if ((dr == NULL) || (dev == NULL))
+ return -EINVAL;
+
+ for (id_table = dr->id_table; id_table->vendor_id != 0; id_table++) {
+
+ /* check if device's identifiers match the driver's ones */
+ if (id_table->vendor_id != dev->id.vendor_id &&
+ id_table->vendor_id != PCI_ANY_ID)
+ continue;
+ if (id_table->device_id != dev->id.device_id &&
+ id_table->device_id != PCI_ANY_ID)
+ continue;
+ if (id_table->subsystem_vendor_id != dev->id.subsystem_vendor_id &&
+ id_table->subsystem_vendor_id != PCI_ANY_ID)
+ continue;
+ if (id_table->subsystem_device_id != dev->id.subsystem_device_id &&
+ id_table->subsystem_device_id != PCI_ANY_ID)
+ continue;
+
+ struct rte_pci_addr *loc = &dev->addr;
+
+ RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
+ loc->domain, loc->bus, loc->devid,
+ loc->function, dev->numa_node);
+
+ RTE_LOG(DEBUG, EAL, " remove driver: %x:%x %s\n", dev->id.vendor_id,
+ dev->id.device_id, dr->name);
+
+ if (dr->devuninit && (dr->devuninit(dev) < 0))
+ return -1; /* negative value is an error */
+
+ /* clear driver structure */
+ dev->driver = NULL;
+
+ if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING)
+ /* unmap resources for devices that use igb_uio */
+ pci_unmap_device(dev);
+
+ return 0;
+ }
+
+ /* return positive value if driver is not found */
+ return 1;
+}
+
+/*
+ * If the vendor/device IDs match, call the devinit() function of every
+ * registered driver for the given device. Return -1 if initialization
+ * failed, or 1 if no driver is found for this device.
+ */
+static int
+pci_probe_all_drivers(struct rte_pci_device *dev)
+{
+ struct rte_pci_driver *dr = NULL;
+ int rc = 0;
+
+ if (dev == NULL)
+ return -1;
+
+ TAILQ_FOREACH(dr, &pci_driver_list, next) {
+ rc = rte_eal_pci_probe_one_driver(dr, dev);
+ if (rc < 0)
+ /* negative value is an error */
+ return -1;
+ if (rc > 0)
+ /* positive value means driver not found */
+ continue;
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * If the vendor/device IDs match, call the devuninit() function of every
+ * registered driver for the given device. Return -1 if the detach
+ * failed, or 1 if no driver is found for this device.
+ */
+static int
+pci_detach_all_drivers(struct rte_pci_device *dev)
+{
+ struct rte_pci_driver *dr = NULL;
+ int rc = 0;
+
+ if (dev == NULL)
+ return -1;
+
+ TAILQ_FOREACH(dr, &pci_driver_list, next) {
+ rc = rte_eal_pci_detach_dev(dr, dev);
+ if (rc < 0)
+ /* negative value is an error */
+ return -1;
+ if (rc > 0)
+ /* positive value means driver not found */
+ continue;
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Find the PCI device specified by the PCI address, then invoke the
+ * probe function of the device's driver.
+ */
+int
+rte_eal_pci_probe_one(const struct rte_pci_addr *addr)
+{
+ struct rte_pci_device *dev = NULL;
+ int ret = 0;
+
+ if (addr == NULL)
+ return -1;
+
+ TAILQ_FOREACH(dev, &pci_device_list, next) {
+ if (rte_eal_compare_pci_addr(&dev->addr, addr))
+ continue;
+
+ ret = pci_probe_all_drivers(dev);
+ if (ret < 0)
+ goto err_return;
+ return 0;
+ }
+ return -1;
+
+err_return:
+ RTE_LOG(WARNING, EAL, "Requested device " PCI_PRI_FMT
+ " cannot be used\n", dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ return -1;
+}
+
+/*
+ * Detach device specified by its pci address.
+ */
+int
+rte_eal_pci_detach(const struct rte_pci_addr *addr)
+{
+ struct rte_pci_device *dev = NULL;
+ int ret = 0;
+
+ if (addr == NULL)
+ return -1;
+
+ TAILQ_FOREACH(dev, &pci_device_list, next) {
+ if (rte_eal_compare_pci_addr(&dev->addr, addr))
+ continue;
+
+ ret = pci_detach_all_drivers(dev);
+ if (ret < 0)
+ goto err_return;
+
+ TAILQ_REMOVE(&pci_device_list, dev, next);
+ return 0;
+ }
+ return -1;
+
+err_return:
+ RTE_LOG(WARNING, EAL, "Requested device " PCI_PRI_FMT
+ " cannot be used\n", dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ return -1;
+}
+
+/*
+ * Scan the content of the PCI bus, and call the devinit() function for
+ * all registered drivers that have a matching entry in their id_table
+ * for the discovered devices.
+ */
+int
+rte_eal_pci_probe(void)
+{
+ struct rte_pci_device *dev = NULL;
+ struct rte_devargs *devargs;
+ int probe_all = 0;
+ int ret = 0;
+
+ if (rte_eal_devargs_type_count(RTE_DEVTYPE_WHITELISTED_PCI) == 0)
+ probe_all = 1;
+
+ TAILQ_FOREACH(dev, &pci_device_list, next) {
+
+ /* set devargs in PCI structure */
+ devargs = pci_devargs_lookup(dev);
+ if (devargs != NULL)
+ dev->devargs = devargs;
+
+ /* probe all or only whitelisted devices */
+ if (probe_all)
+ ret = pci_probe_all_drivers(dev);
+ else if (devargs != NULL &&
+ devargs->type == RTE_DEVTYPE_WHITELISTED_PCI)
+ ret = pci_probe_all_drivers(dev);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Requested device " PCI_PRI_FMT
+ " cannot be used\n", dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ }
+
+ return 0;
+}
+
+/* dump one device */
+static int
+pci_dump_one_device(FILE *f, struct rte_pci_device *dev)
+{
+ int i;
+
+ fprintf(f, PCI_PRI_FMT, dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ fprintf(f, " - vendor:%x device:%x\n", dev->id.vendor_id,
+ dev->id.device_id);
+
+ for (i = 0; i != sizeof(dev->mem_resource) /
+ sizeof(dev->mem_resource[0]); i++) {
+ fprintf(f, " %16.16"PRIx64" %16.16"PRIx64"\n",
+ dev->mem_resource[i].phys_addr,
+ dev->mem_resource[i].len);
+ }
+ return 0;
+}
+
+/* dump devices on the bus */
+void
+rte_eal_pci_dump(FILE *f)
+{
+ struct rte_pci_device *dev = NULL;
+
+ TAILQ_FOREACH(dev, &pci_device_list, next) {
+ pci_dump_one_device(f, dev);
+ }
+}
+
+/* register a driver */
+void
+rte_eal_pci_register(struct rte_pci_driver *driver)
+{
+ TAILQ_INSERT_TAIL(&pci_driver_list, driver, next);
+}
+
+/* unregister a driver */
+void
+rte_eal_pci_unregister(struct rte_pci_driver *driver)
+{
+ TAILQ_REMOVE(&pci_driver_list, driver, next);
+}
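
A sketch of how a poll-mode driver plugs into this registry; the dummy_* names and PCI IDs are hypothetical, while the struct fields follow rte_pci.h:

#include <rte_pci.h>

static int
dummy_devinit(struct rte_pci_driver *dr, struct rte_pci_device *dev)
{
    (void)dr; (void)dev;
    return 0; /* claim the device */
}

static const struct rte_pci_id dummy_id_table[] = {
    { .vendor_id = 0x8086, .device_id = 0x10fb, /* example IDs */
      .subsystem_vendor_id = PCI_ANY_ID,
      .subsystem_device_id = PCI_ANY_ID },
    { .vendor_id = 0 }, /* sentinel: vendor_id == 0 ends the table */
};

static struct rte_pci_driver dummy_pmd = {
    .name = "dummy_pmd",
    .devinit = dummy_devinit,
    .id_table = dummy_id_table,
    .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
};

/* called once at startup, before rte_eal_pci_probe() */
void dummy_pmd_register(void) { rte_eal_pci_register(&dummy_pmd); }
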
diff --git a/src/dpdk22/lib/librte_eal/common/eal_common_pci_uio.c b/src/dpdk22/lib/librte_eal/common/eal_common_pci_uio.c
new file mode 100644
index 00000000..f062e81d
--- /dev/null
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_pci_uio.c
@@ -0,0 +1,222 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+
+#include "eal_private.h"
+
+static struct rte_tailq_elem rte_uio_tailq = {
+ .name = "UIO_RESOURCE_LIST",
+};
+EAL_REGISTER_TAILQ(rte_uio_tailq)
+
+static int
+pci_uio_map_secondary(struct rte_pci_device *dev)
+{
+ int fd, i;
+ struct mapped_pci_resource *uio_res;
+ struct mapped_pci_res_list *uio_res_list =
+ RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+
+ TAILQ_FOREACH(uio_res, uio_res_list, next) {
+
+ /* skip this element if it doesn't match our PCI address */
+ if (rte_eal_compare_pci_addr(&uio_res->pci_addr, &dev->addr))
+ continue;
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ /*
+ * open devname, to mmap it
+ */
+ fd = open(uio_res->maps[i].path, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ uio_res->maps[i].path, strerror(errno));
+ return -1;
+ }
+
+ void *mapaddr = pci_map_resource(uio_res->maps[i].addr,
+ fd, (off_t)uio_res->maps[i].offset,
+ (size_t)uio_res->maps[i].size, 0);
+ /* fd is not needed in slave process, close it */
+ close(fd);
+ if (mapaddr != uio_res->maps[i].addr) {
+ RTE_LOG(ERR, EAL,
+ "Cannot mmap device resource file %s to address: %p\n",
+ uio_res->maps[i].path,
+ uio_res->maps[i].addr);
+ return -1;
+ }
+ }
+ return 0;
+ }
+
+ RTE_LOG(ERR, EAL, "Cannot find resource for device\n");
+ return 1;
+}
+
+/* map the PCI resource of a PCI device in virtual memory */
+int
+pci_uio_map_resource(struct rte_pci_device *dev)
+{
+ int i, map_idx = 0, ret;
+ uint64_t phaddr;
+ struct mapped_pci_resource *uio_res = NULL;
+ struct mapped_pci_res_list *uio_res_list =
+ RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.uio_cfg_fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+
+ /* secondary processes - use already recorded details */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return pci_uio_map_secondary(dev);
+
+ /* allocate uio resource */
+ ret = pci_uio_alloc_resource(dev, &uio_res);
+ if (ret)
+ return ret;
+
+ /* Map all BARs */
+ for (i = 0; i != PCI_MAX_RESOURCE; i++) {
+ /* skip empty BAR */
+ phaddr = dev->mem_resource[i].phys_addr;
+ if (phaddr == 0)
+ continue;
+
+ ret = pci_uio_map_resource_by_index(dev, i,
+ uio_res, map_idx);
+ if (ret)
+ goto error;
+
+ map_idx++;
+ }
+
+ uio_res->nb_maps = map_idx;
+
+ TAILQ_INSERT_TAIL(uio_res_list, uio_res, next);
+
+ return 0;
+error:
+ for (i = 0; i < map_idx; i++) {
+ pci_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+ rte_free(uio_res->maps[i].path);
+ }
+ pci_uio_free_resource(dev, uio_res);
+ return -1;
+}
+
+static void
+pci_uio_unmap(struct mapped_pci_resource *uio_res)
+{
+ int i;
+
+ if (uio_res == NULL)
+ return;
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ pci_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+ rte_free(uio_res->maps[i].path);
+ }
+}
+
+static struct mapped_pci_resource *
+pci_uio_find_resource(struct rte_pci_device *dev)
+{
+ struct mapped_pci_resource *uio_res;
+ struct mapped_pci_res_list *uio_res_list =
+ RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+
+ if (dev == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(uio_res, uio_res_list, next) {
+
+ /* skip this element if it doesn't match our PCI address */
+ if (!rte_eal_compare_pci_addr(&uio_res->pci_addr, &dev->addr))
+ return uio_res;
+ }
+ return NULL;
+}
+
+/* unmap the PCI resource of a PCI device in virtual memory */
+void
+pci_uio_unmap_resource(struct rte_pci_device *dev)
+{
+ struct mapped_pci_resource *uio_res;
+ struct mapped_pci_res_list *uio_res_list =
+ RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
+
+ if (dev == NULL)
+ return;
+
+ /* find an entry for the device */
+ uio_res = pci_uio_find_resource(dev);
+ if (uio_res == NULL)
+ return;
+
+ /* secondary processes - just free maps */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return pci_uio_unmap(uio_res);
+
+ TAILQ_REMOVE(uio_res_list, uio_res, next);
+
+ /* unmap all resources */
+ pci_uio_unmap(uio_res);
+
+ /* free uio resource */
+ rte_free(uio_res);
+
+ /* close fd if in primary process */
+ close(dev->intr_handle.fd);
+ if (dev->intr_handle.uio_cfg_fd >= 0) {
+ close(dev->intr_handle.uio_cfg_fd);
+ dev->intr_handle.uio_cfg_fd = -1;
+ }
+
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+}
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_string_fns.c b/src/dpdk22/lib/librte_eal/common/eal_common_string_fns.c
index 125a3e2d..125a3e2d 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_common_string_fns.c
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_string_fns.c
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_tailqs.c b/src/dpdk22/lib/librte_eal/common/eal_common_tailqs.c
index db9a1850..bb08ec8b 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_common_tailqs.c
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_tailqs.c
@@ -42,7 +42,6 @@
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
@@ -52,16 +51,17 @@
#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_string_fns.h>
+#include <rte_debug.h>
#include "eal_private.h"
-/**
- * Name of tailq_head
- */
-const char* rte_tailq_names[RTE_MAX_TAILQ] = {
-#define rte_tailq_elem(idx, name) name,
-#include <rte_tailq_elem.h>
-};
+TAILQ_HEAD(rte_tailq_elem_head, rte_tailq_elem);
+/* local tailq list */
+static struct rte_tailq_elem_head rte_tailq_elem_head =
+ TAILQ_HEAD_INITIALIZER(rte_tailq_elem_head);
+
+/* number of tailqs registered, -1 before call to rte_eal_tailqs_init */
+static int rte_tailqs_count = -1;
struct rte_tailq_head *
rte_eal_tailq_lookup(const char *name)
@@ -73,40 +73,14 @@ rte_eal_tailq_lookup(const char *name)
return NULL;
for (i = 0; i < RTE_MAX_TAILQ; i++) {
- if (rte_tailq_names[i] == NULL)
- continue;
- if (!strncmp(name, rte_tailq_names[i], RTE_TAILQ_NAMESIZE-1))
+ if (!strncmp(name, mcfg->tailq_head[i].name,
+ RTE_TAILQ_NAMESIZE-1))
return &mcfg->tailq_head[i];
}
return NULL;
}
-inline struct rte_tailq_head *
-rte_eal_tailq_lookup_by_idx(const unsigned tailq_idx)
-{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
-
- if (tailq_idx >= RTE_MAX_TAILQ) {
- RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
- return NULL;
- }
-
- return &mcfg->tailq_head[tailq_idx];
-}
-
-struct rte_tailq_head *
-rte_eal_tailq_reserve(const char *name)
-{
- return rte_eal_tailq_lookup(name);
-}
-
-inline struct rte_tailq_head *
-rte_eal_tailq_reserve_by_idx(const unsigned tailq_idx)
-{
- return rte_eal_tailq_lookup_by_idx(tailq_idx);
-}
-
void
rte_dump_tailq(FILE *f)
{
@@ -116,31 +90,113 @@ rte_dump_tailq(FILE *f)
mcfg = rte_eal_get_configuration()->mem_config;
rte_rwlock_read_lock(&mcfg->qlock);
- for (i=0; i < RTE_MAX_TAILQ; i++) {
+ for (i = 0; i < RTE_MAX_TAILQ; i++) {
const struct rte_tailq_head *tailq = &mcfg->tailq_head[i];
const struct rte_tailq_entry_head *head = &tailq->tailq_head;
- fprintf(f, "Tailq %u: qname:<%s>, tqh_first:%p, tqh_last:%p\n", i,
- (rte_tailq_names[i] != NULL ? rte_tailq_names[i]:"nil"),
- head->tqh_first, head->tqh_last);
+ fprintf(f, "Tailq %u: qname:<%s>, tqh_first:%p, tqh_last:%p\n",
+ i, tailq->name, head->tqh_first, head->tqh_last);
}
rte_rwlock_read_unlock(&mcfg->qlock);
}
-int
-rte_eal_tailqs_init(void)
+static struct rte_tailq_head *
+rte_eal_tailq_create(const char *name)
{
- unsigned i;
- struct rte_mem_config *mcfg = NULL;
+ struct rte_tailq_head *head = NULL;
- RTE_BUILD_BUG_ON(RTE_MAX_TAILQ < RTE_TAILQ_NUM);
+ if (!rte_eal_tailq_lookup(name) &&
+ (rte_tailqs_count + 1 < RTE_MAX_TAILQ)) {
+ struct rte_mem_config *mcfg;
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
mcfg = rte_eal_get_configuration()->mem_config;
- for (i = 0; i < RTE_MAX_TAILQ; i++)
- TAILQ_INIT(&mcfg->tailq_head[i].tailq_head);
+ head = &mcfg->tailq_head[rte_tailqs_count];
+ snprintf(head->name, sizeof(head->name) - 1, "%s", name);
+ TAILQ_INIT(&head->tailq_head);
+ rte_tailqs_count++;
}
+ return head;
+}
+
+/* local register, used to store "early" tailqs before rte_eal_init() and to
+ * ensure secondary process only registers tailqs once. */
+static int
+rte_eal_tailq_local_register(struct rte_tailq_elem *t)
+{
+ struct rte_tailq_elem *temp;
+
+ TAILQ_FOREACH(temp, &rte_tailq_elem_head, next) {
+ if (!strncmp(t->name, temp->name, sizeof(temp->name)))
+ return -1;
+ }
+
+ TAILQ_INSERT_TAIL(&rte_tailq_elem_head, t, next);
return 0;
}
+static void
+rte_eal_tailq_update(struct rte_tailq_elem *t)
+{
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* primary process is the only one that creates */
+ t->head = rte_eal_tailq_create(t->name);
+ } else {
+ t->head = rte_eal_tailq_lookup(t->name);
+ }
+}
+
+int
+rte_eal_tailq_register(struct rte_tailq_elem *t)
+{
+ if (rte_eal_tailq_local_register(t) < 0) {
+ RTE_LOG(ERR, EAL,
+ "%s tailq is already registered\n", t->name);
+ goto error;
+ }
+
+ /* if registration happens after rte_eal_tailqs_init(), we can update
+ * the tailq head immediately */
+ if (rte_tailqs_count >= 0) {
+ rte_eal_tailq_update(t);
+ if (t->head == NULL) {
+ RTE_LOG(ERR, EAL,
+ "Cannot initialize tailq: %s\n", t->name);
+ TAILQ_REMOVE(&rte_tailq_elem_head, t, next);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ t->head = NULL;
+ return -1;
+}
+
+int
+rte_eal_tailqs_init(void)
+{
+ struct rte_tailq_elem *t;
+
+ rte_tailqs_count = 0;
+
+ TAILQ_FOREACH(t, &rte_tailq_elem_head, next) {
+ /* second part of register job for "early" tailqs, see
+ * rte_eal_tailq_register and EAL_REGISTER_TAILQ */
+ rte_eal_tailq_update(t);
+ if (t->head == NULL) {
+ RTE_LOG(ERR, EAL,
+ "Cannot initialize tailq: %s\n", t->name);
+ /* no need to TAILQ_REMOVE, we are going to panic in
+ * rte_eal_init() */
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ rte_dump_tailq(stderr);
+ return -1;
+}
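
Editor's note: the rework above replaces the fixed rte_tailq_names table with dynamic registration. Each library hands the EAL a struct rte_tailq_elem, either before rte_eal_init() (queued on the local list and replayed by rte_eal_tailqs_init()) or afterwards (created or looked up immediately). A minimal sketch of the client side, using a plain constructor in place of the EAL_REGISTER_TAILQ convenience macro; the names are hypothetical:

    #include <rte_tailq.h>

    static struct rte_tailq_elem my_list_tailq = {
        .name = "MY_OBJ_LIST",   /* key stored in the shared mem config */
    };

    /* runs before main(): an "early" registration that
     * rte_eal_tailqs_init() later binds to a shared tailq head */
    static void __attribute__((constructor))
    my_list_tailq_register(void)
    {
        rte_eal_tailq_register(&my_list_tailq);
    }
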
diff --git a/src/dpdk22/lib/librte_eal/common/eal_common_thread.c b/src/dpdk22/lib/librte_eal/common/eal_common_thread.c
new file mode 100644
index 00000000..2405e93f
--- /dev/null
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_thread.c
@@ -0,0 +1,157 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <sched.h>
+#include <assert.h>
+#include <string.h>
+
+#include <rte_lcore.h>
+#include <rte_memory.h>
+#include <rte_log.h>
+
+#include "eal_thread.h"
+
+RTE_DECLARE_PER_LCORE(unsigned, _socket_id);
+
+unsigned rte_socket_id(void)
+{
+ return RTE_PER_LCORE(_socket_id);
+}
+
+int eal_cpuset_socket_id(rte_cpuset_t *cpusetp)
+{
+ unsigned cpu = 0;
+ int socket_id = SOCKET_ID_ANY;
+ int sid;
+
+ if (cpusetp == NULL)
+ return SOCKET_ID_ANY;
+
+ do {
+ if (!CPU_ISSET(cpu, cpusetp))
+ continue;
+
+ if (socket_id == SOCKET_ID_ANY)
+ socket_id = eal_cpu_socket_id(cpu);
+
+ sid = eal_cpu_socket_id(cpu);
+ if (socket_id != sid) {
+ socket_id = SOCKET_ID_ANY;
+ break;
+ }
+
+ } while (++cpu < RTE_MAX_LCORE);
+
+ return socket_id;
+}
+
+int
+rte_thread_set_affinity(rte_cpuset_t *cpusetp)
+{
+ int s;
+ unsigned lcore_id;
+ pthread_t tid;
+
+ tid = pthread_self();
+
+ s = pthread_setaffinity_np(tid, sizeof(rte_cpuset_t), cpusetp);
+ if (s != 0) {
+ RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
+ return -1;
+ }
+
+ /* store socket_id in TLS for quick access */
+ RTE_PER_LCORE(_socket_id) =
+ eal_cpuset_socket_id(cpusetp);
+
+ /* store cpuset in TLS for quick access */
+ memmove(&RTE_PER_LCORE(_cpuset), cpusetp,
+ sizeof(rte_cpuset_t));
+
+ lcore_id = rte_lcore_id();
+ if (lcore_id != (unsigned)LCORE_ID_ANY) {
+ /* EAL thread will update lcore_config */
+ lcore_config[lcore_id].socket_id = RTE_PER_LCORE(_socket_id);
+ memmove(&lcore_config[lcore_id].cpuset, cpusetp,
+ sizeof(rte_cpuset_t));
+ }
+
+ return 0;
+}
+
+void
+rte_thread_get_affinity(rte_cpuset_t *cpusetp)
+{
+ assert(cpusetp);
+ memmove(cpusetp, &RTE_PER_LCORE(_cpuset),
+ sizeof(rte_cpuset_t));
+}
+
+int
+eal_thread_dump_affinity(char *str, unsigned size)
+{
+ rte_cpuset_t cpuset;
+ unsigned cpu;
+ int ret;
+ unsigned int out = 0;
+
+ rte_thread_get_affinity(&cpuset);
+
+ for (cpu = 0; cpu < RTE_MAX_LCORE; cpu++) {
+ if (!CPU_ISSET(cpu, &cpuset))
+ continue;
+
+ ret = snprintf(str + out,
+ size - out, "%u,", cpu);
+ if (ret < 0 || (unsigned)ret >= size - out) {
+ /* string will be truncated */
+ ret = -1;
+ goto exit;
+ }
+
+ out += ret;
+ }
+
+ ret = 0;
+exit:
+ /* remove the last separator */
+ if (out > 0)
+ str[out - 1] = '\0';
+
+ return ret;
+}
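
Editor's note: taken together, rte_thread_set_affinity() both pins the calling thread and refreshes the per-lcore TLS copies that rte_socket_id() and rte_thread_get_affinity() read. A rough usage sketch (the CPU number is chosen arbitrarily):

    rte_cpuset_t set;
    char buf[RTE_CPU_AFFINITY_STR_LEN];

    CPU_ZERO(&set);
    CPU_SET(2, &set);                      /* pin to logical cpu 2 */
    if (rte_thread_set_affinity(&set) == 0 &&
        eal_thread_dump_affinity(buf, sizeof(buf)) == 0)
        printf("running on cpus: %s\n", buf);
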
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_alarm.c b/src/dpdk22/lib/librte_eal/common/eal_common_timer.c
index 204df85d..72371b88 100755..100644
--- a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_alarm.c
+++ b/src/dpdk22/lib/librte_eal/common/eal_common_timer.c
@@ -30,31 +30,57 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <stdlib.h>
+
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/types.h>
#include <errno.h>
-#include <rte_alarm.h>
#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_cycles.h>
+
#include "eal_private.h"
-int
-rte_eal_alarm_init(void)
+/* The frequency of the RDTSC timer resolution */
+static uint64_t eal_tsc_resolution_hz;
+
+void
+rte_delay_us(unsigned us)
{
- return 0;
+ const uint64_t start = rte_get_timer_cycles();
+ const uint64_t ticks = (uint64_t)us * rte_get_timer_hz() / 1E6;
+ while ((rte_get_timer_cycles() - start) < ticks)
+ rte_pause();
}
+uint64_t
+rte_get_tsc_hz(void)
+{
+ return eal_tsc_resolution_hz;
+}
-int
-rte_eal_alarm_set(uint64_t us __rte_unused,
- rte_eal_alarm_callback cb_fn __rte_unused,
- void *cb_arg __rte_unused)
+static uint64_t
+estimate_tsc_freq(void)
{
- return -ENOTSUP;
+ RTE_LOG(WARNING, EAL, "WARNING: TSC frequency estimated roughly"
+ " - clock timings may be less accurate.\n");
+ /* assume that the sleep(1) will sleep for 1 second */
+ uint64_t start = rte_rdtsc();
+ sleep(1);
+ return rte_rdtsc() - start;
}
-int
-rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn __rte_unused,
- void *cb_arg __rte_unused)
+void
+set_tsc_freq(void)
{
- return -ENOTSUP;
+ uint64_t freq = get_tsc_freq();
+
+ if (!freq)
+ freq = estimate_tsc_freq();
+
+ RTE_LOG(INFO, EAL, "TSC frequency is ~%" PRIu64 " KHz\n", freq / 1000);
+ eal_tsc_resolution_hz = freq;
}
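
Editor's note: the delay loop converts microseconds to TSC ticks and spins with rte_pause(); expanded by hand it is roughly the following (10 us chosen for illustration):

    const uint64_t hz = rte_get_timer_hz();          /* ticks per second */
    const uint64_t ticks = 10ULL * hz / 1000000;     /* ticks in 10 us */
    const uint64_t start = rte_get_timer_cycles();

    while (rte_get_timer_cycles() - start < ticks)
        rte_pause();                                 /* spin-wait hint */
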
diff --git a/src/dpdk_lib18/librte_eal/common/eal_filesystem.h b/src/dpdk22/lib/librte_eal/common/eal_filesystem.h
index fdb4a70b..fdb4a70b 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_filesystem.h
+++ b/src/dpdk22/lib/librte_eal/common/eal_filesystem.h
diff --git a/src/dpdk_lib18/librte_eal/common/eal_hugepages.h b/src/dpdk22/lib/librte_eal/common/eal_hugepages.h
index 38edac03..38edac03 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_hugepages.h
+++ b/src/dpdk22/lib/librte_eal/common/eal_hugepages.h
diff --git a/src/dpdk_lib18/librte_eal/common/eal_internal_cfg.h b/src/dpdk22/lib/librte_eal/common/eal_internal_cfg.h
index e2ecb0d0..5f1367eb 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_internal_cfg.h
+++ b/src/dpdk22/lib/librte_eal/common/eal_internal_cfg.h
@@ -64,6 +64,7 @@ struct internal_config {
volatile unsigned force_nchannel; /**< force number of channels */
volatile unsigned force_nrank; /**< force number of ranks */
volatile unsigned no_hugetlbfs; /**< true to disable hugetlbfs */
+ unsigned hugepage_unlink; /**< true to unlink backing files */
volatile unsigned xen_dom0_support; /**< support app running on Xen Dom0*/
volatile unsigned no_pci; /**< true to disable PCI */
volatile unsigned no_hpet; /**< true to disable HPET */
diff --git a/src/dpdk_lib18/librte_eal/common/eal_options.h b/src/dpdk22/lib/librte_eal/common/eal_options.h
index e476f8d8..a881c62e 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_options.h
+++ b/src/dpdk22/lib/librte_eal/common/eal_options.h
@@ -35,48 +35,54 @@
enum {
/* long options mapped to a short option */
-#define OPT_PCI_WHITELIST "pci-whitelist"
- OPT_PCI_WHITELIST_NUM = 'w',
-#define OPT_PCI_BLACKLIST "pci-blacklist"
- OPT_PCI_BLACKLIST_NUM = 'b',
+#define OPT_HELP "help"
+ OPT_HELP_NUM = 'h',
+#define OPT_PCI_BLACKLIST "pci-blacklist"
+ OPT_PCI_BLACKLIST_NUM = 'b',
+#define OPT_PCI_WHITELIST "pci-whitelist"
+ OPT_PCI_WHITELIST_NUM = 'w',
/* first long only option value must be >= 256, so that we won't
* conflict with short options */
OPT_LONG_MIN_NUM = 256,
-#define OPT_HUGE_DIR "huge-dir"
- OPT_HUGE_DIR_NUM = OPT_LONG_MIN_NUM,
-#define OPT_MASTER_LCORE "master-lcore"
+#define OPT_BASE_VIRTADDR "base-virtaddr"
+ OPT_BASE_VIRTADDR_NUM,
+#define OPT_CREATE_UIO_DEV "create-uio-dev"
+ OPT_CREATE_UIO_DEV_NUM,
+#define OPT_FILE_PREFIX "file-prefix"
+ OPT_FILE_PREFIX_NUM,
+#define OPT_HUGE_DIR "huge-dir"
+ OPT_HUGE_DIR_NUM,
+#define OPT_HUGE_UNLINK "huge-unlink"
+ OPT_HUGE_UNLINK_NUM,
+#define OPT_LCORES "lcores"
+ OPT_LCORES_NUM,
+#define OPT_LOG_LEVEL "log-level"
+ OPT_LOG_LEVEL_NUM,
+#define OPT_MASTER_LCORE "master-lcore"
OPT_MASTER_LCORE_NUM,
-#define OPT_PROC_TYPE "proc-type"
+#define OPT_PROC_TYPE "proc-type"
OPT_PROC_TYPE_NUM,
-#define OPT_NO_SHCONF "no-shconf"
- OPT_NO_SHCONF_NUM,
-#define OPT_NO_HPET "no-hpet"
+#define OPT_NO_HPET "no-hpet"
OPT_NO_HPET_NUM,
-#define OPT_VMWARE_TSC_MAP "vmware-tsc-map"
- OPT_VMWARE_TSC_MAP_NUM,
-#define OPT_NO_PCI "no-pci"
- OPT_NO_PCI_NUM,
-#define OPT_NO_HUGE "no-huge"
+#define OPT_NO_HUGE "no-huge"
OPT_NO_HUGE_NUM,
-#define OPT_FILE_PREFIX "file-prefix"
- OPT_FILE_PREFIX_NUM,
-#define OPT_SOCKET_MEM "socket-mem"
+#define OPT_NO_PCI "no-pci"
+ OPT_NO_PCI_NUM,
+#define OPT_NO_SHCONF "no-shconf"
+ OPT_NO_SHCONF_NUM,
+#define OPT_SOCKET_MEM "socket-mem"
OPT_SOCKET_MEM_NUM,
-#define OPT_VDEV "vdev"
- OPT_VDEV_NUM,
-#define OPT_SYSLOG "syslog"
+#define OPT_SYSLOG "syslog"
OPT_SYSLOG_NUM,
-#define OPT_LOG_LEVEL "log-level"
- OPT_LOG_LEVEL_NUM,
-#define OPT_BASE_VIRTADDR "base-virtaddr"
- OPT_BASE_VIRTADDR_NUM,
-#define OPT_XEN_DOM0 "xen-dom0"
- OPT_XEN_DOM0_NUM,
-#define OPT_CREATE_UIO_DEV "create-uio-dev"
- OPT_CREATE_UIO_DEV_NUM,
-#define OPT_VFIO_INTR "vfio-intr"
+#define OPT_VDEV "vdev"
+ OPT_VDEV_NUM,
+#define OPT_VFIO_INTR "vfio-intr"
OPT_VFIO_INTR_NUM,
+#define OPT_VMWARE_TSC_MAP "vmware-tsc-map"
+ OPT_VMWARE_TSC_MAP_NUM,
+#define OPT_XEN_DOM0 "xen-dom0"
+ OPT_XEN_DOM0_NUM,
OPT_LONG_MAX_NUM
};
@@ -89,5 +95,6 @@ int eal_adjust_config(struct internal_config *internal_cfg);
int eal_check_common_options(struct internal_config *internal_cfg);
void eal_common_usage(void);
enum rte_proc_type_t eal_proc_type_detect(void);
+int eal_plugins_init(void);
#endif /* EAL_OPTIONS_H */
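
Editor's note: the enum above alphabetizes the long options and adds new ones such as --huge-unlink (backed by the hugepage_unlink field added to struct internal_config earlier in this patch) and --lcores. Each OPT_*_NUM value serves as the case label in the EAL's getopt_long() loop; a schematic sketch, simplified from the option parser that is not shown in this diff:

    switch (opt) {
    case OPT_HUGE_UNLINK_NUM:
        /* unlink hugepage backing files once they are mapped */
        internal_config.hugepage_unlink = 1;
        break;
    case OPT_NO_HUGE_NUM:
        internal_config.no_hugetlbfs = 1;
        break;
    }
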
diff --git a/src/dpdk_lib18/librte_eal/common/eal_private.h b/src/dpdk22/lib/librte_eal/common/eal_private.h
index 232fcecc..072e672b 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_private.h
+++ b/src/dpdk22/lib/librte_eal/common/eal_private.h
@@ -35,6 +35,7 @@
#define _EAL_PRIVATE_H_
#include <stdio.h>
+#include <rte_pci.h>
/**
* Initialize the memzone subsystem (private to eal).
@@ -154,15 +155,97 @@ struct rte_pci_driver;
struct rte_pci_device;
/**
- * Mmap memory for single PCI device
+ * Unbind kernel driver for this device
*
* This function is private to EAL.
*
* @return
* 0 on success, negative on error
*/
-int rte_eal_pci_probe_one_driver(struct rte_pci_driver *dr,
- struct rte_pci_device *dev);
+int pci_unbind_kernel_driver(struct rte_pci_device *dev);
+
+/**
+ * Map this device
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ * 0 on success, negative on error and positive if no driver
+ * is found for the device.
+ */
+int pci_map_device(struct rte_pci_device *dev);
+
+/**
+ * Unmap this device
+ *
+ * This function is private to EAL.
+ */
+void pci_unmap_device(struct rte_pci_device *dev);
+
+/**
+ * Map the PCI resource of a PCI device in virtual memory
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int pci_uio_map_resource(struct rte_pci_device *dev);
+
+/**
+ * Unmap the PCI resource of a PCI device
+ *
+ * This function is private to EAL.
+ */
+void pci_uio_unmap_resource(struct rte_pci_device *dev);
+
+/**
+ * Allocate uio resource for PCI device
+ *
+ * This function is private to EAL.
+ *
+ * @param dev
+ * PCI device to allocate uio resource
+ * @param uio_res
+ * Pointer to uio resource.
+ * If the function returns 0, the pointer will be filled.
+ * @return
+ * 0 on success, negative on error
+ */
+int pci_uio_alloc_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource **uio_res);
+
+/**
+ * Free uio resource for PCI device
+ *
+ * This function is private to EAL.
+ *
+ * @param dev
+ * PCI device to free uio resource
+ * @param uio_res
+ * Pointer to uio resource.
+ */
+void pci_uio_free_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource *uio_res);
+
+/**
+ * Map device memory to uio resource
+ *
+ * This function is private to EAL.
+ *
+ * @param dev
+ * PCI device that has memory information.
+ * @param res_idx
+ * Memory resource index of the PCI device.
+ * @param uio_res
+ * uio resource that will keep mapping information.
+ * @param map_idx
+ * Mapping information index of the uio resource.
+ * @return
+ * 0 on success, negative on error
+ */
+int pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
+ struct mapped_pci_resource *uio_res, int map_idx);
/**
* Init tail queues for non-EAL library structures. This is to allow
@@ -203,4 +286,64 @@ int rte_eal_alarm_init(void);
*/
int rte_eal_dev_init(void);
+/**
+ * Check whether a kernel module (e.g. vfio, vfio_iommu_type1) is loaded.
+ *
+ * @param module_name
+ * Name of the module to check
+ *
+ * @return
+ * -1 on error (NULL pointer or open failure)
+ * 0 if the module is not loaded
+ * 1 if the module is loaded
+ */
+int rte_eal_check_module(const char *module_name);
+
+/**
+ * Get cpu core_id.
+ *
+ * This function is private to the EAL.
+ */
+unsigned eal_cpu_core_id(unsigned lcore_id);
+
+/**
+ * Check if cpu is present.
+ *
+ * This function is private to the EAL.
+ */
+int eal_cpu_detected(unsigned lcore_id);
+
+/**
+ * Set TSC frequency from precise value or estimation
+ *
+ * This function is private to the EAL.
+ */
+void set_tsc_freq(void);
+
+/**
+ * Get precise TSC frequency from system
+ *
+ * This function is private to the EAL.
+ */
+uint64_t get_tsc_freq(void);
+
+/**
+ * Prepare physical memory mapping
+ * i.e. hugepages on Linux and
+ * contigmem on BSD.
+ *
+ * This function is private to the EAL.
+ */
+int rte_eal_hugepage_init(void);
+
+/**
+ * Creates memory mapping in secondary process
+ * i.e. hugepages on Linux and
+ * contigmem on BSD.
+ *
+ * This function is private to the EAL.
+ */
+int rte_eal_hugepage_attach(void);
+
#endif /* _EAL_PRIVATE_H_ */
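
Editor's note: rte_eal_check_module() gives a tri-state answer (error / absent / present), so callers must compare against 1 explicitly rather than treat the result as a boolean. A hedged sketch:

    /* prefer VFIO when its kernel module is present */
    if (rte_eal_check_module("vfio_pci") == 1)
        printf("vfio-pci loaded, VFIO mapping possible\n");
    else
        printf("falling back to UIO\n");
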
diff --git a/src/dpdk_lib18/librte_hash/rte_hash_crc.h b/src/dpdk22/lib/librte_eal/common/eal_thread.h
index b48b0db1..e4e76b9d 100755..100644
--- a/src/dpdk_lib18/librte_hash/rte_hash_crc.h
+++ b/src/dpdk22/lib/librte_eal/common/eal_thread.h
@@ -31,80 +31,70 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _RTE_HASH_CRC_H_
-#define _RTE_HASH_CRC_H_
+#ifndef EAL_THREAD_H
+#define EAL_THREAD_H
+
+#include <rte_lcore.h>
/**
- * @file
+ * Basic loop of a thread, called for each thread by eal_init().
*
- * RTE CRC Hash
+ * @param arg
+ * opaque pointer
*/
+__attribute__((noreturn)) void *eal_thread_loop(void *arg);
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdint.h>
-#include <nmmintrin.h>
+/**
+ * Init per-lcore info for master thread
+ *
+ * @param lcore_id
+ * identifier of master lcore
+ */
+void eal_thread_init_master(unsigned lcore_id);
/**
- * Use single crc32 instruction to perform a hash on a 4 byte value.
+ * Get the NUMA socket id from cpu id.
+ * This function is private to EAL.
*
- * @param data
- * Data to perform hash on.
- * @param init_val
- * Value to initialise hash generator.
+ * @param cpu_id
+ * The logical processor id.
* @return
- * 32bit calculated hash value.
+ * socket_id or SOCKET_ID_ANY
*/
-static inline uint32_t
-rte_hash_crc_4byte(uint32_t data, uint32_t init_val)
-{
- return _mm_crc32_u32(init_val, data);
-}
+unsigned eal_cpu_socket_id(unsigned cpu_id);
/**
- * Use crc32 instruction to perform a hash.
+ * Get the NUMA socket id from cpuset.
+ * This function is private to EAL.
*
- * @param data
- * Data to perform hash on.
- * @param data_len
- * How many bytes to use to calculate hash value.
- * @param init_val
- * Value to initialise hash generator.
+ * @param cpusetp
+ * Pointer to a valid cpu set.
* @return
- * 32bit calculated hash value.
+ * socket_id or SOCKET_ID_ANY
*/
-static inline uint32_t
-rte_hash_crc(const void *data, uint32_t data_len, uint32_t init_val)
-{
- unsigned i;
- uint32_t temp = 0;
- const uint32_t *p32 = (const uint32_t *)data;
-
- for (i = 0; i < data_len / 4; i++) {
- init_val = rte_hash_crc_4byte(*p32++, init_val);
- }
-
- switch (3 - (data_len & 0x03)) {
- case 0:
- temp |= *((const uint8_t *)p32 + 2) << 16;
- /* Fallthrough */
- case 1:
- temp |= *((const uint8_t *)p32 + 1) << 8;
- /* Fallthrough */
- case 2:
- temp |= *((const uint8_t *)p32);
- init_val = rte_hash_crc_4byte(temp, init_val);
- default:
- break;
- }
+int eal_cpuset_socket_id(rte_cpuset_t *cpusetp);
- return init_val;
-}
+/**
+ * Default buffer size to use with eal_thread_dump_affinity()
+ */
+#define RTE_CPU_AFFINITY_STR_LEN 256
-#ifdef __cplusplus
-}
-#endif
+/**
+ * Dump the current pthread cpuset.
+ * This function is private to EAL.
+ *
+ * Note:
+ * If the dump is larger than the given buffer,
+ * the string is truncated and '\0'-terminated.
+ *
+ * @param str
+ * The string buffer the cpuset will dump to.
+ * @param size
+ * The string buffer size.
+ * @return
+ * 0 for success, -1 if truncation happens.
+ */
+int
+eal_thread_dump_affinity(char *str, unsigned size);
-#endif /* _RTE_HASH_CRC_H_ */
+#endif /* EAL_THREAD_H */
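
Editor's note: eal_cpuset_socket_id(), declared above, collapses a cpuset to a single NUMA socket only when every CPU in the set agrees; otherwise it reports SOCKET_ID_ANY. A sketch (cpu numbers arbitrary):

    rte_cpuset_t set;

    CPU_ZERO(&set);
    CPU_SET(0, &set);
    CPU_SET(1, &set);
    /* common socket of cpus 0 and 1, or SOCKET_ID_ANY if they
     * sit on different sockets */
    int sid = eal_cpuset_socket_id(&set);
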
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic.h b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_atomic.h
index e93e8eef..41178c7b 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic.h
+++ b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_atomic.h
@@ -53,6 +53,12 @@ extern "C" {
#define rte_rmb() _mm_lfence()
+#define rte_smp_mb() rte_mb()
+
+#define rte_smp_wmb() rte_compiler_barrier()
+
+#define rte_smp_rmb() rte_compiler_barrier()
+
/*------------------------- 16 bit atomic operations -------------------------*/
#ifndef RTE_FORCE_INTRINSICS
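
Editor's note: on x86 the new rte_smp_wmb()/rte_smp_rmb() macros reduce to compiler barriers, since the architecture already orders ordinary stores against stores and loads against loads; only rte_smp_mb() emits a real fence. A classic producer-side sketch (the msg struct is hypothetical):

    struct msg { uint32_t payload; volatile uint32_t ready; };

    static void publish(struct msg *m, uint32_t v)
    {
        m->payload = v;
        rte_smp_wmb();   /* make payload visible before the flag */
        m->ready = 1;
    }
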
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic_32.h b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h
index 400d8a96..400d8a96 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic_32.h
+++ b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic_64.h b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h
index 4de66000..4de66000 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic_64.h
+++ b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder.h b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_byteorder.h
index ffdb6ef5..ffdb6ef5 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder.h
+++ b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_byteorder.h
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder_32.h b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_byteorder_32.h
index 51c306f8..51c306f8 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder_32.h
+++ b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_byteorder_32.h
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder_64.h b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_byteorder_64.h
index dda572bd..dda572bd 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder_64.h
+++ b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_byteorder_64.h
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_cpuflags.h b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_cpuflags.h
index a58dd7bc..dd565535 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_cpuflags.h
+++ b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_cpuflags.h
@@ -151,104 +151,104 @@ enum rte_cpu_flag_t {
};
enum cpu_register_t {
- REG_EAX = 0,
- REG_EBX,
- REG_ECX,
- REG_EDX,
+ RTE_REG_EAX = 0,
+ RTE_REG_EBX,
+ RTE_REG_ECX,
+ RTE_REG_EDX,
};
static const struct feature_entry cpu_feature_table[] = {
- FEAT_DEF(SSE3, 0x00000001, 0, REG_ECX, 0)
- FEAT_DEF(PCLMULQDQ, 0x00000001, 0, REG_ECX, 1)
- FEAT_DEF(DTES64, 0x00000001, 0, REG_ECX, 2)
- FEAT_DEF(MONITOR, 0x00000001, 0, REG_ECX, 3)
- FEAT_DEF(DS_CPL, 0x00000001, 0, REG_ECX, 4)
- FEAT_DEF(VMX, 0x00000001, 0, REG_ECX, 5)
- FEAT_DEF(SMX, 0x00000001, 0, REG_ECX, 6)
- FEAT_DEF(EIST, 0x00000001, 0, REG_ECX, 7)
- FEAT_DEF(TM2, 0x00000001, 0, REG_ECX, 8)
- FEAT_DEF(SSSE3, 0x00000001, 0, REG_ECX, 9)
- FEAT_DEF(CNXT_ID, 0x00000001, 0, REG_ECX, 10)
- FEAT_DEF(FMA, 0x00000001, 0, REG_ECX, 12)
- FEAT_DEF(CMPXCHG16B, 0x00000001, 0, REG_ECX, 13)
- FEAT_DEF(XTPR, 0x00000001, 0, REG_ECX, 14)
- FEAT_DEF(PDCM, 0x00000001, 0, REG_ECX, 15)
- FEAT_DEF(PCID, 0x00000001, 0, REG_ECX, 17)
- FEAT_DEF(DCA, 0x00000001, 0, REG_ECX, 18)
- FEAT_DEF(SSE4_1, 0x00000001, 0, REG_ECX, 19)
- FEAT_DEF(SSE4_2, 0x00000001, 0, REG_ECX, 20)
- FEAT_DEF(X2APIC, 0x00000001, 0, REG_ECX, 21)
- FEAT_DEF(MOVBE, 0x00000001, 0, REG_ECX, 22)
- FEAT_DEF(POPCNT, 0x00000001, 0, REG_ECX, 23)
- FEAT_DEF(TSC_DEADLINE, 0x00000001, 0, REG_ECX, 24)
- FEAT_DEF(AES, 0x00000001, 0, REG_ECX, 25)
- FEAT_DEF(XSAVE, 0x00000001, 0, REG_ECX, 26)
- FEAT_DEF(OSXSAVE, 0x00000001, 0, REG_ECX, 27)
- FEAT_DEF(AVX, 0x00000001, 0, REG_ECX, 28)
- FEAT_DEF(F16C, 0x00000001, 0, REG_ECX, 29)
- FEAT_DEF(RDRAND, 0x00000001, 0, REG_ECX, 30)
-
- FEAT_DEF(FPU, 0x00000001, 0, REG_EDX, 0)
- FEAT_DEF(VME, 0x00000001, 0, REG_EDX, 1)
- FEAT_DEF(DE, 0x00000001, 0, REG_EDX, 2)
- FEAT_DEF(PSE, 0x00000001, 0, REG_EDX, 3)
- FEAT_DEF(TSC, 0x00000001, 0, REG_EDX, 4)
- FEAT_DEF(MSR, 0x00000001, 0, REG_EDX, 5)
- FEAT_DEF(PAE, 0x00000001, 0, REG_EDX, 6)
- FEAT_DEF(MCE, 0x00000001, 0, REG_EDX, 7)
- FEAT_DEF(CX8, 0x00000001, 0, REG_EDX, 8)
- FEAT_DEF(APIC, 0x00000001, 0, REG_EDX, 9)
- FEAT_DEF(SEP, 0x00000001, 0, REG_EDX, 11)
- FEAT_DEF(MTRR, 0x00000001, 0, REG_EDX, 12)
- FEAT_DEF(PGE, 0x00000001, 0, REG_EDX, 13)
- FEAT_DEF(MCA, 0x00000001, 0, REG_EDX, 14)
- FEAT_DEF(CMOV, 0x00000001, 0, REG_EDX, 15)
- FEAT_DEF(PAT, 0x00000001, 0, REG_EDX, 16)
- FEAT_DEF(PSE36, 0x00000001, 0, REG_EDX, 17)
- FEAT_DEF(PSN, 0x00000001, 0, REG_EDX, 18)
- FEAT_DEF(CLFSH, 0x00000001, 0, REG_EDX, 19)
- FEAT_DEF(DS, 0x00000001, 0, REG_EDX, 21)
- FEAT_DEF(ACPI, 0x00000001, 0, REG_EDX, 22)
- FEAT_DEF(MMX, 0x00000001, 0, REG_EDX, 23)
- FEAT_DEF(FXSR, 0x00000001, 0, REG_EDX, 24)
- FEAT_DEF(SSE, 0x00000001, 0, REG_EDX, 25)
- FEAT_DEF(SSE2, 0x00000001, 0, REG_EDX, 26)
- FEAT_DEF(SS, 0x00000001, 0, REG_EDX, 27)
- FEAT_DEF(HTT, 0x00000001, 0, REG_EDX, 28)
- FEAT_DEF(TM, 0x00000001, 0, REG_EDX, 29)
- FEAT_DEF(PBE, 0x00000001, 0, REG_EDX, 31)
-
- FEAT_DEF(DIGTEMP, 0x00000006, 0, REG_EAX, 0)
- FEAT_DEF(TRBOBST, 0x00000006, 0, REG_EAX, 1)
- FEAT_DEF(ARAT, 0x00000006, 0, REG_EAX, 2)
- FEAT_DEF(PLN, 0x00000006, 0, REG_EAX, 4)
- FEAT_DEF(ECMD, 0x00000006, 0, REG_EAX, 5)
- FEAT_DEF(PTM, 0x00000006, 0, REG_EAX, 6)
-
- FEAT_DEF(MPERF_APERF_MSR, 0x00000006, 0, REG_ECX, 0)
- FEAT_DEF(ACNT2, 0x00000006, 0, REG_ECX, 1)
- FEAT_DEF(ENERGY_EFF, 0x00000006, 0, REG_ECX, 3)
-
- FEAT_DEF(FSGSBASE, 0x00000007, 0, REG_EBX, 0)
- FEAT_DEF(BMI1, 0x00000007, 0, REG_EBX, 2)
- FEAT_DEF(HLE, 0x00000007, 0, REG_EBX, 4)
- FEAT_DEF(AVX2, 0x00000007, 0, REG_EBX, 5)
- FEAT_DEF(SMEP, 0x00000007, 0, REG_EBX, 6)
- FEAT_DEF(BMI2, 0x00000007, 0, REG_EBX, 7)
- FEAT_DEF(ERMS, 0x00000007, 0, REG_EBX, 8)
- FEAT_DEF(INVPCID, 0x00000007, 0, REG_EBX, 10)
- FEAT_DEF(RTM, 0x00000007, 0, REG_EBX, 11)
-
- FEAT_DEF(LAHF_SAHF, 0x80000001, 0, REG_ECX, 0)
- FEAT_DEF(LZCNT, 0x80000001, 0, REG_ECX, 4)
-
- FEAT_DEF(SYSCALL, 0x80000001, 0, REG_EDX, 11)
- FEAT_DEF(XD, 0x80000001, 0, REG_EDX, 20)
- FEAT_DEF(1GB_PG, 0x80000001, 0, REG_EDX, 26)
- FEAT_DEF(RDTSCP, 0x80000001, 0, REG_EDX, 27)
- FEAT_DEF(EM64T, 0x80000001, 0, REG_EDX, 29)
-
- FEAT_DEF(INVTSC, 0x80000007, 0, REG_EDX, 8)
+ FEAT_DEF(SSE3, 0x00000001, 0, RTE_REG_ECX, 0)
+ FEAT_DEF(PCLMULQDQ, 0x00000001, 0, RTE_REG_ECX, 1)
+ FEAT_DEF(DTES64, 0x00000001, 0, RTE_REG_ECX, 2)
+ FEAT_DEF(MONITOR, 0x00000001, 0, RTE_REG_ECX, 3)
+ FEAT_DEF(DS_CPL, 0x00000001, 0, RTE_REG_ECX, 4)
+ FEAT_DEF(VMX, 0x00000001, 0, RTE_REG_ECX, 5)
+ FEAT_DEF(SMX, 0x00000001, 0, RTE_REG_ECX, 6)
+ FEAT_DEF(EIST, 0x00000001, 0, RTE_REG_ECX, 7)
+ FEAT_DEF(TM2, 0x00000001, 0, RTE_REG_ECX, 8)
+ FEAT_DEF(SSSE3, 0x00000001, 0, RTE_REG_ECX, 9)
+ FEAT_DEF(CNXT_ID, 0x00000001, 0, RTE_REG_ECX, 10)
+ FEAT_DEF(FMA, 0x00000001, 0, RTE_REG_ECX, 12)
+ FEAT_DEF(CMPXCHG16B, 0x00000001, 0, RTE_REG_ECX, 13)
+ FEAT_DEF(XTPR, 0x00000001, 0, RTE_REG_ECX, 14)
+ FEAT_DEF(PDCM, 0x00000001, 0, RTE_REG_ECX, 15)
+ FEAT_DEF(PCID, 0x00000001, 0, RTE_REG_ECX, 17)
+ FEAT_DEF(DCA, 0x00000001, 0, RTE_REG_ECX, 18)
+ FEAT_DEF(SSE4_1, 0x00000001, 0, RTE_REG_ECX, 19)
+ FEAT_DEF(SSE4_2, 0x00000001, 0, RTE_REG_ECX, 20)
+ FEAT_DEF(X2APIC, 0x00000001, 0, RTE_REG_ECX, 21)
+ FEAT_DEF(MOVBE, 0x00000001, 0, RTE_REG_ECX, 22)
+ FEAT_DEF(POPCNT, 0x00000001, 0, RTE_REG_ECX, 23)
+ FEAT_DEF(TSC_DEADLINE, 0x00000001, 0, RTE_REG_ECX, 24)
+ FEAT_DEF(AES, 0x00000001, 0, RTE_REG_ECX, 25)
+ FEAT_DEF(XSAVE, 0x00000001, 0, RTE_REG_ECX, 26)
+ FEAT_DEF(OSXSAVE, 0x00000001, 0, RTE_REG_ECX, 27)
+ FEAT_DEF(AVX, 0x00000001, 0, RTE_REG_ECX, 28)
+ FEAT_DEF(F16C, 0x00000001, 0, RTE_REG_ECX, 29)
+ FEAT_DEF(RDRAND, 0x00000001, 0, RTE_REG_ECX, 30)
+
+ FEAT_DEF(FPU, 0x00000001, 0, RTE_REG_EDX, 0)
+ FEAT_DEF(VME, 0x00000001, 0, RTE_REG_EDX, 1)
+ FEAT_DEF(DE, 0x00000001, 0, RTE_REG_EDX, 2)
+ FEAT_DEF(PSE, 0x00000001, 0, RTE_REG_EDX, 3)
+ FEAT_DEF(TSC, 0x00000001, 0, RTE_REG_EDX, 4)
+ FEAT_DEF(MSR, 0x00000001, 0, RTE_REG_EDX, 5)
+ FEAT_DEF(PAE, 0x00000001, 0, RTE_REG_EDX, 6)
+ FEAT_DEF(MCE, 0x00000001, 0, RTE_REG_EDX, 7)
+ FEAT_DEF(CX8, 0x00000001, 0, RTE_REG_EDX, 8)
+ FEAT_DEF(APIC, 0x00000001, 0, RTE_REG_EDX, 9)
+ FEAT_DEF(SEP, 0x00000001, 0, RTE_REG_EDX, 11)
+ FEAT_DEF(MTRR, 0x00000001, 0, RTE_REG_EDX, 12)
+ FEAT_DEF(PGE, 0x00000001, 0, RTE_REG_EDX, 13)
+ FEAT_DEF(MCA, 0x00000001, 0, RTE_REG_EDX, 14)
+ FEAT_DEF(CMOV, 0x00000001, 0, RTE_REG_EDX, 15)
+ FEAT_DEF(PAT, 0x00000001, 0, RTE_REG_EDX, 16)
+ FEAT_DEF(PSE36, 0x00000001, 0, RTE_REG_EDX, 17)
+ FEAT_DEF(PSN, 0x00000001, 0, RTE_REG_EDX, 18)
+ FEAT_DEF(CLFSH, 0x00000001, 0, RTE_REG_EDX, 19)
+ FEAT_DEF(DS, 0x00000001, 0, RTE_REG_EDX, 21)
+ FEAT_DEF(ACPI, 0x00000001, 0, RTE_REG_EDX, 22)
+ FEAT_DEF(MMX, 0x00000001, 0, RTE_REG_EDX, 23)
+ FEAT_DEF(FXSR, 0x00000001, 0, RTE_REG_EDX, 24)
+ FEAT_DEF(SSE, 0x00000001, 0, RTE_REG_EDX, 25)
+ FEAT_DEF(SSE2, 0x00000001, 0, RTE_REG_EDX, 26)
+ FEAT_DEF(SS, 0x00000001, 0, RTE_REG_EDX, 27)
+ FEAT_DEF(HTT, 0x00000001, 0, RTE_REG_EDX, 28)
+ FEAT_DEF(TM, 0x00000001, 0, RTE_REG_EDX, 29)
+ FEAT_DEF(PBE, 0x00000001, 0, RTE_REG_EDX, 31)
+
+ FEAT_DEF(DIGTEMP, 0x00000006, 0, RTE_REG_EAX, 0)
+ FEAT_DEF(TRBOBST, 0x00000006, 0, RTE_REG_EAX, 1)
+ FEAT_DEF(ARAT, 0x00000006, 0, RTE_REG_EAX, 2)
+ FEAT_DEF(PLN, 0x00000006, 0, RTE_REG_EAX, 4)
+ FEAT_DEF(ECMD, 0x00000006, 0, RTE_REG_EAX, 5)
+ FEAT_DEF(PTM, 0x00000006, 0, RTE_REG_EAX, 6)
+
+ FEAT_DEF(MPERF_APERF_MSR, 0x00000006, 0, RTE_REG_ECX, 0)
+ FEAT_DEF(ACNT2, 0x00000006, 0, RTE_REG_ECX, 1)
+ FEAT_DEF(ENERGY_EFF, 0x00000006, 0, RTE_REG_ECX, 3)
+
+ FEAT_DEF(FSGSBASE, 0x00000007, 0, RTE_REG_EBX, 0)
+ FEAT_DEF(BMI1, 0x00000007, 0, RTE_REG_EBX, 2)
+ FEAT_DEF(HLE, 0x00000007, 0, RTE_REG_EBX, 4)
+ FEAT_DEF(AVX2, 0x00000007, 0, RTE_REG_EBX, 5)
+ FEAT_DEF(SMEP, 0x00000007, 0, RTE_REG_EBX, 6)
+ FEAT_DEF(BMI2, 0x00000007, 0, RTE_REG_EBX, 7)
+ FEAT_DEF(ERMS, 0x00000007, 0, RTE_REG_EBX, 8)
+ FEAT_DEF(INVPCID, 0x00000007, 0, RTE_REG_EBX, 10)
+ FEAT_DEF(RTM, 0x00000007, 0, RTE_REG_EBX, 11)
+
+ FEAT_DEF(LAHF_SAHF, 0x80000001, 0, RTE_REG_ECX, 0)
+ FEAT_DEF(LZCNT, 0x80000001, 0, RTE_REG_ECX, 4)
+
+ FEAT_DEF(SYSCALL, 0x80000001, 0, RTE_REG_EDX, 11)
+ FEAT_DEF(XD, 0x80000001, 0, RTE_REG_EDX, 20)
+ FEAT_DEF(1GB_PG, 0x80000001, 0, RTE_REG_EDX, 26)
+ FEAT_DEF(RDTSCP, 0x80000001, 0, RTE_REG_EDX, 27)
+ FEAT_DEF(EM64T, 0x80000001, 0, RTE_REG_EDX, 29)
+
+ FEAT_DEF(INVTSC, 0x80000007, 0, RTE_REG_EDX, 8)
};
static inline void
@@ -257,18 +257,18 @@ rte_cpu_get_features(uint32_t leaf, uint32_t subleaf, cpuid_registers_t out)
#if defined(__i386__) && defined(__PIC__)
/* %ebx is a forbidden register if we compile with -fPIC or -fPIE */
asm volatile("movl %%ebx,%0 ; cpuid ; xchgl %%ebx,%0"
- : "=r" (out[REG_EBX]),
- "=a" (out[REG_EAX]),
- "=c" (out[REG_ECX]),
- "=d" (out[REG_EDX])
+ : "=r" (out[RTE_REG_EBX]),
+ "=a" (out[RTE_REG_EAX]),
+ "=c" (out[RTE_REG_ECX]),
+ "=d" (out[RTE_REG_EDX])
: "a" (leaf), "c" (subleaf));
#else
asm volatile("cpuid"
- : "=a" (out[REG_EAX]),
- "=b" (out[REG_EBX]),
- "=c" (out[REG_ECX]),
- "=d" (out[REG_EDX])
+ : "=a" (out[RTE_REG_EAX]),
+ "=b" (out[RTE_REG_EBX]),
+ "=c" (out[RTE_REG_ECX]),
+ "=d" (out[RTE_REG_EDX])
: "a" (leaf), "c" (subleaf));
#endif
@@ -292,8 +292,8 @@ rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature)
return -EFAULT;
rte_cpu_get_features(feat->leaf & 0xffff0000, 0, regs);
- if (((regs[REG_EAX] ^ feat->leaf) & 0xffff0000) ||
- regs[REG_EAX] < feat->leaf)
+ if (((regs[RTE_REG_EAX] ^ feat->leaf) & 0xffff0000) ||
+ regs[RTE_REG_EAX] < feat->leaf)
return 0;
/* get the cpuid leaf containing the desired feature */
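
Editor's note: the RTE_REG_* renaming above is purely a namespace cleanup; the lookup API is unchanged. Checking a feature still goes through rte_cpu_get_flag_enabled(), e.g.:

    if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) > 0)
        printf("AVX2 supported\n");
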
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_cycles.h b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_cycles.h
index 6e3c7d89..6e3c7d89 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_cycles.h
+++ b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_cycles.h
diff --git a/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_memcpy.h b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
new file mode 100644
index 00000000..6a574263
--- /dev/null
+++ b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
@@ -0,0 +1,639 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MEMCPY_X86_64_H_
+#define _RTE_MEMCPY_X86_64_H_
+
+/**
+ * @file
+ *
+ * Functions for SSE/AVX/AVX2 implementation of memcpy().
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <rte_vect.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Copy bytes from one location to another. The locations must not overlap.
+ *
+ * @note This is implemented as a macro, so its address should not be taken
+ * and care is needed as parameter expressions may be evaluated multiple times.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ * @param n
+ * Number of bytes to copy.
+ * @return
+ * Pointer to the destination data.
+ */
+static inline void *
+rte_memcpy(void *dst, const void *src, size_t n) __attribute__((always_inline));
+
+#ifdef RTE_MACHINE_CPUFLAG_AVX2
+
+/**
+ * AVX2 implementation below
+ */
+
+/**
+ * Copy 16 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+ __m128i xmm0;
+
+ xmm0 = _mm_loadu_si128((const __m128i *)src);
+ _mm_storeu_si128((__m128i *)dst, xmm0);
+}
+
+/**
+ * Copy 32 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+ __m256i ymm0;
+
+ ymm0 = _mm256_loadu_si256((const __m256i *)src);
+ _mm256_storeu_si256((__m256i *)dst, ymm0);
+}
+
+/**
+ * Copy 64 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
+ rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
+}
+
+/**
+ * Copy 128 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
+ rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
+ rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
+ rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
+}
+
+/**
+ * Copy 256 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
+ rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
+ rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
+ rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
+ rte_mov32((uint8_t *)dst + 4 * 32, (const uint8_t *)src + 4 * 32);
+ rte_mov32((uint8_t *)dst + 5 * 32, (const uint8_t *)src + 5 * 32);
+ rte_mov32((uint8_t *)dst + 6 * 32, (const uint8_t *)src + 6 * 32);
+ rte_mov32((uint8_t *)dst + 7 * 32, (const uint8_t *)src + 7 * 32);
+}
+
+/**
+ * Copy 64-byte blocks from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov64blocks(uint8_t *dst, const uint8_t *src, size_t n)
+{
+ __m256i ymm0, ymm1;
+
+ while (n >= 64) {
+ ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
+ n -= 64;
+ ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
+ src = (const uint8_t *)src + 64;
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
+ dst = (uint8_t *)dst + 64;
+ }
+}
+
+/**
+ * Copy 256-byte blocks from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov256blocks(uint8_t *dst, const uint8_t *src, size_t n)
+{
+ __m256i ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7;
+
+ while (n >= 256) {
+ ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
+ n -= 256;
+ ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
+ ymm2 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 2 * 32));
+ ymm3 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 3 * 32));
+ ymm4 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 4 * 32));
+ ymm5 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 5 * 32));
+ ymm6 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 6 * 32));
+ ymm7 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 7 * 32));
+ src = (const uint8_t *)src + 256;
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 2 * 32), ymm2);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 3 * 32), ymm3);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 4 * 32), ymm4);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 5 * 32), ymm5);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 6 * 32), ymm6);
+ _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 7 * 32), ymm7);
+ dst = (uint8_t *)dst + 256;
+ }
+}
+
+static inline void *
+rte_memcpy(void *dst, const void *src, size_t n)
+{
+ uintptr_t dstu = (uintptr_t)dst;
+ uintptr_t srcu = (uintptr_t)src;
+ void *ret = dst;
+ size_t dstofss;
+ size_t bits;
+
+ /**
+ * Copy less than 16 bytes
+ */
+ if (n < 16) {
+ if (n & 0x01) {
+ *(uint8_t *)dstu = *(const uint8_t *)srcu;
+ srcu = (uintptr_t)((const uint8_t *)srcu + 1);
+ dstu = (uintptr_t)((uint8_t *)dstu + 1);
+ }
+ if (n & 0x02) {
+ *(uint16_t *)dstu = *(const uint16_t *)srcu;
+ srcu = (uintptr_t)((const uint16_t *)srcu + 1);
+ dstu = (uintptr_t)((uint16_t *)dstu + 1);
+ }
+ if (n & 0x04) {
+ *(uint32_t *)dstu = *(const uint32_t *)srcu;
+ srcu = (uintptr_t)((const uint32_t *)srcu + 1);
+ dstu = (uintptr_t)((uint32_t *)dstu + 1);
+ }
+ if (n & 0x08) {
+ *(uint64_t *)dstu = *(const uint64_t *)srcu;
+ }
+ return ret;
+ }
+
+ /**
+ * Fast way when copy size doesn't exceed 512 bytes
+ */
+ if (n <= 32) {
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ return ret;
+ }
+ if (n <= 64) {
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+ return ret;
+ }
+ if (n <= 512) {
+ if (n >= 256) {
+ n -= 256;
+ rte_mov256((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + 256;
+ dst = (uint8_t *)dst + 256;
+ }
+ if (n >= 128) {
+ n -= 128;
+ rte_mov128((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + 128;
+ dst = (uint8_t *)dst + 128;
+ }
+ if (n >= 64) {
+ n -= 64;
+ rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + 64;
+ dst = (uint8_t *)dst + 64;
+ }
+COPY_BLOCK_64_BACK31:
+ if (n > 32) {
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+ return ret;
+ }
+ if (n > 0) {
+ rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+ }
+ return ret;
+ }
+
+ /**
+ * Make store aligned when copy size exceeds 512 bytes
+ */
+ dstofss = 32 - ((uintptr_t)dst & 0x1F);
+ n -= dstofss;
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + dstofss;
+ dst = (uint8_t *)dst + dstofss;
+
+ /**
+ * Copy 256-byte blocks.
+ * Use copy block function for better instruction order control,
+ * which is important when load is unaligned.
+ */
+ rte_mov256blocks((uint8_t *)dst, (const uint8_t *)src, n);
+ bits = n;
+ n = n & 255;
+ bits -= n;
+ src = (const uint8_t *)src + bits;
+ dst = (uint8_t *)dst + bits;
+
+ /**
+ * Copy 64-byte blocks.
+ * Use copy block function for better instruction order control,
+ * which is important when load is unaligned.
+ */
+ if (n >= 64) {
+ rte_mov64blocks((uint8_t *)dst, (const uint8_t *)src, n);
+ bits = n;
+ n = n & 63;
+ bits -= n;
+ src = (const uint8_t *)src + bits;
+ dst = (uint8_t *)dst + bits;
+ }
+
+ /**
+ * Copy whatever left
+ */
+ goto COPY_BLOCK_64_BACK31;
+}
+
+#else /* RTE_MACHINE_CPUFLAG_AVX2 */
+
+/**
+ * SSE & AVX implementation below
+ */
+
+/**
+ * Copy 16 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+ __m128i xmm0;
+
+ xmm0 = _mm_loadu_si128((const __m128i *)src);
+ _mm_storeu_si128((__m128i *)dst, xmm0);
+}
+
+/**
+ * Copy 32 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
+ rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+}
+
+/**
+ * Copy 64 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
+ rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+ rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
+ rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
+}
+
+/**
+ * Copy 128 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
+ rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+ rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
+ rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
+ rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
+ rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
+ rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
+ rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
+}
+
+/**
+ * Copy 256 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
+ rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+ rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
+ rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
+ rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
+ rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
+ rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
+ rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
+ rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
+ rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
+ rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
+ rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
+ rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
+ rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
+ rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
+ rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
+}
+
+/**
+ * Macro for copying unaligned block from one location to another with constant load offset,
+ * 47 bytes leftover maximum,
+ * locations should not overlap.
+ * Requirements:
+ * - Store is aligned
+ * - Load offset is <offset>, which must be immediate value within [1, 15]
+ * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
+ * - <dst>, <src>, <len> must be variables
+ * - __m128i <xmm0> ~ <xmm8> must be pre-defined
+ */
+#define MOVEUNALIGNED_LEFT47_IMM(dst, src, len, offset) \
+({ \
+ int tmp; \
+ while (len >= 128 + 16 - offset) { \
+ xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
+ len -= 128; \
+ xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
+ xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
+ xmm3 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 3 * 16)); \
+ xmm4 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 4 * 16)); \
+ xmm5 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 5 * 16)); \
+ xmm6 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 6 * 16)); \
+ xmm7 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 7 * 16)); \
+ xmm8 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 8 * 16)); \
+ src = (const uint8_t *)src + 128; \
+ _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
+ _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
+ _mm_storeu_si128((__m128i *)((uint8_t *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
+ _mm_storeu_si128((__m128i *)((uint8_t *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
+ _mm_storeu_si128((__m128i *)((uint8_t *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
+ _mm_storeu_si128((__m128i *)((uint8_t *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
+ _mm_storeu_si128((__m128i *)((uint8_t *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
+ _mm_storeu_si128((__m128i *)((uint8_t *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
+ dst = (uint8_t *)dst + 128; \
+ } \
+ tmp = len; \
+ len = ((len - 16 + offset) & 127) + 16 - offset; \
+ tmp -= len; \
+ src = (const uint8_t *)src + tmp; \
+ dst = (uint8_t *)dst + tmp; \
+ if (len >= 32 + 16 - offset) { \
+ while (len >= 32 + 16 - offset) { \
+ xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
+ len -= 32; \
+ xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
+ xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
+ src = (const uint8_t *)src + 32; \
+ _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
+ _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
+ dst = (uint8_t *)dst + 32; \
+ } \
+ tmp = len; \
+ len = ((len - 16 + offset) & 31) + 16 - offset; \
+ tmp -= len; \
+ src = (const uint8_t *)src + tmp; \
+ dst = (uint8_t *)dst + tmp; \
+ } \
+})
+
+/**
+ * Macro for copying unaligned block from one location to another,
+ * 47 bytes leftover maximum,
+ * locations should not overlap.
+ * Use a switch here because the aligning instruction requires an immediate value for the shift count.
+ * Requirements:
+ * - Store is aligned
+ * - Load offset is <offset>, which must be within [1, 15]
+ * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
+ * - <dst>, <src>, <len> must be variables
+ * - __m128i <xmm0> ~ <xmm8> used in MOVEUNALIGNED_LEFT47_IMM must be pre-defined
+ */
+#define MOVEUNALIGNED_LEFT47(dst, src, len, offset) \
+({ \
+ switch (offset) { \
+ case 0x01: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x01); break; \
+ case 0x02: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x02); break; \
+ case 0x03: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x03); break; \
+ case 0x04: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x04); break; \
+ case 0x05: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x05); break; \
+ case 0x06: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x06); break; \
+ case 0x07: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x07); break; \
+ case 0x08: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x08); break; \
+ case 0x09: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x09); break; \
+ case 0x0A: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0A); break; \
+ case 0x0B: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0B); break; \
+ case 0x0C: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0C); break; \
+ case 0x0D: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0D); break; \
+ case 0x0E: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0E); break; \
+ case 0x0F: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0F); break; \
+ default:; \
+ } \
+})
+
+static inline void *
+rte_memcpy(void *dst, const void *src, size_t n)
+{
+ __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
+ uintptr_t dstu = (uintptr_t)dst;
+ uintptr_t srcu = (uintptr_t)src;
+ void *ret = dst;
+ size_t dstofss;
+ size_t srcofs;
+
+ /**
+ * Copy less than 16 bytes
+ */
+ if (n < 16) {
+ if (n & 0x01) {
+ *(uint8_t *)dstu = *(const uint8_t *)srcu;
+ srcu = (uintptr_t)((const uint8_t *)srcu + 1);
+ dstu = (uintptr_t)((uint8_t *)dstu + 1);
+ }
+ if (n & 0x02) {
+ *(uint16_t *)dstu = *(const uint16_t *)srcu;
+ srcu = (uintptr_t)((const uint16_t *)srcu + 1);
+ dstu = (uintptr_t)((uint16_t *)dstu + 1);
+ }
+ if (n & 0x04) {
+ *(uint32_t *)dstu = *(const uint32_t *)srcu;
+ srcu = (uintptr_t)((const uint32_t *)srcu + 1);
+ dstu = (uintptr_t)((uint32_t *)dstu + 1);
+ }
+ if (n & 0x08) {
+ *(uint64_t *)dstu = *(const uint64_t *)srcu;
+ }
+ return ret;
+ }
+
+ /**
+ * Fast way when copy size doesn't exceed 512 bytes
+ */
+ if (n <= 32) {
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ return ret;
+ }
+ if (n <= 48) {
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ return ret;
+ }
+ if (n <= 64) {
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
+ rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ return ret;
+ }
+ if (n <= 128) {
+ goto COPY_BLOCK_128_BACK15;
+ }
+ if (n <= 512) {
+ if (n >= 256) {
+ n -= 256;
+ rte_mov128((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov128((uint8_t *)dst + 128, (const uint8_t *)src + 128);
+ src = (const uint8_t *)src + 256;
+ dst = (uint8_t *)dst + 256;
+ }
+COPY_BLOCK_255_BACK15:
+ if (n >= 128) {
+ n -= 128;
+ rte_mov128((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + 128;
+ dst = (uint8_t *)dst + 128;
+ }
+COPY_BLOCK_128_BACK15:
+ if (n >= 64) {
+ n -= 64;
+ rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + 64;
+ dst = (uint8_t *)dst + 64;
+ }
+COPY_BLOCK_64_BACK15:
+ if (n >= 32) {
+ n -= 32;
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + 32;
+ dst = (uint8_t *)dst + 32;
+ }
+ if (n > 16) {
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ return ret;
+ }
+ if (n > 0) {
+ rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ }
+ return ret;
+ }
+
+ /**
+ * Make store aligned when copy size exceeds 512 bytes,
+ * and make sure the first 15 bytes are copied, because
+ * unaligned copy functions require up to 15 bytes
+ * backwards access.
+ */
+ dstofss = 16 - ((uintptr_t)dst & 0x0F) + 16;
+ n -= dstofss;
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ src = (const uint8_t *)src + dstofss;
+ dst = (uint8_t *)dst + dstofss;
+ srcofs = ((uintptr_t)src & 0x0F);
+
+ /**
+ * For aligned copy
+ */
+ if (srcofs == 0) {
+ /**
+ * Copy 256-byte blocks
+ */
+ for (; n >= 256; n -= 256) {
+ rte_mov256((uint8_t *)dst, (const uint8_t *)src);
+ dst = (uint8_t *)dst + 256;
+ src = (const uint8_t *)src + 256;
+ }
+
+ /**
+ * Copy whatever left
+ */
+ goto COPY_BLOCK_255_BACK15;
+ }
+
+ /**
+ * For copy with unaligned load
+ */
+ MOVEUNALIGNED_LEFT47(dst, src, n, srcofs);
+
+ /**
+ * Copy whatever left
+ */
+ goto COPY_BLOCK_64_BACK15;
+}
+
+#endif /* RTE_MACHINE_CPUFLAG_AVX2 */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMCPY_X86_64_H_ */
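
Editor's note: the whole header is compile-time dispatched: building with AVX2 enabled (RTE_MACHINE_CPUFLAG_AVX2) selects the 32-byte-register variant, otherwise the SSE/AVX one. Call sites are unchanged either way; a trivial sketch:

    uint8_t src[600], dst[600];

    memset(src, 0xab, sizeof(src));
    /* >512 bytes: takes the aligned-store path with the
     * unaligned-load fixup described above */
    rte_memcpy(dst, src, sizeof(src));
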
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_prefetch.h b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_prefetch.h
index ec2454dc..8e6e02cc 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_prefetch.h
+++ b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_prefetch.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -40,19 +40,19 @@ extern "C" {
#include "generic/rte_prefetch.h"
-static inline void rte_prefetch0(volatile void *p)
+static inline void rte_prefetch0(const volatile void *p)
{
- asm volatile ("prefetcht0 %[p]" : [p] "+m" (*(volatile char *)p));
+ asm volatile ("prefetcht0 %[p]" : : [p] "m" (*(const volatile char *)p));
}
-static inline void rte_prefetch1(volatile void *p)
+static inline void rte_prefetch1(const volatile void *p)
{
- asm volatile ("prefetcht1 %[p]" : [p] "+m" (*(volatile char *)p));
+ asm volatile ("prefetcht1 %[p]" : : [p] "m" (*(const volatile char *)p));
}
-static inline void rte_prefetch2(volatile void *p)
+static inline void rte_prefetch2(const volatile void *p)
{
- asm volatile ("prefetcht2 %[p]" : [p] "+m" (*(volatile char *)p));
+ asm volatile ("prefetcht2 %[p]" : : [p] "m" (*(const volatile char *)p));
}
#ifdef __cplusplus
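
Editor's note: the const-qualified prefetch signatures let rx loops prefetch read-only packet data without casts. A typical burst-processing sketch (handle_pkt() is a hypothetical handler):

    for (i = 0; i < nb_rx; i++) {
        if (i + 1 < nb_rx)   /* warm the next packet's data */
            rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 1], const void *));
        handle_pkt(pkts[i]);
    }
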
diff --git a/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_rtm.h b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_rtm.h
new file mode 100644
index 00000000..d9356419
--- /dev/null
+++ b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_rtm.h
@@ -0,0 +1,73 @@
+#ifndef _RTE_RTM_H_
+#define _RTE_RTM_H_ 1
+
+/*
+ * Copyright (c) 2012,2013 Intel Corporation
+ * Author: Andi Kleen
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that: (1) source code distributions
+ * retain the above copyright notice and this paragraph in its entirety, (2)
+ * distributions including binary code include the above copyright notice and
+ * this paragraph in its entirety in the documentation or other materials
+ * provided with the distribution
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* Official RTM intrinsics interface matching gcc/icc, which also works
+   on older gcc-compatible compilers and binutils. */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define RTE_XBEGIN_STARTED (~0u)
+#define RTE_XABORT_EXPLICIT (1 << 0)
+#define RTE_XABORT_RETRY (1 << 1)
+#define RTE_XABORT_CONFLICT (1 << 2)
+#define RTE_XABORT_CAPACITY (1 << 3)
+#define RTE_XABORT_DEBUG (1 << 4)
+#define RTE_XABORT_NESTED (1 << 5)
+#define RTE_XABORT_CODE(x) (((x) >> 24) & 0xff)
+
+static __attribute__((__always_inline__)) inline
+unsigned int rte_xbegin(void)
+{
+ unsigned int ret = RTE_XBEGIN_STARTED;
+
+ asm volatile(".byte 0xc7,0xf8 ; .long 0" : "+a" (ret) :: "memory");
+ return ret;
+}
+
+static __attribute__((__always_inline__)) inline
+void rte_xend(void)
+{
+ asm volatile(".byte 0x0f,0x01,0xd5" ::: "memory");
+}
+
+static __attribute__((__always_inline__)) inline
+void rte_xabort(const unsigned int status)
+{
+ asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory");
+}
+
+static __attribute__((__always_inline__)) inline
+int rte_xtest(void)
+{
+ unsigned char out;
+
+ asm volatile(".byte 0x0f,0x01,0xd6 ; setnz %0" :
+ "=r" (out) :: "memory");
+ return out;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_RTM_H_ */
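A minimal sketch of the begin/commit/fallback pattern these intrinsics are
meant to compose into (do_work() and fallback() are hypothetical; 0xff is an
arbitrary abort code):

    static void run_transactionally(void)
    {
        unsigned int status = rte_xbegin();

        if (status == RTE_XBEGIN_STARTED) {
            do_work();      /* speculative critical section */
            rte_xend();     /* commit */
            return;
        }
        /* Aborted: execution resumes here, abort reason in status. */
        if ((status & RTE_XABORT_EXPLICIT) &&
            RTE_XABORT_CODE(status) == 0xff) {
            /* explicitly aborted via rte_xabort(0xff) */
        }
        fallback();         /* non-transactional path */
    }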
diff --git a/src/dpdk_lib18/librte_pmd_ring/rte_eth_ring.h b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_rwlock.h
index e6ae19ed..afd1c3c2 100755..100644
--- a/src/dpdk_lib18/librte_pmd_ring/rte_eth_ring.h
+++ b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_rwlock.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,33 +31,52 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _RTE_ETH_RING_H_
-#define _RTE_ETH_RING_H_
+#ifndef _RTE_RWLOCK_X86_64_H_
+#define _RTE_RWLOCK_X86_64_H_
#ifdef __cplusplus
extern "C" {
#endif
-#include <rte_ring.h>
+#include "generic/rte_rwlock.h"
+#include "rte_spinlock.h"
-int rte_eth_from_rings(const char *name,
- struct rte_ring * const rx_queues[],
- const unsigned nb_rx_queues,
- struct rte_ring *const tx_queues[],
- const unsigned nb_tx_queues,
- const unsigned numa_node);
+static inline void
+rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
+{
+ if (likely(rte_try_tm(&rwl->cnt)))
+ return;
+ rte_rwlock_read_lock(rwl);
+}
-int rte_eth_ring_pair_create(const char *name, const unsigned numa_node);
-int rte_eth_ring_pair_attach(const char *name, const unsigned numa_node);
+static inline void
+rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
+{
+ if (unlikely(rwl->cnt))
+ rte_rwlock_read_unlock(rwl);
+ else
+ rte_xend();
+}
-/**
- * For use by test apps only. Called as part of EAL init to set up any dummy NICs
- * configured on command line.
- */
-int rte_pmd_ring_devinit(const char *name, const char *params);
+static inline void
+rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
+{
+ if (likely(rte_try_tm(&rwl->cnt)))
+ return;
+ rte_rwlock_write_lock(rwl);
+}
+
+static inline void
+rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
+{
+ if (unlikely(rwl->cnt))
+ rte_rwlock_write_unlock(rwl);
+ else
+ rte_xend();
+}
#ifdef __cplusplus
}
#endif
-#endif
+#endif /* _RTE_RWLOCK_X86_64_H_ */
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_spinlock.h b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
index 54fba957..20ef0a79 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_spinlock.h
+++ b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
@@ -39,6 +39,13 @@ extern "C" {
#endif
#include "generic/rte_spinlock.h"
+#include "rte_rtm.h"
+#include "rte_cpuflags.h"
+#include "rte_branch_prediction.h"
+#include "rte_common.h"
+
+#define RTE_RTM_MAX_RETRIES (10)
+#define RTE_XABORT_LOCK_BUSY (0xff)
#ifndef RTE_FORCE_INTRINSICS
static inline void
@@ -87,6 +94,106 @@ rte_spinlock_trylock (rte_spinlock_t *sl)
}
#endif
+static uint8_t rtm_supported; /* cache the flag to avoid the overhead
+ of the rte_cpu_get_flag_enabled function */
+
+static inline void __attribute__((constructor))
+rte_rtm_init(void)
+{
+ rtm_supported = rte_cpu_get_flag_enabled(RTE_CPUFLAG_RTM);
+}
+
+static inline int rte_tm_supported(void)
+{
+ return rtm_supported;
+}
+
+static inline int
+rte_try_tm(volatile int *lock)
+{
+ if (!rtm_supported)
+ return 0;
+
+ int retries = RTE_RTM_MAX_RETRIES;
+
+ while (likely(retries--)) {
+
+ unsigned int status = rte_xbegin();
+
+ if (likely(RTE_XBEGIN_STARTED == status)) {
+ if (unlikely(*lock))
+ rte_xabort(RTE_XABORT_LOCK_BUSY);
+ else
+ return 1;
+ }
+ while (*lock)
+ rte_pause();
+
+ if ((status & RTE_XABORT_EXPLICIT) &&
+ (RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))
+ continue;
+
+ if ((status & RTE_XABORT_RETRY) == 0) /* do not retry */
+ break;
+ }
+ return 0;
+}
+
+static inline void
+rte_spinlock_lock_tm(rte_spinlock_t *sl)
+{
+ if (likely(rte_try_tm(&sl->locked)))
+ return;
+
+ rte_spinlock_lock(sl); /* fall-back */
+}
+
+static inline int
+rte_spinlock_trylock_tm(rte_spinlock_t *sl)
+{
+ if (likely(rte_try_tm(&sl->locked)))
+ return 1;
+
+ return rte_spinlock_trylock(sl);
+}
+
+static inline void
+rte_spinlock_unlock_tm(rte_spinlock_t *sl)
+{
+ if (unlikely(sl->locked))
+ rte_spinlock_unlock(sl);
+ else
+ rte_xend();
+}
+
+static inline void
+rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
+{
+ if (likely(rte_try_tm(&slr->sl.locked)))
+ return;
+
+ rte_spinlock_recursive_lock(slr); /* fall-back */
+}
+
+static inline void
+rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
+{
+ if (unlikely(slr->sl.locked))
+ rte_spinlock_recursive_unlock(slr);
+ else
+ rte_xend();
+}
+
+static inline int
+rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
+{
+ if (likely(rte_try_tm(&slr->sl.locked)))
+ return 1;
+
+ return rte_spinlock_recursive_trylock(slr);
+}
+
+
#ifdef __cplusplus
}
#endif
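Usage sketch for the lock-elision wrappers above; counter is a hypothetical
shared variable. Pairing lock_tm with unlock_tm is mandatory, because
rte_spinlock_unlock_tm() inspects sl->locked to decide between rte_xend()
and a real unlock:

    static rte_spinlock_t lock = RTE_SPINLOCK_INITIALIZER;
    static uint64_t counter;

    static void increment(void)
    {
        rte_spinlock_lock_tm(&lock);    /* elide, or take the lock    */
        counter++;                      /* short, HW-I/O-free section */
        rte_spinlock_unlock_tm(&lock);  /* commit, or really unlock   */
    }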
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_common_vect.h b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_vect.h
index 95bf4b1a..b698797c 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_common_vect.h
+++ b/src/dpdk22/lib/librte_eal/common/include/arch/x86/rte_vect.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _RTE_COMMON_VECT_H_
-#define _RTE_COMMON_VECT_H_
+#ifndef _RTE_VECT_H_
+#define _RTE_VECT_H_
/**
* @file
@@ -50,10 +50,18 @@
#include <emmintrin.h>
#endif
+#ifdef __SSE3__
+#include <tmmintrin.h>
+#endif
+
#if defined(__SSE4_2__) || defined(__SSE4_1__)
#include <smmintrin.h>
#endif
+#if defined(__AVX__)
+#include <immintrin.h>
+#endif
+
#else
#include <x86intrin.h>
@@ -70,7 +78,7 @@ typedef __m128i xmm_t;
#define XMM_MASK (XMM_SIZE - 1)
typedef union rte_xmm {
- xmm_t m;
+ xmm_t x;
uint8_t u8[XMM_SIZE / sizeof(uint8_t)];
uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
@@ -78,16 +86,47 @@ typedef union rte_xmm {
double pd[XMM_SIZE / sizeof(double)];
} rte_xmm_t;
+#ifdef __AVX__
+
+typedef __m256i ymm_t;
+
+#define YMM_SIZE (sizeof(ymm_t))
+#define YMM_MASK (YMM_SIZE - 1)
+
+typedef union rte_ymm {
+ ymm_t y;
+ xmm_t x[YMM_SIZE / sizeof(xmm_t)];
+ uint8_t u8[YMM_SIZE / sizeof(uint8_t)];
+ uint16_t u16[YMM_SIZE / sizeof(uint16_t)];
+ uint32_t u32[YMM_SIZE / sizeof(uint32_t)];
+ uint64_t u64[YMM_SIZE / sizeof(uint64_t)];
+ double pd[YMM_SIZE / sizeof(double)];
+} rte_ymm_t;
+
+#endif /* __AVX__ */
+
#ifdef RTE_ARCH_I686
#define _mm_cvtsi128_si64(a) ({ \
rte_xmm_t m; \
- m.m = (a); \
+ m.x = (a); \
(m.u64[0]); \
})
#endif
+/*
+ * Prior to version 12.1 icc doesn't support _mm_set_epi64x.
+ */
+#if (defined(__ICC) && __ICC < 1210)
+#define _mm_set_epi64x(a, b) ({ \
+ rte_xmm_t m; \
+ m.u64[0] = b; \
+ m.u64[1] = a; \
+ (m.x); \
+})
+#endif /* (defined(__ICC) && __ICC < 1210) */
+
#ifdef __cplusplus
}
#endif
-#endif /* _RTE_COMMON__VECT_H_ */
+#endif /* _RTE_VECT_H_ */
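The union exists to give lane-wise access to a SIMD register without
strict-aliasing trouble; this patch renames the register member from m to x.
A small sketch assuming SSE2 and the definitions above:

    #include <emmintrin.h>

    static inline uint32_t sum_lanes(__m128i v)
    {
        rte_xmm_t m;

        m.x = v;                  /* store the register once */
        return m.u32[0] + m.u32[1] + m.u32[2] + m.u32[3];
    }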
diff --git a/src/dpdk_lib18/librte_eal/common/include/generic/rte_atomic.h b/src/dpdk22/lib/librte_eal/common/include/generic/rte_atomic.h
index 6c7581ad..26d1f56d 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/generic/rte_atomic.h
+++ b/src/dpdk22/lib/librte_eal/common/include/generic/rte_atomic.h
@@ -72,6 +72,33 @@ static inline void rte_wmb(void);
*/
static inline void rte_rmb(void);
+/**
+ * General memory barrier between lcores
+ *
+ * Guarantees that the LOAD and STORE operations that precede the
+ * rte_smp_mb() call are globally visible across the lcores
+ * before the LOAD and STORE operations that follow it.
+ */
+static inline void rte_smp_mb(void);
+
+/**
+ * Write memory barrier between lcores
+ *
+ * Guarantees that the STORE operations that precede the
+ * rte_smp_wmb() call are globally visible across the lcores
+ * before the STORE operations that follow it.
+ */
+static inline void rte_smp_wmb(void);
+
+/**
+ * Read memory barrier between lcores
+ *
+ * Guarantees that the LOAD operations that precede the
+ * rte_smp_rmb() call are globally visible across the lcores
+ * before the LOAD operations that follow it.
+ */
+static inline void rte_smp_rmb(void);
+
#endif /* __DOXYGEN__ */
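A sketch of the producer/consumer pairing these barriers are documented for;
data and ready are hypothetical shared variables:

    static int data;
    static volatile int ready;

    static void producer(void)
    {
        data = 42;      /* write the payload first          */
        rte_smp_wmb();  /* order STOREs across lcores       */
        ready = 1;      /* then publish the flag            */
    }

    static void consumer(void)
    {
        while (!ready)
            rte_pause();
        rte_smp_rmb();  /* order LOADs: flag before payload */
        /* data == 42 is now guaranteed to be visible */
    }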
/**
diff --git a/src/dpdk_lib18/librte_eal/common/include/generic/rte_byteorder.h b/src/dpdk22/lib/librte_eal/common/include/generic/rte_byteorder.h
index c46fdcf2..c46fdcf2 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/generic/rte_byteorder.h
+++ b/src/dpdk22/lib/librte_eal/common/include/generic/rte_byteorder.h
diff --git a/src/dpdk_lib18/librte_eal/common/include/generic/rte_cpuflags.h b/src/dpdk22/lib/librte_eal/common/include/generic/rte_cpuflags.h
index 7f048387..5738a2a7 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/generic/rte_cpuflags.h
+++ b/src/dpdk22/lib/librte_eal/common/include/generic/rte_cpuflags.h
@@ -47,12 +47,16 @@
/**
* Enumeration of all CPU features supported
*/
+#ifdef __DOXYGEN__
enum rte_cpu_flag_t;
+#endif
/**
* Enumeration of CPU registers
*/
+#ifdef __DOXYGEN__
enum cpu_register_t;
+#endif
typedef uint32_t cpuid_registers_t[4];
@@ -74,8 +78,12 @@ struct feature_entry {
/**
* An array that holds feature entries
+ *
+ * Defined in arch-specific rte_cpuflags.h.
*/
+#ifdef __DOXYGEN__
static const struct feature_entry cpu_feature_table[];
+#endif
/**
* Execute CPUID instruction and get contents of a specific register
@@ -89,15 +97,17 @@ rte_cpu_get_features(uint32_t leaf, uint32_t subleaf, cpuid_registers_t out);
/**
* Function for checking a CPU flag availability
*
- * @param flag
+ * @param feature
* CPU flag to query CPU for
* @return
* 1 if flag is available
* 0 if flag is not available
* -ENOENT if flag is invalid
*/
+#ifdef __DOXYGEN__
static inline int
rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature);
+#endif
/**
* This function checks that the currently used CPU supports the CPU features
diff --git a/src/dpdk_lib18/librte_eal/common/include/generic/rte_cycles.h b/src/dpdk22/lib/librte_eal/common/include/generic/rte_cycles.h
index 7700f411..8cc21f20 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/generic/rte_cycles.h
+++ b/src/dpdk22/lib/librte_eal/common/include/generic/rte_cycles.h
@@ -130,12 +130,12 @@ rte_get_hpet_hz(void);
* then the HPET functions are unavailable and should not be called.
*
* @param make_default
- * If set, the hpet timer becomes the default timer whose values are
- * returned by the rte_get_timer_hz/cycles API calls
+ * If set, the hpet timer becomes the default timer whose values are
+ * returned by the rte_get_timer_hz/cycles API calls
*
* @return
- * 0 on success,
- * -1 on error, and the make_default parameter is ignored.
+ * 0 on success,
+ * -1 on error, and the make_default parameter is ignored.
*/
int rte_eal_hpet_init(int make_default);
diff --git a/src/dpdk_lib18/librte_eal/common/include/generic/rte_memcpy.h b/src/dpdk22/lib/librte_eal/common/include/generic/rte_memcpy.h
index 03e84773..03e84773 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/generic/rte_memcpy.h
+++ b/src/dpdk22/lib/librte_eal/common/include/generic/rte_memcpy.h
diff --git a/src/dpdk_lib18/librte_eal/common/include/generic/rte_prefetch.h b/src/dpdk22/lib/librte_eal/common/include/generic/rte_prefetch.h
index 217f319b..725715ff 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/generic/rte_prefetch.h
+++ b/src/dpdk22/lib/librte_eal/common/include/generic/rte_prefetch.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,14 +51,14 @@
* @param p
* Address to prefetch
*/
-static inline void rte_prefetch0(volatile void *p);
+static inline void rte_prefetch0(const volatile void *p);
/**
* Prefetch a cache line into all cache levels except the 0th cache level.
* @param p
* Address to prefetch
*/
-static inline void rte_prefetch1(volatile void *p);
+static inline void rte_prefetch1(const volatile void *p);
/**
* Prefetch a cache line into all cache levels except the 0th and 1th cache
@@ -66,6 +66,6 @@ static inline void rte_prefetch1(volatile void *p);
* @param p
* Address to prefetch
*/
-static inline void rte_prefetch2(volatile void *p);
+static inline void rte_prefetch2(const volatile void *p);
#endif /* _RTE_PREFETCH_H_ */
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_rwlock.h b/src/dpdk22/lib/librte_eal/common/include/generic/rte_rwlock.h
index 115731de..7a0fdc55 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_rwlock.h
+++ b/src/dpdk22/lib/librte_eal/common/include/generic/rte_rwlock.h
@@ -151,6 +151,56 @@ rte_rwlock_write_unlock(rte_rwlock_t *rwl)
rte_atomic32_inc((rte_atomic32_t *)(intptr_t)&rwl->cnt);
}
+/**
+ * Try to execute the critical section in a hardware memory transaction;
+ * if it fails or is not available, take a read lock
+ *
+ * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+ * transaction always aborts the transaction since the CPU is not able to
+ * roll-back should the transaction fail. Therefore, hardware transactional
+ * locks are not advised to be used around rte_eth_rx_burst() and
+ * rte_eth_tx_burst() calls.
+ *
+ * @param rwl
+ * A pointer to a rwlock structure.
+ */
+static inline void
+rte_rwlock_read_lock_tm(rte_rwlock_t *rwl);
+
+/**
+ * Commit the hardware memory transaction, or release the read lock if the lock is used as a fall-back
+ *
+ * @param rwl
+ * A pointer to the rwlock structure.
+ */
+static inline void
+rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl);
+
+/**
+ * Try to execute the critical section in a hardware memory transaction;
+ * if it fails or is not available, take a write lock
+ *
+ * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+ * transaction always aborts the transaction since the CPU is not able to
+ * roll-back should the transaction fail. Therefore, hardware transactional
+ * locks are not advised to be used around rte_eth_rx_burst() and
+ * rte_eth_tx_burst() calls.
+ *
+ * @param rwl
+ * A pointer to a rwlock structure.
+ */
+static inline void
+rte_rwlock_write_lock_tm(rte_rwlock_t *rwl);
+
+/**
+ * Commit the hardware memory transaction, or release the write lock if the lock is used as a fall-back
+ *
+ * @param rwl
+ * A pointer to a rwlock structure.
+ */
+static inline void
+rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl);
+
#ifdef __cplusplus
}
#endif
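The same elision pattern, from the reader side; table_find() is hypothetical,
and the x86 implementations of these wrappers appear earlier in this patch:

    static rte_rwlock_t table_lock = RTE_RWLOCK_INITIALIZER;

    static int lookup(int key)
    {
        int v;

        rte_rwlock_read_lock_tm(&table_lock);    /* elide or read-lock */
        v = table_find(key);                     /* read-only section  */
        rte_rwlock_read_unlock_tm(&table_lock);  /* xend or unlock     */
        return v;
    }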
diff --git a/src/dpdk_lib18/librte_eal/common/include/generic/rte_spinlock.h b/src/dpdk22/lib/librte_eal/common/include/generic/rte_spinlock.h
index dea885c3..4e0a3c30 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/generic/rte_spinlock.h
+++ b/src/dpdk22/lib/librte_eal/common/include/generic/rte_spinlock.h
@@ -145,6 +145,59 @@ static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
}
/**
+ * Test if hardware transactional memory (lock elision) is supported
+ *
+ * @return
+ * 1 if the hardware transactional memory is supported; 0 otherwise.
+ */
+static inline int rte_tm_supported(void);
+
+/**
+ * Try to execute the critical section in a hardware memory transaction;
+ * if it fails or is not available, take the spinlock.
+ *
+ * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+ * transaction always aborts the transaction since the CPU is not able to
+ * roll-back should the transaction fail. Therefore, hardware transactional
+ * locks are not advised to be used around rte_eth_rx_burst() and
+ * rte_eth_tx_burst() calls.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ */
+static inline void
+rte_spinlock_lock_tm(rte_spinlock_t *sl);
+
+/**
+ * Commit the hardware memory transaction, or release the spinlock if
+ * the spinlock is used as a fall-back
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ */
+static inline void
+rte_spinlock_unlock_tm(rte_spinlock_t *sl);
+
+/**
+ * Try to execute the critical section in a hardware memory transaction;
+ * if it fails or is not available, try to take the lock.
+ *
+ * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+ * transaction always aborts the transaction since the CPU is not able to
+ * roll-back should the transaction fail. Therefore, hardware transactional
+ * locks are not advised to be used around rte_eth_rx_burst() and
+ * rte_eth_tx_burst() calls.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ * @return
+ * 1 if the hardware memory transaction is successfully started
+ * or lock is successfully taken; 0 otherwise.
+ */
+static inline int
+rte_spinlock_trylock_tm(rte_spinlock_t *sl);
+
+/**
* The rte_spinlock_recursive_t type.
*/
typedef struct {
@@ -179,7 +232,7 @@ static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
*/
static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
{
- int id = rte_lcore_id();
+ int id = rte_gettid();
if (slr->user != id) {
rte_spinlock_lock(&slr->sl);
@@ -212,7 +265,7 @@ static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
*/
static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
{
- int id = rte_lcore_id();
+ int id = rte_gettid();
if (slr->user != id) {
if (rte_spinlock_trylock(&slr->sl) == 0)
@@ -223,4 +276,50 @@ static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
return 1;
}
+
+/**
+ * Try to execute the critical section in a hardware memory transaction;
+ * if it fails or is not available, take the recursive spinlock
+ *
+ * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+ * transaction always aborts the transaction since the CPU is not able to
+ * roll-back should the transaction fail. Therefore, hardware transactional
+ * locks are not advised to be used around rte_eth_rx_burst() and
+ * rte_eth_tx_burst() calls.
+ *
+ * @param slr
+ * A pointer to the recursive spinlock.
+ */
+static inline void rte_spinlock_recursive_lock_tm(
+ rte_spinlock_recursive_t *slr);
+
+/**
+ * Commit the hardware memory transaction, or release the recursive spinlock
+ * if the recursive spinlock is used as a fall-back
+ *
+ * @param slr
+ * A pointer to the recursive spinlock.
+ */
+static inline void rte_spinlock_recursive_unlock_tm(
+ rte_spinlock_recursive_t *slr);
+
+/**
+ * Try to execute the critical section in a hardware memory transaction;
+ * if it fails or is not available, try to take the recursive lock
+ *
+ * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+ * transaction always aborts the transaction since the CPU is not able to
+ * roll-back should the transaction fail. Therefore, hardware transactional
+ * locks are not advised to be used around rte_eth_rx_burst() and
+ * rte_eth_tx_burst() calls.
+ *
+ * @param slr
+ * A pointer to the recursive spinlock.
+ * @return
+ * 1 if the hardware memory transaction is successfully started
+ * or lock is successfully taken; 0 otherwise.
+ */
+static inline int rte_spinlock_recursive_trylock_tm(
+ rte_spinlock_recursive_t *slr);
+
#endif /* _RTE_SPINLOCK_H_ */
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_alarm.h b/src/dpdk22/lib/librte_eal/common/include/rte_alarm.h
index 4012cd67..4012cd67 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_alarm.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_alarm.h
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_branch_prediction.h b/src/dpdk22/lib/librte_eal/common/include/rte_branch_prediction.h
index a6a56d17..a6a56d17 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_branch_prediction.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_branch_prediction.h
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_common.h b/src/dpdk22/lib/librte_eal/common/include/rte_common.h
index 921b91f3..b58a3841 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_common.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_common.h
@@ -51,6 +51,37 @@ extern "C" {
#include <errno.h>
#include <limits.h>
+#ifndef typeof
+#define typeof __typeof__
+#endif
+
+#ifndef asm
+#define asm __asm__
+#endif
+
+#ifdef RTE_ARCH_STRICT_ALIGN
+typedef uint64_t unaligned_uint64_t __attribute__ ((aligned(1)));
+typedef uint32_t unaligned_uint32_t __attribute__ ((aligned(1)));
+typedef uint16_t unaligned_uint16_t __attribute__ ((aligned(1)));
+#else
+typedef uint64_t unaligned_uint64_t;
+typedef uint32_t unaligned_uint32_t;
+typedef uint16_t unaligned_uint16_t;
+#endif
+
+/**
+ * Force alignment
+ */
+#define __rte_aligned(a) __attribute__((__aligned__(a)))
+
+/**
+ * Force a structure to be packed
+ */
+#define __rte_packed __attribute__((__packed__))
+
+/******* Macro to mark functions and fields scheduled for removal *****/
+#define __rte_deprecated __attribute__((__deprecated__))
+
/*********** Macros to eliminate unused variable warnings ********/
/**
@@ -85,25 +116,6 @@ extern "C" {
/*********** Macros/static functions for doing alignment ********/
-/**
- * Function which rounds an unsigned int down to a given power-of-two value.
- * Takes uintptr_t types as parameters, as this type of operation is most
- * commonly done for pointer alignment. (See also RTE_ALIGN_FLOOR,
- * RTE_ALIGN_CEIL, RTE_ALIGN, RTE_PTR_ALIGN_FLOOR, RTE_PTR_ALIGN_CEL,
- * RTE_PTR_ALIGN macros)
- * @param ptr
- * The value to be rounded down
- * @param align
- * The power-of-two of which the result must be a multiple.
- * @return
- * Function returns a properly aligned value where align is a power-of-two.
- * If align is not a power-of-two, result will be incorrect.
- */
-static inline uintptr_t
-rte_align_floor_int(uintptr_t ptr, uintptr_t align)
-{
- return (ptr & ~(align - 1));
-}
/**
* Macro to align a pointer to a given power-of-two. The resultant
@@ -112,7 +124,7 @@ rte_align_floor_int(uintptr_t ptr, uintptr_t align)
* must be a power-of-two value.
*/
#define RTE_PTR_ALIGN_FLOOR(ptr, align) \
- (typeof(ptr))rte_align_floor_int((uintptr_t)ptr, align)
+ ((typeof(ptr))RTE_ALIGN_FLOOR((uintptr_t)ptr, align))
/**
* Macro to align a value to a given power-of-two. The resultant value
@@ -203,7 +215,7 @@ extern int RTE_BUILD_BUG_ON_detected_error;
static inline int
rte_is_power_of_2(uint32_t n)
{
- return ((n-1) & n) == 0;
+ return n && !(n & (n - 1));
}
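The rewritten predicate fixes a real corner case: the old form
((n-1) & n) == 0 reports n == 0 as a power of two, since 0xffffffff & 0 is 0.
A quick check:

    #include <assert.h>

    static void check_power_of_2(void)
    {
        assert(rte_is_power_of_2(1));
        assert(rte_is_power_of_2(64));
        assert(!rte_is_power_of_2(6));  /* 6 & 5 == 4, non-zero     */
        assert(!rte_is_power_of_2(0));  /* old form returned 1 here */
    }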
/**
@@ -231,8 +243,8 @@ rte_align32pow2(uint32_t x)
/**
* Aligns 64b input parameter to the next power of 2
*
- * @param x
- * The 64b value to algin
+ * @param v
+ * The 64b value to align
*
* @return
* Input parameter aligned to the next power of 2
@@ -302,7 +314,7 @@ rte_pause(void) {}
static inline uint32_t
rte_bsf32(uint32_t v)
{
- return (__builtin_ctz(v));
+ return __builtin_ctz(v);
}
#ifndef offsetof
@@ -314,7 +326,7 @@ rte_bsf32(uint32_t v)
/** Take a macro value and get a string version of it */
#define RTE_STR(x) _RTE_STR(x)
-/** Mask value of type <tp> for the first <ln> bit set. */
+/** Mask value of type "tp" for the first "ln" bit set. */
#define RTE_LEN2MASK(ln, tp) \
((tp)((uint64_t)-1 >> (sizeof(uint64_t) * CHAR_BIT - (ln))))
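Worked expansion of the macro: with ln = 6 the shift is 64 - 6 = 58, so
(uint64_t)-1 >> 58 == 0x3f, which is then cast to the requested type:

    uint8_t low6 = RTE_LEN2MASK(6, uint8_t);  /* low6 == 0x3f */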
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_debug.h b/src/dpdk22/lib/librte_eal/common/include/rte_debug.h
index 82ee3b34..94129fab 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_debug.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_debug.h
@@ -70,10 +70,8 @@ void rte_dump_registers(void);
*
* The function never returns.
*
- * @param format
- * The format string
- * @param args
- * The variable list of arguments.
+ * @param ...
+ * The format string, followed by the variable list of arguments.
*/
#define rte_panic(...) rte_panic_(__func__, __VA_ARGS__, "dummy")
#define rte_panic_(func, format, ...) __rte_panic(func, format "%.0s", __VA_ARGS__)
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_dev.h b/src/dpdk22/lib/librte_eal/common/include/rte_dev.h
index f7e3a104..f1b55079 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_dev.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_dev.h
@@ -46,8 +46,61 @@
extern "C" {
#endif
+#include <stdio.h>
#include <sys/queue.h>
+#include <rte_log.h>
+
+__attribute__((format(printf, 2, 0)))
+static inline void
+rte_pmd_debug_trace(const char *func_name, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+
+ char buffer[vsnprintf(NULL, 0, fmt, ap) + 1];
+
+ va_end(ap);
+
+ va_start(ap, fmt);
+ vsnprintf(buffer, sizeof(buffer), fmt, ap);
+ va_end(ap);
+
+ rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, "%s: %s", func_name, buffer);
+}
+
+/* Macros for checking for restricting functions to primary instance only */
+#define RTE_PROC_PRIMARY_OR_ERR_RET(retval) do { \
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
+ RTE_PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
+ return retval; \
+ } \
+} while (0)
+
+#define RTE_PROC_PRIMARY_OR_RET() do { \
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
+ RTE_PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
+ return; \
+ } \
+} while (0)
+
+/* Macros to check for invalid function pointers */
+#define RTE_FUNC_PTR_OR_ERR_RET(func, retval) do { \
+ if ((func) == NULL) { \
+ RTE_PMD_DEBUG_TRACE("Function not supported\n"); \
+ return retval; \
+ } \
+} while (0)
+
+#define RTE_FUNC_PTR_OR_RET(func) do { \
+ if ((func) == NULL) { \
+ RTE_PMD_DEBUG_TRACE("Function not supported\n"); \
+ return; \
+ } \
+} while (0)
+
+
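A sketch of how a PMD entry point is expected to use these guards. The
device structure and ops are hypothetical, and the macro bodies rely on
RTE_PMD_DEBUG_TRACE, which is defined elsewhere (presumably on top of
rte_pmd_debug_trace() above):

    int my_dev_start(struct my_dev *dev)
    {
        /* refuse to run in a secondary process */
        RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
        /* refuse if the driver did not implement the op */
        RTE_FUNC_PTR_OR_ERR_RET(dev->ops->start, -ENOTSUP);
        return dev->ops->start(dev);
    }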
/** Double linked list of device drivers. */
TAILQ_HEAD(rte_driver_list, rte_driver);
@@ -57,6 +110,11 @@ TAILQ_HEAD(rte_driver_list, rte_driver);
typedef int (rte_dev_init_t)(const char *name, const char *args);
/**
+ * Uninitialization function called once for each device driver.
+ */
+typedef int (rte_dev_uninit_t)(const char *name);
+
+/**
* Driver type enumeration
*/
enum pmd_type {
@@ -72,6 +130,7 @@ struct rte_driver {
enum pmd_type type; /**< PMD Driver type */
const char *name; /**< Driver name. */
rte_dev_init_t *init; /**< Device init. function. */
+ rte_dev_uninit_t *uninit; /**< Device uninit. function. */
};
/**
@@ -97,6 +156,28 @@ void rte_eal_driver_unregister(struct rte_driver *driver);
*/
int rte_eal_dev_init(void);
+/**
+ * Initialize a driver specified by name.
+ *
+ * @param name
+ * The pointer to a driver name to be initialized.
+ * @param args
+ * The pointer to arguments used by driver initialization.
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_vdev_init(const char *name, const char *args);
+
+/**
+ * Uninitialize a driver specified by name.
+ *
+ * @param name
+ * The pointer to a driver name to be uninitialized.
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_vdev_uninit(const char *name);
+
#define PMD_REGISTER_DRIVER(d)\
void devinitfn_ ##d(void);\
void __attribute__((constructor, used)) devinitfn_ ##d(void)\
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_devargs.h b/src/dpdk22/lib/librte_eal/common/include/rte_devargs.h
index 9f9c98f1..53c59f56 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_devargs.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_devargs.h
@@ -86,10 +86,10 @@ struct rte_devargs {
struct {
/** Driver name. */
char drv_name[32];
- } virtual;
+ } virt;
};
-#define RTE_DEVARGS_LEN 256
- char args[RTE_DEVARGS_LEN]; /**< Arguments string as given by user. */
+ /** Arguments string as given by user or "" for no argument. */
+ char *args;
};
/** user device double-linked queue type definition */
@@ -99,6 +99,34 @@ TAILQ_HEAD(rte_devargs_list, rte_devargs);
extern struct rte_devargs_list devargs_list;
/**
+ * Parse a devargs string.
+ *
+ * For PCI devices, the format of arguments string is "PCI_ADDR" or
+ * "PCI_ADDR,key=val,key2=val2,...". Examples: "08:00.1", "0000:5:00.0",
+ * "04:00.0,arg=val".
+ *
+ * For virtual devices, the format of arguments string is "DRIVER_NAME*"
+ * or "DRIVER_NAME*,key=val,key2=val2,...". Examples: "eth_ring",
+ * "eth_ring0", "eth_pmdAnything,arg=0:arg2=1".
+ *
+ * The function parses the arguments string to get driver name and driver
+ * arguments.
+ *
+ * @param devargs_str
+ * The arguments as given by the user.
+ * @param drvname
+ * The pointer to the string to store parsed driver name.
+ * @param drvargs
+ * The pointer to the string to store parsed driver arguments.
+ *
+ * @return
+ * - 0 on success
+ * - A negative value on error
+ */
+int rte_eal_parse_devargs_str(const char *devargs_str,
+ char **drvname, char **drvargs);
+
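A sketch under the semantics documented above, assuming the two output
strings are heap-allocated by the function and owned by the caller:

    char *name = NULL, *args = NULL;

    if (rte_eal_parse_devargs_str("eth_ring0,nodeaction=r1:0:CREATE",
                                  &name, &args) == 0) {
        /* name -> "eth_ring0", args -> "nodeaction=r1:0:CREATE" */
        free(name);
        free(args);
    }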
+/**
* Add a device to the user device list
*
* For PCI devices, the format of arguments string is "PCI_ADDR" or
@@ -113,7 +141,7 @@ extern struct rte_devargs_list devargs_list;
*
* @param devtype
* The type of the device.
- * @param devargs_list
+ * @param devargs_str
* The arguments as given by the user.
*
* @return
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_eal.h b/src/dpdk22/lib/librte_eal/common/include/rte_eal.h
index f4ecd2e0..d2816a84 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_eal.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_eal.h
@@ -41,6 +41,9 @@
*/
#include <stdint.h>
+#include <sched.h>
+
+#include <rte_per_lcore.h>
#ifdef __cplusplus
extern "C" {
@@ -48,6 +51,9 @@ extern "C" {
#define RTE_MAGIC 19820526 /**< Magic number written by the main partition when ready. */
+/* Maximum thread_name length. */
+#define RTE_MAX_THREAD_NAME_LEN 16
+
/**
* The lcore role (used in RTE or not).
*/
@@ -168,7 +174,7 @@ typedef void (*rte_usage_hook_t)(const char * prgname);
* This routine is optional for the application and will behave as if the set
* routine was never called as the default behavior.
*
- * @param func
+ * @param usage_func
* The func argument is a function pointer to the application usage routine.
* Called function is defined using rte_usage_hook_t typedef, which is of
* the form void rte_usage_func(const char * prgname).
@@ -180,7 +186,7 @@ typedef void (*rte_usage_hook_t)(const char * prgname);
* the caller to daisy chain the usage routines if needing more then one.
*/
rte_usage_hook_t
-rte_set_application_usage_hook( rte_usage_hook_t usage_func );
+rte_set_application_usage_hook(rte_usage_hook_t usage_func);
/**
* macro to get the lock of tailq in mem_config
@@ -192,64 +198,6 @@ rte_set_application_usage_hook( rte_usage_hook_t usage_func );
*/
#define RTE_EAL_MEMPOOL_RWLOCK (&rte_eal_get_configuration()->mem_config->mplock)
-
-/**
- * Utility macro to do a thread-safe tailq 'INSERT' of rte_mem_config
- *
- * @param idx
- * a kind of tailq define in enum rte_tailq_t
- *
- * @param type
- * type of list(tailq head)
- *
- * @param elm
- * The element will be added into the list
- *
- */
-#define RTE_EAL_TAILQ_INSERT_TAIL(idx, type, elm) do { \
- struct type *list; \
- list = RTE_TAILQ_LOOKUP_BY_IDX(idx, type); \
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); \
- TAILQ_INSERT_TAIL(list, elm, next); \
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); \
-} while (0)
-
-/**
- * Utility macro to do a thread-safe tailq 'REMOVE' of rte_mem_config
- *
- * @param idx
- * a kind of tailq define in enum rte_tailq_t
- *
- * @param type
- * type of list(tailq head)
- *
- * @param elm
- * The element will be remove from the list
- *
- */
-#define RTE_EAL_TAILQ_REMOVE(idx, type, elm) do { \
- struct type *list; \
- list = RTE_TAILQ_LOOKUP_BY_IDX(idx, type); \
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); \
- TAILQ_REMOVE(list, elm, next); \
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); \
-} while (0) \
-
-
-/**
- * macro to check TAILQ exist
- *
- * @param idx
- * a kind of tailq define in enum rte_tailq_t
- *
- */
-#define RTE_EAL_TAILQ_EXIST_CHECK(idx) do { \
- if (RTE_TAILQ_LOOKUP_BY_IDX(idx, rte_tailq_head) == NULL){ \
- rte_errno = E_RTE_NO_TAILQ; \
- return NULL; \
- } \
-} while(0)
-
/**
* Whether EAL is using huge pages (disabled by --no-huge option).
* The no-huge mode cannot be used with UIO poll-mode drivers like igb/ixgbe.
@@ -262,6 +210,30 @@ rte_set_application_usage_hook( rte_usage_hook_t usage_func );
*/
int rte_eal_has_hugepages(void);
+/**
+ * A wrapper for the gettid syscall.
+ *
+ * @return
+ * On success, returns the thread ID of the calling thread.
+ * It is always successful.
+ */
+int rte_sys_gettid(void);
+
+/**
+ * Get system unique thread id.
+ *
+ * @return
+ * On success, returns the thread ID of the calling thread.
+ * It is always successful.
+ */
+static inline int rte_gettid(void)
+{
+ static RTE_DEFINE_PER_LCORE(int, _thread_id) = -1;
+ if (RTE_PER_LCORE(_thread_id) == -1)
+ RTE_PER_LCORE(_thread_id) = rte_sys_gettid();
+ return RTE_PER_LCORE(_thread_id);
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_eal_memconfig.h b/src/dpdk22/lib/librte_eal/common/include/rte_eal_memconfig.h
index d6359e51..2b5e0b17 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_eal_memconfig.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_eal_memconfig.h
@@ -45,15 +45,6 @@ extern "C" {
#endif
/**
- * Index type of tailq_head
- */
-enum rte_tailq_t {
-#define rte_tailq_elem(idx, name) idx,
-#define rte_tailq_end(idx) idx
-#include <rte_tailq_elem.h>
-};
-
-/**
* the structure for the memory configuration for the RTE.
* Used by the rte_config structure. It is separated out, as for multi-process
* support, the memory details should be shared across instances
@@ -76,15 +67,12 @@ struct rte_mem_config {
rte_rwlock_t qlock; /**< used for tailq operation for thread safe. */
rte_rwlock_t mplock; /**< only used by mempool LIB for thread-safe. */
- uint32_t memzone_idx; /**< Index of memzone */
+ uint32_t memzone_cnt; /**< Number of allocated memzones */
/* memory segments and zones */
struct rte_memseg memseg[RTE_MAX_MEMSEG]; /**< Physmem descriptors. */
struct rte_memzone memzone[RTE_MAX_MEMZONE]; /**< Memzone descriptors. */
- /* Runtime Physmem descriptors. */
- struct rte_memseg free_memseg[RTE_MAX_MEMSEG];
-
struct rte_tailq_head tailq_head[RTE_MAX_TAILQ]; /**< Tailqs for objects */
/* Heaps of Malloc per socket */
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_errno.h b/src/dpdk22/lib/librte_eal/common/include/rte_errno.h
index 45910cdc..2e5cc454 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_errno.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_errno.h
@@ -84,7 +84,6 @@ enum {
E_RTE_SECONDARY, /**< Operation not allowed in secondary processes */
E_RTE_NO_CONFIG, /**< Missing rte_config */
- E_RTE_NO_TAILQ, /**< Uninitialised TAILQ */
RTE_MAX_ERRNO /**< Max RTE error number */
};
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_hexdump.h b/src/dpdk22/lib/librte_eal/common/include/rte_hexdump.h
index 891c77bf..5c18a50b 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_hexdump.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_hexdump.h
@@ -49,15 +49,15 @@ extern "C" {
* Dump out memory in a special hex dump format.
*
* @param f
-* A pointer to a file for output
+* A pointer to a file for output
* @param title
-* If not NULL this string is printed as a header to the output.
+* If not NULL this string is printed as a header to the output.
* @param buf
-* This is the buffer address to print out.
+* This is the buffer address to print out.
* @param len
-* The number of bytes to dump out
+* The number of bytes to dump out
* @return
-* None.
+* None.
*/
extern void
@@ -67,15 +67,15 @@ rte_hexdump(FILE *f, const char * title, const void * buf, unsigned int len);
* Dump out memory in a hex format with colons between bytes.
*
* @param f
-* A pointer to a file for output
+* A pointer to a file for output
* @param title
-* If not NULL this string is printed as a header to the output.
+* If not NULL this string is printed as a header to the output.
* @param buf
-* This is the buffer address to print out.
+* This is the buffer address to print out.
* @param len
-* The number of bytes to dump out
+* The number of bytes to dump out
* @return
-* None.
+* None.
*/
void
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_interrupts.h b/src/dpdk22/lib/librte_eal/common/include/rte_interrupts.h
index 609c34bc..ff11ef3a 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_interrupts.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_interrupts.h
@@ -118,4 +118,3 @@ int rte_intr_disable(struct rte_intr_handle *intr_handle);
#endif
#endif
-
diff --git a/src/dpdk22/lib/librte_eal/common/include/rte_keepalive.h b/src/dpdk22/lib/librte_eal/common/include/rte_keepalive.h
new file mode 100644
index 00000000..02472c02
--- /dev/null
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_keepalive.h
@@ -0,0 +1,146 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2015 Intel Shannon Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file rte_keepalive.h
+ * DPDK RTE LCore Keepalive Monitor.
+ */
+
+#ifndef _KEEPALIVE_H_
+#define _KEEPALIVE_H_
+
+#include <rte_memory.h>
+
+#ifndef RTE_KEEPALIVE_MAXCORES
+/**
+ * Number of cores to track.
+ * @note Must be larger than the highest core id. */
+#define RTE_KEEPALIVE_MAXCORES RTE_MAX_LCORE
+#endif
+
+
+/**
+ * Keepalive failure callback.
+ *
+ * Receives a data pointer passed to rte_keepalive_create() and the id of the
+ * failed core.
+ */
+typedef void (*rte_keepalive_failure_callback_t)(
+ void *data,
+ const int id_core);
+
+
+/**
+ * Keepalive state structure.
+ * @internal
+ */
+struct rte_keepalive {
+ /** Core Liveness. */
+ enum {
+ ALIVE = 1,
+ MISSING = 0,
+ DEAD = 2,
+ GONE = 3
+ } __rte_cache_aligned state_flags[RTE_KEEPALIVE_MAXCORES];
+
+ /** Last-seen-alive timestamps */
+ uint64_t last_alive[RTE_KEEPALIVE_MAXCORES];
+
+ /**
+ * Cores to check.
+ * Indexed by core id, non-zero if the core should be checked.
+ */
+ uint8_t active_cores[RTE_KEEPALIVE_MAXCORES];
+
+ /** Dead core handler. */
+ rte_keepalive_failure_callback_t callback;
+
+ /**
+ * Dead core handler app data.
+ * Pointer is passed to dead core handler.
+ */
+ void *callback_data;
+ uint64_t tsc_initial;
+ uint64_t tsc_mhz;
+};
+
+
+/**
+ * Initialise keepalive sub-system.
+ * @param callback
+ * Function called upon detection of a dead core.
+ * @param data
+ * Data pointer to be passed to function callback.
+ * @return
+ * Keepalive structure on success, NULL on failure.
+ */
+struct rte_keepalive *rte_keepalive_create(
+ rte_keepalive_failure_callback_t callback,
+ void *data);
+
+
+/**
+ * Checks & handles keepalive state of monitored cores.
+ * @param *ptr_timer Triggering timer (unused)
+ * @param *ptr_data Data pointer (keepalive structure)
+ */
+void rte_keepalive_dispatch_pings(void *ptr_timer, void *ptr_data);
+
+
+/**
+ * Registers a core for keepalive checks.
+ * @param *keepcfg
+ * Keepalive structure pointer
+ * @param id_core
+ * ID number of core to register.
+ */
+void rte_keepalive_register_core(struct rte_keepalive *keepcfg,
+ const int id_core);
+
+
+/**
+ * Per-core keepalive check.
+ * @param *keepcfg
+ * Keepalive structure pointer
+ *
+ * This function needs to be called from within the main process loop of
+ * the LCore to be checked.
+ */
+static inline void
+rte_keepalive_mark_alive(struct rte_keepalive *keepcfg)
+{
+ keepcfg->state_flags[rte_lcore_id()] = ALIVE;
+}
+
+
+#endif /* _KEEPALIVE_H_ */
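A sketch of the intended wiring, per the declarations above: the master core
dispatches pings from a periodic timer while each worker marks itself alive
in its main loop. on_core_death() and the timer setup are hypothetical:

    #include <stdio.h>

    static void on_core_death(void *data, const int id_core)
    {
        printf("lcore %d stopped responding\n", id_core);
    }

    static void setup(void)
    {
        struct rte_keepalive *ka =
            rte_keepalive_create(on_core_death, NULL);

        rte_keepalive_register_core(ka, 1);  /* watch worker lcore 1 */
        /* ... arm a periodic timer whose callback invokes
         * rte_keepalive_dispatch_pings(timer, ka) ... */
    }

    static void worker_loop(struct rte_keepalive *ka)
    {
        for (;;) {
            rte_keepalive_mark_alive(ka);    /* every iteration */
            /* ... do work ... */
        }
    }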
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_launch.h b/src/dpdk22/lib/librte_eal/common/include/rte_launch.h
index dd1946da..dd1946da 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_launch.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_launch.h
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_lcore.h b/src/dpdk22/lib/librte_eal/common/include/rte_lcore.h
index 49b2c034..25460b92 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_lcore.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_lcore.h
@@ -48,7 +48,14 @@
extern "C" {
#endif
-#define LCORE_ID_ANY -1 /**< Any lcore. */
+#define LCORE_ID_ANY UINT32_MAX /**< Any lcore. */
+
+#if defined(__linux__)
+ typedef cpu_set_t rte_cpuset_t;
+#elif defined(__FreeBSD__)
+#include <pthread_np.h>
+ typedef cpuset_t rte_cpuset_t;
+#endif
/**
* Structure storing internal configuration (per-lcore)
@@ -65,6 +72,7 @@ struct lcore_config {
unsigned socket_id; /**< physical socket id for this lcore */
unsigned core_id; /**< core number on socket for this lcore */
int core_index; /**< relative index, starting from 0 */
+ rte_cpuset_t cpuset; /**< cpu set to which the lcore has affinity */
};
/**
@@ -72,12 +80,13 @@ struct lcore_config {
*/
extern struct lcore_config lcore_config[RTE_MAX_LCORE];
-RTE_DECLARE_PER_LCORE(unsigned, _lcore_id); /**< Per core "core id". */
+RTE_DECLARE_PER_LCORE(unsigned, _lcore_id); /**< Per thread "lcore id". */
+RTE_DECLARE_PER_LCORE(rte_cpuset_t, _cpuset); /**< Per thread "cpuset". */
/**
* Return the ID of the execution unit we are running on.
* @return
- * Logical core ID
+ * Logical core ID (in EAL thread) or LCORE_ID_ANY (in non-EAL thread)
*/
static inline unsigned
rte_lcore_id(void)
@@ -135,11 +144,7 @@ rte_lcore_index(int lcore_id)
* @return
* the ID of current lcoreid's physical socket
*/
-static inline unsigned
-rte_socket_id(void)
-{
- return lcore_config[rte_lcore_id()].socket_id;
-}
+unsigned rte_socket_id(void);
/**
* Get the ID of the physical socket of the specified lcore
@@ -221,6 +226,48 @@ rte_get_next_lcore(unsigned i, int skip_master, int wrap)
i<RTE_MAX_LCORE; \
i = rte_get_next_lcore(i, 1, 0))
+/**
+ * Set core affinity of the current thread.
+ * Support both EAL and non-EAL thread and update TLS.
+ *
+ * @param cpusetp
+ * Pointer to a cpu_set_t for setting the current thread's affinity.
+ * @return
+ * On success, return 0; otherwise return -1.
+ */
+int rte_thread_set_affinity(rte_cpuset_t *cpusetp);
+
+/**
+ * Get core affinity of the current thread.
+ *
+ * @param cpusetp
+ * Pointer to a cpu_set_t for getting the current thread's CPU affinity.
+ * It presumes the input is not NULL; otherwise it causes a panic.
+ *
+ */
+void rte_thread_get_affinity(rte_cpuset_t *cpusetp);
+
+/**
+ * Set thread names.
+ *
+ * Macro to wrap `pthread_setname_np()` with a glibc version check.
+ * Only glibc >= 2.12 supports this feature.
+ *
+ * This macro is only used on Linux; BSD calls libc directly.
+ * The BSD libc version of the function is `pthread_set_name_np()`.
+ */
+#if defined(__DOXYGEN__)
+#define rte_thread_setname(...) pthread_setname_np(__VA_ARGS__)
+#endif
+
+#if defined(__GLIBC__) && defined(__GLIBC_PREREQ)
+#if __GLIBC_PREREQ(2, 12)
+#define rte_thread_setname(...) pthread_setname_np(__VA_ARGS__)
+#else
+#define rte_thread_setname(...) 0
+#endif
+#endif
+
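A Linux-flavoured sketch combining the affinity and naming APIs above; the
name must fit RTE_MAX_THREAD_NAME_LEN (16 bytes including the terminator):

    #include <pthread.h>

    rte_cpuset_t cpuset;

    CPU_ZERO(&cpuset);
    CPU_SET(2, &cpuset);                 /* pin this thread to CPU 2 */
    if (rte_thread_set_affinity(&cpuset) == 0)
        rte_thread_setname(pthread_self(), "worker-2");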
#ifdef __cplusplus
}
#endif
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_log.h b/src/dpdk22/lib/librte_eal/common/include/rte_log.h
index db1ea08c..2e47e7f6 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_log.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_log.h
@@ -77,6 +77,8 @@ extern struct rte_logs rte_logs;
#define RTE_LOGTYPE_PORT 0x00002000 /**< Log related to port. */
#define RTE_LOGTYPE_TABLE 0x00004000 /**< Log related to table. */
#define RTE_LOGTYPE_PIPELINE 0x00008000 /**< Log related to pipeline. */
+#define RTE_LOGTYPE_MBUF 0x00010000 /**< Log related to mbuf. */
+#define RTE_LOGTYPE_CRYPTODEV 0x00020000 /**< Log related to cryptodev. */
/* these log types can be used in an application */
#define RTE_LOGTYPE_USER1 0x01000000 /**< User-defined log type 1. */
@@ -144,6 +146,11 @@ uint32_t rte_get_log_level(void);
void rte_set_log_type(uint32_t type, int enable);
/**
+ * Get the global log type.
+ */
+uint32_t rte_get_log_type(void);
+
+/**
* Get the current loglevel for the message being processed.
*
* Before calling the user-defined stream for logging, the log
@@ -284,19 +291,15 @@ int rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap)
* @param t
* The log type, for example, EAL. The short name is expanded by the
* macro, so it cannot be an integer value.
- * @param fmt
+ * @param ...
* The fmt string, as in printf(3), followed by the variable arguments
* required by the format.
- * @param args
- * The variable list of arguments according to the format string.
* @return
* - 0: Success.
* - Negative on error.
*/
#define RTE_LOG(l, t, ...) \
- (void)(((RTE_LOG_ ## l <= RTE_LOG_LEVEL) && \
- (RTE_LOG_ ## l <= rte_logs.level) && \
- (RTE_LOGTYPE_ ## t & rte_logs.type)) ? \
+ (void)((RTE_LOG_ ## l <= RTE_LOG_LEVEL) ? \
rte_log(RTE_LOG_ ## l, \
RTE_LOGTYPE_ ## t, # t ": " __VA_ARGS__) : \
0)
diff --git a/src/dpdk_lib18/librte_malloc/rte_malloc.h b/src/dpdk22/lib/librte_eal/common/include/rte_malloc.h
index 74bb78c7..74bb78c7 100755..100644
--- a/src/dpdk_lib18/librte_malloc/rte_malloc.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_malloc.h
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_malloc_heap.h b/src/dpdk22/lib/librte_eal/common/include/rte_malloc_heap.h
index 716216f2..b2703562 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_malloc_heap.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_malloc_heap.h
@@ -40,7 +40,7 @@
#include <rte_memory.h>
/* Number of free lists per heap, grouped by size. */
-#define RTE_HEAP_NUM_FREELISTS 5
+#define RTE_HEAP_NUM_FREELISTS 13
/**
* Structure to hold malloc heap
@@ -48,7 +48,6 @@
struct malloc_heap {
rte_spinlock_t lock;
LIST_HEAD(, malloc_elem) free_head[RTE_HEAP_NUM_FREELISTS];
- unsigned mz_count;
unsigned alloc_count;
size_t total_size;
} __rte_cache_aligned;
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_memory.h b/src/dpdk22/lib/librte_eal/common/include/rte_memory.h
index 7f8103f4..9c9e40f2 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_memory.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_memory.h
@@ -52,13 +52,19 @@
extern "C" {
#endif
+#include <rte_common.h>
+
enum rte_page_sizes {
- RTE_PGSIZE_4K = 1ULL << 12,
- RTE_PGSIZE_2M = 1ULL << 21,
- RTE_PGSIZE_1G = 1ULL << 30,
- RTE_PGSIZE_64K = 1ULL << 16,
- RTE_PGSIZE_16M = 1ULL << 24,
- RTE_PGSIZE_16G = 1ULL << 34
+ RTE_PGSIZE_4K = 1ULL << 12,
+ RTE_PGSIZE_64K = 1ULL << 16,
+ RTE_PGSIZE_256K = 1ULL << 18,
+ RTE_PGSIZE_2M = 1ULL << 21,
+ RTE_PGSIZE_16M = 1ULL << 24,
+ RTE_PGSIZE_256M = 1ULL << 28,
+ RTE_PGSIZE_512M = 1ULL << 29,
+ RTE_PGSIZE_1G = 1ULL << 30,
+ RTE_PGSIZE_4G = 1ULL << 32,
+ RTE_PGSIZE_16G = 1ULL << 34,
};
#define SOCKET_ID_ANY -1 /**< Any NUMA socket. */
@@ -74,7 +80,7 @@ enum rte_page_sizes {
/**
* Force alignment to cache line.
*/
-#define __rte_cache_aligned __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)))
+#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
typedef uint64_t phys_addr_t; /**< Physical address definition. */
#define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1)
@@ -100,7 +106,7 @@ struct rte_memseg {
/**< store segment MFNs */
uint64_t mfn[DOM0_NUM_MEMBLOCK];
#endif
-} __attribute__((__packed__));
+} __rte_packed;
/**
* Lock page in physical memory and prevent from swapping.
@@ -176,6 +182,13 @@ unsigned rte_memory_get_nchannel(void);
unsigned rte_memory_get_nrank(void);
#ifdef RTE_LIBRTE_XEN_DOM0
+
+/**< Internal use only - whether DOM0 memory mapping should be used */
+extern int rte_xen_dom0_supported(void);
+
+/**< Internal use only - phys to virt mapping for xen */
+phys_addr_t rte_xen_mem_phy2mch(uint32_t, const phys_addr_t);
+
/**
* Return the physical address of elt, which is an element of the pool mp.
*
@@ -187,7 +200,14 @@ unsigned rte_memory_get_nrank(void);
* @return
* The physical address or error.
*/
-phys_addr_t rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr);
+static inline phys_addr_t
+rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
+{
+ if (rte_xen_dom0_supported())
+ return rte_xen_mem_phy2mch(memseg_id, phy_addr);
+ else
+ return phy_addr;
+}
/**
* Memory init for supporting application running on Xen domain0.
@@ -196,7 +216,7 @@ phys_addr_t rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr);
*
* @return
* 0: successfully
- * negative: error
+ * negative: error
*/
int rte_xen_dom0_memory_init(void);
@@ -210,7 +230,19 @@ int rte_xen_dom0_memory_init(void);
* negative: error
*/
int rte_xen_dom0_memory_attach(void);
+#else
+static inline int rte_xen_dom0_supported(void)
+{
+ return 0;
+}
+
+static inline phys_addr_t
+rte_mem_phy2mch(uint32_t memseg_id __rte_unused, const phys_addr_t phy_addr)
+{
+ return phy_addr;
+}
#endif
+
#ifdef __cplusplus
}
#endif
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_memzone.h b/src/dpdk22/lib/librte_eal/common/include/rte_memzone.h
index 81b6ad40..f69b5a87 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_memzone.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_memzone.h
@@ -60,8 +60,12 @@ extern "C" {
#define RTE_MEMZONE_2MB 0x00000001 /**< Use 2MB pages. */
#define RTE_MEMZONE_1GB 0x00000002 /**< Use 1GB pages. */
-#define RTE_MEMZONE_16MB 0x00000100 /**< Use 16MB pages. */
-#define RTE_MEMZONE_16GB 0x00000200 /**< Use 16GB pages. */
+#define RTE_MEMZONE_16MB 0x00000100 /**< Use 16MB pages. */
+#define RTE_MEMZONE_16GB 0x00000200 /**< Use 16GB pages. */
+#define RTE_MEMZONE_256KB 0x00010000 /**< Use 256KB pages. */
+#define RTE_MEMZONE_256MB 0x00020000 /**< Use 256MB pages. */
+#define RTE_MEMZONE_512MB 0x00040000 /**< Use 512MB pages. */
+#define RTE_MEMZONE_4GB 0x00080000 /**< Use 4GB pages. */
#define RTE_MEMZONE_SIZE_HINT_ONLY 0x00000004 /**< Use available page size */
/**
@@ -88,7 +92,7 @@ struct rte_memzone {
int32_t socket_id; /**< NUMA socket ID. */
uint32_t flags; /**< Characteristics of this memzone. */
- uint32_t memseg_id; /** <store the memzone is from which memseg. */
+ uint32_t memseg_id; /**< Memseg it belongs to. */
} __attribute__((__packed__));
/**
@@ -96,7 +100,7 @@ struct rte_memzone {
*
* This function reserves some memory and returns a pointer to a
* correctly filled memzone descriptor. If the allocation cannot be
- * done, return NULL. Note: A reserved zone cannot be freed.
+ * done, return NULL.
*
* @param name
* The name of the memzone. If it already exists, the function will
@@ -110,11 +114,15 @@ struct rte_memzone {
* constraint for the reserved zone.
* @param flags
* The flags parameter is used to request memzones to be
- * taken from 1GB or 2MB hugepages.
- * - RTE_MEMZONE_2MB - Reserve from 2MB pages
- * - RTE_MEMZONE_1GB - Reserve from 1GB pages
- * - RTE_MEMZONE_16MB - Reserve from 16MB pages
- * - RTE_MEMZONE_16GB - Reserve from 16GB pages
+ * taken from specifically sized hugepages.
+ * - RTE_MEMZONE_2MB - Reserved from 2MB pages
+ * - RTE_MEMZONE_1GB - Reserved from 1GB pages
+ * - RTE_MEMZONE_16MB - Reserved from 16MB pages
+ * - RTE_MEMZONE_16GB - Reserved from 16GB pages
+ * - RTE_MEMZONE_256KB - Reserved from 256KB pages
+ * - RTE_MEMZONE_256MB - Reserved from 256MB pages
+ * - RTE_MEMZONE_512MB - Reserved from 512MB pages
+ * - RTE_MEMZONE_4GB - Reserved from 4GB pages
* - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if
* the requested page size is unavailable.
* If this flag is not set, the function
@@ -143,7 +151,6 @@ const struct rte_memzone *rte_memzone_reserve(const char *name,
* boundary, and returns a pointer to a correctly filled memzone
* descriptor. If the allocation cannot be done or if the alignment
* is not a power of 2, returns NULL.
- * Note: A reserved zone cannot be freed.
*
* @param name
* The name of the memzone. If it already exists, the function will
@@ -157,11 +164,15 @@ const struct rte_memzone *rte_memzone_reserve(const char *name,
* constraint for the reserved zone.
* @param flags
* The flags parameter is used to request memzones to be
- * taken from 1GB or 2MB hugepages.
- * - RTE_MEMZONE_2MB - Reserve from 2MB pages
- * - RTE_MEMZONE_1GB - Reserve from 1GB pages
- * - RTE_MEMZONE_16MB - Reserve from 16MB pages
- * - RTE_MEMZONE_16GB - Reserve from 16GB pages
+ * taken from specifically sized hugepages.
+ * - RTE_MEMZONE_2MB - Reserved from 2MB pages
+ * - RTE_MEMZONE_1GB - Reserved from 1GB pages
+ * - RTE_MEMZONE_16MB - Reserved from 16MB pages
+ * - RTE_MEMZONE_16GB - Reserved from 16GB pages
+ * - RTE_MEMZONE_256KB - Reserved from 256KB pages
+ * - RTE_MEMZONE_256MB - Reserved from 256MB pages
+ * - RTE_MEMZONE_512MB - Reserved from 512MB pages
+ * - RTE_MEMZONE_4GB - Reserved from 4GB pages
* - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if
* the requested page size is unavailable.
* If this flag is not set, the function
@@ -195,7 +206,6 @@ const struct rte_memzone *rte_memzone_reserve_aligned(const char *name,
* Memory buffer is reserved in a way, that it wouldn't cross specified
* boundary. That implies that requested length should be less or equal
* then boundary.
- * Note: A reserved zone cannot be freed.
*
* @param name
* The name of the memzone. If it already exists, the function will
@@ -209,11 +219,15 @@ const struct rte_memzone *rte_memzone_reserve_aligned(const char *name,
* constraint for the reserved zone.
* @param flags
* The flags parameter is used to request memzones to be
- * taken from 1GB or 2MB hugepages.
- * - RTE_MEMZONE_2MB - Reserve from 2MB pages
- * - RTE_MEMZONE_1GB - Reserve from 1GB pages
- * - RTE_MEMZONE_16MB - Reserve from 16MB pages
- * - RTE_MEMZONE_16GB - Reserve from 16GB pages
+ * taken from specifically sized hugepages.
+ * - RTE_MEMZONE_2MB - Reserved from 2MB pages
+ * - RTE_MEMZONE_1GB - Reserved from 1GB pages
+ * - RTE_MEMZONE_16MB - Reserved from 16MB pages
+ * - RTE_MEMZONE_16GB - Reserved from 16GB pages
+ * - RTE_MEMZONE_256KB - Reserved from 256KB pages
+ * - RTE_MEMZONE_256MB - Reserved from 256MB pages
+ * - RTE_MEMZONE_512MB - Reserved from 512MB pages
+ * - RTE_MEMZONE_4GB - Reserved from 4GB pages
* - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if
* the requested page size is unavailable.
* If this flag is not set, the function
@@ -240,6 +254,19 @@ const struct rte_memzone *rte_memzone_reserve_bounded(const char *name,
unsigned flags, unsigned align, unsigned bound);
/**
+ * Free a memzone.
+ *
+ * Note: an IVSHMEM zone cannot be freed.
+ *
+ * @param mz
+ * A pointer to the memzone
+ * @return
+ * -EINVAL - invalid parameter, IVSHMEM memzone.
+ * 0 - success
+ */
+int rte_memzone_free(const struct rte_memzone *mz);
+
+/**
 * Look up a memzone.
*
* Get a pointer to a descriptor of an already reserved memory
@@ -258,7 +285,7 @@ const struct rte_memzone *rte_memzone_lookup(const char *name);
* @param f
* A pointer to a file for output
*/
-void rte_memzone_dump(FILE *);
+void rte_memzone_dump(FILE *f);
/**
* Walk list of all memzones
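/*
 * Illustrative sketch, not part of the patch: reserve a zone preferring
 * 2MB pages, let RTE_MEMZONE_SIZE_HINT_ONLY fall back to any page size,
 * and release it with the new rte_memzone_free(). Name and size are made
 * up for the example.
 */
#include <rte_memzone.h>
#include <rte_lcore.h>

static int
example_memzone_roundtrip(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve("example_mz", 1 << 20, rte_socket_id(),
				 RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL)
		return -1;

	/* ... use mz->addr / mz->phys_addr ... */

	return rte_memzone_free(mz);	/* -EINVAL for an IVSHMEM zone */
}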
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_pci.h b/src/dpdk22/lib/librte_eal/common/include/rte_pci.h
index 66ed7933..334c12e5 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_pci.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_pci.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -74,6 +74,7 @@
extern "C" {
#endif
+#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <errno.h>
@@ -93,10 +94,10 @@ extern struct pci_device_list pci_device_list; /**< Global list of PCI devices.
#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
/** Formatting string for PCI device identifier: Ex: 0000:00:01.0 */
-#define PCI_PRI_FMT "%.4"PRIx16":%.2"PRIx8":%.2"PRIx8".%"PRIx8
+#define PCI_PRI_FMT "%.4" PRIx16 ":%.2" PRIx8 ":%.2" PRIx8 ".%" PRIx8
/** Short formatting string, without domain, for PCI device: Ex: 00:01.0 */
-#define PCI_SHORT_PRI_FMT "%.2"PRIx8":%.2"PRIx8".%"PRIx8
+#define PCI_SHORT_PRI_FMT "%.2" PRIx8 ":%.2" PRIx8 ".%" PRIx8
/** Nb. of values in PCI device identifier format string. */
#define PCI_FMT_NVAL 4
@@ -104,6 +105,9 @@ extern struct pci_device_list pci_device_list; /**< Global list of PCI devices.
/** Nb. of values in PCI resource format. */
#define PCI_RESOURCE_FMT_NVAL 3
+/** IO resource type: memory address space */
+#define IORESOURCE_MEM 0x00000200
+
/**
* A structure describing a PCI resource.
*/
@@ -114,7 +118,7 @@ struct rte_pci_resource {
};
/** Maximum number of PCI resources. */
-#define PCI_MAX_RESOURCE 7
+#define PCI_MAX_RESOURCE 6
/**
* A structure describing an ID for a PCI driver. Each driver provides a
@@ -139,6 +143,15 @@ struct rte_pci_addr {
struct rte_devargs;
+enum rte_kernel_driver {
+ RTE_KDRV_UNKNOWN = 0,
+ RTE_KDRV_IGB_UIO,
+ RTE_KDRV_VFIO,
+ RTE_KDRV_UIO_GENERIC,
+ RTE_KDRV_NIC_UIO,
+ RTE_KDRV_NONE,
+};
+
/**
* A structure describing a PCI device.
*/
@@ -148,10 +161,11 @@ struct rte_pci_device {
struct rte_pci_id id; /**< PCI ID. */
struct rte_pci_resource mem_resource[PCI_MAX_RESOURCE]; /**< PCI Memory Resource */
struct rte_intr_handle intr_handle; /**< Interrupt handle */
- const struct rte_pci_driver *driver; /**< Associated driver */
+ struct rte_pci_driver *driver; /**< Associated driver */
uint16_t max_vfs; /**< sriov enable if not zero */
int numa_node; /**< NUMA node connection */
struct rte_devargs *devargs; /**< Device user arguments */
+ enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */
};
/** Any PCI device identifier (vendor, device, ...) */
@@ -181,13 +195,19 @@ struct rte_pci_driver;
typedef int (pci_devinit_t)(struct rte_pci_driver *, struct rte_pci_device *);
/**
+ * Uninitialisation function for the driver, called during hotplugging.
+ */
+typedef int (pci_devuninit_t)(struct rte_pci_device *);
+
+/**
* A structure describing a PCI driver.
*/
struct rte_pci_driver {
TAILQ_ENTRY(rte_pci_driver) next; /**< Next in list. */
const char *name; /**< Driver name. */
pci_devinit_t *devinit; /**< Device init. function. */
- struct rte_pci_id *id_table; /**< ID table, NULL terminated. */
+ pci_devuninit_t *devuninit; /**< Device uninit function. */
+ const struct rte_pci_id *id_table; /**< ID table, NULL terminated. */
	uint32_t drv_flags;			/**< Flags controlling handling of device. */
};
@@ -199,6 +219,36 @@ struct rte_pci_driver {
#define RTE_PCI_DRV_FORCE_UNBIND 0x0004
/** Device driver supports link state interrupt */
#define RTE_PCI_DRV_INTR_LSC 0x0008
+/** Device driver supports detaching capability */
+#define RTE_PCI_DRV_DETACHABLE 0x0010
+
+/**
+ * A structure describing a PCI mapping.
+ */
+struct pci_map {
+ void *addr;
+ char *path;
+ uint64_t offset;
+ uint64_t size;
+ uint64_t phaddr;
+};
+
+/**
+ * A structure describing a mapped PCI resource.
+ * For multi-process we need to reproduce all PCI mappings in secondary
+ * processes, so save them in a tailq.
+ */
+struct mapped_pci_resource {
+ TAILQ_ENTRY(mapped_pci_resource) next;
+
+ struct rte_pci_addr pci_addr;
+ char path[PATH_MAX];
+ int nb_maps;
+ struct pci_map maps[PCI_MAX_RESOURCE];
+};
+
+/** mapped pci device list */
+TAILQ_HEAD(mapped_pci_res_list, mapped_pci_resource);
/**< Internal use only - Macro used by pci addr parsing functions **/
#define GET_PCIADDR_FIELD(in, fd, lim, dlm) \
@@ -208,7 +258,7 @@ do { \
errno = 0; \
val = strtoul((in), &end, 16); \
if (errno != 0 || end[0] != (dlm) || val > (lim)) \
- return (-EINVAL); \
+ return -EINVAL; \
(fd) = (typeof (fd))val; \
(in) = end + 1; \
} while(0)
@@ -219,10 +269,10 @@ do { \
* a domain prefix (i.e. domain returned is always 0)
*
* @param input
- * The input string to be parsed. Should have the format XX:XX.X
+ * The input string to be parsed. Should have the format XX:XX.X
* @param dev_addr
- * The PCI Bus-Device-Function address to be returned. Domain will always be
- * returned as 0
+ * The PCI Bus-Device-Function address to be returned. Domain will always be
+ * returned as 0
* @return
* 0 on success, negative on error.
*/
@@ -233,7 +283,7 @@ eal_parse_pci_BDF(const char *input, struct rte_pci_addr *dev_addr)
GET_PCIADDR_FIELD(input, dev_addr->bus, UINT8_MAX, ':');
GET_PCIADDR_FIELD(input, dev_addr->devid, UINT8_MAX, '.');
GET_PCIADDR_FIELD(input, dev_addr->function, UINT8_MAX, 0);
- return (0);
+ return 0;
}
/**
@@ -242,9 +292,9 @@ eal_parse_pci_BDF(const char *input, struct rte_pci_addr *dev_addr)
* a domain prefix.
*
* @param input
- * The input string to be parsed. Should have the format XXXX:XX:XX.X
+ * The input string to be parsed. Should have the format XXXX:XX:XX.X
* @param dev_addr
- * The PCI Bus-Device-Function address to be returned
+ * The PCI Bus-Device-Function address to be returned
* @return
* 0 on success, negative on error.
*/
@@ -255,10 +305,54 @@ eal_parse_pci_DomBDF(const char *input, struct rte_pci_addr *dev_addr)
GET_PCIADDR_FIELD(input, dev_addr->bus, UINT8_MAX, ':');
GET_PCIADDR_FIELD(input, dev_addr->devid, UINT8_MAX, '.');
GET_PCIADDR_FIELD(input, dev_addr->function, UINT8_MAX, 0);
- return (0);
+ return 0;
}
#undef GET_PCIADDR_FIELD
+/* Compare two PCI device addresses. */
+/**
+ * Utility function to compare two PCI device addresses.
+ *
+ * @param addr
+ * The PCI Bus-Device-Function address to compare
+ * @param addr2
+ * The PCI Bus-Device-Function address to compare
+ * @return
+ * 0 on equal PCI address.
+ * Positive on addr is greater than addr2.
+ * Negative on addr is less than addr2, or error.
+ */
+static inline int
+rte_eal_compare_pci_addr(const struct rte_pci_addr *addr,
+ const struct rte_pci_addr *addr2)
+{
+ uint64_t dev_addr, dev_addr2;
+
+ if ((addr == NULL) || (addr2 == NULL))
+ return -1;
+
+ dev_addr = (addr->domain << 24) | (addr->bus << 16) |
+ (addr->devid << 8) | addr->function;
+ dev_addr2 = (addr2->domain << 24) | (addr2->bus << 16) |
+ (addr2->devid << 8) | addr2->function;
+
+ if (dev_addr > dev_addr2)
+ return 1;
+ else if (dev_addr < dev_addr2)
+ return -1;
+ else
+ return 0;
+}
+
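/*
 * Illustrative sketch, not part of the patch: parse two BDF strings with
 * the helpers above and order them with rte_eal_compare_pci_addr(). The
 * addresses are made up for the example.
 */
static int
example_pci_addr_order(void)
{
	struct rte_pci_addr a, b;

	if (eal_parse_pci_BDF("00:01.0", &a) != 0 ||
	    eal_parse_pci_DomBDF("0000:00:02.0", &b) != 0)
		return -1;

	return rte_eal_compare_pci_addr(&a, &b);	/* <0, 0 or >0 */
}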
+/**
+ * Scan the content of the PCI bus, and add the devices found to the
+ * devices list.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_pci_scan(void);
+
/**
* Probe the PCI bus for registered drivers.
*
@@ -273,6 +367,68 @@ eal_parse_pci_DomBDF(const char *input, struct rte_pci_addr *dev_addr)
int rte_eal_pci_probe(void);
/**
+ * @internal
+ * Map a particular resource from a file.
+ *
+ * @param requested_addr
+ * The starting address for the new mapping range.
+ * @param fd
+ * The file descriptor.
+ * @param offset
+ * The offset for the mapping range.
+ * @param size
+ * The size for the mapping range.
+ * @param additional_flags
+ * The additional flags for the mapping range.
+ * @return
+ * - On success, the function returns a pointer to the mapped area.
+ * - On error, the value MAP_FAILED is returned.
+ */
+void *pci_map_resource(void *requested_addr, int fd, off_t offset,
+ size_t size, int additional_flags);
+
+/**
+ * @internal
+ * Unmap a particular resource.
+ *
+ * @param requested_addr
+ * The address for the unmapping range.
+ * @param size
+ * The size for the unmapping range.
+ */
+void pci_unmap_resource(void *requested_addr, size_t size);
+
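/*
 * Illustrative sketch, not part of the patch: map BAR0 of a UIO-bound
 * device read/write and hand it back; /dev/uio0 and the size are
 * assumptions for the example.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

static void *
example_map_bar0(size_t size)
{
	void *va;
	int fd = open("/dev/uio0", O_RDWR);

	if (fd < 0)
		return NULL;
	va = pci_map_resource(NULL, fd, 0, size, 0);
	close(fd);	/* the mapping survives the close */
	return (va == MAP_FAILED) ? NULL : va;
}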
+/**
+ * Probe the single PCI device.
+ *
+ * Scan the content of the PCI bus, and find the PCI device specified by the
+ * PCI address, then call the probe() function of the registered driver that
+ * has a matching entry in its id_table for the discovered device.
+ *
+ * @param addr
+ * The PCI Bus-Device-Function address to probe.
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int rte_eal_pci_probe_one(const struct rte_pci_addr *addr);
+
+/**
+ * Close the single PCI device.
+ *
+ * Scan the content of the PCI bus, and find the PCI device specified by the
+ * PCI address, then call the devuninit() function of the registered driver
+ * that has a matching entry in its id_table for the discovered device.
+ *
+ * @param addr
+ * The PCI Bus-Device-Function address to close.
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int rte_eal_pci_detach(const struct rte_pci_addr *addr);
+
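/*
 * Illustrative sketch, not part of the patch: the hotplug round trip that
 * the two declarations above enable. The BDF string comes from the caller.
 */
static int
example_hotplug_cycle(const char *bdf)
{
	struct rte_pci_addr addr;

	if (eal_parse_pci_DomBDF(bdf, &addr) != 0)
		return -1;
	if (rte_eal_pci_probe_one(&addr) != 0)
		return -1;
	/* ... use the device ... */
	return rte_eal_pci_detach(&addr);
}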
+/**
* Dump the content of the PCI bus.
*
* @param f
@@ -298,6 +454,49 @@ void rte_eal_pci_register(struct rte_pci_driver *driver);
*/
void rte_eal_pci_unregister(struct rte_pci_driver *driver);
+/**
+ * Read PCI config space.
+ *
+ * @param device
+ * A pointer to a rte_pci_device structure describing the device
+ * to use
+ * @param buf
+ *   A data buffer into which the bytes should be read
+ * @param len
+ * The length of the data buffer.
+ * @param offset
+ * The offset into PCI config space
+ */
+int rte_eal_pci_read_config(const struct rte_pci_device *device,
+ void *buf, size_t len, off_t offset);
+
+/**
+ * Write PCI config space.
+ *
+ * @param device
+ * A pointer to a rte_pci_device structure describing the device
+ * to use
+ * @param buf
+ *   A data buffer containing the bytes to be written
+ * @param len
+ * The length of the data buffer.
+ * @param offset
+ * The offset into PCI config space
+ */
+int rte_eal_pci_write_config(const struct rte_pci_device *device,
+ const void *buf, size_t len, off_t offset);
+
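/*
 * Illustrative sketch, not part of the patch: read the 16-bit vendor ID
 * from offset 0 of the device's config space with the accessor above.
 */
#include <stdint.h>

static int
example_read_vendor(const struct rte_pci_device *dev)
{
	uint16_t vendor = 0;

	if (rte_eal_pci_read_config(dev, &vendor, sizeof(vendor), 0) < 0)
		return -1;
	return vendor;	/* e.g. 0x8086 for Intel */
}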
+#ifdef RTE_PCI_CONFIG
+/**
+ * Set special config space registers for performance purposes.
+ *
+ * @param dev
+ * A pointer to a rte_pci_device structure describing the device
+ * to use
+ */
+void pci_config_space_set(struct rte_pci_device *dev);
+#endif /* RTE_PCI_CONFIG */
+
#ifdef __cplusplus
}
#endif
diff --git a/src/dpdk_lib18/librte_vhost/eventfd_link/eventfd_link.h b/src/dpdk22/lib/librte_eal/common/include/rte_pci_dev_feature_defs.h
index ea619ec0..08222510 100755..100644
--- a/src/dpdk_lib18/librte_vhost/eventfd_link/eventfd_link.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_pci_dev_feature_defs.h
@@ -1,8 +1,8 @@
/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
*
- * GPL LICENSE SUMMARY
+ * GPL LICENSE SUMMARY
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
*
@@ -24,7 +24,7 @@
* Contact Information:
* Intel Corporation
*
- * BSD LICENSE
+ * BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
@@ -33,15 +33,15 @@
* modification, are permitted provided that the following conditions
* are met:
*
- * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@@ -54,23 +54,17 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
-#ifndef _EVENTFD_LINK_H_
-#define _EVENTFD_LINK_H_
-
-/*
- * ioctl to copy an fd entry in calling process to an fd in a target process
- */
-#define EVENTFD_COPY 1
+#ifndef _RTE_PCI_DEV_DEFS_H_
+#define _RTE_PCI_DEV_DEFS_H_
-/*
- * arguements for the EVENTFD_COPY ioctl
- */
-struct eventfd_copy {
- unsigned target_fd; /* fd in the target pid */
- unsigned source_fd; /* fd in the calling pid */
- pid_t target_pid; /* pid of the target pid */
+/* interrupt mode */
+enum rte_intr_mode {
+ RTE_INTR_MODE_NONE = 0,
+ RTE_INTR_MODE_LEGACY,
+ RTE_INTR_MODE_MSI,
+ RTE_INTR_MODE_MSIX
};
-#endif /* _EVENTFD_LINK_H_ */
+
+#endif /* _RTE_PCI_DEV_DEFS_H_ */
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_features.h b/src/dpdk22/lib/librte_eal/common/include/rte_pci_dev_features.h
index 01200de9..67b986a6 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_features.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_pci_dev_features.h
@@ -1,4 +1,29 @@
/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_ids.h b/src/dpdk22/lib/librte_eal/common/include/rte_pci_dev_ids.h
index c922de92..e31b9345 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_ids.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_pci_dev_ids.h
@@ -4,7 +4,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -132,6 +132,26 @@
#define RTE_PCI_DEV_ID_DECL_VMXNET3(vend, dev)
#endif
+#ifndef RTE_PCI_DEV_ID_DECL_FM10K
+#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev)
+#endif
+
+#ifndef RTE_PCI_DEV_ID_DECL_FM10KVF
+#define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev)
+#endif
+
+#ifndef RTE_PCI_DEV_ID_DECL_ENIC
+#define RTE_PCI_DEV_ID_DECL_ENIC(vend, dev)
+#endif
+
+#ifndef RTE_PCI_DEV_ID_DECL_BNX2X
+#define RTE_PCI_DEV_ID_DECL_BNX2X(vend, dev)
+#endif
+
+#ifndef RTE_PCI_DEV_ID_DECL_BNX2XVF
+#define RTE_PCI_DEV_ID_DECL_BNX2XVF(vend, dev)
+#endif
+
#ifndef PCI_VENDOR_ID_INTEL
/** Vendor ID used by Intel devices */
#define PCI_VENDOR_ID_INTEL 0x8086
@@ -147,6 +167,16 @@
#define PCI_VENDOR_ID_VMWARE 0x15AD
#endif
+#ifndef PCI_VENDOR_ID_CISCO
+/** Vendor ID used by Cisco VIC devices */
+#define PCI_VENDOR_ID_CISCO 0x1137
+#endif
+
+#ifndef PCI_VENDOR_ID_BROADCOM
+/** Vendor ID used by Broadcom devices */
+#define PCI_VENDOR_ID_BROADCOM 0x14E4
+#endif
+
/******************** Physical EM devices from e1000_hw.h ********************/
#define E1000_DEV_ID_82542 0x1000
@@ -242,6 +272,11 @@
#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
+#define E1000_DEV_ID_PCH_I218_LM2 0x15A0
+#define E1000_DEV_ID_PCH_I218_V2 0x15A1
+#define E1000_DEV_ID_PCH_I218_LM3 0x15A2
+#define E1000_DEV_ID_PCH_I218_V3 0x15A3
+
/*
* Tested (supported) on VM emulated HW.
@@ -274,6 +309,7 @@ RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82572EI)
RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82573L)
RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82574L)
RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82574LA)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82583V)
/******************** Physical IGB devices from e1000_hw.h ********************/
@@ -393,8 +429,9 @@ RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_SFP)
#define IXGBE_DEV_ID_82599_LS 0x154F
#define IXGBE_DEV_ID_X540T 0x1528
#define IXGBE_DEV_ID_X540T1 0x1560
-#define IXGBE_DEV_ID_X550EM_X 0x15A7
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
+#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
+#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
#define IXGBE_DEV_ID_X550T 0x1563
#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
@@ -442,8 +479,9 @@ RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_T3_LOM)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_LS)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540T)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540T1)
-RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_SFP)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_10G_T)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_1G_T)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550T)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_KX4)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_KR)
@@ -463,6 +501,13 @@ RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_BYPASS)
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_20G_KR2 0x1587
+#define I40E_DEV_ID_20G_KR2_A 0x1588
+#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_X722_A0 0x374C
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_SFP_XL710)
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QEMU)
@@ -473,6 +518,21 @@ RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QSFP_A)
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QSFP_B)
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QSFP_C)
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_10G_BASE_T)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_20G_KR2)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_20G_KR2_A)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_10G_BASE_T4)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_X722_A0)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_SFP_X722)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_1G_BASE_T_X722)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_10G_BASE_T_X722)
+
+/*************** Physical FM10K devices from fm10k_type.h ***************/
+
+#define FM10K_DEV_ID_PF 0x15A4
+#define FM10K_DEV_ID_SDI_FM10420_QDA2 0x15D0
+
+RTE_PCI_DEV_ID_DECL_FM10K(PCI_VENDOR_ID_INTEL, FM10K_DEV_ID_PF)
+RTE_PCI_DEV_ID_DECL_FM10K(PCI_VENDOR_ID_INTEL, FM10K_DEV_ID_SDI_FM10420_QDA2)
/****************** Virtual IGB devices from e1000_hw.h ******************/
@@ -510,9 +570,13 @@ RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV)
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
RTE_PCI_DEV_ID_DECL_I40EVF(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF)
RTE_PCI_DEV_ID_DECL_I40EVF(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF_HV)
+RTE_PCI_DEV_ID_DECL_I40EVF(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_X722_VF)
+RTE_PCI_DEV_ID_DECL_I40EVF(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_X722_VF_HV)
/****************** Virtio devices from virtio.h ******************/
@@ -526,9 +590,70 @@ RTE_PCI_DEV_ID_DECL_VIRTIO(PCI_VENDOR_ID_QUMRANET, QUMRANET_DEV_ID_VIRTIO)
RTE_PCI_DEV_ID_DECL_VMXNET3(PCI_VENDOR_ID_VMWARE, VMWARE_DEV_ID_VMXNET3)
+/*************** Virtual FM10K devices from fm10k_type.h ***************/
+
+#define FM10K_DEV_ID_VF 0x15A5
+
+RTE_PCI_DEV_ID_DECL_FM10KVF(PCI_VENDOR_ID_INTEL, FM10K_DEV_ID_VF)
+
+/****************** Cisco VIC devices ******************/
+
+#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
+#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
+
+RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET)
+RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF)
+
+/****************** QLogic devices ******************/
+
+/* Broadcom/QLogic BNX2X */
+#define BNX2X_DEV_ID_57710 0x164e
+#define BNX2X_DEV_ID_57711 0x164f
+#define BNX2X_DEV_ID_57711E 0x1650
+#define BNX2X_DEV_ID_57712 0x1662
+#define BNX2X_DEV_ID_57712_MF 0x1663
+#define BNX2X_DEV_ID_57712_VF 0x166f
+#define BNX2X_DEV_ID_57713 0x1651
+#define BNX2X_DEV_ID_57713E 0x1652
+#define BNX2X_DEV_ID_57800 0x168a
+#define BNX2X_DEV_ID_57800_MF 0x16a5
+#define BNX2X_DEV_ID_57800_VF 0x16a9
+#define BNX2X_DEV_ID_57810 0x168e
+#define BNX2X_DEV_ID_57810_MF 0x16ae
+#define BNX2X_DEV_ID_57810_VF 0x16af
+#define BNX2X_DEV_ID_57811 0x163d
+#define BNX2X_DEV_ID_57811_MF 0x163e
+#define BNX2X_DEV_ID_57811_VF 0x163f
+
+#define BNX2X_DEV_ID_57840_OBS 0x168d
+#define BNX2X_DEV_ID_57840_OBS_MF 0x16ab
+#define BNX2X_DEV_ID_57840_4_10 0x16a1
+#define BNX2X_DEV_ID_57840_2_20 0x16a2
+#define BNX2X_DEV_ID_57840_MF 0x16a4
+#define BNX2X_DEV_ID_57840_VF 0x16ad
+
+RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57800)
+RTE_PCI_DEV_ID_DECL_BNX2XVF(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57800_VF)
+RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57711)
+RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57810)
+RTE_PCI_DEV_ID_DECL_BNX2XVF(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57810_VF)
+RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57811)
+RTE_PCI_DEV_ID_DECL_BNX2XVF(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57811_VF)
+RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57840_OBS)
+RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57840_4_10)
+RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57840_2_20)
+RTE_PCI_DEV_ID_DECL_BNX2XVF(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57840_VF)
+#ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT
+RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57810_MF)
+RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57811_MF)
+RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57840_MF)
+#endif
+
/*
* Undef all RTE_PCI_DEV_ID_DECL_* here.
*/
+#undef RTE_PCI_DEV_ID_DECL_BNX2X
+#undef RTE_PCI_DEV_ID_DECL_BNX2XVF
#undef RTE_PCI_DEV_ID_DECL_EM
#undef RTE_PCI_DEV_ID_DECL_IGB
#undef RTE_PCI_DEV_ID_DECL_IGBVF
@@ -538,3 +663,5 @@ RTE_PCI_DEV_ID_DECL_VMXNET3(PCI_VENDOR_ID_VMWARE, VMWARE_DEV_ID_VMXNET3)
#undef RTE_PCI_DEV_ID_DECL_I40EVF
#undef RTE_PCI_DEV_ID_DECL_VIRTIO
#undef RTE_PCI_DEV_ID_DECL_VMXNET3
+#undef RTE_PCI_DEV_ID_DECL_FM10K
+#undef RTE_PCI_DEV_ID_DECL_FM10KVF
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_per_lcore.h b/src/dpdk22/lib/librte_eal/common/include/rte_per_lcore.h
index 5434729a..5434729a 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_per_lcore.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_per_lcore.h
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_random.h b/src/dpdk22/lib/librte_eal/common/include/rte_random.h
index 24ae8363..24ae8363 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_random.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_random.h
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_string_fns.h b/src/dpdk22/lib/librte_eal/common/include/rte_string_fns.h
index cfca2f8d..cfca2f8d 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_string_fns.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_string_fns.h
diff --git a/src/dpdk22/lib/librte_eal/common/include/rte_tailq.h b/src/dpdk22/lib/librte_eal/common/include/rte_tailq.h
new file mode 100644
index 00000000..4a686e68
--- /dev/null
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_tailq.h
@@ -0,0 +1,162 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_TAILQ_H_
+#define _RTE_TAILQ_H_
+
+/**
+ * @file
+ * This file defines the rte_tailq APIs, for internal use only
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <rte_debug.h>
+
+/** dummy structure type used by the rte_tailq APIs */
+struct rte_tailq_entry {
+ TAILQ_ENTRY(rte_tailq_entry) next; /**< Pointer entries for a tailq list */
+ void *data; /**< Pointer to the data referenced by this tailq entry */
+};
+/** dummy */
+TAILQ_HEAD(rte_tailq_entry_head, rte_tailq_entry);
+
+#define RTE_TAILQ_NAMESIZE 32
+
+/**
+ * The structure defining a tailq header entry for storing
+ * in the rte_config structure in shared memory. Each tailq
+ * is identified by name.
+ * Any library storing a set of objects, e.g. rings, mempools, or
+ * hash-tables, is recommended to use an entry here, so as to make it easy
+ * for a multi-process app to find already-created elements in shared memory.
+ */
+struct rte_tailq_head {
+ struct rte_tailq_entry_head tailq_head; /**< NOTE: must be first element */
+ char name[RTE_TAILQ_NAMESIZE];
+};
+
+struct rte_tailq_elem {
+ /**
+ * Reference to head in shared mem, updated at init time by
+ * rte_eal_tailqs_init()
+ */
+ struct rte_tailq_head *head;
+ TAILQ_ENTRY(rte_tailq_elem) next;
+ const char name[RTE_TAILQ_NAMESIZE];
+};
+
+/**
+ * Return the first tailq entry cast to the right struct.
+ */
+#define RTE_TAILQ_CAST(tailq_entry, struct_name) \
+ (struct struct_name *)&(tailq_entry)->tailq_head
+
+/**
+ * Utility macro to make looking up a tailqueue for a particular struct easier.
+ *
+ * @param name
+ * The name of tailq
+ *
+ * @param struct_name
+ * The name of the list type we are using. (Generally this is the same as the
+ * first parameter passed to TAILQ_HEAD macro)
+ *
+ * @return
+ * The return value from rte_eal_tailq_lookup, typecast to the appropriate
+ * structure pointer type.
+ * NULL on error, since the tailq_head is the first
+ * element in the rte_tailq_head structure.
+ */
+#define RTE_TAILQ_LOOKUP(name, struct_name) \
+ RTE_TAILQ_CAST(rte_eal_tailq_lookup(name), struct_name)
+
+/**
+ * Dump tail queues to the console.
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+void rte_dump_tailq(FILE *f);
+
+/**
+ * Look up a tail queue.
+ *
+ * Get a pointer to a tail queue header of a tail
+ * queue identified by the name given as an argument.
+ * Note: this function is not multi-thread safe, and should only be called from
+ * a single thread at a time
+ *
+ * @param name
+ * The name of the queue.
+ * @return
+ * A pointer to the tail queue head structure.
+ */
+struct rte_tailq_head *rte_eal_tailq_lookup(const char *name);
+
+/**
+ * Register a tail queue.
+ *
+ * Register a tail queue from shared memory.
+ * This function is mainly used by EAL_REGISTER_TAILQ macro which is used to
+ * register tailq from the different dpdk libraries. Since this macro is a
+ * constructor, the function has no access to dpdk shared memory, so the
+ * registered tailq cannot be used before the call to rte_eal_init(), which calls
+ * rte_eal_tailqs_init().
+ *
+ * @param t
+ * The tailq element which contains the name of the tailq you want to
+ * create (/retrieve when in secondary process).
+ * @return
+ * 0 on success or -1 in case of an error.
+ */
+int rte_eal_tailq_register(struct rte_tailq_elem *t);
+
+#define EAL_REGISTER_TAILQ(t) \
+void tailqinitfn_ ##t(void); \
+void __attribute__((constructor, used)) tailqinitfn_ ##t(void) \
+{ \
+ if (rte_eal_tailq_register(&t) < 0) \
+ rte_panic("Cannot initialize tailq: %s\n", t.name); \
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_TAILQ_H_ */
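/*
 * Illustrative sketch, not part of the patch, of how a library uses this
 * header: declare a tailq element, register it via the constructor macro,
 * and fetch its shared-memory head after rte_eal_init(). The list name is
 * made up.
 */
static struct rte_tailq_elem example_tailq = {
	.name = "EXAMPLE_LIST",
};
EAL_REGISTER_TAILQ(example_tailq)

static struct rte_tailq_entry_head *
example_list_head(void)
{
	/* valid only once rte_eal_init() has run rte_eal_tailqs_init() */
	return RTE_TAILQ_CAST(example_tailq.head, rte_tailq_entry_head);
}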
diff --git a/src/dpdk22/lib/librte_eal/common/include/rte_time.h b/src/dpdk22/lib/librte_eal/common/include/rte_time.h
new file mode 100644
index 00000000..4b13b9c1
--- /dev/null
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_time.h
@@ -0,0 +1,122 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define NSEC_PER_SEC 1000000000L
+
+/**
+ * Structure to hold the parameters of a running cycle counter to assist
+ * in converting cycles to nanoseconds.
+ */
+struct rte_timecounter {
+ /** Last cycle counter value read. */
+ uint64_t cycle_last;
+ /** Nanoseconds count. */
+ uint64_t nsec;
+ /** Bitmask separating nanosecond and sub-nanoseconds. */
+ uint64_t nsec_mask;
+ /** Sub-nanoseconds count. */
+ uint64_t nsec_frac;
+	/** Bitmask for two's complement subtraction of non-64 bit counters. */
+ uint64_t cc_mask;
+ /** Cycle to nanosecond divisor (power of two). */
+ uint32_t cc_shift;
+};
+
+/**
+ * Converts cyclecounter cycles to nanoseconds.
+ */
+static inline uint64_t
+rte_cyclecounter_cycles_to_ns(struct rte_timecounter *tc, uint64_t cycles)
+{
+ uint64_t ns;
+
+ /* Add fractional nanoseconds. */
+ ns = cycles + tc->nsec_frac;
+ tc->nsec_frac = ns & tc->nsec_mask;
+
+ /* Shift to get only nanoseconds. */
+ return ns >> tc->cc_shift;
+}
+
+/**
+ * Update the internal nanosecond count in the structure.
+ */
+static inline uint64_t
+rte_timecounter_update(struct rte_timecounter *tc, uint64_t cycle_now)
+{
+ uint64_t cycle_delta, ns_offset;
+
+ /* Calculate the delta since the last call. */
+ if (tc->cycle_last <= cycle_now)
+ cycle_delta = (cycle_now - tc->cycle_last) & tc->cc_mask;
+ else
+		/* Handle cycle counts that have wrapped around. */
+ cycle_delta = (~(tc->cycle_last - cycle_now) & tc->cc_mask) + 1;
+
+ /* Convert to nanoseconds. */
+ ns_offset = rte_cyclecounter_cycles_to_ns(tc, cycle_delta);
+
+ /* Store current cycle counter for next call. */
+ tc->cycle_last = cycle_now;
+
+ /* Update the nanosecond count. */
+ tc->nsec += ns_offset;
+
+ return tc->nsec;
+}
+
+/**
+ * Convert from timespec structure into nanosecond units.
+ */
+static inline uint64_t
+rte_timespec_to_ns(const struct timespec *ts)
+{
+ return ((uint64_t) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
+}
+
+/**
+ * Convert from nanosecond units into timespec structure.
+ */
+static inline struct timespec
+rte_ns_to_timespec(uint64_t nsec)
+{
+ struct timespec ts = {0, 0};
+
+ if (nsec == 0)
+ return ts;
+
+ ts.tv_sec = nsec / NSEC_PER_SEC;
+ ts.tv_nsec = nsec % NSEC_PER_SEC;
+
+ return ts;
+}
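/*
 * Illustrative sketch, not part of the patch: feed the timecounter with
 * raw TSC reads and return the running time as a timespec. The caller is
 * assumed to have initialised cc_mask, cc_shift and nsec_mask for its
 * counter.
 */
#include <time.h>
#include <rte_cycles.h>

static struct timespec
example_timecounter_read(struct rte_timecounter *tc)
{
	uint64_t ns = rte_timecounter_update(tc, rte_rdtsc());

	return rte_ns_to_timespec(ns);
}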
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_version.h b/src/dpdk22/lib/librte_eal/common/include/rte_version.h
index d2686ae3..bb3e9fc2 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_version.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_version.h
@@ -44,6 +44,7 @@ extern "C" {
#endif
#include <stdint.h>
+#include <string.h>
#include <rte_common.h>
/**
@@ -54,12 +55,12 @@ extern "C" {
/**
* Major version number i.e. the x in x.y.z
*/
-#define RTE_VER_MAJOR 1
+#define RTE_VER_MAJOR 2
/**
* Minor version number i.e. the y in x.y.z
*/
-#define RTE_VER_MINOR 8
+#define RTE_VER_MINOR 2
/**
* Patch level number i.e. the z in x.y.z
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_warnings.h b/src/dpdk22/lib/librte_eal/common/include/rte_warnings.h
index da80877f..da80877f 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/include/rte_warnings.h
+++ b/src/dpdk22/lib/librte_eal/common/include/rte_warnings.h
diff --git a/src/dpdk_lib18/librte_malloc/malloc_elem.c b/src/dpdk22/lib/librte_eal/common/malloc_elem.c
index ef26e472..b54ee330 100755..100644
--- a/src/dpdk_lib18/librte_malloc/malloc_elem.c
+++ b/src/dpdk22/lib/librte_eal/common/malloc_elem.c
@@ -37,8 +37,6 @@
#include <sys/queue.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
@@ -57,10 +55,10 @@
*/
void
malloc_elem_init(struct malloc_elem *elem,
- struct malloc_heap *heap, const struct rte_memzone *mz, size_t size)
+ struct malloc_heap *heap, const struct rte_memseg *ms, size_t size)
{
elem->heap = heap;
- elem->mz = mz;
+ elem->ms = ms;
elem->prev = NULL;
memset(&elem->free_list, 0, sizeof(elem->free_list));
elem->state = ELEM_FREE;
@@ -71,12 +69,12 @@ malloc_elem_init(struct malloc_elem *elem,
}
/*
- * initialise a dummy malloc_elem header for the end-of-memzone marker
+ * initialise a dummy malloc_elem header for the end-of-memseg marker
*/
void
malloc_elem_mkend(struct malloc_elem *elem, struct malloc_elem *prev)
{
- malloc_elem_init(elem, prev->heap, prev->mz, 0);
+ malloc_elem_init(elem, prev->heap, prev->ms, 0);
elem->prev = prev;
	elem->state = ELEM_BUSY; /* mark busy so it's never merged */
}
@@ -87,12 +85,24 @@ malloc_elem_mkend(struct malloc_elem *elem, struct malloc_elem *prev)
* fit, return NULL.
*/
static void *
-elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align)
+elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align,
+ size_t bound)
{
- const uintptr_t end_pt = (uintptr_t)elem +
+ const size_t bmask = ~(bound - 1);
+ uintptr_t end_pt = (uintptr_t)elem +
elem->size - MALLOC_ELEM_TRAILER_LEN;
- const uintptr_t new_data_start = rte_align_floor_int((end_pt - size),align);
- const uintptr_t new_elem_start = new_data_start - MALLOC_ELEM_HEADER_LEN;
+ uintptr_t new_data_start = RTE_ALIGN_FLOOR((end_pt - size), align);
+ uintptr_t new_elem_start;
+
+ /* check boundary */
+ if ((new_data_start & bmask) != ((end_pt - 1) & bmask)) {
+ end_pt = RTE_ALIGN_FLOOR(end_pt, bound);
+ new_data_start = RTE_ALIGN_FLOOR((end_pt - size), align);
+ if (((end_pt - 1) & bmask) != (new_data_start & bmask))
+ return NULL;
+ }
+
+ new_elem_start = new_data_start - MALLOC_ELEM_HEADER_LEN;
	/* if the new start point is before the existing start, it won't fit */
return (new_elem_start < (uintptr_t)elem) ? NULL : (void *)new_elem_start;
@@ -103,9 +113,10 @@ elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align)
* alignment request from the current element
*/
int
-malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align)
+malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align,
+ size_t bound)
{
- return elem_start_pt(elem, size, align) != NULL;
+ return elem_start_pt(elem, size, align, bound) != NULL;
}
/*
@@ -116,10 +127,10 @@ static void
split_elem(struct malloc_elem *elem, struct malloc_elem *split_pt)
{
struct malloc_elem *next_elem = RTE_PTR_ADD(elem, elem->size);
- const unsigned old_elem_size = (uintptr_t)split_pt - (uintptr_t)elem;
- const unsigned new_elem_size = elem->size - old_elem_size;
+ const size_t old_elem_size = (uintptr_t)split_pt - (uintptr_t)elem;
+ const size_t new_elem_size = elem->size - old_elem_size;
- malloc_elem_init(split_pt, elem->heap, elem->mz, new_elem_size);
+ malloc_elem_init(split_pt, elem->heap, elem->ms, new_elem_size);
split_pt->prev = elem;
next_elem->prev = split_pt;
elem->size = old_elem_size;
@@ -169,8 +180,9 @@ malloc_elem_free_list_index(size_t size)
void
malloc_elem_free_list_insert(struct malloc_elem *elem)
{
- size_t idx = malloc_elem_free_list_index(elem->size - MALLOC_ELEM_HEADER_LEN);
+ size_t idx;
+ idx = malloc_elem_free_list_index(elem->size - MALLOC_ELEM_HEADER_LEN);
elem->state = ELEM_FREE;
LIST_INSERT_HEAD(&elem->heap->free_head[idx], elem, free_list);
}
@@ -191,12 +203,26 @@ elem_free_list_remove(struct malloc_elem *elem)
* is not done here, as it's done there previously.
*/
struct malloc_elem *
-malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align)
+malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
+ size_t bound)
{
- struct malloc_elem *new_elem = elem_start_pt(elem, size, align);
- const unsigned old_elem_size = (uintptr_t)new_elem - (uintptr_t)elem;
+ struct malloc_elem *new_elem = elem_start_pt(elem, size, align, bound);
+ const size_t old_elem_size = (uintptr_t)new_elem - (uintptr_t)elem;
+ const size_t trailer_size = elem->size - old_elem_size - size -
+ MALLOC_ELEM_OVERHEAD;
+
+ elem_free_list_remove(elem);
- if (old_elem_size < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE){
+ if (trailer_size > MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+ /* split it, too much free space after elem */
+ struct malloc_elem *new_free_elem =
+ RTE_PTR_ADD(new_elem, size + MALLOC_ELEM_OVERHEAD);
+
+ split_elem(elem, new_free_elem);
+ malloc_elem_free_list_insert(new_free_elem);
+ }
+
+ if (old_elem_size < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
/* don't split it, pad the element instead */
elem->state = ELEM_BUSY;
elem->pad = old_elem_size;
@@ -209,8 +235,6 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align)
new_elem->size = elem->size - elem->pad;
set_header(new_elem);
}
- /* remove element from free list */
- elem_free_list_remove(elem);
return new_elem;
}
@@ -220,7 +244,6 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align)
* Re-insert original element, in case its new size makes it
* belong on a different list.
*/
- elem_free_list_remove(elem);
split_elem(elem, new_elem);
new_elem->state = ELEM_BUSY;
malloc_elem_free_list_insert(elem);
diff --git a/src/dpdk_lib18/librte_malloc/malloc_elem.h b/src/dpdk22/lib/librte_eal/common/malloc_elem.h
index 9790b1a0..e05d2ea0 100755..100644
--- a/src/dpdk_lib18/librte_malloc/malloc_elem.h
+++ b/src/dpdk22/lib/librte_eal/common/malloc_elem.h
@@ -47,9 +47,9 @@ enum elem_state {
struct malloc_elem {
struct malloc_heap *heap;
- struct malloc_elem *volatile prev; /* points to prev elem in memzone */
+ struct malloc_elem *volatile prev; /* points to prev elem in memseg */
LIST_ENTRY(malloc_elem) free_list; /* list of free elements in heap */
- const struct rte_memzone *mz;
+ const struct rte_memseg *ms;
volatile enum elem_state state;
uint32_t pad;
size_t size;
@@ -136,11 +136,11 @@ malloc_elem_from_data(const void *data)
void
malloc_elem_init(struct malloc_elem *elem,
struct malloc_heap *heap,
- const struct rte_memzone *mz,
+ const struct rte_memseg *ms,
size_t size);
/*
- * initialise a dummy malloc_elem header for the end-of-memzone marker
+ * initialise a dummy malloc_elem header for the end-of-memseg marker
*/
void
malloc_elem_mkend(struct malloc_elem *elem,
@@ -151,14 +151,16 @@ malloc_elem_mkend(struct malloc_elem *elem,
* of the requested size and with the requested alignment
*/
int
-malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align);
+malloc_elem_can_hold(struct malloc_elem *elem, size_t size,
+ unsigned align, size_t bound);
/*
* reserve a block of data in an existing malloc_elem. If the malloc_elem
* is much larger than the data block requested, we split the element in two.
*/
struct malloc_elem *
-malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align);
+malloc_elem_alloc(struct malloc_elem *elem, size_t size,
+ unsigned align, size_t bound);
/*
* free a malloc_elem block by adding it to the free list. If the
diff --git a/src/dpdk_lib18/librte_malloc/malloc_heap.c b/src/dpdk22/lib/librte_eal/common/malloc_heap.c
index 95fcfecf..d170d037 100755..100644
--- a/src/dpdk_lib18/librte_malloc/malloc_heap.c
+++ b/src/dpdk22/lib/librte_eal/common/malloc_heap.c
@@ -39,8 +39,6 @@
#include <sys/queue.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_launch.h>
@@ -55,123 +53,125 @@
#include "malloc_elem.h"
#include "malloc_heap.h"
-/* since the memzone size starts with a digit, it will appear unquoted in
- * rte_config.h, so quote it so it can be passed to rte_str_to_size */
-#define MALLOC_MEMZONE_SIZE RTE_STR(RTE_MALLOC_MEMZONE_SIZE)
-
-/*
- * returns the configuration setting for the memzone size as a size_t value
- */
-static inline size_t
-get_malloc_memzone_size(void)
+static unsigned
+check_hugepage_sz(unsigned flags, uint64_t hugepage_sz)
{
- return rte_str_to_size(MALLOC_MEMZONE_SIZE);
+ unsigned check_flag = 0;
+
+ if (!(flags & ~RTE_MEMZONE_SIZE_HINT_ONLY))
+ return 1;
+
+ switch (hugepage_sz) {
+ case RTE_PGSIZE_256K:
+ check_flag = RTE_MEMZONE_256KB;
+ break;
+ case RTE_PGSIZE_2M:
+ check_flag = RTE_MEMZONE_2MB;
+ break;
+ case RTE_PGSIZE_16M:
+ check_flag = RTE_MEMZONE_16MB;
+ break;
+ case RTE_PGSIZE_256M:
+ check_flag = RTE_MEMZONE_256MB;
+ break;
+ case RTE_PGSIZE_512M:
+ check_flag = RTE_MEMZONE_512MB;
+ break;
+ case RTE_PGSIZE_1G:
+ check_flag = RTE_MEMZONE_1GB;
+ break;
+ case RTE_PGSIZE_4G:
+ check_flag = RTE_MEMZONE_4GB;
+ break;
+ case RTE_PGSIZE_16G:
+ check_flag = RTE_MEMZONE_16GB;
+ }
+
+ return (check_flag & flags);
}
/*
- * reserve an extra memory zone and make it available for use by a particular
- * heap. This reserves the zone and sets a dummy malloc_elem header at the end
+ * Expand the heap with a memseg.
+ * This sets a dummy malloc_elem header at the end of the memseg
* to prevent overflow. The rest of the zone is added to free list as a single
* large free block
*/
-static int
-malloc_heap_add_memzone(struct malloc_heap *heap, size_t size, unsigned align)
+static void
+malloc_heap_add_memseg(struct malloc_heap *heap, struct rte_memseg *ms)
{
- const unsigned mz_flags = 0;
- const size_t block_size = get_malloc_memzone_size();
- /* ensure the data we want to allocate will fit in the memzone */
- const size_t min_size = size + align + MALLOC_ELEM_OVERHEAD * 2;
- const struct rte_memzone *mz = NULL;
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- unsigned numa_socket = heap - mcfg->malloc_heaps;
-
- size_t mz_size = min_size;
- if (mz_size < block_size)
- mz_size = block_size;
-
- char mz_name[RTE_MEMZONE_NAMESIZE];
- snprintf(mz_name, sizeof(mz_name), "MALLOC_S%u_HEAP_%u",
- numa_socket, heap->mz_count++);
-
- /* try getting a block. if we fail and we don't need as big a block
- * as given in the config, we can shrink our request and try again
- */
- do {
- mz = rte_memzone_reserve(mz_name, mz_size, numa_socket,
- mz_flags);
- if (mz == NULL)
- mz_size /= 2;
- } while (mz == NULL && mz_size > min_size);
- if (mz == NULL)
- return -1;
-
/* allocate the memory block headers, one at end, one at start */
- struct malloc_elem *start_elem = (struct malloc_elem *)mz->addr;
- struct malloc_elem *end_elem = RTE_PTR_ADD(mz->addr,
- mz_size - MALLOC_ELEM_OVERHEAD);
+ struct malloc_elem *start_elem = (struct malloc_elem *)ms->addr;
+ struct malloc_elem *end_elem = RTE_PTR_ADD(ms->addr,
+ ms->len - MALLOC_ELEM_OVERHEAD);
end_elem = RTE_PTR_ALIGN_FLOOR(end_elem, RTE_CACHE_LINE_SIZE);
+ const size_t elem_size = (uintptr_t)end_elem - (uintptr_t)start_elem;
- const unsigned elem_size = (uintptr_t)end_elem - (uintptr_t)start_elem;
- malloc_elem_init(start_elem, heap, mz, elem_size);
+ malloc_elem_init(start_elem, heap, ms, elem_size);
malloc_elem_mkend(end_elem, start_elem);
malloc_elem_free_list_insert(start_elem);
- /* increase heap total size by size of new memzone */
- heap->total_size+=mz_size - MALLOC_ELEM_OVERHEAD;
- return 0;
+ heap->total_size += elem_size;
}
/*
* Iterates through the freelist for a heap to find a free element
* which can store data of the required size and with the requested alignment.
+ * If size is 0, find the biggest available elem.
* Returns null on failure, or pointer to element on success.
*/
static struct malloc_elem *
-find_suitable_element(struct malloc_heap *heap, size_t size, unsigned align)
+find_suitable_element(struct malloc_heap *heap, size_t size,
+ unsigned flags, size_t align, size_t bound)
{
size_t idx;
- struct malloc_elem *elem;
+ struct malloc_elem *elem, *alt_elem = NULL;
for (idx = malloc_elem_free_list_index(size);
- idx < RTE_HEAP_NUM_FREELISTS; idx++)
- {
+ idx < RTE_HEAP_NUM_FREELISTS; idx++) {
for (elem = LIST_FIRST(&heap->free_head[idx]);
- !!elem; elem = LIST_NEXT(elem, free_list))
- {
- if (malloc_elem_can_hold(elem, size, align))
- return elem;
+ !!elem; elem = LIST_NEXT(elem, free_list)) {
+ if (malloc_elem_can_hold(elem, size, align, bound)) {
+ if (check_hugepage_sz(flags, elem->ms->hugepage_sz))
+ return elem;
+ if (alt_elem == NULL)
+ alt_elem = elem;
+ }
}
}
+
+ if ((alt_elem != NULL) && (flags & RTE_MEMZONE_SIZE_HINT_ONLY))
+ return alt_elem;
+
return NULL;
}
/*
- * Main function called by malloc to allocate a block of memory from the
- * heap. It locks the free list, scans it, and adds a new memzone if the
- * scan fails. Once the new memzone is added, it re-scans and should return
+ * Main function to allocate a block of memory from the heap.
+ * It locks the free list, scans it for a suitable element, and returns
+ * the new element after releasing the lock.
*/
void *
malloc_heap_alloc(struct malloc_heap *heap,
- const char *type __attribute__((unused)), size_t size, unsigned align)
+ const char *type __attribute__((unused)), size_t size, unsigned flags,
+ size_t align, size_t bound)
{
+ struct malloc_elem *elem;
+
size = RTE_CACHE_LINE_ROUNDUP(size);
align = RTE_CACHE_LINE_ROUNDUP(align);
+
rte_spinlock_lock(&heap->lock);
- struct malloc_elem *elem = find_suitable_element(heap, size, align);
- if (elem == NULL){
- if ((malloc_heap_add_memzone(heap, size, align)) == 0)
- elem = find_suitable_element(heap, size, align);
- }
- if (elem != NULL){
- elem = malloc_elem_alloc(elem, size, align);
+ elem = find_suitable_element(heap, size, flags, align, bound);
+ if (elem != NULL) {
+ elem = malloc_elem_alloc(elem, size, align, bound);
/* increase heap's count of allocated elements */
heap->alloc_count++;
}
rte_spinlock_unlock(&heap->lock);
- return elem == NULL ? NULL : (void *)(&elem[1]);
+ return elem == NULL ? NULL : (void *)(&elem[1]);
}
/*
@@ -208,3 +208,29 @@ malloc_heap_get_stats(const struct malloc_heap *heap,
return 0;
}
+int
+rte_eal_malloc_heap_init(void)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ unsigned ms_cnt;
+ struct rte_memseg *ms;
+
+ if (mcfg == NULL)
+ return -1;
+
+ for (ms = &mcfg->memseg[0], ms_cnt = 0;
+ (ms_cnt < RTE_MAX_MEMSEG) && (ms->len > 0);
+ ms_cnt++, ms++) {
+#ifdef RTE_LIBRTE_IVSHMEM
+ /*
+	 * if the segment has an ioremap address set, it is an IVSHMEM
+	 * segment and not memory to allocate from.
+ */
+ if (ms->ioremap_addr != 0)
+ continue;
+#endif
+ malloc_heap_add_memseg(&mcfg->malloc_heaps[ms->socket_id], ms);
+ }
+
+ return 0;
+}
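/*
 * Illustrative call, not part of the patch, showing the new flags/bound
 * parameters: 4 KB, cache-line aligned, no boundary constraint, preferring
 * 2MB pages but accepting any page size via the hint flag (the fallback
 * path kept in alt_elem above). The flags come from rte_memzone.h.
 */
static void *
example_heap_alloc(struct malloc_heap *heap)
{
	return malloc_heap_alloc(heap, NULL, 4096,
				 RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY,
				 RTE_CACHE_LINE_SIZE, 0);
}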
diff --git a/src/dpdk_lib18/librte_malloc/malloc_heap.h b/src/dpdk22/lib/librte_eal/common/malloc_heap.h
index b4aec451..3ccbef0f 100755..100644
--- a/src/dpdk_lib18/librte_malloc/malloc_heap.h
+++ b/src/dpdk22/lib/librte_eal/common/malloc_heap.h
@@ -44,19 +44,24 @@ extern "C" {
static inline unsigned
malloc_get_numa_socket(void)
{
- return rte_socket_id();
+ unsigned socket_id = rte_socket_id();
+
+ if (socket_id == (unsigned)SOCKET_ID_ANY)
+ return 0;
+
+ return socket_id;
}
void *
-malloc_heap_alloc(struct malloc_heap *heap, const char *type,
- size_t size, unsigned align);
+malloc_heap_alloc(struct malloc_heap *heap, const char *type, size_t size,
+ unsigned flags, size_t align, size_t bound);
int
malloc_heap_get_stats(const struct malloc_heap *heap,
struct rte_malloc_socket_stats *socket_stats);
int
-rte_eal_heap_memzone_init(void);
+rte_eal_malloc_heap_init(void);
#ifdef __cplusplus
}
diff --git a/src/dpdk22/lib/librte_eal/common/rte_keepalive.c b/src/dpdk22/lib/librte_eal/common/rte_keepalive.c
new file mode 100644
index 00000000..736fd0f4
--- /dev/null
+++ b/src/dpdk22/lib/librte_eal/common/rte_keepalive.c
@@ -0,0 +1,113 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2015 Intel Shannon Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_keepalive.h>
+#include <rte_malloc.h>
+
+static void
+print_trace(const char *msg, struct rte_keepalive *keepcfg, int idx_core)
+{
+ RTE_LOG(INFO, EAL, "%sLast seen %" PRId64 "ms ago.\n",
+ msg,
+ ((rte_rdtsc() - keepcfg->last_alive[idx_core])*1000)
+ / rte_get_tsc_hz()
+ );
+}
+
+
+void
+rte_keepalive_dispatch_pings(__rte_unused void *ptr_timer,
+ void *ptr_data)
+{
+ struct rte_keepalive *keepcfg = ptr_data;
+ int idx_core;
+
+ for (idx_core = 0; idx_core < RTE_KEEPALIVE_MAXCORES; idx_core++) {
+ if (keepcfg->active_cores[idx_core] == 0)
+ continue;
+
+ switch (keepcfg->state_flags[idx_core]) {
+ case ALIVE: /* Alive */
+ keepcfg->state_flags[idx_core] = MISSING;
+ keepcfg->last_alive[idx_core] = rte_rdtsc();
+ break;
+ case MISSING: /* MIA */
+ print_trace("Core MIA. ", keepcfg, idx_core);
+ keepcfg->state_flags[idx_core] = DEAD;
+ break;
+ case DEAD: /* Dead */
+ keepcfg->state_flags[idx_core] = GONE;
+ print_trace("Core died. ", keepcfg, idx_core);
+ if (keepcfg->callback)
+ keepcfg->callback(
+ keepcfg->callback_data,
+ idx_core
+ );
+ break;
+ case GONE: /* Buried */
+ break;
+ }
+ }
+}
+
+
+struct rte_keepalive *
+rte_keepalive_create(rte_keepalive_failure_callback_t callback,
+ void *data)
+{
+ struct rte_keepalive *keepcfg;
+
+ keepcfg = rte_zmalloc("RTE_EAL_KEEPALIVE",
+ sizeof(struct rte_keepalive),
+ RTE_CACHE_LINE_SIZE);
+ if (keepcfg != NULL) {
+ keepcfg->callback = callback;
+ keepcfg->callback_data = data;
+ keepcfg->tsc_initial = rte_rdtsc();
+ keepcfg->tsc_mhz = rte_get_tsc_hz() / 1000;
+ }
+ return keepcfg;
+}
+
+
+void
+rte_keepalive_register_core(struct rte_keepalive *keepcfg, const int id_core)
+{
+ if (id_core < RTE_KEEPALIVE_MAXCORES)
+ keepcfg->active_cores[id_core] = 1;
+}
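/*
 * Illustrative sketch, not part of the patch: create a keepalive monitor
 * with a failure callback and register every slave lcore; the dispatcher
 * above is then typically driven from an rte_timer. The callback body is
 * made up.
 */
#include <stdio.h>

static void
example_core_died(void *data, const int id_core)
{
	(void)data;
	printf("lcore %d stopped checking in\n", id_core);
}

static struct rte_keepalive *
example_keepalive_setup(void)
{
	unsigned lcore;
	struct rte_keepalive *ka = rte_keepalive_create(example_core_died, NULL);

	if (ka == NULL)
		return NULL;
	RTE_LCORE_FOREACH_SLAVE(lcore)
		rte_keepalive_register_core(ka, lcore);
	return ka;
}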
diff --git a/src/dpdk_lib18/librte_malloc/rte_malloc.c b/src/dpdk22/lib/librte_eal/common/rte_malloc.c
index b966fc7d..47deb007 100755..100644
--- a/src/dpdk_lib18/librte_malloc/rte_malloc.c
+++ b/src/dpdk22/lib/librte_eal/common/rte_malloc.c
@@ -39,8 +39,6 @@
#include <rte_memcpy.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_branch_prediction.h>
@@ -75,9 +73,12 @@ rte_malloc_socket(const char *type, size_t size, unsigned align, int socket_arg)
void *ret;
/* return NULL if size is 0 or alignment is not power-of-2 */
- if (size == 0 || !rte_is_power_of_2(align))
+ if (size == 0 || (align && !rte_is_power_of_2(align)))
return NULL;
+ if (!rte_eal_has_hugepages())
+ socket_arg = SOCKET_ID_ANY;
+
if (socket_arg == SOCKET_ID_ANY)
socket = malloc_get_numa_socket();
else
@@ -88,7 +89,7 @@ rte_malloc_socket(const char *type, size_t size, unsigned align, int socket_arg)
return NULL;
ret = malloc_heap_alloc(&mcfg->malloc_heaps[socket], type,
- size, align == 0 ? 1 : align);
+ size, 0, align == 0 ? 1 : align, 0);
if (ret != NULL || socket_arg != SOCKET_ID_ANY)
return ret;
@@ -99,7 +100,7 @@ rte_malloc_socket(const char *type, size_t size, unsigned align, int socket_arg)
continue;
ret = malloc_heap_alloc(&mcfg->malloc_heaps[i], type,
- size, align == 0 ? 1 : align);
+ size, 0, align == 0 ? 1 : align, 0);
if (ret != NULL)
return ret;
}
@@ -257,5 +258,5 @@ rte_malloc_virt2phy(const void *addr)
const struct malloc_elem *elem = malloc_elem_from_data(addr);
if (elem == NULL)
return 0;
- return elem->mz->phys_addr + ((uintptr_t)addr - (uintptr_t)elem->mz->addr);
+ return elem->ms->phys_addr + ((uintptr_t)addr - (uintptr_t)elem->ms->addr);
}
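
Two behavioural changes are folded into this hunk: align == 0 is now accepted as "use the default alignment" (only a non-zero, non-power-of-two value is rejected), and without hugepages the requested socket is ignored. A short sketch of what callers can now rely on, assuming only the public rte_malloc()/rte_free() prototypes:

#include <rte_malloc.h>

static void
alloc_examples(void)
{
	void *a = rte_malloc("meta", 1024, 0); /* 0 = default alignment, now valid */
	void *b = rte_malloc("dma", 4096, 64); /* power of two: accepted */
	void *c = rte_malloc("bad", 128, 48);  /* not a power of two: returns NULL */

	rte_free(a);
	rte_free(b);
	rte_free(c); /* rte_free(NULL) is a no-op */
}
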
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal.c b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal.c
index 2fb1acc2..635ec363 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal.c
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal.c
@@ -43,7 +43,6 @@
#include <getopt.h>
#include <sys/file.h>
#include <fcntl.h>
-#include <dlfcn.h>
#include <stddef.h>
#include <errno.h>
#include <limits.h>
@@ -59,7 +58,6 @@
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
@@ -76,7 +74,6 @@
#include <rte_version.h>
#include <rte_atomic.h>
#include <malloc_heap.h>
-#include <rte_eth_ring.h>
#include "eal_private.h"
#include "eal_thread.h"
@@ -92,20 +89,6 @@
/* Allow the application to print its usage message too if set */
static rte_usage_hook_t rte_application_usage_hook = NULL;
-TAILQ_HEAD(shared_driver_list, shared_driver);
-
-/* Definition for shared object drivers. */
-struct shared_driver {
- TAILQ_ENTRY(shared_driver) next;
-
- char name[PATH_MAX];
- void* lib_handle;
-};
-
-/* List of external loadable drivers */
-static struct shared_driver_list solib_list =
-TAILQ_HEAD_INITIALIZER(solib_list);
-
/* early configuration structure, when memory config is not mmapped */
static struct rte_mem_config early_mem_config;
@@ -352,17 +335,13 @@ eal_usage(const char *prgname)
printf("\nUsage: %s ", prgname);
eal_common_usage();
printf("EAL Linux options:\n"
- " -d LIB.so : add driver (can be used multiple times)\n"
- " --"OPT_XEN_DOM0" : support application running on Xen Domain0 "
- "without hugetlbfs\n"
- " --"OPT_SOCKET_MEM" : memory to allocate on specific\n"
- " sockets (use comma separated values)\n"
- " --"OPT_HUGE_DIR" : directory where hugetlbfs is mounted\n"
- " --"OPT_FILE_PREFIX": prefix for hugepage filenames\n"
- " --"OPT_BASE_VIRTADDR": specify base virtual address\n"
- " --"OPT_VFIO_INTR": specify desired interrupt mode for VFIO "
- "(legacy|msi|msix)\n"
- " --"OPT_CREATE_UIO_DEV": create /dev/uioX (usually done by hotplug)\n"
+ " --"OPT_SOCKET_MEM" Memory to allocate on sockets (comma separated values)\n"
+ " --"OPT_HUGE_DIR" Directory where hugetlbfs is mounted\n"
+ " --"OPT_FILE_PREFIX" Prefix for hugepage filenames\n"
+ " --"OPT_BASE_VIRTADDR" Base virtual address\n"
+ " --"OPT_CREATE_UIO_DEV" Create /dev/uioX (usually done by hotplug)\n"
+ " --"OPT_VFIO_INTR" Interrupt mode for VFIO (legacy|msi|msix)\n"
+ " --"OPT_XEN_DOM0" Support running on Xen dom0 without hugetlbfs\n"
"\n");
/* Allow the application to print its usage message too if hook is set */
if ( rte_application_usage_hook ) {
@@ -503,17 +482,19 @@ eal_get_hugepage_mem_size(void)
return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
}
-/* Parse the argument given in the command line of the application */
-static int
-eal_parse_args(int argc, char **argv)
+/* Parse the arguments for --log-level only */
+static void
+eal_log_level_parse(int argc, char **argv)
{
- int opt, ret;
+ int opt;
char **argvopt;
int option_index;
- char *prgname = argv[0];
- struct shared_driver *solib;
+ const int old_optind = optind;
+ const int old_optopt = optopt;
+ char * const old_optarg = optarg;
argvopt = argv;
+ optind = 1;
eal_reset_internal_config(&internal_config);
@@ -524,31 +505,62 @@ eal_parse_args(int argc, char **argv)
/* getopt is not happy, stop right now */
if (opt == '?')
- return -1;
+ break;
+
+ ret = (opt == OPT_LOG_LEVEL_NUM) ?
+ eal_parse_common_option(opt, optarg, &internal_config) : 0;
+
+ /* common parser is not happy */
+ if (ret < 0)
+ break;
+ }
+
+ /* restore getopt lib */
+ optind = old_optind;
+ optopt = old_optopt;
+ optarg = old_optarg;
+}
+
+/* Parse the argument given in the command line of the application */
+static int
+eal_parse_args(int argc, char **argv)
+{
+ int opt, ret;
+ char **argvopt;
+ int option_index;
+ char *prgname = argv[0];
+ const int old_optind = optind;
+ const int old_optopt = optopt;
+ char * const old_optarg = optarg;
+
+ argvopt = argv;
+ optind = 1;
+
+ while ((opt = getopt_long(argc, argvopt, eal_short_options,
+ eal_long_options, &option_index)) != EOF) {
+
+ /* getopt is not happy, stop right now */
+ if (opt == '?') {
+ eal_usage(prgname);
+ ret = -1;
+ goto out;
+ }
ret = eal_parse_common_option(opt, optarg, &internal_config);
/* common parser is not happy */
if (ret < 0) {
eal_usage(prgname);
- return -1;
+ ret = -1;
+ goto out;
}
/* common parser handled this option */
if (ret == 0)
continue;
switch (opt) {
- /* force loading of external driver */
- case 'd':
- solib = malloc(sizeof(*solib));
- if (solib == NULL) {
- RTE_LOG(ERR, EAL, "malloc(solib) failed\n");
- return -1;
- }
- memset(solib, 0, sizeof(*solib));
- strncpy(solib->name, optarg, PATH_MAX-1);
- solib->name[PATH_MAX-1] = 0;
- TAILQ_INSERT_TAIL(&solib_list, solib, next);
- break;
+ case 'h':
+ eal_usage(prgname);
+ exit(EXIT_SUCCESS);
/* long options */
case OPT_XEN_DOM0_NUM:
@@ -558,7 +570,8 @@ eal_parse_args(int argc, char **argv)
RTE_LOG(ERR, EAL, "Can't support DPDK app "
"running on Dom0, please configure"
" RTE_LIBRTE_XEN_DOM0=y\n");
- return -1;
+ ret = -1;
+ goto out;
#endif
break;
@@ -575,7 +588,8 @@ eal_parse_args(int argc, char **argv)
RTE_LOG(ERR, EAL, "invalid parameters for --"
OPT_SOCKET_MEM "\n");
eal_usage(prgname);
- return -1;
+ ret = -1;
+ goto out;
}
break;
@@ -584,7 +598,8 @@ eal_parse_args(int argc, char **argv)
RTE_LOG(ERR, EAL, "invalid parameter for --"
OPT_BASE_VIRTADDR "\n");
eal_usage(prgname);
- return -1;
+ ret = -1;
+ goto out;
}
break;
@@ -593,7 +608,8 @@ eal_parse_args(int argc, char **argv)
RTE_LOG(ERR, EAL, "invalid parameters for --"
OPT_VFIO_INTR "\n");
eal_usage(prgname);
- return -1;
+ ret = -1;
+ goto out;
}
break;
@@ -615,17 +631,21 @@ eal_parse_args(int argc, char **argv)
"on Linux\n", opt);
}
eal_usage(prgname);
- return -1;
+ ret = -1;
+ goto out;
}
}
- if (eal_adjust_config(&internal_config) != 0)
- return -1;
+ if (eal_adjust_config(&internal_config) != 0) {
+ ret = -1;
+ goto out;
+ }
/* sanity checks */
if (eal_check_common_options(&internal_config) != 0) {
eal_usage(prgname);
- return -1;
+ ret = -1;
+ goto out;
}
/* --xen-dom0 doesn't make sense with --socket-mem */
@@ -633,13 +653,20 @@ eal_parse_args(int argc, char **argv)
RTE_LOG(ERR, EAL, "Options --"OPT_SOCKET_MEM" cannot be specified "
"together with --"OPT_XEN_DOM0"\n");
eal_usage(prgname);
- return -1;
+ ret = -1;
+ goto out;
}
if (optind >= 0)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+
+out:
+ /* restore getopt lib */
+ optind = old_optind;
+ optopt = old_optopt;
+ optarg = old_optarg;
+
return ret;
}
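
Both the new eal_log_level_parse() and the reworked eal_parse_args() bracket their getopt_long() loop with the same save/restore of the library's global state (optind, optopt, optarg), which is what makes it safe to scan argv twice. The pattern in isolation, as a sketch:

#include <getopt.h>

static void
scan_twice(int argc, char **argv)
{
	const int old_optind = optind;
	const int old_optopt = optopt;
	char * const old_optarg = optarg;
	int opt;

	optind = 1; /* restart scanning from argv[1] */
	while ((opt = getopt(argc, argv, "ab:")) != -1)
		; /* first pass: e.g. pick out the log level only */

	/* restore getopt state so the full parse later starts clean */
	optind = old_optind;
	optopt = old_optopt;
	optarg = old_optarg;
}
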
@@ -700,8 +727,9 @@ rte_eal_init(int argc, char **argv)
int i, fctret, ret;
pthread_t thread_id;
static rte_atomic32_t run_once = RTE_ATOMIC32_INIT(0);
- struct shared_driver *solib = NULL;
const char *logid;
+ char cpuset[RTE_CPU_AFFINITY_STR_LEN];
+ char thread_name[RTE_MAX_THREAD_NAME_LEN];
if (!rte_atomic32_test_and_set(&run_once))
return -1;
@@ -714,6 +742,11 @@ rte_eal_init(int argc, char **argv)
if (rte_eal_log_early_init() < 0)
rte_panic("Cannot init early logs\n");
+ eal_log_level_parse(argc, argv);
+
+ /* set log level as early as possible */
+ rte_set_log_level(internal_config.log_level);
+
if (rte_eal_cpu_init() < 0)
rte_panic("Cannot detect lcores\n");
@@ -721,9 +754,6 @@ rte_eal_init(int argc, char **argv)
if (fctret < 0)
exit(1);
- /* set log level as early as possible */
- rte_set_log_level(internal_config.log_level);
-
if (internal_config.no_hugetlbfs == 0 &&
internal_config.process_type != RTE_PROC_SECONDARY &&
internal_config.xen_dom0_support == 0 &&
@@ -783,9 +813,6 @@ rte_eal_init(int argc, char **argv)
if (rte_eal_alarm_init() < 0)
rte_panic("Cannot init interrupt-handling thread\n");
- if (rte_eal_intr_init() < 0)
- rte_panic("Cannot init interrupt-handling thread\n");
-
if (rte_eal_timer_init() < 0)
rte_panic("Cannot init HPET or TSC timers\n");
@@ -793,21 +820,23 @@ rte_eal_init(int argc, char **argv)
rte_eal_mcfg_complete();
- TAILQ_FOREACH(solib, &solib_list, next) {
- RTE_LOG(INFO, EAL, "open shared lib %s\n", solib->name);
- solib->lib_handle = dlopen(solib->name, RTLD_NOW);
- if (solib->lib_handle == NULL)
- RTE_LOG(WARNING, EAL, "%s\n", dlerror());
- }
+ if (eal_plugins_init() < 0)
+ rte_panic("Cannot init plugins\n");
eal_thread_init_master(rte_config.master_lcore);
- RTE_LOG(DEBUG, EAL, "Master core %u is ready (tid=%x)\n",
- rte_config.master_lcore, (int)thread_id);
+ ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
+
+ RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%x;cpuset=[%s%s])\n",
+ rte_config.master_lcore, (int)thread_id, cpuset,
+ ret == 0 ? "" : "...");
if (rte_eal_dev_init() < 0)
rte_panic("Cannot init pmd devices\n");
+ if (rte_eal_intr_init() < 0)
+ rte_panic("Cannot init interrupt-handling thread\n");
+
RTE_LCORE_FOREACH_SLAVE(i) {
/*
@@ -826,6 +855,15 @@ rte_eal_init(int argc, char **argv)
eal_thread_loop, NULL);
if (ret != 0)
rte_panic("Cannot create thread\n");
+
+ /* Set thread_name for aid in debugging. */
+ snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
+ "lcore-slave-%d", i);
+ ret = rte_thread_setname(lcore_config[i].thread_id,
+ thread_name);
+ if (ret != 0)
+ RTE_LOG(ERR, EAL,
+ "Cannot set name for lcore thread\n");
}
/*
@@ -846,16 +884,44 @@ rte_eal_init(int argc, char **argv)
enum rte_lcore_role_t
rte_eal_lcore_role(unsigned lcore_id)
{
- return (rte_config.lcore_role[lcore_id]);
+ return rte_config.lcore_role[lcore_id];
}
enum rte_proc_type_t
rte_eal_process_type(void)
{
- return (rte_config.process_type);
+ return rte_config.process_type;
}
int rte_eal_has_hugepages(void)
{
return ! internal_config.no_hugetlbfs;
}
+
+int
+rte_eal_check_module(const char *module_name)
+{
+ char mod_name[30]; /* Is any module name longer than 30 bytes? */
+ int ret = 0;
+ int n;
+
+ if (NULL == module_name)
+ return -1;
+
+ FILE *fd = fopen("/proc/modules", "r");
+ if (NULL == fd) {
+ RTE_LOG(ERR, EAL, "Open /proc/modules failed!"
+ " error %i (%s)\n", errno, strerror(errno));
+ return -1;
+ }
+ while (!feof(fd)) {
+ n = fscanf(fd, "%29s %*[^\n]", mod_name);
+ if ((n == 1) && !strcmp(mod_name, module_name)) {
+ ret = 1;
+ break;
+ }
+ }
+ fclose(fd);
+
+ return ret;
+}
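
The new rte_eal_check_module() scans /proc/modules one token at a time and returns 1 when the module is loaded, 0 when it is not, and -1 on error. A hedged caller sketch, using vfio_pci as the module name purely for illustration:

#include <stdio.h>
#include <rte_eal.h>

static void
check_vfio(void)
{
	int ret = rte_eal_check_module("vfio_pci");

	if (ret == 1)
		printf("vfio_pci is loaded\n");
	else if (ret == 0)
		printf("vfio_pci is not loaded\n");
	else
		printf("could not read /proc/modules\n");
}
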
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_alarm.c b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_alarm.c
index e8da32fc..8b042abc 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_alarm.c
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_alarm.c
@@ -45,7 +45,6 @@
#include <rte_alarm.h>
#include <rte_common.h>
#include <rte_per_lcore.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>
@@ -64,6 +63,12 @@
#define MS_PER_S 1000
#define US_PER_S (US_PER_MS * MS_PER_S)
+#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
+#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
+#else
+#define CLOCK_TYPE_ID CLOCK_MONOTONIC
+#endif
+
struct alarm_entry {
LIST_ENTRY(alarm_entry) next;
struct timeval time;
@@ -100,14 +105,14 @@ static void
eal_alarm_callback(struct rte_intr_handle *hdl __rte_unused,
void *arg __rte_unused)
{
- struct timeval now;
+ struct timespec now;
struct alarm_entry *ap;
rte_spinlock_lock(&alarm_list_lk);
while ((ap = LIST_FIRST(&alarm_list)) !=NULL &&
- gettimeofday(&now, NULL) == 0 &&
+ clock_gettime(CLOCK_TYPE_ID, &now) == 0 &&
(ap->time.tv_sec < now.tv_sec || (ap->time.tv_sec == now.tv_sec &&
- ap->time.tv_usec <= now.tv_usec))){
+ (ap->time.tv_usec * NS_PER_US) <= now.tv_nsec))) {
ap->executing = 1;
ap->executing_id = pthread_self();
rte_spinlock_unlock(&alarm_list_lk);
@@ -127,11 +132,11 @@ eal_alarm_callback(struct rte_intr_handle *hdl __rte_unused,
atime.it_value.tv_sec = ap->time.tv_sec;
atime.it_value.tv_nsec = ap->time.tv_usec * NS_PER_US;
/* perform borrow for subtraction if necessary */
- if (now.tv_usec > ap->time.tv_usec)
+ if (now.tv_nsec > (ap->time.tv_usec * NS_PER_US))
atime.it_value.tv_sec--, atime.it_value.tv_nsec += US_PER_S * NS_PER_US;
atime.it_value.tv_sec -= now.tv_sec;
- atime.it_value.tv_nsec -= now.tv_usec * NS_PER_US;
+ atime.it_value.tv_nsec -= now.tv_nsec;
timerfd_settime(intr_handle.fd, 0, &atime, NULL);
}
rte_spinlock_unlock(&alarm_list_lk);
@@ -140,7 +145,7 @@ eal_alarm_callback(struct rte_intr_handle *hdl __rte_unused,
int
rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb_fn, void *cb_arg)
{
- struct timeval now;
+ struct timespec now;
int ret = 0;
struct alarm_entry *ap, *new_alarm;
@@ -153,12 +158,12 @@ rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb_fn, void *cb_arg)
return -ENOMEM;
/* use current time to calculate absolute time of alarm */
- gettimeofday(&now, NULL);
+ clock_gettime(CLOCK_TYPE_ID, &now);
new_alarm->cb_fn = cb_fn;
new_alarm->cb_arg = cb_arg;
- new_alarm->time.tv_usec = (now.tv_usec + us) % US_PER_S;
- new_alarm->time.tv_sec = now.tv_sec + ((now.tv_usec + us) / US_PER_S);
+ new_alarm->time.tv_usec = ((now.tv_nsec / NS_PER_US) + us) % US_PER_S;
+ new_alarm->time.tv_sec = now.tv_sec + (((now.tv_nsec / NS_PER_US) + us) / US_PER_S);
rte_spinlock_lock(&alarm_list_lk);
if (!handler_registered) {
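
The switch from gettimeofday() to clock_gettime() matters because the monotonic clock cannot jump when the wall clock is stepped by NTP or an administrator, so pending alarms keep their intended relative delay. A sketch of the same absolute-expiry arithmetic, using CLOCK_MONOTONIC (the code above prefers CLOCK_MONOTONIC_RAW when glibc defines it):

#include <stdint.h>
#include <time.h>

/* Absolute expiry "us" microseconds from now, split into the same
 * tv_sec/tv_usec pair that struct alarm_entry stores. */
static void
abs_expiry(uint64_t us, time_t *sec, uint64_t *usec)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	*usec = ((uint64_t)(now.tv_nsec / 1000) + us) % 1000000;
	*sec = now.tv_sec + ((uint64_t)(now.tv_nsec / 1000) + us) / 1000000;
}
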
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_debug.c b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_debug.c
index 44fc4f33..907fbfa7 100755..100644
--- a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_debug.c
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_debug.c
@@ -53,11 +53,17 @@ void rte_dump_stack(void)
size = backtrace(func, BACKTRACE_SIZE);
symb = backtrace_symbols(func, size);
+
+ if (symb == NULL)
+ return;
+
while (size > 0) {
rte_log(RTE_LOG_ERR, RTE_LOGTYPE_EAL,
"%d: [%s]\n", size, symb[size - 1]);
size --;
}
+
+ free(symb);
}
/* not implemented in this environment */
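
The two added lines fix a real leak: backtrace_symbols() malloc()s one block holding the whole pointer array plus the strings, so the caller frees the array once and must not free the individual strings. The pattern on its own:

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

static void
dump_stack_sketch(void)
{
	void *frames[64];
	int n = backtrace(frames, 64);
	char **symb = backtrace_symbols(frames, n);

	if (symb == NULL) /* allocation failed: nothing to print or free */
		return;
	while (n > 0) {
		fprintf(stderr, "%d: [%s]\n", n, symb[n - 1]);
		n--;
	}
	free(symb); /* one free() releases the array and its strings */
}
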
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_hugepage_info.c b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
index 590cb565..18858e2d 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_hugepage_info.c
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
@@ -47,7 +47,6 @@
#include <rte_memory.h>
#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
@@ -62,33 +61,24 @@
static const char sys_dir_path[] = "/sys/kernel/mm/hugepages";
-static int32_t
+/* this function is only called from eal_hugepage_info_init which itself
+ * is only called from a primary process */
+static uint32_t
get_num_hugepages(const char *subdir)
{
char path[PATH_MAX];
long unsigned resv_pages, num_pages = 0;
- const char *nr_hp_file;
+ const char *nr_hp_file = "free_hugepages";
const char *nr_rsvd_file = "resv_hugepages";
/* first, check how many reserved pages kernel reports */
snprintf(path, sizeof(path), "%s/%s/%s",
sys_dir_path, subdir, nr_rsvd_file);
-
if (eal_parse_sysfs_value(path, &resv_pages) < 0)
return 0;
- /* if secondary process, just look at the number of hugepages,
- * otherwise look at number of free hugepages */
- if (internal_config.process_type == RTE_PROC_SECONDARY)
- nr_hp_file = "nr_hugepages";
- else
- nr_hp_file = "free_hugepages";
-
- memset(path, 0, sizeof(path));
-
snprintf(path, sizeof(path), "%s/%s/%s",
sys_dir_path, subdir, nr_hp_file);
-
if (eal_parse_sysfs_value(path, &num_pages) < 0)
return 0;
@@ -96,11 +86,18 @@ get_num_hugepages(const char *subdir)
RTE_LOG(WARNING, EAL, "No free hugepages reported in %s\n",
subdir);
- /* adjust num_pages in case of primary process */
- if (num_pages > 0 && internal_config.process_type == RTE_PROC_PRIMARY)
+ /* adjust num_pages */
+ if (num_pages >= resv_pages)
num_pages -= resv_pages;
+ else if (resv_pages)
+ num_pages = 0;
+
+ /* we want to return a uint32_t and more than this looks suspicious
+ * anyway ... */
+ if (num_pages > UINT32_MAX)
+ num_pages = UINT32_MAX;
- return (int32_t)num_pages;
+ return num_pages;
}
static uint64_t
@@ -192,15 +189,6 @@ get_hugepage_dir(uint64_t hugepage_sz)
return retval;
}
-static inline void
-swap_hpi(struct hugepage_info *a, struct hugepage_info *b)
-{
- char buf[sizeof(*a)];
- memcpy(buf, a, sizeof(buf));
- memcpy(a, b, sizeof(buf));
- memcpy(b, buf, sizeof(buf));
-}
-
/*
* Clear the hugepage directory of whatever hugepage files
* there are. Checks if the file is locked (i.e.
@@ -217,7 +205,7 @@ clear_hugedir(const char * hugedir)
/* open directory */
dir = opendir(hugedir);
if (!dir) {
- RTE_LOG(INFO, EAL, "Unable to open hugepage directory %s\n",
+ RTE_LOG(ERR, EAL, "Unable to open hugepage directory %s\n",
hugedir);
goto error;
}
@@ -225,7 +213,7 @@ clear_hugedir(const char * hugedir)
dirent = readdir(dir);
if (!dirent) {
- RTE_LOG(INFO, EAL, "Unable to read hugepage directory %s\n",
+ RTE_LOG(ERR, EAL, "Unable to read hugepage directory %s\n",
hugedir);
goto error;
}
@@ -265,12 +253,21 @@ error:
if (dir)
closedir(dir);
- RTE_LOG(INFO, EAL, "Error while clearing hugepage dir: %s\n",
+ RTE_LOG(ERR, EAL, "Error while clearing hugepage dir: %s\n",
strerror(errno));
return -1;
}
+static int
+compare_hpi(const void *a, const void *b)
+{
+ const struct hugepage_info *hpi_a = a;
+ const struct hugepage_info *hpi_b = b;
+
+ return hpi_b->hugepage_sz - hpi_a->hugepage_sz;
+}
+
/*
* when we initialize the hugepage info, everything goes
* to socket 0 by default. it will later get sorted by memory
@@ -282,76 +279,85 @@ eal_hugepage_info_init(void)
const char dirent_start_text[] = "hugepages-";
const size_t dirent_start_len = sizeof(dirent_start_text) - 1;
unsigned i, num_sizes = 0;
+ DIR *dir;
+ struct dirent *dirent;
- DIR *dir = opendir(sys_dir_path);
+ dir = opendir(sys_dir_path);
if (dir == NULL)
- rte_panic("Cannot open directory %s to read system hugepage info\n",
- sys_dir_path);
+ rte_panic("Cannot open directory %s to read system hugepage "
+ "info\n", sys_dir_path);
- struct dirent *dirent = readdir(dir);
- while(dirent != NULL){
- if (strncmp(dirent->d_name, dirent_start_text, dirent_start_len) == 0){
- struct hugepage_info *hpi = \
- &internal_config.hugepage_info[num_sizes];
- hpi->hugepage_sz = rte_str_to_size(&dirent->d_name[dirent_start_len]);
- hpi->hugedir = get_hugepage_dir(hpi->hugepage_sz);
-
- /* first, check if we have a mountpoint */
- if (hpi->hugedir == NULL){
- int32_t num_pages;
- if ((num_pages = get_num_hugepages(dirent->d_name)) > 0)
- RTE_LOG(INFO, EAL, "%u hugepages of size %llu reserved, "\
- "but no mounted hugetlbfs found for that size\n",
- (unsigned)num_pages,
- (unsigned long long)hpi->hugepage_sz);
- } else {
- /* try to obtain a writelock */
- hpi->lock_descriptor = open(hpi->hugedir, O_RDONLY);
-
- /* if blocking lock failed */
- if (flock(hpi->lock_descriptor, LOCK_EX) == -1) {
- RTE_LOG(CRIT, EAL, "Failed to lock hugepage directory!\n");
- closedir(dir);
- return -1;
- }
- /* clear out the hugepages dir from unused pages */
- if (clear_hugedir(hpi->hugedir) == -1) {
- closedir(dir);
- return -1;
- }
+ for (dirent = readdir(dir); dirent != NULL; dirent = readdir(dir)) {
+ struct hugepage_info *hpi;
- /* for now, put all pages into socket 0,
- * later they will be sorted */
- hpi->num_pages[0] = get_num_hugepages(dirent->d_name);
+ if (strncmp(dirent->d_name, dirent_start_text,
+ dirent_start_len) != 0)
+ continue;
+
+ if (num_sizes >= MAX_HUGEPAGE_SIZES)
+ break;
+
+ hpi = &internal_config.hugepage_info[num_sizes];
+ hpi->hugepage_sz =
+ rte_str_to_size(&dirent->d_name[dirent_start_len]);
+ hpi->hugedir = get_hugepage_dir(hpi->hugepage_sz);
+
+ /* first, check if we have a mountpoint */
+ if (hpi->hugedir == NULL) {
+ uint32_t num_pages;
+
+ num_pages = get_num_hugepages(dirent->d_name);
+ if (num_pages > 0)
+ RTE_LOG(NOTICE, EAL,
+ "%" PRIu32 " hugepages of size "
+ "%" PRIu64 " reserved, but no mounted "
+ "hugetlbfs found for that size\n",
+ num_pages, hpi->hugepage_sz);
+ continue;
+ }
+
+ /* try to obtain a writelock */
+ hpi->lock_descriptor = open(hpi->hugedir, O_RDONLY);
+
+ /* if blocking lock failed */
+ if (flock(hpi->lock_descriptor, LOCK_EX) == -1) {
+ RTE_LOG(CRIT, EAL,
+ "Failed to lock hugepage directory!\n");
+ break;
+ }
+ /* clear out the hugepages dir from unused pages */
+ if (clear_hugedir(hpi->hugedir) == -1)
+ break;
+
+ /* for now, put all pages into socket 0,
+ * later they will be sorted */
+ hpi->num_pages[0] = get_num_hugepages(dirent->d_name);
#ifndef RTE_ARCH_64
- /* for 32-bit systems, limit number of hugepages to 1GB per page size */
- hpi->num_pages[0] = RTE_MIN(hpi->num_pages[0],
- RTE_PGSIZE_1G / hpi->hugepage_sz);
+ /* for 32-bit systems, limit number of hugepages to
+ * 1GB per page size */
+ hpi->num_pages[0] = RTE_MIN(hpi->num_pages[0],
+ RTE_PGSIZE_1G / hpi->hugepage_sz);
#endif
- num_sizes++;
- }
- }
- dirent = readdir(dir);
+ num_sizes++;
}
closedir(dir);
+
+ /* something went wrong, and we broke from the for loop above */
+ if (dirent != NULL)
+ return -1;
+
internal_config.num_hugepage_sizes = num_sizes;
/* sort the page directory entries by size, largest to smallest */
- for (i = 0; i < num_sizes; i++){
- unsigned j;
- for (j = i+1; j < num_sizes; j++)
- if (internal_config.hugepage_info[j-1].hugepage_sz < \
- internal_config.hugepage_info[j].hugepage_sz)
- swap_hpi(&internal_config.hugepage_info[j-1],
- &internal_config.hugepage_info[j]);
- }
+ qsort(&internal_config.hugepage_info[0], num_sizes,
+ sizeof(internal_config.hugepage_info[0]), compare_hpi);
/* now we have all info, check we have at least one valid size */
for (i = 0; i < num_sizes; i++)
if (internal_config.hugepage_info[i].hugedir != NULL &&
- internal_config.hugepage_info[i].num_pages[0] > 0)
+ internal_config.hugepage_info[i].num_pages[0] > 0)
return 0;
/* no valid hugepage mounts available, return error */
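
One caveat on compare_hpi() above: it narrows the difference of two uint64_t page sizes to int, which happens to work for the 2MB/1GB sizes common on x86 but is not safe in general (16GB pages exist on some platforms, and the subtraction can wrap). A defensive variant, offered as a sketch rather than a required fix:

/* Overflow-proof descending comparator for qsort(). */
static int
compare_hpi_safe(const void *a, const void *b)
{
	const struct hugepage_info *hpi_a = a;
	const struct hugepage_info *hpi_b = b;

	if (hpi_b->hugepage_sz > hpi_a->hugepage_sz)
		return 1;
	if (hpi_b->hugepage_sz < hpi_a->hugepage_sz)
		return -1;
	return 0;
}
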
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_interrupts.c b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_interrupts.c
index dc2668a4..06b26a9e 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_interrupts.c
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_interrupts.c
@@ -44,13 +44,14 @@
#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <sys/ioctl.h>
+#include <sys/eventfd.h>
+#include <assert.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
@@ -67,8 +68,12 @@
#include "eal_private.h"
#include "eal_vfio.h"
+#include "eal_thread.h"
#define EAL_INTR_EPOLL_WAIT_FOREVER (-1)
+#define NB_OTHER_INTR 1
+
+static RTE_DEFINE_PER_LCORE(int, _epfd) = -1; /**< epoll fd per thread */
/**
* union for pipe fds.
@@ -127,6 +132,9 @@ static pthread_t intr_thread;
#ifdef VFIO_PRESENT
#define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))
+/* irq set buffer length for queue interrupts and LSC interrupt */
+#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
+ sizeof(int) * (RTE_MAX_RXTX_INTR_VEC_ID + 1))
/* enable legacy (INTx) interrupts */
static int
@@ -218,7 +226,7 @@ vfio_disable_intx(struct rte_intr_handle *intr_handle) {
return 0;
}
-/* enable MSI-X interrupts */
+/* enable MSI interrupts */
static int
vfio_enable_msi(struct rte_intr_handle *intr_handle) {
int len, ret;
@@ -244,27 +252,10 @@ vfio_enable_msi(struct rte_intr_handle *intr_handle) {
intr_handle->fd);
return -1;
}
-
- /* manually trigger interrupt to enable it */
- memset(irq_set, 0, len);
- len = sizeof(struct vfio_irq_set);
- irq_set->argsz = len;
- irq_set->count = 1;
- irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
- irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
- irq_set->start = 0;
-
- ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
-
- if (ret) {
- RTE_LOG(ERR, EAL, "Error triggering MSI interrupts for fd %d\n",
- intr_handle->fd);
- return -1;
- }
return 0;
}
-/* disable MSI-X interrupts */
+/* disable MSI interrupts */
static int
vfio_disable_msi(struct rte_intr_handle *intr_handle) {
struct vfio_irq_set *irq_set;
@@ -293,7 +284,7 @@ vfio_disable_msi(struct rte_intr_handle *intr_handle) {
static int
vfio_enable_msix(struct rte_intr_handle *intr_handle) {
int len, ret;
- char irq_set_buf[IRQ_SET_BUF_LEN];
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
struct vfio_irq_set *irq_set;
int *fd_ptr;
@@ -301,12 +292,20 @@ vfio_enable_msix(struct rte_intr_handle *intr_handle) {
irq_set = (struct vfio_irq_set *) irq_set_buf;
irq_set->argsz = len;
- irq_set->count = 1;
+ if (!intr_handle->max_intr)
+ intr_handle->max_intr = 1;
+ else if (intr_handle->max_intr > RTE_MAX_RXTX_INTR_VEC_ID)
+ intr_handle->max_intr = RTE_MAX_RXTX_INTR_VEC_ID + 1;
+
+ irq_set->count = intr_handle->max_intr;
irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
irq_set->start = 0;
fd_ptr = (int *) &irq_set->data;
- *fd_ptr = intr_handle->fd;
+ /* INTR vector offset 0 is reserved for the non-efd mapping */
+ fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = intr_handle->fd;
+ memcpy(&fd_ptr[RTE_INTR_VEC_RXTX_OFFSET], intr_handle->efds,
+ sizeof(*intr_handle->efds) * intr_handle->nb_efd);
ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
@@ -316,22 +315,6 @@ vfio_enable_msix(struct rte_intr_handle *intr_handle) {
return -1;
}
- /* manually trigger interrupt to enable it */
- memset(irq_set, 0, len);
- len = sizeof(struct vfio_irq_set);
- irq_set->argsz = len;
- irq_set->count = 1;
- irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
- irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
- irq_set->start = 0;
-
- ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
-
- if (ret) {
- RTE_LOG(ERR, EAL, "Error triggering MSI-X interrupts for fd %d\n",
- intr_handle->fd);
- return -1;
- }
return 0;
}
@@ -339,7 +322,7 @@ vfio_enable_msix(struct rte_intr_handle *intr_handle) {
static int
vfio_disable_msix(struct rte_intr_handle *intr_handle) {
struct vfio_irq_set *irq_set;
- char irq_set_buf[IRQ_SET_BUF_LEN];
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
int len, ret;
len = sizeof(struct vfio_irq_set);
@@ -361,6 +344,82 @@ vfio_disable_msix(struct rte_intr_handle *intr_handle) {
}
#endif
+static int
+uio_intx_intr_disable(struct rte_intr_handle *intr_handle)
+{
+ unsigned char command_high;
+
+ /* use UIO config file descriptor for uio_pci_generic */
+ if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
+ RTE_LOG(ERR, EAL,
+ "Error reading interrupts status for fd %d\n",
+ intr_handle->uio_cfg_fd);
+ return -1;
+ }
+ /* disable interrupts */
+ command_high |= 0x4;
+ if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
+ RTE_LOG(ERR, EAL,
+ "Error disabling interrupts for fd %d\n",
+ intr_handle->uio_cfg_fd);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+uio_intx_intr_enable(struct rte_intr_handle *intr_handle)
+{
+ unsigned char command_high;
+
+ /* use UIO config file descriptor for uio_pci_generic */
+ if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
+ RTE_LOG(ERR, EAL,
+ "Error reading interrupts status for fd %d\n",
+ intr_handle->uio_cfg_fd);
+ return -1;
+ }
+ /* enable interrupts */
+ command_high &= ~0x4;
+ if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
+ RTE_LOG(ERR, EAL,
+ "Error enabling interrupts for fd %d\n",
+ intr_handle->uio_cfg_fd);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+uio_intr_disable(struct rte_intr_handle *intr_handle)
+{
+ const int value = 0;
+
+ if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Error disabling interrupts for fd %d (%s)\n",
+ intr_handle->fd, strerror(errno));
+ return -1;
+ }
+ return 0;
+}
+
+static int
+uio_intr_enable(struct rte_intr_handle *intr_handle)
+{
+ const int value = 1;
+
+ if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Error enabling interrupts for fd %d (%s)\n",
+ intr_handle->fd, strerror(errno));
+ return -1;
+ }
+ return 0;
+}
+
int
rte_intr_callback_register(struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb, void *cb_arg)
@@ -430,7 +489,7 @@ rte_intr_callback_register(struct rte_intr_handle *intr_handle,
if (write(intr_pipe.writefd, "1", 1) < 0)
return -EPIPE;
- return (ret);
+ return ret;
}
int
@@ -494,26 +553,24 @@ rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
ret = -EPIPE;
}
- return (ret);
+ return ret;
}
int
rte_intr_enable(struct rte_intr_handle *intr_handle)
{
- const int value = 1;
-
- if (!intr_handle || intr_handle->fd < 0)
+ if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
return -1;
switch (intr_handle->type){
/* write to the uio fd to enable the interrupt */
case RTE_INTR_HANDLE_UIO:
- if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
- RTE_LOG(ERR, EAL,
- "Error enabling interrupts for fd %d\n",
- intr_handle->fd);
+ if (uio_intr_enable(intr_handle))
+ return -1;
+ break;
+ case RTE_INTR_HANDLE_UIO_INTX:
+ if (uio_intx_intr_enable(intr_handle))
return -1;
- }
break;
/* not used at this moment */
case RTE_INTR_HANDLE_ALARM:
@@ -546,20 +603,18 @@ rte_intr_enable(struct rte_intr_handle *intr_handle)
int
rte_intr_disable(struct rte_intr_handle *intr_handle)
{
- const int value = 0;
-
- if (!intr_handle || intr_handle->fd < 0)
+ if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
return -1;
switch (intr_handle->type){
/* write to the uio fd to disable the interrupt */
case RTE_INTR_HANDLE_UIO:
- if (write(intr_handle->fd, &value, sizeof(value)) < 0){
- RTE_LOG(ERR, EAL,
- "Error disabling interrupts for fd %d\n",
- intr_handle->fd);
+ if (uio_intr_disable(intr_handle))
+ return -1;
+ break;
+ case RTE_INTR_HANDLE_UIO_INTX:
+ if (uio_intx_intr_disable(intr_handle))
return -1;
- }
break;
/* not used at this moment */
case RTE_INTR_HANDLE_ALARM:
@@ -627,6 +682,7 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
 /* set the length to be read for each handle type */
switch (src->intr_handle.type) {
case RTE_INTR_HANDLE_UIO:
+ case RTE_INTR_HANDLE_UIO_INTX:
bytes_read = sizeof(buf.uio_intr_count);
break;
case RTE_INTR_HANDLE_ALARM:
@@ -639,24 +695,30 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
bytes_read = sizeof(buf.vfio_intr_count);
break;
#endif
+ case RTE_INTR_HANDLE_EXT:
default:
bytes_read = 1;
break;
}
- /**
- * read out to clear the ready-to-be-read flag
- * for epoll_wait.
- */
- bytes_read = read(events[n].data.fd, &buf, bytes_read);
-
- if (bytes_read < 0)
- RTE_LOG(ERR, EAL, "Error reading from file "
- "descriptor %d: %s\n", events[n].data.fd,
- strerror(errno));
- else if (bytes_read == 0)
- RTE_LOG(ERR, EAL, "Read nothing from file "
- "descriptor %d\n", events[n].data.fd);
+ if (src->intr_handle.type != RTE_INTR_HANDLE_EXT) {
+ /**
+ * read out to clear the ready-to-be-read flag
+ * for epoll_wait.
+ */
+ bytes_read = read(events[n].data.fd, &buf, bytes_read);
+ if (bytes_read < 0) {
+ if (errno == EINTR || errno == EWOULDBLOCK)
+ continue;
+
+ RTE_LOG(ERR, EAL, "Error reading from file "
+ "descriptor %d: %s\n",
+ events[n].data.fd,
+ strerror(errno));
+ } else if (bytes_read == 0)
+ RTE_LOG(ERR, EAL, "Read nothing from file "
+ "descriptor %d\n", events[n].data.fd);
+ }
/* grab a lock, again to call callbacks and update status. */
rte_spinlock_lock(&intr_lock);
@@ -802,7 +864,8 @@ eal_intr_thread_main(__rte_unused void *arg)
int
rte_eal_intr_init(void)
{
- int ret = 0;
+ int ret = 0, ret_1 = 0;
+ char thread_name[RTE_MAX_THREAD_NAME_LEN];
/* init the global interrupt source head */
TAILQ_INIT(&intr_sources);
@@ -817,10 +880,345 @@ rte_eal_intr_init(void)
/* create the host thread to wait/handle the interrupt */
ret = pthread_create(&intr_thread, NULL,
eal_intr_thread_main, NULL);
- if (ret != 0)
+ if (ret != 0) {
RTE_LOG(ERR, EAL,
"Failed to create thread for interrupt handling\n");
+ } else {
+ /* Set thread_name for aid in debugging. */
+ snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
+ "eal-intr-thread");
+ ret_1 = rte_thread_setname(intr_thread, thread_name);
+ if (ret_1 != 0)
+ RTE_LOG(ERR, EAL,
+ "Failed to set thread name for interrupt handling\n");
+ }
return -ret;
}
+static void
+eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
+{
+ union rte_intr_read_buffer buf;
+ int bytes_read = 1;
+ int nbytes;
+
+ switch (intr_handle->type) {
+ case RTE_INTR_HANDLE_UIO:
+ case RTE_INTR_HANDLE_UIO_INTX:
+ bytes_read = sizeof(buf.uio_intr_count);
+ break;
+#ifdef VFIO_PRESENT
+ case RTE_INTR_HANDLE_VFIO_MSIX:
+ case RTE_INTR_HANDLE_VFIO_MSI:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ bytes_read = sizeof(buf.vfio_intr_count);
+ break;
+#endif
+ default:
+ bytes_read = 1;
+ RTE_LOG(INFO, EAL, "unexpected intr type\n");
+ break;
+ }
+
+ /**
+ * read out to clear the ready-to-be-read flag
+ * for epoll_wait.
+ */
+ do {
+ nbytes = read(fd, &buf, bytes_read);
+ if (nbytes < 0) {
+ if (errno == EINTR || errno == EWOULDBLOCK ||
+ errno == EAGAIN)
+ continue;
+ RTE_LOG(ERR, EAL,
+ "Error reading from fd %d: %s\n",
+ fd, strerror(errno));
+ } else if (nbytes == 0)
+ RTE_LOG(ERR, EAL, "Read nothing from fd %d\n", fd);
+ return;
+ } while (1);
+}
+
+static int
+eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
+ struct rte_epoll_event *events)
+{
+ unsigned int i, count = 0;
+ struct rte_epoll_event *rev;
+
+ for (i = 0; i < n; i++) {
+ rev = evs[i].data.ptr;
+ if (!rev || !rte_atomic32_cmpset(&rev->status, RTE_EPOLL_VALID,
+ RTE_EPOLL_EXEC))
+ continue;
+
+ events[count].status = RTE_EPOLL_VALID;
+ events[count].fd = rev->fd;
+ events[count].epfd = rev->epfd;
+ events[count].epdata.event = rev->epdata.event;
+ events[count].epdata.data = rev->epdata.data;
+ if (rev->epdata.cb_fun)
+ rev->epdata.cb_fun(rev->fd,
+ rev->epdata.cb_arg);
+
+ rte_compiler_barrier();
+ rev->status = RTE_EPOLL_VALID;
+ count++;
+ }
+ return count;
+}
+
+static inline int
+eal_init_tls_epfd(void)
+{
+ int pfd = epoll_create(255);
+
+ if (pfd < 0) {
+ RTE_LOG(ERR, EAL,
+ "Cannot create epoll instance\n");
+ return -1;
+ }
+ return pfd;
+}
+
+int
+rte_intr_tls_epfd(void)
+{
+ if (RTE_PER_LCORE(_epfd) == -1)
+ RTE_PER_LCORE(_epfd) = eal_init_tls_epfd();
+
+ return RTE_PER_LCORE(_epfd);
+}
+
+int
+rte_epoll_wait(int epfd, struct rte_epoll_event *events,
+ int maxevents, int timeout)
+{
+ struct epoll_event evs[maxevents];
+ int rc;
+
+ if (!events) {
+ RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
+ return -1;
+ }
+
+ /* using per thread epoll fd */
+ if (epfd == RTE_EPOLL_PER_THREAD)
+ epfd = rte_intr_tls_epfd();
+
+ while (1) {
+ rc = epoll_wait(epfd, evs, maxevents, timeout);
+ if (likely(rc > 0)) {
+ /* epoll_wait has at least one fd ready to read */
+ rc = eal_epoll_process_event(evs, rc, events);
+ break;
+ } else if (rc < 0) {
+ if (errno == EINTR)
+ continue;
+ /* epoll_wait fail */
+ RTE_LOG(ERR, EAL, "epoll_wait returns with fail %s\n",
+ strerror(errno));
+ rc = -1;
+ break;
+ } else {
+ /* rc == 0, epoll_wait timed out */
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static inline void
+eal_epoll_data_safe_free(struct rte_epoll_event *ev)
+{
+ while (!rte_atomic32_cmpset(&ev->status, RTE_EPOLL_VALID,
+ RTE_EPOLL_INVALID))
+ while (ev->status != RTE_EPOLL_VALID)
+ rte_pause();
+ memset(&ev->epdata, 0, sizeof(ev->epdata));
+ ev->fd = -1;
+ ev->epfd = -1;
+}
+
+int
+rte_epoll_ctl(int epfd, int op, int fd,
+ struct rte_epoll_event *event)
+{
+ struct epoll_event ev;
+
+ if (!event) {
+ RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
+ return -1;
+ }
+
+ /* using per thread epoll fd */
+ if (epfd == RTE_EPOLL_PER_THREAD)
+ epfd = rte_intr_tls_epfd();
+
+ if (op == EPOLL_CTL_ADD) {
+ event->status = RTE_EPOLL_VALID;
+ event->fd = fd; /* ignore fd in event */
+ event->epfd = epfd;
+ ev.data.ptr = (void *)event;
+ }
+
+ ev.events = event->epdata.event;
+ if (epoll_ctl(epfd, op, fd, &ev) < 0) {
+ RTE_LOG(ERR, EAL, "Error op %d fd %d epoll_ctl, %s\n",
+ op, fd, strerror(errno));
+ if (op == EPOLL_CTL_ADD)
+ /* roll back status when CTL_ADD fails */
+ event->status = RTE_EPOLL_INVALID;
+ return -1;
+ }
+
+ if (op == EPOLL_CTL_DEL && event->status != RTE_EPOLL_INVALID)
+ eal_epoll_data_safe_free(event);
+
+ return 0;
+}
+
+int
+rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
+ int op, unsigned int vec, void *data)
+{
+ struct rte_epoll_event *rev;
+ struct rte_epoll_data *epdata;
+ int epfd_op;
+ unsigned int efd_idx;
+ int rc = 0;
+
+ efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
+ (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
+
+ if (!intr_handle || intr_handle->nb_efd == 0 ||
+ efd_idx >= intr_handle->nb_efd) {
+ RTE_LOG(ERR, EAL, "Wrong intr vector number.\n");
+ return -EPERM;
+ }
+
+ switch (op) {
+ case RTE_INTR_EVENT_ADD:
+ epfd_op = EPOLL_CTL_ADD;
+ rev = &intr_handle->elist[efd_idx];
+ if (rev->status != RTE_EPOLL_INVALID) {
+ RTE_LOG(INFO, EAL, "Event already been added.\n");
+ return -EEXIST;
+ }
+
+ /* attach to intr vector fd */
+ epdata = &rev->epdata;
+ epdata->event = EPOLLIN | EPOLLPRI | EPOLLET;
+ epdata->data = data;
+ epdata->cb_fun = (rte_intr_event_cb_t)eal_intr_proc_rxtx_intr;
+ epdata->cb_arg = (void *)intr_handle;
+ rc = rte_epoll_ctl(epfd, epfd_op,
+ intr_handle->efds[efd_idx], rev);
+ if (!rc)
+ RTE_LOG(DEBUG, EAL,
+ "efd %d associated with vec %d added on epfd %d"
+ "\n", rev->fd, vec, epfd);
+ else
+ rc = -EPERM;
+ break;
+ case RTE_INTR_EVENT_DEL:
+ epfd_op = EPOLL_CTL_DEL;
+ rev = &intr_handle->elist[efd_idx];
+ if (rev->status == RTE_EPOLL_INVALID) {
+ RTE_LOG(INFO, EAL, "Event does not exist.\n");
+ return -EPERM;
+ }
+
+ rc = rte_epoll_ctl(rev->epfd, epfd_op, rev->fd, rev);
+ if (rc)
+ rc = -EPERM;
+ break;
+ default:
+ RTE_LOG(ERR, EAL, "event op type mismatch\n");
+ rc = -EPERM;
+ }
+
+ return rc;
+}
+
+int
+rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
+{
+ uint32_t i;
+ int fd;
+ uint32_t n = RTE_MIN(nb_efd, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+
+ assert(nb_efd != 0);
+
+ if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX) {
+ for (i = 0; i < n; i++) {
+ fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL,
+ "can't setup eventfd, error %i (%s)\n",
+ errno, strerror(errno));
+ return -1;
+ }
+ intr_handle->efds[i] = fd;
+ }
+ intr_handle->nb_efd = n;
+ intr_handle->max_intr = NB_OTHER_INTR + n;
+ } else {
+ intr_handle->efds[0] = intr_handle->fd;
+ intr_handle->nb_efd = RTE_MIN(nb_efd, 1U);
+ intr_handle->max_intr = NB_OTHER_INTR;
+ }
+
+ return 0;
+}
+
+void
+rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
+{
+ uint32_t i;
+ struct rte_epoll_event *rev;
+
+ for (i = 0; i < intr_handle->nb_efd; i++) {
+ rev = &intr_handle->elist[i];
+ if (rev->status == RTE_EPOLL_INVALID)
+ continue;
+ if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
+ /* force-free if the entry is still valid */
+ eal_epoll_data_safe_free(rev);
+ rev->status = RTE_EPOLL_INVALID;
+ }
+ }
+
+ if (intr_handle->max_intr > intr_handle->nb_efd) {
+ for (i = 0; i < intr_handle->nb_efd; i++)
+ close(intr_handle->efds[i]);
+ }
+ intr_handle->nb_efd = 0;
+ intr_handle->max_intr = 0;
+}
+
+int
+rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
+{
+ return !(!intr_handle->nb_efd);
+}
+
+int
+rte_intr_allow_others(struct rte_intr_handle *intr_handle)
+{
+ if (!rte_intr_dp_is_en(intr_handle))
+ return 1;
+ else
+ return !!(intr_handle->max_intr - intr_handle->nb_efd);
+}
+
+int
+rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
+{
+ if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX)
+ return 1;
+
+ return 0;
+}
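
Taken together, rte_intr_efd_enable(), rte_intr_rx_ctl() and rte_epoll_wait() are what let a polling thread sleep on a specific Rx queue's eventfd instead of spinning. A sketch of the intended call order, assuming the driver has already populated intr_handle->efds (e.g. via rte_intr_efd_enable() at device setup):

#include <rte_interrupts.h>

/* Wait up to 10ms for Rx queue 0 of a device to raise its interrupt. */
static void
wait_rx_queue0(struct rte_intr_handle *ih, void *queue_ctx)
{
	struct rte_epoll_event ev[1];

	/* attach queue vector 0 to this lcore's private epoll instance */
	rte_intr_rx_ctl(ih, RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD,
			RTE_INTR_VEC_RXTX_OFFSET + 0, queue_ctx);

	/* block until the eventfd for that vector fires (or timeout) */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, ev, 1, 10);
}
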
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_ivshmem.c b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_ivshmem.c
index 413a9bae..589019b1 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_ivshmem.c
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_ivshmem.c
@@ -53,7 +53,6 @@
#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_ivshmem.h>
-#include <rte_tailq_elem.h>
#include "eal_internal_cfg.h"
#include "eal_private.h"
@@ -726,15 +725,6 @@ map_all_segments(void)
* expect memsegs to be empty */
memcpy(&mcfg->memseg[i], &ms,
sizeof(struct rte_memseg));
- memcpy(&mcfg->free_memseg[i], &ms,
- sizeof(struct rte_memseg));
-
-
- /* adjust the free_memseg so that there's no free space left */
- mcfg->free_memseg[i].ioremap_addr += mcfg->free_memseg[i].len;
- mcfg->free_memseg[i].phys_addr += mcfg->free_memseg[i].len;
- mcfg->free_memseg[i].addr_64 += mcfg->free_memseg[i].len;
- mcfg->free_memseg[i].len = 0;
close(fd);
@@ -764,8 +754,8 @@ rte_eal_ivshmem_obj_init(void)
return 0;
/* check that we have an initialised ring tail queue */
- if ((ring_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING, rte_ring_list)) == NULL) {
+ ring_list = RTE_TAILQ_LOOKUP(RTE_TAILQ_RING_NAME, rte_ring_list);
+ if (ring_list == NULL) {
RTE_LOG(ERR, EAL, "No rte_ring tailq found!\n");
return -1;
}
@@ -778,12 +768,12 @@ rte_eal_ivshmem_obj_init(void)
seg = &ivshmem_config->segment[i];
/* add memzone */
- if (mcfg->memzone_idx == RTE_MAX_MEMZONE) {
+ if (mcfg->memzone_cnt == RTE_MAX_MEMZONE) {
RTE_LOG(ERR, EAL, "No more memory zones available!\n");
return -1;
}
- idx = mcfg->memzone_idx;
+ idx = mcfg->memzone_cnt;
RTE_LOG(DEBUG, EAL, "Found memzone: '%s' at %p (len 0x%" PRIx64 ")\n",
seg->entry.mz.name, seg->entry.mz.addr, seg->entry.mz.len);
@@ -806,13 +796,13 @@ rte_eal_ivshmem_obj_init(void)
}
}
- mcfg->memzone_idx++;
+ mcfg->memzone_cnt++;
}
rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
/* find rings */
- for (i = 0; i < mcfg->memzone_idx; i++) {
+ for (i = 0; i < mcfg->memzone_cnt; i++) {
mz = &mcfg->memzone[i];
/* check if memzone has a ring prefix */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_debug.c b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_lcore.c
index 44fc4f33..de5b4260 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_debug.c
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_lcore.c
@@ -31,83 +31,80 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <execinfo.h>
-#include <stdarg.h>
-#include <signal.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <stdint.h>
+#include <unistd.h>
+#include <limits.h>
+#include <string.h>
+#include <dirent.h>
#include <rte_log.h>
-#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
#include <rte_common.h>
+#include <rte_string_fns.h>
+#include <rte_debug.h>
-#define BACKTRACE_SIZE 256
-
-/* dump the stack of the calling core */
-void rte_dump_stack(void)
-{
- void *func[BACKTRACE_SIZE];
- char **symb = NULL;
- int size;
+#include "eal_private.h"
+#include "eal_filesystem.h"
+#include "eal_thread.h"
- size = backtrace(func, BACKTRACE_SIZE);
- symb = backtrace_symbols(func, size);
- while (size > 0) {
- rte_log(RTE_LOG_ERR, RTE_LOGTYPE_EAL,
- "%d: [%s]\n", size, symb[size - 1]);
- size --;
- }
-}
+#define SYS_CPU_DIR "/sys/devices/system/cpu/cpu%u"
+#define CORE_ID_FILE "topology/core_id"
+#define NUMA_NODE_PATH "/sys/devices/system/node"
-/* not implemented in this environment */
-void rte_dump_registers(void)
+/* Check whether a cpu is present by looking for its sysfs topology information */
+int
+eal_cpu_detected(unsigned lcore_id)
{
- return;
-}
+ char path[PATH_MAX];
+ int len = snprintf(path, sizeof(path), SYS_CPU_DIR
+ "/"CORE_ID_FILE, lcore_id);
+ if (len <= 0 || (unsigned)len >= sizeof(path))
+ return 0;
+ if (access(path, F_OK) != 0)
+ return 0;
-/* call abort(), it will generate a coredump if enabled */
-void __rte_panic(const char *funcname, const char *format, ...)
-{
- va_list ap;
-
- /* disable history */
- rte_log_set_history(0);
-
- rte_log(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, "PANIC in %s():\n", funcname);
- va_start(ap, format);
- rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap);
- va_end(ap);
- rte_dump_stack();
- rte_dump_registers();
- abort();
+ return 1;
}
/*
- * Like rte_panic this terminates the application. However, no traceback is
- * provided and no core-dump is generated.
+ * Get CPU socket id (NUMA node) for a logical core.
+ *
+ * This searches each nodeX directory in /sys for the symlink for the given
+ * lcore_id and returns the numa node where the lcore is found. If lcore is not
+ * found on any numa node, returns zero.
*/
-void
-rte_exit(int exit_code, const char *format, ...)
+unsigned
+eal_cpu_socket_id(unsigned lcore_id)
{
- va_list ap;
+ unsigned socket;
- /* disable history */
- rte_log_set_history(0);
+ for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
+ char path[PATH_MAX];
- if (exit_code != 0)
- RTE_LOG(CRIT, EAL, "Error - exiting with code: %d\n"
- " Cause: ", exit_code);
+ snprintf(path, sizeof(path), "%s/node%u/cpu%u", NUMA_NODE_PATH,
+ socket, lcore_id);
+ if (access(path, F_OK) == 0)
+ return socket;
+ }
+ return 0;
+}
+
+/* Get the cpu core id value from the /sys/.../cpuX core_id value */
+unsigned
+eal_cpu_core_id(unsigned lcore_id)
+{
+ char path[PATH_MAX];
+ unsigned long id;
- va_start(ap, format);
- rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap);
- va_end(ap);
+ int len = snprintf(path, sizeof(path), SYS_CPU_DIR "/%s", lcore_id, CORE_ID_FILE);
+ if (len <= 0 || (unsigned)len >= sizeof(path))
+ goto err;
+ if (eal_parse_sysfs_value(path, &id) != 0)
+ goto err;
+ return (unsigned)id;
-#ifndef RTE_EAL_ALWAYS_PANIC_ON_ERROR
- exit(exit_code);
-#else
- rte_dump_stack();
- rte_dump_registers();
- abort();
-#endif
+err:
+ RTE_LOG(ERR, EAL, "Error reading core id value from %s "
+ "for lcore %u - assuming core 0\n", SYS_CPU_DIR, lcore_id);
+ return 0;
}
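
All three lcore helpers work purely by probing sysfs paths; no CPUID or libnuma is needed. The same probe, condensed into one sketch:

#include <stdio.h>
#include <unistd.h>

/* Returns the NUMA node of an lcore, 0 if not found, -1 if the cpu
 * is absent. max_nodes mirrors RTE_MAX_NUMA_NODES. */
static int
probe_lcore(unsigned lcore, unsigned max_nodes)
{
	char path[256];
	unsigned node;

	snprintf(path, sizeof(path),
		"/sys/devices/system/cpu/cpu%u/topology/core_id", lcore);
	if (access(path, F_OK) != 0)
		return -1; /* cpu not present */

	for (node = 0; node < max_nodes; node++) {
		snprintf(path, sizeof(path),
			"/sys/devices/system/node/node%u/cpu%u", node, lcore);
		if (access(path, F_OK) == 0)
			return (int)node; /* found its NUMA node */
	}
	return 0; /* same fallback as eal_cpu_socket_id() */
}
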
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_log.c b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_log.c
index 94dedfb3..0b133c3e 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_log.c
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_log.c
@@ -40,7 +40,6 @@
#include <rte_memory.h>
#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
@@ -83,33 +82,8 @@ console_log_write(__attribute__((unused)) void *c, const char *buf, size_t size)
return ret;
}
-static ssize_t
-console_log_read(__attribute__((unused)) void *c,
- __attribute__((unused)) char *buf,
- __attribute__((unused)) size_t size)
-{
- return 0;
-}
-
-static int
-console_log_seek(__attribute__((unused)) void *c,
- __attribute__((unused)) off64_t *offset,
- __attribute__((unused)) int whence)
-{
- return -1;
-}
-
-static int
-console_log_close(__attribute__((unused)) void *c)
-{
- return 0;
-}
-
static cookie_io_functions_t console_log_func = {
- .read = console_log_read,
.write = console_log_write,
- .seek = console_log_seek,
- .close = console_log_close
};
/*
@@ -150,33 +124,8 @@ early_log_write(__attribute__((unused)) void *c, const char *buf, size_t size)
return ret;
}
-static ssize_t
-early_log_read(__attribute__((unused)) void *c,
- __attribute__((unused)) char *buf,
- __attribute__((unused)) size_t size)
-{
- return 0;
-}
-
-static int
-early_log_seek(__attribute__((unused)) void *c,
- __attribute__((unused)) off64_t *offset,
- __attribute__((unused)) int whence)
-{
- return -1;
-}
-
-static int
-early_log_close(__attribute__((unused)) void *c)
-{
- return 0;
-}
-
static cookie_io_functions_t early_log_func = {
- .read = early_log_read,
.write = early_log_write,
- .seek = early_log_seek,
- .close = early_log_close
};
static FILE *early_log_stream;
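
Deleting the read/seek/close stubs is safe because fopencookie() treats every member of cookie_io_functions_t as optional; a write-only cookie stream simply fails reads and seeks. The technique in miniature, routing a stdio stream to syslog as an assumed example target:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/types.h>
#include <syslog.h>

static ssize_t
log_write(void *c, const char *buf, size_t size)
{
	(void)c;
	syslog(LOG_INFO, "%.*s", (int)size, buf); /* forward each chunk */
	return size;
}

static cookie_io_functions_t log_func = {
	.write = log_write,
};

/* usage: FILE *f = fopencookie(NULL, "w", log_func);
 * after which fprintf(f, ...) ends up in syslog. */
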
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_memory.c b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_memory.c
index bae25079..846fd31f 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_memory.c
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -85,7 +85,6 @@
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
@@ -98,6 +97,13 @@
#include "eal_filesystem.h"
#include "eal_hugepages.h"
+#ifdef RTE_LIBRTE_XEN_DOM0
+int rte_xen_dom0_supported(void)
+{
+ return internal_config.xen_dom0_support;
+}
+#endif
+
/**
* @file
* Huge page mapping under linux
@@ -112,8 +118,28 @@
static uint64_t baseaddr_offset;
+static unsigned proc_pagemap_readable;
+
#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
+static void
+test_proc_pagemap_readable(void)
+{
+ int fd = open("/proc/self/pagemap", O_RDONLY);
+
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL,
+ "Cannot open /proc/self/pagemap: %s. "
+ "virt2phys address translation will not work\n",
+ strerror(errno));
+ return;
+ }
+
+ /* Is readable */
+ close(fd);
+ proc_pagemap_readable = 1;
+}
+
/* Lock page in physical memory and prevent from swapping. */
int
rte_mem_lock_page(const void *virt)
@@ -136,6 +162,10 @@ rte_mem_virt2phy(const void *virtaddr)
int page_size;
off_t offset;
+ /* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
+ if (!proc_pagemap_readable)
+ return RTE_BAD_PHYS_ADDR;
+
/* standard page size */
page_size = getpagesize();
@@ -240,7 +270,7 @@ get_virtual_area(size_t *size, size_t hugepage_sz)
}
else addr = NULL;
- RTE_LOG(INFO, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);
+ RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);
fd = open("/dev/zero", O_RDONLY);
if (fd < 0){
@@ -256,7 +286,8 @@ get_virtual_area(size_t *size, size_t hugepage_sz)
if (addr == MAP_FAILED) {
close(fd);
- RTE_LOG(INFO, EAL, "Cannot get a virtual area\n");
+ RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
+ strerror(errno));
return NULL;
}
@@ -269,7 +300,7 @@ get_virtual_area(size_t *size, size_t hugepage_sz)
aligned_addr &= (~(hugepage_sz - 1));
addr = (void *)(aligned_addr);
- RTE_LOG(INFO, EAL, "Virtual area found at %p (size = 0x%zx)\n",
+ RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
addr, *size);
/* increment offset */
@@ -605,13 +636,13 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
f = fopen("/proc/self/numa_maps", "r");
if (f == NULL) {
- RTE_LOG(INFO, EAL, "cannot open /proc/self/numa_maps,"
+ RTE_LOG(NOTICE, EAL, "cannot open /proc/self/numa_maps,"
" consider that all memory is in socket_id 0\n");
return 0;
}
snprintf(hugedir_str, sizeof(hugedir_str),
- "%s/", hpi->hugedir);
+ "%s/%s", hpi->hugedir, internal_config.hugefile_prefix);
/* parse numa map */
while (fgets(buf, sizeof(buf), f) != NULL) {
@@ -762,6 +793,30 @@ copy_hugepages_to_shared_mem(struct hugepage_file * dst, int dest_size,
return 0;
}
+static int
+unlink_hugepage_files(struct hugepage_file *hugepg_tbl,
+ unsigned num_hp_info)
+{
+ unsigned socket, size;
+ int page, nrpages = 0;
+
+ /* get total number of hugepages */
+ for (size = 0; size < num_hp_info; size++)
+ for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
+ nrpages +=
+ internal_config.hugepage_info[size].num_pages[socket];
+
+ for (page = 0; page < nrpages; page++) {
+ struct hugepage_file *hp = &hugepg_tbl[page];
+
+ if (hp->final_va != NULL && unlink(hp->filepath)) {
+ RTE_LOG(WARNING, EAL, "%s(): Removing %s failed: %s\n",
+ __func__, hp->filepath, strerror(errno));
+ }
+ }
+ return 0;
+}
+
/*
* unmaps hugepages that are not going to be used. since we originally allocate
* ALL hugepages (not just those we need), additional unmapping needs to be done.
@@ -881,7 +936,7 @@ get_socket_mem_size(int socket)
size += hpi->hugepage_sz * hpi->num_pages[socket];
}
- return (size);
+ return size;
}
/*
@@ -1002,7 +1057,7 @@ calc_num_pages_per_socket(uint64_t * memory,
0x100000);
available = requested -
((unsigned) (memory[socket] / 0x100000));
- RTE_LOG(INFO, EAL, "Not enough memory available on socket %u! "
+ RTE_LOG(ERR, EAL, "Not enough memory available on socket %u! "
"Requested: %uMB, available: %uMB\n", socket,
requested, available);
return -1;
@@ -1013,7 +1068,7 @@ calc_num_pages_per_socket(uint64_t * memory,
if (total_mem > 0) {
requested = (unsigned) (internal_config.memory / 0x100000);
available = requested - (unsigned) (total_mem / 0x100000);
- RTE_LOG(INFO, EAL, "Not enough memory available! Requested: %uMB,"
+ RTE_LOG(ERR, EAL, "Not enough memory available! Requested: %uMB,"
" available: %uMB\n", requested, available);
return -1;
}
@@ -1031,7 +1086,7 @@ calc_num_pages_per_socket(uint64_t * memory,
* 6. unmap the first mapping
* 7. fill memsegs in configuration with contiguous zones
*/
-static int
+int
rte_eal_hugepage_init(void)
{
struct rte_mem_config *mcfg;
@@ -1048,6 +1103,8 @@ rte_eal_hugepage_init(void)
int new_pages_count[MAX_HUGEPAGE_SIZES];
#endif
+ test_proc_pagemap_readable();
+
memset(used_hp, 0, sizeof(used_hp));
/* get pointer to global configuration */
@@ -1064,8 +1121,9 @@ rte_eal_hugepage_init(void)
}
mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
mcfg->memseg[0].addr = addr;
+ mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
mcfg->memseg[0].len = internal_config.memory;
- mcfg->memseg[0].socket_id = SOCKET_ID_ANY;
+ mcfg->memseg[0].socket_id = 0;
return 0;
}
@@ -1080,7 +1138,6 @@ rte_eal_hugepage_init(void)
#endif
}
-
/* calculate total number of hugepages available. at this point we haven't
* yet started sorting them so they all are on socket 0 */
for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
@@ -1189,7 +1246,9 @@ rte_eal_hugepage_init(void)
int socket = tmp_hp[i].socket_id;
/* find a hugepage info with right size and increment num_pages */
- for (j = 0; j < (int) internal_config.num_hugepage_sizes; j++) {
+ const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
+ (int)internal_config.num_hugepage_sizes);
+ for (j = 0; j < nb_hpsizes; j++) {
if (tmp_hp[i].size ==
internal_config.hugepage_info[j].hugepage_sz) {
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
@@ -1219,13 +1278,13 @@ rte_eal_hugepage_init(void)
for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
if (used_hp[i].num_pages[j] > 0) {
- RTE_LOG(INFO, EAL,
- "Requesting %u pages of size %uMB"
- " from socket %i\n",
- used_hp[i].num_pages[j],
- (unsigned)
- (used_hp[i].hugepage_sz / 0x100000),
- j);
+ RTE_LOG(DEBUG, EAL,
+ "Requesting %u pages of size %uMB"
+ " from socket %i\n",
+ used_hp[i].num_pages[j],
+ (unsigned)
+ (used_hp[i].hugepage_sz / 0x100000),
+ j);
}
}
}
@@ -1261,6 +1320,13 @@ rte_eal_hugepage_init(void)
goto fail;
}
+ /* free the hugepage backing files */
+ if (internal_config.hugepage_unlink &&
+ unlink_hugepage_files(tmp_hp, internal_config.num_hugepage_sizes) < 0) {
+ RTE_LOG(ERR, EAL, "Unlinking hugepage files failed!\n");
+ goto fail;
+ }
+
/* free the temporary hugepage table */
free(tmp_hp);
tmp_hp = NULL;
@@ -1340,7 +1406,7 @@ rte_eal_hugepage_init(void)
"of memory.\n",
i, nr_hugefiles, RTE_STR(CONFIG_RTE_MAX_MEMSEG),
RTE_MAX_MEMSEG);
- return (-ENOMEM);
+ return -ENOMEM;
}
return 0;
@@ -1369,7 +1435,7 @@ getFileSize(int fd)
* configuration and finds the hugepages which form that segment, mapping them
* in order to form a contiguous block in the virtual memory space
*/
-static int
+int
rte_eal_hugepage_attach(void)
{
const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
@@ -1386,6 +1452,8 @@ rte_eal_hugepage_attach(void)
"into secondary processes\n");
}
+ test_proc_pagemap_readable();
+
if (internal_config.xen_dom0_support) {
#ifdef RTE_LIBRTE_XEN_DOM0
if (rte_xen_dom0_memory_attach() < 0) {
@@ -1529,36 +1597,3 @@ error:
close(fd_hugepage);
return -1;
}
-
-static int
-rte_eal_memdevice_init(void)
-{
- struct rte_config *config;
-
- if (rte_eal_process_type() == RTE_PROC_SECONDARY)
- return 0;
-
- config = rte_eal_get_configuration();
- config->mem_config->nchannel = internal_config.force_nchannel;
- config->mem_config->nrank = internal_config.force_nrank;
-
- return 0;
-}
-
-
-/* init memory subsystem */
-int
-rte_eal_memory_init(void)
-{
- RTE_LOG(INFO, EAL, "Setting up memory...\n");
- const int retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
- rte_eal_hugepage_init() :
- rte_eal_hugepage_attach();
- if (retval < 0)
- return -1;
-
- if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
- return -1;
-
- return 0;
-}
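
test_proc_pagemap_readable() exists because /proc/self/pagemap is what backs rte_mem_virt2phy(): the file holds one 8-byte entry per virtual page, with the physical frame number in bits 0-54 and a "present" flag in bit 63, and on recent kernels it is readable only by privileged processes. A hedged sketch of that translation:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* Translate a virtual address to a physical one via pagemap.
 * Returns UINT64_MAX on any failure (cf. RTE_BAD_PHYS_ADDR above). */
static uint64_t
virt2phys_sketch(const void *virt)
{
	long page_size = sysconf(_SC_PAGESIZE);
	off_t off = ((uintptr_t)virt / page_size) * sizeof(uint64_t);
	uint64_t entry;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return UINT64_MAX;
	if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry)) {
		close(fd);
		return UINT64_MAX;
	}
	close(fd);
	if (!(entry & (1ULL << 63))) /* page not present in RAM */
		return UINT64_MAX;
	return (entry & ((1ULL << 55) - 1)) * page_size
		+ ((uintptr_t)virt % page_size);
}
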
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci.c b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_pci.c
index b5f54101..bc5b5bee 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci.c
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_pci.c
@@ -33,16 +33,14 @@
#include <string.h>
#include <dirent.h>
-#include <sys/mman.h>
#include <rte_log.h>
#include <rte_pci.h>
-#include <rte_tailq.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>
#include <rte_devargs.h>
+#include <rte_memcpy.h>
-#include "rte_pci_dev_ids.h"
#include "eal_filesystem.h"
#include "eal_private.h"
#include "eal_pci_init.h"
@@ -56,10 +54,8 @@
* IGB_UIO driver (or doesn't initialize, if the device wasn't bound to it).
*/
-struct mapped_pci_res_list *pci_res_list = NULL;
-
/* unbind kernel driver for this device */
-static int
+int
pci_unbind_kernel_driver(struct rte_pci_device *dev)
{
int n;
@@ -97,6 +93,85 @@ error:
return -1;
}
+static int
+pci_get_kernel_driver_by_path(const char *filename, char *dri_name)
+{
+ int count;
+ char path[PATH_MAX];
+ char *name;
+
+ if (!filename || !dri_name)
+ return -1;
+
+ count = readlink(filename, path, PATH_MAX);
+ if (count >= PATH_MAX)
+ return -1;
+
+ /* For a device that does not have a driver */
+ if (count < 0)
+ return 1;
+
+ path[count] = '\0';
+
+ name = strrchr(path, '/');
+ if (name) {
+ strncpy(dri_name, name + 1, strlen(name + 1) + 1);
+ return 0;
+ }
+
+ return -1;
+}
+
+/* Map pci device */
+int
+pci_map_device(struct rte_pci_device *dev)
+{
+ int ret = -1;
+
+ /* try mapping the NIC resources using VFIO if it exists */
+ switch (dev->kdrv) {
+ case RTE_KDRV_VFIO:
+#ifdef VFIO_PRESENT
+ if (pci_vfio_is_enabled())
+ ret = pci_vfio_map_resource(dev);
+#endif
+ break;
+ case RTE_KDRV_IGB_UIO:
+ case RTE_KDRV_UIO_GENERIC:
+ /* map resources for devices that use uio */
+ ret = pci_uio_map_resource(dev);
+ break;
+ default:
+ RTE_LOG(DEBUG, EAL,
+ " Not managed by a supported kernel driver, skipped\n");
+ ret = 1;
+ break;
+ }
+
+ return ret;
+}
+
+/* Unmap pci device */
+void
+pci_unmap_device(struct rte_pci_device *dev)
+{
+ /* try unmapping the NIC resources using VFIO if it exists */
+ switch (dev->kdrv) {
+ case RTE_KDRV_VFIO:
+ RTE_LOG(ERR, EAL, "Hotplug doesn't support vfio yet\n");
+ break;
+ case RTE_KDRV_IGB_UIO:
+ case RTE_KDRV_UIO_GENERIC:
+ /* unmap resources for devices that use uio */
+ pci_uio_unmap_resource(dev);
+ break;
+ default:
+ RTE_LOG(DEBUG, EAL,
+ " Not managed by a supported kernel driver, skipped\n");
+ break;
+ }
+}
+
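A hedged sketch of how a caller might drive the new map/unmap pair (the surrounding probe logic is hypothetical; note pci_map_device() returns 0 on success, a positive value when no supported kernel driver is bound, and a negative value on error):

/* Illustrative only: map a device, do I/O, then unmap it. */
static int map_use_unmap_sketch(struct rte_pci_device *dev)
{
    int ret = pci_map_device(dev);

    if (ret < 0)
        return ret; /* mapping failed */
    if (ret > 0)
        return 0;   /* not bound to vfio/uio; nothing to map */
    /* ... access BARs through dev->mem_resource[].addr ... */
    pci_unmap_device(dev);
    return 0;
}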
void *
pci_find_max_end_va(void)
{
@@ -115,31 +190,7 @@ pci_find_max_end_va(void)
return RTE_PTR_ADD(last->addr, last->len);
}
-
-/* map a particular resource from a file */
-void *
-pci_map_resource(void *requested_addr, int fd, off_t offset, size_t size)
-{
- void *mapaddr;
-
- /* Map the PCI memory resource of device */
- mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE,
- MAP_SHARED, fd, offset);
- if (mapaddr == MAP_FAILED) {
- RTE_LOG(ERR, EAL, "%s(): cannot mmap(%d, %p, 0x%lx, 0x%lx): %s (%p)\n",
- __func__, fd, requested_addr,
- (unsigned long)size, (unsigned long)offset,
- strerror(errno), mapaddr);
- } else {
- RTE_LOG(DEBUG, EAL, " PCI memory mapped at %p\n", mapaddr);
- }
-
- return mapaddr;
-}
-
/* parse the "resource" sysfs file */
-#define IORESOURCE_MEM 0x00000200
-
static int
pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
{
@@ -200,20 +251,6 @@ error:
return -1;
}
-/* Compare two PCI device addresses. */
-static int
-pci_addr_comparison(struct rte_pci_addr *addr, struct rte_pci_addr *addr2)
-{
- uint64_t dev_addr = (addr->domain << 24) + (addr->bus << 16) + (addr->devid << 8) + addr->function;
- uint64_t dev_addr2 = (addr2->domain << 24) + (addr2->bus << 16) + (addr2->devid << 8) + addr2->function;
-
- if (dev_addr > dev_addr2)
- return 1;
- else
- return 0;
-}
-
-
/* Scan one pci sysfs entry, and fill the devices list from it. */
static int
pci_scan_one(const char *dirname, uint16_t domain, uint8_t bus,
@@ -222,11 +259,12 @@ pci_scan_one(const char *dirname, uint16_t domain, uint8_t bus,
char filename[PATH_MAX];
unsigned long tmp;
struct rte_pci_device *dev;
+ char driver[PATH_MAX];
+ int ret;
dev = malloc(sizeof(*dev));
- if (dev == NULL) {
+ if (dev == NULL)
return -1;
- }
memset(dev, 0, sizeof(*dev));
dev->addr.domain = domain;
@@ -272,16 +310,23 @@ pci_scan_one(const char *dirname, uint16_t domain, uint8_t bus,
dev->max_vfs = 0;
snprintf(filename, sizeof(filename), "%s/max_vfs", dirname);
if (!access(filename, F_OK) &&
- eal_parse_sysfs_value(filename, &tmp) == 0) {
+ eal_parse_sysfs_value(filename, &tmp) == 0)
dev->max_vfs = (uint16_t)tmp;
+ else {
+ /* for drivers other than igb_uio, kernel version >= 3.8 is needed */
+ snprintf(filename, sizeof(filename),
+ "%s/sriov_numvfs", dirname);
+ if (!access(filename, F_OK) &&
+ eal_parse_sysfs_value(filename, &tmp) == 0)
+ dev->max_vfs = (uint16_t)tmp;
}
/* get numa node */
snprintf(filename, sizeof(filename), "%s/numa_node",
dirname);
if (access(filename, R_OK) != 0) {
- /* if no NUMA support just set node to -1 */
- dev->numa_node = -1;
+ /* if no NUMA support, set default to 0 */
+ dev->numa_node = 0;
} else {
if (eal_parse_sysfs_value(filename, &tmp) < 0) {
free(dev);
@@ -298,20 +343,49 @@ pci_scan_one(const char *dirname, uint16_t domain, uint8_t bus,
return -1;
}
+ /* parse driver */
+ snprintf(filename, sizeof(filename), "%s/driver", dirname);
+ ret = pci_get_kernel_driver_by_path(filename, driver);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Fail to get kernel driver\n");
+ free(dev);
+ return -1;
+ }
+
+ if (!ret) {
+ if (!strcmp(driver, "vfio-pci"))
+ dev->kdrv = RTE_KDRV_VFIO;
+ else if (!strcmp(driver, "igb_uio"))
+ dev->kdrv = RTE_KDRV_IGB_UIO;
+ else if (!strcmp(driver, "uio_pci_generic"))
+ dev->kdrv = RTE_KDRV_UIO_GENERIC;
+ else
+ dev->kdrv = RTE_KDRV_UNKNOWN;
+ } else
+ dev->kdrv = RTE_KDRV_UNKNOWN;
+
/* device is valid, add in list (sorted) */
if (TAILQ_EMPTY(&pci_device_list)) {
TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
- }
- else {
- struct rte_pci_device *dev2 = NULL;
+ } else {
+ struct rte_pci_device *dev2;
+ int ret;
TAILQ_FOREACH(dev2, &pci_device_list, next) {
- if (pci_addr_comparison(&dev->addr, &dev2->addr))
+ ret = rte_eal_compare_pci_addr(&dev->addr, &dev2->addr);
+ if (ret > 0)
continue;
- else {
+
+ if (ret < 0) {
TAILQ_INSERT_BEFORE(dev2, dev, next);
- return 0;
+ } else { /* already registered */
+ dev2->kdrv = dev->kdrv;
+ dev2->max_vfs = dev->max_vfs;
+ memmove(dev2->mem_resource, dev->mem_resource,
+ sizeof(dev->mem_resource));
+ free(dev);
}
+ return 0;
}
TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
}
@@ -370,8 +444,8 @@ error:
* Scan the content of the PCI bus, and the devices in the devices
* list
*/
-static int
-pci_scan(void)
+int
+rte_eal_pci_scan(void)
{
struct dirent *e;
DIR *dir;
@@ -483,7 +557,7 @@ pci_config_max_read_request_size(struct rte_pci_device *dev)
return 0;
}
-static void
+void
pci_config_space_set(struct rte_pci_device *dev)
{
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -497,99 +571,54 @@ pci_config_space_set(struct rte_pci_device *dev)
}
#endif
-static int
-pci_map_device(struct rte_pci_device *dev)
+/* Read PCI config space. */
+int rte_eal_pci_read_config(const struct rte_pci_device *device,
+ void *buf, size_t len, off_t offset)
{
- int ret, mapped = 0;
+ const struct rte_intr_handle *intr_handle = &device->intr_handle;
+
+ switch (intr_handle->type) {
+ case RTE_INTR_HANDLE_UIO:
+ case RTE_INTR_HANDLE_UIO_INTX:
+ return pci_uio_read_config(intr_handle, buf, len, offset);
- /* try mapping the NIC resources using VFIO if it exists */
#ifdef VFIO_PRESENT
- if (pci_vfio_is_enabled()) {
- ret = pci_vfio_map_resource(dev);
- if (ret == 0)
- mapped = 1;
- else if (ret < 0)
- return ret;
- }
+ case RTE_INTR_HANDLE_VFIO_MSIX:
+ case RTE_INTR_HANDLE_VFIO_MSI:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ return pci_vfio_read_config(intr_handle, buf, len, offset);
#endif
- /* map resources for devices that use igb_uio */
- if (!mapped) {
- ret = pci_uio_map_resource(dev);
- if (ret != 0)
- return ret;
+ default:
+ RTE_LOG(ERR, EAL,
+ "Unknown handle type of fd %d\n",
+ intr_handle->fd);
+ return -1;
}
- return 0;
}
-/*
- * If vendor/device ID match, call the devinit() function of the
- * driver.
- */
-int
-rte_eal_pci_probe_one_driver(struct rte_pci_driver *dr, struct rte_pci_device *dev)
+/* Write PCI config space. */
+int rte_eal_pci_write_config(const struct rte_pci_device *device,
+ const void *buf, size_t len, off_t offset)
{
- int ret;
- struct rte_pci_id *id_table;
-
- for (id_table = dr->id_table ; id_table->vendor_id != 0; id_table++) {
+ const struct rte_intr_handle *intr_handle = &device->intr_handle;
- /* check if device's identifiers match the driver's ones */
- if (id_table->vendor_id != dev->id.vendor_id &&
- id_table->vendor_id != PCI_ANY_ID)
- continue;
- if (id_table->device_id != dev->id.device_id &&
- id_table->device_id != PCI_ANY_ID)
- continue;
- if (id_table->subsystem_vendor_id != dev->id.subsystem_vendor_id &&
- id_table->subsystem_vendor_id != PCI_ANY_ID)
- continue;
- if (id_table->subsystem_device_id != dev->id.subsystem_device_id &&
- id_table->subsystem_device_id != PCI_ANY_ID)
- continue;
+ switch (intr_handle->type) {
+ case RTE_INTR_HANDLE_UIO:
+ case RTE_INTR_HANDLE_UIO_INTX:
+ return pci_uio_write_config(intr_handle, buf, len, offset);
- struct rte_pci_addr *loc = &dev->addr;
-
- RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
- loc->domain, loc->bus, loc->devid, loc->function,
- dev->numa_node);
-
- RTE_LOG(DEBUG, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
- dev->id.device_id, dr->name);
-
- /* no initialization when blacklisted, return without error */
- if (dev->devargs != NULL &&
- dev->devargs->type == RTE_DEVTYPE_BLACKLISTED_PCI) {
- RTE_LOG(DEBUG, EAL, " Device is blacklisted, not initializing\n");
- return 1;
- }
-
- if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
-#ifdef RTE_PCI_CONFIG
- /*
- * Set PCIe config space for high performance.
- * Return value can be ignored.
- */
- pci_config_space_set(dev);
+#ifdef VFIO_PRESENT
+ case RTE_INTR_HANDLE_VFIO_MSIX:
+ case RTE_INTR_HANDLE_VFIO_MSI:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ return pci_vfio_write_config(intr_handle, buf, len, offset);
#endif
- /* map resources for devices that use igb_uio */
- ret = pci_map_device(dev);
- if (ret != 0)
- return ret;
- } else if (dr->drv_flags & RTE_PCI_DRV_FORCE_UNBIND &&
- rte_eal_process_type() == RTE_PROC_PRIMARY) {
- /* unbind current driver */
- if (pci_unbind_kernel_driver(dev) < 0)
- return -1;
- }
-
- /* reference driver structure */
- dev->driver = dr;
-
- /* call the driver devinit() function */
- return dr->devinit(dr, dev);
+ default:
+ RTE_LOG(ERR, EAL,
+ "Unknown handle type of fd %d\n",
+ intr_handle->fd);
+ return -1;
}
- /* return positive value if driver is not found */
- return 1;
}
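As a usage illustration, reading the 16-bit vendor ID at config-space offset 0 through the new API could look as follows (dev is a hypothetical, already-scanned device; like pread(), the call returns the number of bytes transferred):

static void read_vendor_sketch(const struct rte_pci_device *dev)
{
    uint16_t vendor;

    /* offset 0 of PCI config space holds the vendor ID */
    if (rte_eal_pci_read_config(dev, &vendor, sizeof(vendor), 0) !=
            sizeof(vendor))
        RTE_LOG(ERR, EAL, "cannot read vendor id\n");
}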
/* Init the PCI EAL subsystem */
@@ -598,14 +627,12 @@ rte_eal_pci_init(void)
{
TAILQ_INIT(&pci_driver_list);
TAILQ_INIT(&pci_device_list);
- pci_res_list = RTE_TAILQ_RESERVE_BY_IDX(RTE_TAILQ_PCI,
- mapped_pci_res_list);
/* for debug purposes, PCI can be disabled */
if (internal_config.no_pci)
return 0;
- if (pci_scan() < 0) {
+ if (rte_eal_pci_scan() < 0) {
RTE_LOG(ERR, EAL, "%s(): Cannot scan PCI bus\n", __func__);
return -1;
}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_init.h b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_pci_init.h
index 1070eb88..a17c7083 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_init.h
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_pci_init.h
@@ -36,40 +36,23 @@
#include "eal_vfio.h"
-struct pci_map {
- void *addr;
- uint64_t offset;
- uint64_t size;
- uint64_t phaddr;
-};
-
-/*
- * For multi-process we need to reproduce all PCI mappings in secondary
- * processes, so save them in a tailq.
- */
-struct mapped_pci_resource {
- TAILQ_ENTRY(mapped_pci_resource) next;
-
- struct rte_pci_addr pci_addr;
- char path[PATH_MAX];
- int nb_maps;
- struct pci_map maps[PCI_MAX_RESOURCE];
-};
-
-TAILQ_HEAD(mapped_pci_res_list, mapped_pci_resource);
-extern struct mapped_pci_res_list *pci_res_list;
-
/*
* Helper function to map PCI resources right after hugepages in virtual memory
*/
extern void *pci_map_addr;
void *pci_find_max_end_va(void);
-void *pci_map_resource(void *requested_addr, int fd, off_t offset,
- size_t size);
+int pci_uio_alloc_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource **uio_res);
+void pci_uio_free_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource *uio_res);
+int pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
+ struct mapped_pci_resource *uio_res, int map_idx);
-/* map IGB_UIO resource prototype */
-int pci_uio_map_resource(struct rte_pci_device *dev);
+int pci_uio_read_config(const struct rte_intr_handle *intr_handle,
+ void *buf, size_t len, off_t offs);
+int pci_uio_write_config(const struct rte_intr_handle *intr_handle,
+ const void *buf, size_t len, off_t offs);
#ifdef VFIO_PRESENT
@@ -79,6 +62,12 @@ int pci_vfio_enable(void);
int pci_vfio_is_enabled(void);
int pci_vfio_mp_sync_setup(void);
+/* access config space */
+int pci_vfio_read_config(const struct rte_intr_handle *intr_handle,
+ void *buf, size_t len, off_t offs);
+int pci_vfio_write_config(const struct rte_intr_handle *intr_handle,
+ const void *buf, size_t len, off_t offs);
+
/* map VFIO resource prototype */
int pci_vfio_map_resource(struct rte_pci_device *dev);
int pci_vfio_get_group_fd(int iommu_group_fd);
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_uio.c b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_pci_uio.c
index e53f06b8..ac50e13f 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_uio.c
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_pci_uio.c
@@ -32,128 +32,67 @@
*/
#include <string.h>
+#include <unistd.h>
#include <fcntl.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/mman.h>
+#include <linux/pci_regs.h>
#include <rte_log.h>
#include <rte_pci.h>
+#include <rte_eal_memconfig.h>
#include <rte_common.h>
#include <rte_malloc.h>
-#include <rte_tailq.h>
-#include "rte_pci_dev_ids.h"
#include "eal_filesystem.h"
#include "eal_pci_init.h"
-static int pci_parse_sysfs_value(const char *filename, uint64_t *val);
-
void *pci_map_addr = NULL;
-
#define OFF_MAX ((uint64_t)(off_t)-1)
-static int
-pci_uio_get_mappings(const char *devname, struct pci_map maps[], int nb_maps)
-{
- int i;
- char dirname[PATH_MAX];
- char filename[PATH_MAX];
- uint64_t offset, size;
-
- for (i = 0; i != nb_maps; i++) {
-
- /* check if map directory exists */
- snprintf(dirname, sizeof(dirname),
- "%s/maps/map%u", devname, i);
-
- if (access(dirname, F_OK) != 0)
- break;
-
- /* get mapping offset */
- snprintf(filename, sizeof(filename),
- "%s/offset", dirname);
- if (pci_parse_sysfs_value(filename, &offset) < 0) {
- RTE_LOG(ERR, EAL,
- "%s(): cannot parse offset of %s\n",
- __func__, dirname);
- return -1;
- }
-
- /* get mapping size */
- snprintf(filename, sizeof(filename),
- "%s/size", dirname);
- if (pci_parse_sysfs_value(filename, &size) < 0) {
- RTE_LOG(ERR, EAL,
- "%s(): cannot parse size of %s\n",
- __func__, dirname);
- return -1;
- }
-
- /* get mapping physical address */
- snprintf(filename, sizeof(filename),
- "%s/addr", dirname);
- if (pci_parse_sysfs_value(filename, &maps[i].phaddr) < 0) {
- RTE_LOG(ERR, EAL,
- "%s(): cannot parse addr of %s\n",
- __func__, dirname);
- return -1;
- }
-
- if ((offset > OFF_MAX) || (size > SIZE_MAX)) {
- RTE_LOG(ERR, EAL,
- "%s(): offset/size exceed system max value\n",
- __func__);
- return -1;
- }
- maps[i].offset = offset;
- maps[i].size = size;
- }
+int
+pci_uio_read_config(const struct rte_intr_handle *intr_handle,
+ void *buf, size_t len, off_t offset)
+{
+ return pread(intr_handle->uio_cfg_fd, buf, len, offset);
+}
- return i;
+int
+pci_uio_write_config(const struct rte_intr_handle *intr_handle,
+ const void *buf, size_t len, off_t offset)
+{
+ return pwrite(intr_handle->uio_cfg_fd, buf, len, offset);
}
static int
-pci_uio_map_secondary(struct rte_pci_device *dev)
+pci_uio_set_bus_master(int dev_fd)
{
- int fd, i;
- struct mapped_pci_resource *uio_res;
-
- TAILQ_FOREACH(uio_res, pci_res_list, next) {
+ uint16_t reg;
+ int ret;
- /* skip this element if it doesn't match our PCI address */
- if (memcmp(&uio_res->pci_addr, &dev->addr, sizeof(dev->addr)))
- continue;
+ ret = pread(dev_fd, &reg, sizeof(reg), PCI_COMMAND);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL,
+ "Cannot read command from PCI config space!\n");
+ return -1;
+ }
- for (i = 0; i != uio_res->nb_maps; i++) {
- /*
- * open devname, to mmap it
- */
- fd = open(uio_res->path, O_RDWR);
- if (fd < 0) {
- RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
- uio_res->path, strerror(errno));
- return -1;
- }
-
- if (pci_map_resource(uio_res->maps[i].addr, fd,
- (off_t)uio_res->maps[i].offset,
- (size_t)uio_res->maps[i].size)
- != uio_res->maps[i].addr) {
- RTE_LOG(ERR, EAL,
- "Cannot mmap device resource\n");
- close(fd);
- return -1;
- }
- /* fd is not needed in slave process, close it */
- close(fd);
- }
+ /* return if bus mastering is already on */
+ if (reg & PCI_COMMAND_MASTER)
return 0;
+
+ reg |= PCI_COMMAND_MASTER;
+
+ ret = pwrite(dev_fd, &reg, sizeof(reg), PCI_COMMAND);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL,
+ "Cannot write command to PCI config space!\n");
+ return -1;
}
- RTE_LOG(ERR, EAL, "Cannot find resource for device\n");
- return 1;
+ return 0;
}
static int
@@ -176,7 +115,7 @@ pci_mknod_uio_dev(const char *sysfs_uio_path, unsigned uio_num)
return -1;
}
- ret = fscanf(f, "%d:%d", &major, &minor);
+ ret = fscanf(f, "%u:%u", &major, &minor);
if (ret != 2) {
RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs to get major:minor\n",
__func__);
@@ -276,29 +215,34 @@ pci_get_uio_dev(struct rte_pci_device *dev, char *dstbuf,
return uio_num;
}
-/* map the PCI resource of a PCI device in virtual memory */
+void
+pci_uio_free_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource *uio_res)
+{
+ rte_free(uio_res);
+
+ if (dev->intr_handle.uio_cfg_fd >= 0) {
+ close(dev->intr_handle.uio_cfg_fd);
+ dev->intr_handle.uio_cfg_fd = -1;
+ }
+ if (dev->intr_handle.fd) {
+ close(dev->intr_handle.fd);
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ }
+}
+
int
-pci_uio_map_resource(struct rte_pci_device *dev)
+pci_uio_alloc_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource **uio_res)
{
- int i, j;
char dirname[PATH_MAX];
+ char cfgname[PATH_MAX];
char devname[PATH_MAX]; /* contains the /dev/uioX */
- void *mapaddr;
int uio_num;
- uint64_t phaddr;
- uint64_t offset;
- uint64_t pagesz;
- int nb_maps;
- struct rte_pci_addr *loc = &dev->addr;
- struct mapped_pci_resource *uio_res;
- struct pci_map *maps;
-
- dev->intr_handle.fd = -1;
- dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ struct rte_pci_addr *loc;
- /* secondary processes - use already recorded details */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return pci_uio_map_secondary(dev);
+ loc = &dev->addr;
/* find uio resource */
uio_num = pci_get_uio_dev(dev, dirname, sizeof(dirname));
@@ -314,127 +258,108 @@ pci_uio_map_resource(struct rte_pci_device *dev)
if (dev->intr_handle.fd < 0) {
RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
devname, strerror(errno));
- return -1;
+ goto error;
+ }
+
+ snprintf(cfgname, sizeof(cfgname),
+ "/sys/class/uio/uio%u/device/config", uio_num);
+ dev->intr_handle.uio_cfg_fd = open(cfgname, O_RDWR);
+ if (dev->intr_handle.uio_cfg_fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ cfgname, strerror(errno));
+ goto error;
+ }
+
+ if (dev->kdrv == RTE_KDRV_IGB_UIO)
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
+ else {
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO_INTX;
+
+ /* set bus mastering, which is not done by uio_pci_generic */
+ if (pci_uio_set_bus_master(dev->intr_handle.uio_cfg_fd)) {
+ RTE_LOG(ERR, EAL, "Cannot set up bus mastering!\n");
+ goto error;
+ }
}
- dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
/* allocate the mapping details for secondary processes*/
- uio_res = rte_zmalloc("UIO_RES", sizeof(*uio_res), 0);
- if (uio_res == NULL) {
+ *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
+ if (*uio_res == NULL) {
RTE_LOG(ERR, EAL,
"%s(): cannot store uio mmap details\n", __func__);
- return -1;
+ goto error;
}
- snprintf(uio_res->path, sizeof(uio_res->path), "%s", devname);
- memcpy(&uio_res->pci_addr, &dev->addr, sizeof(uio_res->pci_addr));
+ snprintf((*uio_res)->path, sizeof((*uio_res)->path), "%s", devname);
+ memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));
- /* collect info about device mappings */
- nb_maps = pci_uio_get_mappings(dirname, uio_res->maps,
- RTE_DIM(uio_res->maps));
- if (nb_maps < 0) {
- rte_free(uio_res);
- return nb_maps;
- }
+ return 0;
- uio_res->nb_maps = nb_maps;
+error:
+ pci_uio_free_resource(dev, *uio_res);
+ return -1;
+}
- /* Map all BARs */
- pagesz = sysconf(_SC_PAGESIZE);
+int
+pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
+ struct mapped_pci_resource *uio_res, int map_idx)
+{
+ int fd;
+ char devname[PATH_MAX]; /* contains the /dev/uioX */
+ void *mapaddr;
+ struct rte_pci_addr *loc;
+ struct pci_map *maps;
+ loc = &dev->addr;
maps = uio_res->maps;
- for (i = 0; i != PCI_MAX_RESOURCE; i++) {
- int fd;
- /* skip empty BAR */
- phaddr = dev->mem_resource[i].phys_addr;
- if (phaddr == 0)
- continue;
+ /* update devname for mmap */
+ snprintf(devname, sizeof(devname),
+ SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/resource%d",
+ loc->domain, loc->bus, loc->devid,
+ loc->function, res_idx);
+
+ /* allocate memory to keep path */
+ maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
+ if (maps[map_idx].path == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
+ strerror(errno));
+ return -1;
+ }
- for (j = 0; j != nb_maps && (phaddr != maps[j].phaddr ||
- dev->mem_resource[i].len != maps[j].size);
- j++)
- ;
-
- /* if matching map is found, then use it */
- if (j != nb_maps) {
- int fail = 0;
- offset = j * pagesz;
-
- /*
- * open devname, to mmap it
- */
- fd = open(devname, O_RDWR);
- if (fd < 0) {
- RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
- devname, strerror(errno));
- return -1;
- }
-
- if (maps[j].addr != NULL)
- fail = 1;
- else {
- /* try mapping somewhere close to the end of hugepages */
- if (pci_map_addr == NULL)
- pci_map_addr = pci_find_max_end_va();
-
- mapaddr = pci_map_resource(pci_map_addr, fd, (off_t)offset,
- (size_t)maps[j].size);
- if (mapaddr == MAP_FAILED)
- fail = 1;
-
- pci_map_addr = RTE_PTR_ADD(mapaddr, (size_t) maps[j].size);
- }
-
- if (fail) {
- rte_free(uio_res);
- close(fd);
- return -1;
- }
- close(fd);
-
- maps[j].addr = mapaddr;
- maps[j].offset = offset;
- dev->mem_resource[i].addr = mapaddr;
- }
+ /*
+ * open resource file, to mmap it
+ */
+ fd = open(devname, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ devname, strerror(errno));
+ goto error;
}
- TAILQ_INSERT_TAIL(pci_res_list, uio_res, next);
+ /* try mapping somewhere close to the end of hugepages */
+ if (pci_map_addr == NULL)
+ pci_map_addr = pci_find_max_end_va();
- return 0;
-}
+ mapaddr = pci_map_resource(pci_map_addr, fd, 0,
+ (size_t)dev->mem_resource[res_idx].len, 0);
+ close(fd);
+ if (mapaddr == MAP_FAILED)
+ goto error;
-/*
- * parse a sysfs file containing one integer value
- * different to the eal version, as it needs to work with 64-bit values
- */
-static int
-pci_parse_sysfs_value(const char *filename, uint64_t *val)
-{
- FILE *f;
- char buf[BUFSIZ];
- char *end = NULL;
+ pci_map_addr = RTE_PTR_ADD(mapaddr,
+ (size_t)dev->mem_resource[res_idx].len);
- f = fopen(filename, "r");
- if (f == NULL) {
- RTE_LOG(ERR, EAL, "%s(): cannot open sysfs value %s\n",
- __func__, filename);
- return -1;
- }
+ maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
+ maps[map_idx].size = dev->mem_resource[res_idx].len;
+ maps[map_idx].addr = mapaddr;
+ maps[map_idx].offset = 0;
+ strcpy(maps[map_idx].path, devname);
+ dev->mem_resource[res_idx].addr = mapaddr;
- if (fgets(buf, sizeof(buf), f) == NULL) {
- RTE_LOG(ERR, EAL, "%s(): cannot read sysfs value %s\n",
- __func__, filename);
- fclose(f);
- return -1;
- }
- *val = strtoull(buf, &end, 0);
- if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
- RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs value %s\n",
- __func__, filename);
- fclose(f);
- return -1;
- }
- fclose(f);
return 0;
+
+error:
+ rte_free(maps[map_idx].path);
+ return -1;
}
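The sysfs-based BAR mapping above can be reproduced outside the EAL; a minimal sketch with an assumed PCI address and BAR size:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

/* Map BAR0 of an illustrative device through its sysfs resource file. */
static void *map_bar0_sketch(void)
{
    int fd = open("/sys/bus/pci/devices/0000:03:00.0/resource0", O_RDWR);
    void *bar;

    if (fd < 0)
        return NULL;
    bar = mmap(NULL, 0x20000 /* assumed BAR size */,
               PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    close(fd); /* the mapping survives the close */
    return bar == MAP_FAILED ? NULL : bar;
}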
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_vfio.c b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c
index c1246e8d..74f91bad 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_vfio.c
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c
@@ -41,9 +41,9 @@
#include <rte_log.h>
#include <rte_pci.h>
-#include <rte_tailq.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>
+#include <eal_private.h>
#include "eal_filesystem.h"
#include "eal_pci_init.h"
@@ -61,6 +61,14 @@
#ifdef VFIO_PRESENT
+#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+static struct rte_tailq_elem rte_vfio_tailq = {
+ .name = "VFIO_RESOURCE_LIST",
+};
+EAL_REGISTER_TAILQ(rte_vfio_tailq)
+
#define VFIO_DIR "/dev/vfio"
#define VFIO_CONTAINER_PATH "/dev/vfio/vfio"
#define VFIO_GROUP_FMT "/dev/vfio/%u"
@@ -69,12 +77,30 @@
/* per-process VFIO config */
static struct vfio_config vfio_cfg;
+int
+pci_vfio_read_config(const struct rte_intr_handle *intr_handle,
+ void *buf, size_t len, off_t offs)
+{
+ return pread64(intr_handle->vfio_dev_fd, buf, len,
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);
+}
+
+int
+pci_vfio_write_config(const struct rte_intr_handle *intr_handle,
+ const void *buf, size_t len, off_t offs)
+{
+ return pwrite64(intr_handle->vfio_dev_fd, buf, len,
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);
+}
+
/* get PCI BAR number where MSI-X interrupts are */
static int
-pci_vfio_get_msix_bar(int fd, int *msix_bar)
+pci_vfio_get_msix_bar(int fd, int *msix_bar, uint32_t *msix_table_offset,
+ uint32_t *msix_table_size)
{
int ret;
uint32_t reg;
+ uint16_t flags;
uint8_t cap_id, cap_offset;
/* read PCI capability pointer from config space */
@@ -133,7 +159,18 @@ pci_vfio_get_msix_bar(int fd, int *msix_bar)
return -1;
}
+ ret = pread64(fd, &flags, sizeof(flags),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ cap_offset + 2);
+ if (ret != sizeof(flags)) {
+ RTE_LOG(ERR, EAL, "Cannot read table flags from PCI config "
+ "space!\n");
+ return -1;
+ }
+
*msix_bar = reg & RTE_PCI_MSIX_TABLE_BIR;
+ *msix_table_offset = reg & RTE_PCI_MSIX_TABLE_OFFSET;
+ *msix_table_size = 16 * (1 + (flags & RTE_PCI_MSIX_FLAGS_QSIZE));
return 0;
}
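For reference, the computation above follows the PCI MSI-X capability layout: the low 11 bits of the message-control word (masked by RTE_PCI_MSIX_FLAGS_QSIZE) encode the table size minus one, and each table entry is 16 bytes. A small worked sketch:

/* e.g. flags = 0x001f -> QSIZE field = 31 -> 32 vectors -> 512 bytes */
static uint32_t msix_table_bytes(uint16_t flags)
{
    return 16 * (1 + (flags & RTE_PCI_MSIX_FLAGS_QSIZE));
}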
@@ -273,7 +310,7 @@ pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
}
/* set up an eventfd for interrupts */
- fd = eventfd(0, 0);
+ fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
if (fd < 0) {
RTE_LOG(ERR, EAL, " cannot set up eventfd, "
"error %i (%s)\n", errno, strerror(errno));
@@ -340,9 +377,11 @@ pci_vfio_get_container_fd(void)
if (ret != 1) {
if (ret < 0)
RTE_LOG(ERR, EAL, " could not get IOMMU type, "
- "error %i (%s)\n", errno, strerror(errno));
+ "error %i (%s)\n", errno,
+ strerror(errno));
else
- RTE_LOG(ERR, EAL, " unsupported IOMMU type!\n");
+ RTE_LOG(ERR, EAL, " unsupported IOMMU type "
+ "detected in VFIO\n");
close(vfio_container_fd);
return -1;
}
@@ -460,14 +499,15 @@ pci_vfio_get_group_fd(int iommu_group_no)
}
/* parse IOMMU group number for a PCI device
- * returns -1 for errors, 0 for non-existent group */
+ * returns 1 on success, -1 for errors, 0 for non-existent group
+ */
static int
-pci_vfio_get_group_no(const char *pci_addr)
+pci_vfio_get_group_no(const char *pci_addr, int *iommu_group_no)
{
char linkname[PATH_MAX];
char filename[PATH_MAX];
char *tok[16], *group_tok, *end;
- int ret, iommu_group_no;
+ int ret;
memset(linkname, 0, sizeof(linkname));
memset(filename, 0, sizeof(filename));
@@ -494,13 +534,13 @@ pci_vfio_get_group_no(const char *pci_addr)
errno = 0;
group_tok = tok[ret - 1];
end = group_tok;
- iommu_group_no = strtol(group_tok, &end, 10);
+ *iommu_group_no = strtol(group_tok, &end, 10);
if ((end != group_tok && *end != '\0') || errno != 0) {
RTE_LOG(ERR, EAL, " %s error parsing IOMMU number!\n", pci_addr);
return -1;
}
- return iommu_group_no;
+ return 1;
}
static void
@@ -528,7 +568,11 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
struct rte_pci_addr *loc = &dev->addr;
int i, ret, msix_bar;
struct mapped_pci_resource *vfio_res = NULL;
+ struct mapped_pci_res_list *vfio_res_list = RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+
struct pci_map *maps;
+ uint32_t msix_table_offset = 0;
+ uint32_t msix_table_size = 0;
dev->intr_handle.fd = -1;
dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
@@ -538,16 +582,15 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
loc->domain, loc->bus, loc->devid, loc->function);
/* get group number */
- iommu_group_no = pci_vfio_get_group_no(pci_addr);
-
- /* if 0, group doesn't exist */
- if (iommu_group_no == 0) {
+ ret = pci_vfio_get_group_no(pci_addr, &iommu_group_no);
+ if (ret == 0) {
RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
- pci_addr);
+ pci_addr);
return 1;
}
+
/* if negative, something failed */
- else if (iommu_group_no < 0)
+ if (ret < 0)
return -1;
/* get the actual group fd */
@@ -654,9 +697,10 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
}
/* get MSI-X BAR, if any (we have to know where it is because we can't
- * mmap it when using VFIO) */
+ * easily mmap it when using VFIO) */
msix_bar = -1;
- ret = pci_vfio_get_msix_bar(vfio_dev_fd, &msix_bar);
+ ret = pci_vfio_get_msix_bar(vfio_dev_fd, &msix_bar,
+ &msix_table_offset, &msix_table_size);
if (ret < 0) {
RTE_LOG(ERR, EAL, " %s cannot get MSI-X BAR number!\n", pci_addr);
close(vfio_dev_fd);
@@ -679,7 +723,7 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
VFIO_PCI_BAR5_REGION_INDEX + 1);
} else {
/* if we're in a secondary process, just find our tailq entry */
- TAILQ_FOREACH(vfio_res, pci_res_list, next) {
+ TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
if (memcmp(&vfio_res->pci_addr, &dev->addr, sizeof(dev->addr)))
continue;
break;
@@ -699,6 +743,9 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
for (i = 0; i < (int) vfio_res->nb_maps; i++) {
struct vfio_region_info reg = { .argsz = sizeof(reg) };
void *bar_addr;
+ struct memreg {
+ unsigned long offset, size;
+ } memreg[2] = {};
reg.index = i;
@@ -717,21 +764,77 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
if ((reg.flags & VFIO_REGION_INFO_FLAG_MMAP) == 0)
continue;
- /* skip MSI-X BAR */
- if (i == msix_bar)
- continue;
+ if (i == msix_bar) {
+ /*
+ * VFIO will not let us map the MSI-X table,
+ * but we can map around it.
+ */
+ uint32_t table_start = msix_table_offset;
+ uint32_t table_end = table_start + msix_table_size;
+ table_end = (table_end + ~PAGE_MASK) & PAGE_MASK;
+ table_start &= PAGE_MASK;
+
+ if (table_start == 0 && table_end >= reg.size) {
+ /* Cannot map this BAR */
+ RTE_LOG(DEBUG, EAL, "Skipping BAR %d\n", i);
+ continue;
+ } else {
+ memreg[0].offset = reg.offset;
+ memreg[0].size = table_start;
+ memreg[1].offset = table_end;
+ memreg[1].size = reg.size - table_end;
+
+ RTE_LOG(DEBUG, EAL,
+ "Trying to map BAR %d that contains the MSI-X "
+ "table. Trying offsets: "
+ "0x%04lx:0x%04lx, 0x%04lx:0x%04lx\n", i,
+ memreg[0].offset, memreg[0].size,
+ memreg[1].offset, memreg[1].size);
+ }
+ } else {
+ memreg[0].offset = reg.offset;
+ memreg[0].size = reg.size;
+ }
+ /* try to figure out an address */
if (internal_config.process_type == RTE_PROC_PRIMARY) {
/* try mapping somewhere close to the end of hugepages */
if (pci_map_addr == NULL)
pci_map_addr = pci_find_max_end_va();
- bar_addr = pci_map_resource(pci_map_addr, vfio_dev_fd, reg.offset,
- reg.size);
+ bar_addr = pci_map_addr;
pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg.size);
} else {
- bar_addr = pci_map_resource(maps[i].addr, vfio_dev_fd, reg.offset,
- reg.size);
+ bar_addr = maps[i].addr;
+ }
+
+ /* reserve the address using an inaccessible mapping */
+ bar_addr = mmap(bar_addr, reg.size, 0, MAP_PRIVATE |
+ MAP_ANONYMOUS, -1, 0);
+ if (bar_addr != MAP_FAILED) {
+ void *map_addr = NULL;
+ if (memreg[0].size) {
+ /* actual map of first part */
+ map_addr = pci_map_resource(bar_addr, vfio_dev_fd,
+ memreg[0].offset,
+ memreg[0].size,
+ MAP_FIXED);
+ }
+
+ /* if there's a second part, try to map it */
+ if (map_addr != MAP_FAILED
+ && memreg[1].offset && memreg[1].size) {
+ void *second_addr = RTE_PTR_ADD(bar_addr, memreg[1].offset);
+ map_addr = pci_map_resource(second_addr,
+ vfio_dev_fd, memreg[1].offset,
+ memreg[1].size,
+ MAP_FIXED);
+ }
+
+ if (map_addr == MAP_FAILED || !map_addr) {
+ munmap(bar_addr, reg.size);
+ bar_addr = MAP_FAILED;
+ }
}
if (bar_addr == MAP_FAILED ||
@@ -748,6 +851,7 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
maps[i].addr = bar_addr;
maps[i].offset = reg.offset;
maps[i].size = reg.size;
+ maps[i].path = NULL; /* vfio doesn't have per-resource paths */
dev->mem_resource[i].addr = bar_addr;
}
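The reserve-then-overlay pattern used above generalizes: reserve the whole BAR-sized virtual range with an inaccessible anonymous mapping, then punch real device mappings into it with MAP_FIXED, leaving the MSI-X table pages untouched. A standalone sketch under assumed page-aligned offsets:

static void *map_around_hole(int fd, size_t bar_size,
                             size_t hole_off, size_t hole_len)
{
    /* reserve the full range; prot 0 keeps it inaccessible */
    void *base = mmap(NULL, bar_size, 0,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (base == MAP_FAILED)
        return NULL;
    /* overlay the part before the hole, if any */
    if (hole_off != 0 &&
        mmap(base, hole_off, PROT_READ | PROT_WRITE,
             MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
        goto error;
    /* overlay the part after the hole, if any */
    if (hole_off + hole_len < bar_size &&
        mmap((char *)base + hole_off + hole_len,
             bar_size - hole_off - hole_len, PROT_READ | PROT_WRITE,
             MAP_SHARED | MAP_FIXED, fd,
             (off_t)(hole_off + hole_len)) == MAP_FAILED)
        goto error;
    return base;
error:
    munmap(base, bar_size);
    return NULL;
}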
@@ -773,7 +877,7 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
}
if (internal_config.process_type == RTE_PROC_PRIMARY)
- TAILQ_INSERT_TAIL(pci_res_list, vfio_res, next);
+ TAILQ_INSERT_TAIL(vfio_res_list, vfio_res, next);
return 0;
}
@@ -783,18 +887,35 @@ pci_vfio_enable(void)
{
/* initialize group list */
int i;
+ int module_vfio_type1;
for (i = 0; i < VFIO_MAX_GROUPS; i++) {
vfio_cfg.vfio_groups[i].fd = -1;
vfio_cfg.vfio_groups[i].group_no = -1;
}
+
+ module_vfio_type1 = rte_eal_check_module("vfio_iommu_type1");
+
+ /* return error directly */
+ if (module_vfio_type1 == -1) {
+ RTE_LOG(INFO, EAL, "Could not get loaded module details!\n");
+ return -1;
+ }
+
+ /* return 0 if VFIO modules not loaded */
+ if (module_vfio_type1 == 0) {
+ RTE_LOG(INFO, EAL, "VFIO modules not all loaded, "
+ "skip VFIO support...\n");
+ return 0;
+ }
+
vfio_cfg.vfio_container_fd = pci_vfio_get_container_fd();
/* check if we have VFIO driver enabled */
if (vfio_cfg.vfio_container_fd != -1)
vfio_cfg.vfio_enabled = 1;
else
- RTE_LOG(INFO, EAL, "VFIO support could not be initialized\n");
+ RTE_LOG(NOTICE, EAL, "VFIO support could not be initialized\n");
return 0;
}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c
index 6588fb1f..d9188fde 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c
@@ -34,6 +34,7 @@
#include <string.h>
#include <fcntl.h>
#include <sys/socket.h>
+#include <pthread.h>
/* sys/un.h with __USE_MISC uses strlen, which is unsafe */
#ifdef __USE_MISC
@@ -49,12 +50,12 @@
#include <rte_log.h>
#include <rte_pci.h>
-#include <rte_tailq.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>
#include "eal_filesystem.h"
#include "eal_pci_init.h"
+#include "eal_thread.h"
/**
* @file
@@ -375,6 +376,7 @@ int
pci_vfio_mp_sync_setup(void)
{
int ret;
+ char thread_name[RTE_MAX_THREAD_NAME_LEN];
if (vfio_mp_sync_socket_setup() < 0) {
RTE_LOG(ERR, EAL, "Failed to set up local socket!\n");
@@ -384,11 +386,19 @@ pci_vfio_mp_sync_setup(void)
ret = pthread_create(&socket_thread, NULL,
pci_vfio_mp_sync_thread, NULL);
if (ret) {
- RTE_LOG(ERR, EAL, "Failed to create thread for communication with "
- "secondary processes!\n");
+ RTE_LOG(ERR, EAL,
+ "Failed to create thread for communication with secondary processes!\n");
close(mp_socket_fd);
return -1;
}
+
+ /* Set thread_name to aid debugging. */
+ snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "pci-vfio-sync");
+ ret = rte_thread_setname(socket_thread, thread_name);
+ if (ret)
+ RTE_LOG(ERR, EAL,
+ "Failed to set thread name for secondary processes!\n");
+
return 0;
}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_thread.c b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_thread.c
index 80a985f2..18bd8e04 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_thread.c
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_thread.c
@@ -39,6 +39,7 @@
#include <pthread.h>
#include <sched.h>
#include <sys/queue.h>
+#include <sys/syscall.h>
#include <rte_debug.h>
#include <rte_atomic.h>
@@ -47,7 +48,6 @@
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_per_lcore.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
@@ -55,7 +55,9 @@
#include "eal_private.h"
#include "eal_thread.h"
-RTE_DEFINE_PER_LCORE(unsigned, _lcore_id);
+RTE_DEFINE_PER_LCORE(unsigned, _lcore_id) = LCORE_ID_ANY;
+RTE_DEFINE_PER_LCORE(unsigned, _socket_id) = (unsigned)SOCKET_ID_ANY;
+RTE_DEFINE_PER_LCORE(rte_cpuset_t, _cpuset);
/*
* Send a message to a slave lcore identified by slave_id to call a
@@ -94,62 +96,17 @@ rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned slave_id)
return 0;
}
-/* set affinity for current thread */
+/* set affinity for current EAL thread */
static int
eal_thread_set_affinity(void)
{
- int s;
- pthread_t thread;
+ unsigned lcore_id = rte_lcore_id();
-/*
- * According to the section VERSIONS of the CPU_ALLOC man page:
- *
- * The CPU_ZERO(), CPU_SET(), CPU_CLR(), and CPU_ISSET() macros were added
- * in glibc 2.3.3.
- *
- * CPU_COUNT() first appeared in glibc 2.6.
- *
- * CPU_AND(), CPU_OR(), CPU_XOR(), CPU_EQUAL(), CPU_ALLOC(),
- * CPU_ALLOC_SIZE(), CPU_FREE(), CPU_ZERO_S(), CPU_SET_S(), CPU_CLR_S(),
- * CPU_ISSET_S(), CPU_AND_S(), CPU_OR_S(), CPU_XOR_S(), and CPU_EQUAL_S()
- * first appeared in glibc 2.7.
- */
-#if defined(CPU_ALLOC)
- size_t size;
- cpu_set_t *cpusetp;
-
- cpusetp = CPU_ALLOC(RTE_MAX_LCORE);
- if (cpusetp == NULL) {
- RTE_LOG(ERR, EAL, "CPU_ALLOC failed\n");
- return -1;
- }
-
- size = CPU_ALLOC_SIZE(RTE_MAX_LCORE);
- CPU_ZERO_S(size, cpusetp);
- CPU_SET_S(rte_lcore_id(), size, cpusetp);
-
- thread = pthread_self();
- s = pthread_setaffinity_np(thread, size, cpusetp);
- if (s != 0) {
- RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
- CPU_FREE(cpusetp);
- return -1;
- }
+ /* acquire system unique id */
+ rte_gettid();
- CPU_FREE(cpusetp);
-#else /* CPU_ALLOC */
- cpu_set_t cpuset;
- CPU_ZERO( &cpuset );
- CPU_SET( rte_lcore_id(), &cpuset );
-
- thread = pthread_self();
- s = pthread_setaffinity_np(thread, sizeof( cpuset ), &cpuset);
- if (s != 0) {
- RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
- return -1;
- }
-#endif
- return 0;
+ /* update EAL thread core affinity */
+ return rte_thread_set_affinity(&lcore_config[lcore_id].cpuset);
}
void eal_thread_init_master(unsigned lcore_id)
@@ -171,6 +128,7 @@ eal_thread_loop(__attribute__((unused)) void *arg)
unsigned lcore_id;
pthread_t thread_id;
int m2s, s2m;
+ char cpuset[RTE_CPU_AFFINITY_STR_LEN];
thread_id = pthread_self();
@@ -182,9 +140,6 @@ eal_thread_loop(__attribute__((unused)) void *arg)
if (lcore_id == RTE_MAX_LCORE)
rte_panic("cannot retrieve lcore id\n");
- RTE_LOG(DEBUG, EAL, "Core %u is ready (tid=%x)\n",
- lcore_id, (int)thread_id);
-
m2s = lcore_config[lcore_id].pipe_master2slave[0];
s2m = lcore_config[lcore_id].pipe_slave2master[1];
@@ -195,6 +150,11 @@ eal_thread_loop(__attribute__((unused)) void *arg)
if (eal_thread_set_affinity() < 0)
rte_panic("cannot set affinity\n");
+ ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
+
+ RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%x;cpuset=[%s%s])\n",
+ lcore_id, (int)thread_id, cpuset, ret == 0 ? "" : "...");
+
/* read on our pipe to get commands */
while (1) {
void *fct_arg;
@@ -231,3 +191,9 @@ eal_thread_loop(__attribute__((unused)) void *arg)
/* pthread_exit(NULL); */
/* return NULL; */
}
+
+/* return the calling thread's tid, obtained via the gettid() syscall */
+int rte_sys_gettid(void)
+{
+ return (int)syscall(SYS_gettid);
+}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_timer.c b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_timer.c
index 6321e42e..9ceff330 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_timer.c
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_timer.c
@@ -47,7 +47,6 @@
#include <rte_common.h>
#include <rte_log.h>
#include <rte_cycles.h>
-#include <rte_tailq.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
@@ -58,9 +57,6 @@
enum timer_source eal_timer_source = EAL_TIMER_HPET;
-/* The frequency of the RDTSC timer resolution */
-static uint64_t eal_tsc_resolution_hz = 0;
-
#ifdef RTE_LIBEAL_USE_HPET
#define DEV_HPET "/dev/hpet"
@@ -161,23 +157,6 @@ rte_get_hpet_cycles(void)
#endif
-
-void
-rte_delay_us(unsigned us)
-{
- const uint64_t start = rte_get_timer_cycles();
- const uint64_t ticks = (uint64_t)us * rte_get_timer_hz() / 1E6;
- while ((rte_get_timer_cycles() - start) < ticks)
- rte_pause();
-}
-
-uint64_t
-rte_get_tsc_hz(void)
-{
- return eal_tsc_resolution_hz;
-}
-
-
#ifdef RTE_LIBEAL_USE_HPET
/*
* Open and mmap /dev/hpet (high precision event timer) that will
@@ -187,9 +166,10 @@ int
rte_eal_hpet_init(int make_default)
{
int fd, ret;
+ char thread_name[RTE_MAX_THREAD_NAME_LEN];
if (internal_config.no_hpet) {
- RTE_LOG(INFO, EAL, "HPET is disabled\n");
+ RTE_LOG(NOTICE, EAL, "HPET is disabled\n");
return -1;
}
@@ -229,12 +209,21 @@ rte_eal_hpet_init(int make_default)
* msb (hpet is 32 bits by default under linux) */
ret = pthread_create(&msb_inc_thread_id, NULL,
(void *(*)(void *))hpet_msb_inc, NULL);
- if (ret < 0) {
+ if (ret != 0) {
RTE_LOG(ERR, EAL, "ERROR: Cannot create HPET timer thread!\n");
internal_config.no_hpet = 1;
return -1;
}
+ /*
+ * Set thread_name to aid debugging.
+ */
+ snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "hpet-msb-inc");
+ ret = rte_thread_setname(msb_inc_thread_id, thread_name);
+ if (ret != 0)
+ RTE_LOG(ERR, EAL,
+ "ERROR: Cannot set HPET timer thread name!\n");
+
if (make_default)
eal_timer_source = EAL_TIMER_HPET;
return 0;
@@ -276,16 +265,16 @@ check_tsc_flags(void)
fclose(stream);
}
-static int
-set_tsc_freq_from_clock(void)
+uint64_t
+get_tsc_freq(void)
{
-#if 0
-//CLOCK_MONOTONIC_RAW
+#ifdef CLOCK_MONOTONIC_RAW
#define NS_PER_SEC 1E9
struct timespec sleeptime = {.tv_nsec = 5E8 }; /* 1/2 second */
struct timespec t_start, t_end;
+ uint64_t tsc_hz;
if (clock_gettime(CLOCK_MONOTONIC_RAW, &t_start) == 0) {
uint64_t ns, end, start = rte_rdtsc();
@@ -296,40 +285,11 @@ set_tsc_freq_from_clock(void)
ns += (t_end.tv_nsec - t_start.tv_nsec);
double secs = (double)ns/NS_PER_SEC;
- eal_tsc_resolution_hz = (uint64_t)((end - start)/secs);
- return 0;
+ tsc_hz = (uint64_t)((end - start)/secs);
+ return tsc_hz;
}
#endif
- return -1;
-}
-
-static void
-set_tsc_freq_fallback(void)
-{
- RTE_LOG(WARNING, EAL, "WARNING: clock_gettime cannot use "
- "CLOCK_MONOTONIC_RAW and HPET is not available"
- " - clock timings may be less accurate.\n");
- /* assume that the sleep(1) will sleep for 1 second */
- uint64_t start = rte_rdtsc();
- sleep(1);
- eal_tsc_resolution_hz = rte_rdtsc() - start;
-}
-/*
- * This function measures the TSC frequency. It uses a variety of approaches.
- *
- * 1. If kernel provides CLOCK_MONOTONIC_RAW we use that to tune the TSC value
- * 2. If kernel does not provide that, and we have HPET support, tune using HPET
- * 3. Lastly, if neither of the above can be used, just sleep for 1 second and
- * tune off that, printing a warning about inaccuracy of timing
- */
-static void
-set_tsc_freq(void)
-{
- if (set_tsc_freq_from_clock() < 0)
- set_tsc_freq_fallback();
-
- RTE_LOG(INFO, EAL, "TSC frequency is ~%"PRIu64" KHz\n",
- eal_tsc_resolution_hz/1000);
+ return 0;
}
int
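The calibration arithmetic in get_tsc_freq() divides the observed rdtsc delta by the elapsed CLOCK_MONOTONIC_RAW time. A worked example under assumed readings:

/* Assume rte_rdtsc() advanced by 1,500,000,000 ticks while
 * clock_gettime() reports 500,000,000 ns (0.5 s) of wall time:
 *   tsc_hz = 1.5e9 ticks / 0.5 s = 3.0e9, i.e. a ~3.0 GHz TSC. */
uint64_t ticks = 1500000000ULL;
double   secs  = 500000000.0 / 1E9;
uint64_t tsc_hz = (uint64_t)(ticks / secs); /* 3000000000 */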
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_vfio.h b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_vfio.h
index 03e693e0..72ec3f62 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_vfio.h
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/eal_vfio.h
@@ -43,9 +43,13 @@
#include <linux/vfio.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
-#define RTE_PCI_MSIX_TABLE_BIR 0x7
+#define RTE_PCI_MSIX_TABLE_BIR 0x7
+#define RTE_PCI_MSIX_TABLE_OFFSET 0xfffffff8
+#define RTE_PCI_MSIX_FLAGS_QSIZE 0x07ff
#else
-#define RTE_PCI_MSIX_TABLE_BIR PCI_MSIX_TABLE_BIR
+#define RTE_PCI_MSIX_TABLE_BIR PCI_MSIX_TABLE_BIR
+#define RTE_PCI_MSIX_TABLE_OFFSET PCI_MSIX_TABLE_OFFSET
+#define RTE_PCI_MSIX_FLAGS_QSIZE PCI_MSIX_FLAGS_QSIZE
#endif
#define VFIO_PRESENT
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h b/src/dpdk22/lib/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h
index d9707780..d9707780 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h
diff --git a/src/dpdk22/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h b/src/dpdk22/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
new file mode 100644
index 00000000..3dacbff8
--- /dev/null
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
@@ -0,0 +1,228 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_INTERRUPTS_H_
+#error "don't include this file directly, please include generic <rte_interrupts.h>"
+#endif
+
+#ifndef _RTE_LINUXAPP_INTERRUPTS_H_
+#define _RTE_LINUXAPP_INTERRUPTS_H_
+
+#define RTE_MAX_RXTX_INTR_VEC_ID 32
+#define RTE_INTR_VEC_ZERO_OFFSET 0
+#define RTE_INTR_VEC_RXTX_OFFSET 1
+
+enum rte_intr_handle_type {
+ RTE_INTR_HANDLE_UNKNOWN = 0,
+ RTE_INTR_HANDLE_UIO, /**< uio device handle */
+ RTE_INTR_HANDLE_UIO_INTX, /**< uio generic handle */
+ RTE_INTR_HANDLE_VFIO_LEGACY, /**< vfio device handle (legacy) */
+ RTE_INTR_HANDLE_VFIO_MSI, /**< vfio device handle (MSI) */
+ RTE_INTR_HANDLE_VFIO_MSIX, /**< vfio device handle (MSIX) */
+ RTE_INTR_HANDLE_ALARM, /**< alarm handle */
+ RTE_INTR_HANDLE_EXT, /**< external handler */
+ RTE_INTR_HANDLE_MAX
+};
+
+#define RTE_INTR_EVENT_ADD 1UL
+#define RTE_INTR_EVENT_DEL 2UL
+
+typedef void (*rte_intr_event_cb_t)(int fd, void *arg);
+
+struct rte_epoll_data {
+ uint32_t event; /**< event type */
+ void *data; /**< User data */
+ rte_intr_event_cb_t cb_fun; /**< IN: callback fun */
+ void *cb_arg; /**< IN: callback arg */
+};
+
+enum {
+ RTE_EPOLL_INVALID = 0,
+ RTE_EPOLL_VALID,
+ RTE_EPOLL_EXEC,
+};
+
+/** interrupt epoll event obj, taken by epoll_event.ptr */
+struct rte_epoll_event {
+ volatile uint32_t status; /**< OUT: event status */
+ int fd; /**< OUT: event fd */
+ int epfd; /**< OUT: epoll instance the ev associated with */
+ struct rte_epoll_data epdata;
+};
+
+/** Handle for interrupts. */
+struct rte_intr_handle {
+ union {
+ int vfio_dev_fd; /**< VFIO device file descriptor */
+ int uio_cfg_fd; /**< UIO config file descriptor
+ for uio_pci_generic */
+ };
+ int fd; /**< interrupt event file descriptor */
+ enum rte_intr_handle_type type; /**< handle type */
+ uint32_t max_intr; /**< max interrupt requested */
+ uint32_t nb_efd; /**< number of available efd(event fd) */
+ int efds[RTE_MAX_RXTX_INTR_VEC_ID]; /**< intr vectors/efds mapping */
+ struct rte_epoll_event elist[RTE_MAX_RXTX_INTR_VEC_ID];
+ /**< intr vector epoll event */
+ int *intr_vec; /**< intr vector number array */
+};
+
+#define RTE_EPOLL_PER_THREAD -1 /**< to hint using per thread epfd */
+
+/**
+ * It waits for events on the epoll instance.
+ *
+ * @param epfd
+ *   Epoll instance fd on which the caller waits for events.
+ * @param events
+ *   Memory area that will contain the events available to the caller.
+ * @param maxevents
+ *   Up to maxevents are returned; maxevents must be greater than zero.
+ * @param timeout
+ *   Specifying a timeout of -1 causes rte_epoll_wait() to block
+ *   indefinitely. Specifying a timeout of zero causes it to return
+ *   immediately.
+ * @return
+ *   - On success, the number of available events.
+ * - On failure, a negative value.
+ */
+int
+rte_epoll_wait(int epfd, struct rte_epoll_event *events,
+ int maxevents, int timeout);
+
+/**
+ * It performs control operations on the epoll instance referred to by epfd.
+ * It requests that the operation op be performed on the target fd.
+ *
+ * @param epfd
+ *   Epoll instance fd on which the caller performs control operations.
+ * @param op
+ *   The operation to be performed on the target fd.
+ * @param fd
+ *   The target fd on which the control operation is performed.
+ * @param event
+ *   Describes the object linked to the fd.
+ *   Note: The caller must take care of deleting the object after CTL_DEL.
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+rte_epoll_ctl(int epfd, int op, int fd,
+ struct rte_epoll_event *event);
+
+/**
+ * The function returns the per-thread epoll instance.
+ *
+ * @return
+ *   The fd of the per-thread epoll instance.
+ */
+int
+rte_intr_tls_epfd(void);
+
+/**
+ * @param intr_handle
+ * Pointer to the interrupt handle.
+ * @param epfd
+ *   Epoll instance fd to which the intr vector is associated.
+ * @param op
+ *   The operation to be performed on the vector.
+ * Operation type of {ADD, DEL}.
+ * @param vec
+ * RX intr vector number added to the epoll instance wait list.
+ * @param data
+ * User raw data.
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
+ int epfd, int op, unsigned int vec, void *data);
+
+/**
+ * It enables the packet I/O interrupt event if it's necessary.
+ * It creates an event fd for each interrupt vector when MSI-X is used;
+ * otherwise it multiplexes a single event fd.
+ *
+ * @param intr_handle
+ * Pointer to the interrupt handle.
+ * @param nb_efd
+ *   Number of interrupt vectors to enable.
+ * The value 0 is not allowed.
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd);
+
+/**
+ * It disables the packet I/O interrupt event.
+ * It deletes registered eventfds and closes the open fds.
+ *
+ * @param intr_handle
+ * Pointer to the interrupt handle.
+ */
+void
+rte_intr_efd_disable(struct rte_intr_handle *intr_handle);
+
+/**
+ * Query whether the datapath packet I/O interrupt is enabled.
+ *
+ * @param intr_handle
+ * Pointer to the interrupt handle.
+ */
+int
+rte_intr_dp_is_en(struct rte_intr_handle *intr_handle);
+
+/**
+ * Query whether the interrupt handle instance allows other interrupt causes.
+ * Other causes are any non-packet-I/O interrupts.
+ *
+ * @param intr_handle
+ * Pointer to the interrupt handle.
+ */
+int
+rte_intr_allow_others(struct rte_intr_handle *intr_handle);
+
+/**
+ * Query the multiple-interrupt-vector capability of the interrupt handle
+ * instance. It returns zero if multiple interrupt vectors are not supported.
+ *
+ * @param intr_handle
+ * Pointer to the interrupt handle.
+ */
+int
+rte_intr_cap_multiple(struct rte_intr_handle *intr_handle);
+
+#endif /* _RTE_LINUXAPP_INTERRUPTS_H_ */
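A hedged usage sketch of the Rx-interrupt epoll API declared above (the intr_handle is assumed to come from an already-initialized device; the vector number and event count are illustrative):

static int rx_intr_wait_sketch(struct rte_intr_handle *ih)
{
    struct rte_epoll_event ev[8];
    int n, i;

    /* register Rx vector 0 with this thread's epoll instance */
    if (rte_intr_rx_ctl(ih, RTE_EPOLL_PER_THREAD,
                        RTE_INTR_EVENT_ADD, 0, NULL) < 0)
        return -1;

    /* block until at least one interrupt event fires */
    n = rte_epoll_wait(rte_intr_tls_epfd(), ev, 8, -1);
    for (i = 0; i < n; i++)
        ; /* ev[i].fd identifies the woken vector */
    return n;
}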
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/src/dpdk22/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
index 1e55c2d9..bd1cc094 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
+++ b/src/dpdk22/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
@@ -117,9 +117,9 @@ struct rte_kni_mbuf {
uint16_t data_off; /**< Start address of data in segment buffer. */
char pad1[4];
uint64_t ol_flags; /**< Offload features. */
- char pad2[2];
- uint16_t data_len; /**< Amount of data in segment buffer. */
+ char pad2[4];
uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
+ uint16_t data_len; /**< Amount of data in segment buffer. */
/* fields on second cache line */
char pad3[8] __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
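The field reorder above keeps this kernel-side shadow of the mbuf in step with the userspace struct rte_mbuf, where pkt_len now precedes data_len. A compile-time guard one could add (illustrative only, not part of this patch):

#include <stddef.h>

/* Hypothetical check that the shadow struct tracks rte_mbuf's layout. */
_Static_assert(offsetof(struct rte_kni_mbuf, pkt_len) ==
               offsetof(struct rte_mbuf, pkt_len),
               "KNI mbuf layout drifted from rte_mbuf");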
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/compat.h b/src/dpdk22/lib/librte_eal/linuxapp/igb_uio/compat.h
index c1d45a66..c1d45a66 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/compat.h
+++ b/src/dpdk22/lib/librte_eal/linuxapp/igb_uio/compat.h
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/compat.h b/src/dpdk22/lib/librte_eal/linuxapp/xen_dom0/compat.h
index e6eb97f2..e6eb97f2 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/compat.h
+++ b/src/dpdk22/lib/librte_eal/linuxapp/xen_dom0/compat.h
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/dom0_mm_dev.h b/src/dpdk22/lib/librte_eal/linuxapp/xen_dom0/dom0_mm_dev.h
index a9dd0d26..9d5ffb22 100755..100644
--- a/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/dom0_mm_dev.h
+++ b/src/dpdk22/lib/librte_eal/linuxapp/xen_dom0/dom0_mm_dev.h
@@ -69,7 +69,7 @@
#define MAX_EXCHANGE_FAIL_TIME 5 /**< Maximum number of allowed exchange failures. */
#define MAX_MEMBLOCK_SIZE (2 * DOM0_MEMBLOCK_SIZE)
#define MAX_NUM_ORDER (DOM0_CONTIG_NUM_ORDER + 1)
-#define SIZE_PER_BLOCK 2 /** < size of per memory block(2MB)).*/
+#define SIZE_PER_BLOCK 2 /**< Size of memory block (2MB).*/
/**
* A structure describing the private information for a dom0 device.
diff --git a/src/dpdk_lib18/librte_eal/common/eal_thread.h b/src/dpdk22/lib/librte_ether/rte_dev_info.h
index b53b84d3..291bd4d7 100755..100644
--- a/src/dpdk_lib18/librte_eal/common/eal_thread.h
+++ b/src/dpdk22/lib/librte_ether/rte_dev_info.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,23 +31,27 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef EAL_THREAD_H
-#define EAL_THREAD_H
+#ifndef _RTE_DEV_INFO_H_
+#define _RTE_DEV_INFO_H_
-/**
- * basic loop of thread, called for each thread by eal_init().
- *
- * @param arg
- * opaque pointer
+/*
+ * Placeholder for accessing device registers
*/
-__attribute__((noreturn)) void *eal_thread_loop(void *arg);
+struct rte_dev_reg_info {
+ void *data; /**< Buffer for returned registers */
+ uint32_t offset; /**< Start register table location for access */
+ uint32_t length; /**< Number of registers to fetch */
+ uint32_t version; /**< Device version */
+};
-/**
- * Init per-lcore info for master thread
- *
- * @param lcore_id
- * identifier of master lcore
+/*
+ * Placeholder for accessing device eeprom
*/
-void eal_thread_init_master(unsigned lcore_id);
+struct rte_dev_eeprom_info {
+ void *data; /**< Buffer for returned eeprom data */
+ uint32_t offset; /**< Start eeprom address for access */
+ uint32_t length; /**< Length of eeprom region to access */
+ uint32_t magic; /**< Device-specific key, such as device-id */
+};
-#endif /* EAL_THREAD_H */
+#endif /* _RTE_DEV_INFO_H_ */
diff --git a/src/dpdk22/lib/librte_ether/rte_eth_ctrl.h b/src/dpdk22/lib/librte_ether/rte_eth_ctrl.h
new file mode 100644
index 00000000..ce224adb
--- /dev/null
+++ b/src/dpdk22/lib/librte_ether/rte_eth_ctrl.h
@@ -0,0 +1,811 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETH_CTRL_H_
+#define _RTE_ETH_CTRL_H_
+
+/**
+ * @file
+ *
+ * Ethernet device features and related data structures used
+ * by control APIs should be defined in this file.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * A packet can be identified by hardware as one of several flow types. Different
+ * NIC hardware may support different sets of flow types.
+ * The hardware classifies each packet to the deepest (most specific) protocol
+ * possible, and exclusively: for example, a packet identified as
+ * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP' will not be reported as any other flow type,
+ * even though it is also an IPv4 packet.
+ * Note that the flow types are used to define RSS offload types in
+ * rte_ethdev.h.
+ */
+#define RTE_ETH_FLOW_UNKNOWN 0
+#define RTE_ETH_FLOW_RAW 1
+#define RTE_ETH_FLOW_IPV4 2
+#define RTE_ETH_FLOW_FRAG_IPV4 3
+#define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
+#define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
+#define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
+#define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
+#define RTE_ETH_FLOW_IPV6 8
+#define RTE_ETH_FLOW_FRAG_IPV6 9
+#define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
+#define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
+#define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
+#define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
+#define RTE_ETH_FLOW_L2_PAYLOAD 14
+#define RTE_ETH_FLOW_IPV6_EX 15
+#define RTE_ETH_FLOW_IPV6_TCP_EX 16
+#define RTE_ETH_FLOW_IPV6_UDP_EX 17
+#define RTE_ETH_FLOW_MAX 18
+
+/**
+ * Feature filter types
+ */
+enum rte_filter_type {
+ RTE_ETH_FILTER_NONE = 0,
+ RTE_ETH_FILTER_MACVLAN,
+ RTE_ETH_FILTER_ETHERTYPE,
+ RTE_ETH_FILTER_FLEXIBLE,
+ RTE_ETH_FILTER_SYN,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_TUNNEL,
+ RTE_ETH_FILTER_FDIR,
+ RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_MAX
+};
+
+/**
+ * Generic operations on filters
+ */
+enum rte_filter_op {
+	/** used to check whether the filter type is supported */
+ RTE_ETH_FILTER_NOP = 0,
+ RTE_ETH_FILTER_ADD, /**< add filter entry */
+ RTE_ETH_FILTER_UPDATE, /**< update filter entry */
+ RTE_ETH_FILTER_DELETE, /**< delete filter entry */
+ RTE_ETH_FILTER_FLUSH, /**< flush all entries */
+ RTE_ETH_FILTER_GET, /**< get filter entry */
+	RTE_ETH_FILTER_SET, /**< set configuration */
+ RTE_ETH_FILTER_INFO, /**< retrieve information */
+ RTE_ETH_FILTER_STATS, /**< retrieve statistics */
+ RTE_ETH_FILTER_OP_MAX
+};
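
Together, rte_filter_type and rte_filter_op form the generic filter_ctrl
programming model: an application first checks that a filter type is supported,
then passes a type-specific argument structure with one of the operations
above. A minimal sketch, assuming an initialized port_id:

    #include <rte_ethdev.h>

    /* Probe for flow director support before trying to program it. */
    static int fdir_supported(uint8_t port_id)
    {
        return rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR) == 0;
    }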
+
+/**
+ * MAC filter type
+ */
+enum rte_mac_filter_type {
+ RTE_MAC_PERFECT_MATCH = 1, /**< exact match of MAC addr. */
+ RTE_MACVLAN_PERFECT_MATCH, /**< exact match of MAC addr and VLAN ID. */
+ RTE_MAC_HASH_MATCH, /**< hash match of MAC addr. */
+ /** hash match of MAC addr and exact match of VLAN ID. */
+ RTE_MACVLAN_HASH_MATCH,
+};
+
+/**
+ * MAC filter info
+ */
+struct rte_eth_mac_filter {
+ uint8_t is_vf; /**< 1 for VF, 0 for port dev */
+	uint16_t dst_id; /**< VF ID, available when is_vf is 1 */
+ enum rte_mac_filter_type filter_type; /**< MAC filter type */
+ struct ether_addr mac_addr;
+};
+
+/**
+ * Define all structures for Ethertype Filter type.
+ */
+
+#define RTE_ETHTYPE_FLAGS_MAC 0x0001 /**< If set, compare mac */
+#define RTE_ETHTYPE_FLAGS_DROP 0x0002 /**< If set, drop packet when match */
+
+/**
+ * A structure used to define the ethertype filter entry
+ * to support RTE_ETH_FILTER_ETHERTYPE with RTE_ETH_FILTER_ADD,
+ * RTE_ETH_FILTER_DELETE and RTE_ETH_FILTER_GET operations.
+ */
+struct rte_eth_ethertype_filter {
+	struct ether_addr mac_addr; /**< MAC address to match. */
+	uint16_t ether_type; /**< Ether type to match */
+	uint16_t flags; /**< Flags from RTE_ETHTYPE_FLAGS_* */
+	uint16_t queue; /**< Queue to assign matching packets to */
+};
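
As a usage sketch (not part of this patch), an application could drop all ARP
frames in hardware with an ethertype filter; assumes an initialized port_id and
a PMD that supports RTE_ETH_FILTER_ETHERTYPE:

    struct rte_eth_ethertype_filter filter;
    int ret;

    memset(&filter, 0, sizeof(filter));
    filter.ether_type = 0x0806;            /* ARP */
    filter.flags = RTE_ETHTYPE_FLAGS_DROP; /* drop on match */
    filter.queue = 0;                      /* ignored when dropping */
    ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
                                  RTE_ETH_FILTER_ADD, &filter);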
+
+#define RTE_FLEX_FILTER_MAXLEN 128 /**< bytes to use in flex filter. */
+#define RTE_FLEX_FILTER_MASK_SIZE \
+ (RTE_ALIGN(RTE_FLEX_FILTER_MAXLEN, CHAR_BIT) / CHAR_BIT)
+ /**< mask bytes in flex filter. */
+
+/**
+ * A structure used to define the flex filter entry
+ * to support RTE_ETH_FILTER_FLEXIBLE with RTE_ETH_FILTER_ADD,
+ * RTE_ETH_FILTER_DELETE and RTE_ETH_FILTER_GET operations.
+ */
+struct rte_eth_flex_filter {
+ uint16_t len;
+	uint8_t bytes[RTE_FLEX_FILTER_MAXLEN]; /**< flex bytes in big endian. */
+ uint8_t mask[RTE_FLEX_FILTER_MASK_SIZE]; /**< if mask bit is 1b, do
+ not compare corresponding byte. */
+ uint8_t priority;
+	uint16_t queue; /**< Queue to assign matching packets to. */
+};
+
+/**
+ * A structure used to define the TCP syn filter entry
+ * to support RTE_ETH_FILTER_SYN with RTE_ETH_FILTER_ADD,
+ * RTE_ETH_FILTER_DELETE and RTE_ETH_FILTER_GET operations.
+ */
+struct rte_eth_syn_filter {
+ uint8_t hig_pri; /**< 1 - higher priority than other filters,
+ 0 - lower priority. */
+	uint16_t queue; /**< Queue to assign matching packets to */
+};
+
+/**
+ * Define all structures for ntuple Filter type.
+ */
+
+#define RTE_NTUPLE_FLAGS_DST_IP 0x0001 /**< If set, dst_ip is part of ntuple */
+#define RTE_NTUPLE_FLAGS_SRC_IP 0x0002 /**< If set, src_ip is part of ntuple */
+#define RTE_NTUPLE_FLAGS_DST_PORT 0x0004 /**< If set, dst_port is part of ntuple */
+#define RTE_NTUPLE_FLAGS_SRC_PORT 0x0008 /**< If set, src_port is part of ntuple */
+#define RTE_NTUPLE_FLAGS_PROTO 0x0010 /**< If set, protocol is part of ntuple */
+#define RTE_NTUPLE_FLAGS_TCP_FLAG 0x0020 /**< If set, tcp flag is involved */
+
+#define RTE_5TUPLE_FLAGS ( \
+ RTE_NTUPLE_FLAGS_DST_IP | \
+ RTE_NTUPLE_FLAGS_SRC_IP | \
+ RTE_NTUPLE_FLAGS_DST_PORT | \
+ RTE_NTUPLE_FLAGS_SRC_PORT | \
+ RTE_NTUPLE_FLAGS_PROTO)
+
+#define RTE_2TUPLE_FLAGS ( \
+ RTE_NTUPLE_FLAGS_DST_PORT | \
+ RTE_NTUPLE_FLAGS_PROTO)
+
+#define TCP_URG_FLAG 0x20
+#define TCP_ACK_FLAG 0x10
+#define TCP_PSH_FLAG 0x08
+#define TCP_RST_FLAG 0x04
+#define TCP_SYN_FLAG 0x02
+#define TCP_FIN_FLAG 0x01
+#define TCP_FLAG_ALL 0x3F
+
+/**
+ * A structure used to define the ntuple filter entry
+ * to support RTE_ETH_FILTER_NTUPLE with RTE_ETH_FILTER_ADD,
+ * RTE_ETH_FILTER_DELETE and RTE_ETH_FILTER_GET operations.
+ */
+struct rte_eth_ntuple_filter {
+ uint16_t flags; /**< Flags from RTE_NTUPLE_FLAGS_* */
+ uint32_t dst_ip; /**< Destination IP address in big endian. */
+ uint32_t dst_ip_mask; /**< Mask of destination IP address. */
+ uint32_t src_ip; /**< Source IP address in big endian. */
+	uint32_t src_ip_mask; /**< Mask of source IP address. */
+ uint16_t dst_port; /**< Destination port in big endian. */
+ uint16_t dst_port_mask; /**< Mask of destination port. */
+	uint16_t src_port; /**< Source port in big endian. */
+ uint16_t src_port_mask; /**< Mask of source port. */
+ uint8_t proto; /**< L4 protocol. */
+ uint8_t proto_mask; /**< Mask of L4 protocol. */
+	/** tcp_flags is only meaningful when proto is TCP.
+	    A packet that matches the above ntuple fields and contains
+	    any bit set in tcp_flags will hit this filter. */
+ uint8_t tcp_flags;
+ uint16_t priority; /**< seven levels (001b-111b), 111b is highest,
+ used when more than one filter matches. */
+	uint16_t queue; /**< Queue to assign matching packets to */
+};
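
A sketch of steering one TCP flow to a dedicated queue with a 5-tuple filter.
The addresses, ports and queue are made-up examples; assumes an initialized
port_id, ixgbe-class hardware, and that rte_ip.h (for the IPv4() macro) and
netinet/in.h are included. Mask semantics are PMD-specific; for ixgbe,
UINT32_MAX means exact match and 0 means wildcard:

    struct rte_eth_ntuple_filter ntuple;
    int ret;

    memset(&ntuple, 0, sizeof(ntuple));
    ntuple.flags = RTE_5TUPLE_FLAGS;
    ntuple.dst_ip = rte_cpu_to_be_32(IPv4(192, 168, 0, 1));
    ntuple.dst_ip_mask = UINT32_MAX;        /* exact match */
    ntuple.src_ip = 0;
    ntuple.src_ip_mask = 0;                 /* wildcard source */
    ntuple.dst_port = rte_cpu_to_be_16(80);
    ntuple.dst_port_mask = UINT16_MAX;
    ntuple.src_port_mask = 0;
    ntuple.proto = IPPROTO_TCP;
    ntuple.proto_mask = UINT8_MAX;
    ntuple.priority = 1;
    ntuple.queue = 3;
    ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
                                  RTE_ETH_FILTER_ADD, &ntuple);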
+
+/**
+ * Tunneled type.
+ */
+enum rte_eth_tunnel_type {
+ RTE_TUNNEL_TYPE_NONE = 0,
+ RTE_TUNNEL_TYPE_VXLAN,
+ RTE_TUNNEL_TYPE_GENEVE,
+ RTE_TUNNEL_TYPE_TEREDO,
+ RTE_TUNNEL_TYPE_NVGRE,
+ RTE_TUNNEL_TYPE_MAX,
+};
+
+/**
+ * Filter types for tunneled packets.
+ */
+#define ETH_TUNNEL_FILTER_OMAC 0x01 /**< filter by outer MAC addr */
+#define ETH_TUNNEL_FILTER_OIP 0x02 /**< filter by outer IP Addr */
+#define ETH_TUNNEL_FILTER_TENID 0x04 /**< filter by tenant ID */
+#define ETH_TUNNEL_FILTER_IMAC 0x08 /**< filter by inner MAC addr */
+#define ETH_TUNNEL_FILTER_IVLAN 0x10 /**< filter by inner VLAN ID */
+#define ETH_TUNNEL_FILTER_IIP 0x20 /**< filter by inner IP addr */
+
+#define RTE_TUNNEL_FILTER_IMAC_IVLAN (ETH_TUNNEL_FILTER_IMAC | \
+ ETH_TUNNEL_FILTER_IVLAN)
+#define RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID (ETH_TUNNEL_FILTER_IMAC | \
+ ETH_TUNNEL_FILTER_IVLAN | \
+ ETH_TUNNEL_FILTER_TENID)
+#define RTE_TUNNEL_FILTER_IMAC_TENID (ETH_TUNNEL_FILTER_IMAC | \
+ ETH_TUNNEL_FILTER_TENID)
+#define RTE_TUNNEL_FILTER_OMAC_TENID_IMAC (ETH_TUNNEL_FILTER_OMAC | \
+ ETH_TUNNEL_FILTER_TENID | \
+ ETH_TUNNEL_FILTER_IMAC)
+
+/**
+ * Select IPv4 or IPv6 for tunnel filters.
+ */
+enum rte_tunnel_iptype {
+ RTE_TUNNEL_IPTYPE_IPV4 = 0, /**< IPv4. */
+ RTE_TUNNEL_IPTYPE_IPV6, /**< IPv6. */
+};
+
+/**
+ * Tunneling Packet filter configuration.
+ */
+struct rte_eth_tunnel_filter_conf {
+ struct ether_addr *outer_mac; /**< Outer MAC address filter. */
+ struct ether_addr *inner_mac; /**< Inner MAC address filter. */
+ uint16_t inner_vlan; /**< Inner VLAN filter. */
+ enum rte_tunnel_iptype ip_type; /**< IP address type. */
+ union {
+ uint32_t ipv4_addr; /**< IPv4 source address to match. */
+ uint32_t ipv6_addr[4]; /**< IPv6 source address to match. */
+ } ip_addr; /**< IPv4/IPv6 source address to match (union of above). */
+
+ uint16_t filter_type; /**< Filter type. */
+ enum rte_eth_tunnel_type tunnel_type; /**< Tunnel Type. */
+ uint32_t tenant_id; /**< Tenant number. */
+ uint16_t queue_id; /**< Queue number. */
+};
+
+/**
+ * Global eth device configuration type.
+ */
+enum rte_eth_global_cfg_type {
+ RTE_ETH_GLOBAL_CFG_TYPE_UNKNOWN = 0,
+ RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN,
+ RTE_ETH_GLOBAL_CFG_TYPE_MAX,
+};
+
+/**
+ * Global eth device configuration.
+ */
+struct rte_eth_global_cfg {
+ enum rte_eth_global_cfg_type cfg_type; /**< Global config type. */
+ union {
+		uint8_t gre_key_len; /**< Valid GRE key length in bytes. */
+ uint64_t reserved; /**< Reserve space for future use. */
+ } cfg;
+};
+
+#define RTE_ETH_FDIR_MAX_FLEXLEN 16 /**< Max length of flexbytes. */
+#define RTE_ETH_INSET_SIZE_MAX 128 /**< Max length of input set. */
+
+/**
+ * Input set fields for Flow Director and Hash filters
+ */
+enum rte_eth_input_set_field {
+ RTE_ETH_INPUT_SET_UNKNOWN = 0,
+
+ /* L2 */
+ RTE_ETH_INPUT_SET_L2_SRC_MAC = 1,
+ RTE_ETH_INPUT_SET_L2_DST_MAC,
+ RTE_ETH_INPUT_SET_L2_OUTER_VLAN,
+ RTE_ETH_INPUT_SET_L2_INNER_VLAN,
+ RTE_ETH_INPUT_SET_L2_ETHERTYPE,
+
+ /* L3 */
+ RTE_ETH_INPUT_SET_L3_SRC_IP4 = 129,
+ RTE_ETH_INPUT_SET_L3_DST_IP4,
+ RTE_ETH_INPUT_SET_L3_SRC_IP6,
+ RTE_ETH_INPUT_SET_L3_DST_IP6,
+ RTE_ETH_INPUT_SET_L3_IP4_TOS,
+ RTE_ETH_INPUT_SET_L3_IP4_PROTO,
+ RTE_ETH_INPUT_SET_L3_IP6_TC,
+ RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
+
+ /* L4 */
+ RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT = 257,
+ RTE_ETH_INPUT_SET_L4_UDP_DST_PORT,
+ RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT,
+ RTE_ETH_INPUT_SET_L4_TCP_DST_PORT,
+ RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT,
+ RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT,
+ RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
+
+ /* Tunnel */
+ RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC = 385,
+ RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_SRC_MAC,
+ RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
+ RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
+ RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY,
+
+ /* Flexible Payload */
+ RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD = 641,
+ RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
+ RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
+ RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
+ RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
+ RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
+ RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
+ RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
+
+ RTE_ETH_INPUT_SET_DEFAULT = 65533,
+ RTE_ETH_INPUT_SET_NONE = 65534,
+ RTE_ETH_INPUT_SET_MAX = 65535,
+};
+
+/**
+ * Filters input set operations
+ */
+enum rte_filter_input_set_op {
+ RTE_ETH_INPUT_SET_OP_UNKNOWN,
+ RTE_ETH_INPUT_SET_SELECT, /**< select input set */
+ RTE_ETH_INPUT_SET_ADD, /**< add input set entry */
+ RTE_ETH_INPUT_SET_OP_MAX
+};
+
+
+/**
+ * A structure used to define the input set configuration for
+ * flow director and hash filters
+ */
+struct rte_eth_input_set_conf {
+ uint16_t flow_type;
+ uint16_t inset_size;
+ enum rte_eth_input_set_field field[RTE_ETH_INSET_SIZE_MAX];
+ enum rte_filter_input_set_op op;
+};
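
A sketch of how this structure travels through the filter_ctrl API: to select
the flow director input set for IPv4/UDP it is embedded in struct
rte_eth_fdir_filter_info (defined later in this file) and passed with
RTE_ETH_FILTER_SET. Assumes an initialized port_id on hardware whose PMD
supports input set configuration (e.g. i40e):

    struct rte_eth_fdir_filter_info info;
    int ret;

    memset(&info, 0, sizeof(info));
    info.info_type = RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT;
    info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
    info.info.input_set_conf.inset_size = 2;
    info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
    info.info.input_set_conf.field[1] = RTE_ETH_INPUT_SET_L3_DST_IP4;
    info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
    ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                                  RTE_ETH_FILTER_SET, &info);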
+
+/**
+ * A structure used to define the input for L2 flow
+ */
+struct rte_eth_l2_flow {
+ uint16_t ether_type; /**< Ether type to match */
+};
+
+/**
+ * A structure used to define the input for IPV4 flow
+ */
+struct rte_eth_ipv4_flow {
+ uint32_t src_ip; /**< IPv4 source address to match. */
+ uint32_t dst_ip; /**< IPv4 destination address to match. */
+};
+
+/**
+ * A structure used to define the input for IPV4 UDP flow
+ */
+struct rte_eth_udpv4_flow {
+ struct rte_eth_ipv4_flow ip; /**< IPv4 fields to match. */
+ uint16_t src_port; /**< UDP source port to match. */
+ uint16_t dst_port; /**< UDP destination port to match. */
+};
+
+/**
+ * A structure used to define the input for IPV4 TCP flow
+ */
+struct rte_eth_tcpv4_flow {
+ struct rte_eth_ipv4_flow ip; /**< IPv4 fields to match. */
+ uint16_t src_port; /**< TCP source port to match. */
+ uint16_t dst_port; /**< TCP destination port to match. */
+};
+
+/**
+ * A structure used to define the input for IPV4 SCTP flow
+ */
+struct rte_eth_sctpv4_flow {
+ struct rte_eth_ipv4_flow ip; /**< IPv4 fields to match. */
+ uint16_t src_port; /**< SCTP source port to match. */
+ uint16_t dst_port; /**< SCTP destination port to match. */
+ uint32_t verify_tag; /**< Verify tag to match */
+};
+
+/**
+ * A structure used to define the input for IPV6 flow
+ */
+struct rte_eth_ipv6_flow {
+ uint32_t src_ip[4]; /**< IPv6 source address to match. */
+ uint32_t dst_ip[4]; /**< IPv6 destination address to match. */
+};
+
+/**
+ * A structure used to define the input for IPV6 UDP flow
+ */
+struct rte_eth_udpv6_flow {
+ struct rte_eth_ipv6_flow ip; /**< IPv6 fields to match. */
+ uint16_t src_port; /**< UDP source port to match. */
+ uint16_t dst_port; /**< UDP destination port to match. */
+};
+
+/**
+ * A structure used to define the input for IPV6 TCP flow
+ */
+struct rte_eth_tcpv6_flow {
+ struct rte_eth_ipv6_flow ip; /**< IPv6 fields to match. */
+ uint16_t src_port; /**< TCP source port to match. */
+ uint16_t dst_port; /**< TCP destination port to match. */
+};
+
+/**
+ * A structure used to define the input for IPV6 SCTP flow
+ */
+struct rte_eth_sctpv6_flow {
+ struct rte_eth_ipv6_flow ip; /**< IPv6 fields to match. */
+ uint16_t src_port; /**< SCTP source port to match. */
+ uint16_t dst_port; /**< SCTP destination port to match. */
+ uint32_t verify_tag; /**< Verify tag to match */
+};
+
+/**
+ * A structure used to define the input for MAC VLAN flow
+ */
+struct rte_eth_mac_vlan_flow {
+	struct ether_addr mac_addr; /**< MAC address to match. */
+};
+
+/**
+ * Tunnel type for flow director.
+ */
+enum rte_eth_fdir_tunnel_type {
+ RTE_FDIR_TUNNEL_TYPE_UNKNOWN = 0,
+ RTE_FDIR_TUNNEL_TYPE_NVGRE,
+ RTE_FDIR_TUNNEL_TYPE_VXLAN,
+};
+
+/**
+ * A structure used to define the input for a tunnel flow; currently
+ * VXLAN or NVGRE
+ */
+struct rte_eth_tunnel_flow {
+ enum rte_eth_fdir_tunnel_type tunnel_type; /**< Tunnel type to match. */
+ uint32_t tunnel_id; /**< Tunnel ID to match. TNI, VNI... */
+	struct ether_addr mac_addr; /**< MAC address to match. */
+};
+
+/**
+ * A union containing the inputs for all flow types
+ */
+union rte_eth_fdir_flow {
+ struct rte_eth_l2_flow l2_flow;
+ struct rte_eth_udpv4_flow udp4_flow;
+ struct rte_eth_tcpv4_flow tcp4_flow;
+ struct rte_eth_sctpv4_flow sctp4_flow;
+ struct rte_eth_ipv4_flow ip4_flow;
+ struct rte_eth_udpv6_flow udp6_flow;
+ struct rte_eth_tcpv6_flow tcp6_flow;
+ struct rte_eth_sctpv6_flow sctp6_flow;
+ struct rte_eth_ipv6_flow ipv6_flow;
+ struct rte_eth_mac_vlan_flow mac_vlan_flow;
+ struct rte_eth_tunnel_flow tunnel_flow;
+};
+
+/**
+ * A structure used to contain extended flow input
+ */
+struct rte_eth_fdir_flow_ext {
+ uint16_t vlan_tci;
+ uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
+	/**< Flexible payload bytes to match. */
+ uint8_t is_vf; /**< 1 for VF, 0 for port dev */
+	uint16_t dst_id; /**< VF ID, available when is_vf is 1 */
+};
+
+/**
+ * A structure used to define the input for a flow director filter entry
+ */
+struct rte_eth_fdir_input {
+ uint16_t flow_type;
+ union rte_eth_fdir_flow flow;
+ /**< Flow fields to match, dependent on flow_type */
+ struct rte_eth_fdir_flow_ext flow_ext;
+ /**< Additional fields to match */
+};
+
+/**
+ * Behavior to take on an FDIR match
+ */
+enum rte_eth_fdir_behavior {
+ RTE_ETH_FDIR_ACCEPT = 0,
+ RTE_ETH_FDIR_REJECT,
+ RTE_ETH_FDIR_PASSTHRU,
+};
+
+/**
+ * Flow director report status
+ * It defines what will be reported if FDIR entry is matched.
+ */
+enum rte_eth_fdir_status {
+ RTE_ETH_FDIR_NO_REPORT_STATUS = 0, /**< Report nothing. */
+ RTE_ETH_FDIR_REPORT_ID, /**< Only report FD ID. */
+ RTE_ETH_FDIR_REPORT_ID_FLEX_4, /**< Report FD ID and 4 flex bytes. */
+ RTE_ETH_FDIR_REPORT_FLEX_8, /**< Report 8 flex bytes. */
+};
+
+/**
+ * A structure used to define the action taken when an FDIR filter matches.
+ */
+struct rte_eth_fdir_action {
+	uint16_t rx_queue; /**< Queue to assign the packet to on FDIR match. */
+	enum rte_eth_fdir_behavior behavior; /**< Behavior to take */
+ enum rte_eth_fdir_status report_status; /**< Status report option */
+ uint8_t flex_off;
+ /**< If report_status is RTE_ETH_FDIR_REPORT_ID_FLEX_4 or
+ RTE_ETH_FDIR_REPORT_FLEX_8, flex_off specifies where the reported
+ flex bytes start from in flexible payload. */
+};
+
+/**
+ * A structure used to define a flow director filter entry through the filter_ctrl API.
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_ADD and
+ * RTE_ETH_FILTER_DELETE operations.
+ */
+struct rte_eth_fdir_filter {
+ uint32_t soft_id;
+	/**< ID; a unique value is required for each FDIR entry */
+ struct rte_eth_fdir_input input; /**< Input set */
+ struct rte_eth_fdir_action action; /**< Action taken when match */
+};
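
A sketch of adding a perfect-mode flow director entry that steers one IPv4/UDP
flow to queue 1. The addresses and ports are made-up examples; assumes port_id
was configured with fdir_conf.mode = RTE_FDIR_MODE_PERFECT and the PMD supports
RTE_ETH_FILTER_FDIR:

    struct rte_eth_fdir_filter entry;
    int ret;

    memset(&entry, 0, sizeof(entry));
    entry.soft_id = 1;                      /* application-chosen unique ID */
    entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
    entry.input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 1));
    entry.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 2));
    entry.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(5000);
    entry.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(5001);
    entry.action.rx_queue = 1;
    entry.action.behavior = RTE_ETH_FDIR_ACCEPT;
    entry.action.report_status = RTE_ETH_FDIR_REPORT_ID;
    ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                                  RTE_ETH_FILTER_ADD, &entry);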
+
+/**
+ * A structure used to configure FDIR masks that are used by the device
+ * to match the various fields of RX packet headers.
+ */
+struct rte_eth_fdir_masks {
+ uint16_t vlan_tci_mask;
+ struct rte_eth_ipv4_flow ipv4_mask;
+ struct rte_eth_ipv6_flow ipv6_mask;
+ uint16_t src_port_mask;
+ uint16_t dst_port_mask;
+	uint8_t mac_addr_byte_mask; /**< Per-byte MAC address mask */
+	uint32_t tunnel_id_mask; /**< Tunnel ID mask */
+ uint8_t tunnel_type_mask;
+};
+
+/**
+ * Payload type
+ */
+enum rte_eth_payload_type {
+ RTE_ETH_PAYLOAD_UNKNOWN = 0,
+ RTE_ETH_RAW_PAYLOAD,
+ RTE_ETH_L2_PAYLOAD,
+ RTE_ETH_L3_PAYLOAD,
+ RTE_ETH_L4_PAYLOAD,
+ RTE_ETH_PAYLOAD_MAX = 8,
+};
+
+/**
+ * A structure used to select bytes extracted from the protocol layers to
+ * flexible payload for filter
+ */
+struct rte_eth_flex_payload_cfg {
+ enum rte_eth_payload_type type; /**< Payload type */
+ uint16_t src_offset[RTE_ETH_FDIR_MAX_FLEXLEN];
+	/**< Offsets in bytes from the beginning of the packet's payload.
+	     src_offset[i] indicates flexbyte i's offset in the original
+	     packet payload. Each value should be less than
+	     flex_payload_limit in struct rte_eth_fdir_info. */
+};
+
+/**
+ * A structure used to define FDIR masks for flexible payload
+ * for each flow type
+ */
+struct rte_eth_fdir_flex_mask {
+ uint16_t flow_type;
+ uint8_t mask[RTE_ETH_FDIR_MAX_FLEXLEN];
+ /**< Mask for the whole flexible payload */
+};
+
+/**
+ * A structure used to define all flexible payload related settings,
+ * including the flex payload configuration and flex masks
+ */
+struct rte_eth_fdir_flex_conf {
+	uint16_t nb_payloads; /**< Number of payload configurations that follow */
+	uint16_t nb_flexmasks; /**< Number of flex masks that follow */
+ struct rte_eth_flex_payload_cfg flex_set[RTE_ETH_PAYLOAD_MAX];
+ /**< Flex payload configuration for each payload type */
+ struct rte_eth_fdir_flex_mask flex_mask[RTE_ETH_FLOW_MAX];
+ /**< Flex mask configuration for each flow type */
+};
+
+/**
+ * Flow Director setting modes: none, signature or perfect.
+ */
+enum rte_fdir_mode {
+ RTE_FDIR_MODE_NONE = 0, /**< Disable FDIR support. */
+ RTE_FDIR_MODE_SIGNATURE, /**< Enable FDIR signature filter mode. */
+ RTE_FDIR_MODE_PERFECT, /**< Enable FDIR perfect filter mode. */
+ RTE_FDIR_MODE_PERFECT_MAC_VLAN, /**< Enable FDIR filter mode - MAC VLAN. */
+ RTE_FDIR_MODE_PERFECT_TUNNEL, /**< Enable FDIR filter mode - tunnel. */
+};
+
+#define UINT32_BIT (CHAR_BIT * sizeof(uint32_t))
+#define RTE_FLOW_MASK_ARRAY_SIZE \
+ (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
+
+/**
+ * A structure used to get the information of flow director filter.
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_INFO operation.
+ * It includes the mode, flexible payload configuration information,
+ * capabilities, supported flow types, and flexible payload characteristics.
+ * It can be queried to guide device-specific configuration.
+ */
+struct rte_eth_fdir_info {
+ enum rte_fdir_mode mode; /**< Flow director mode */
+ struct rte_eth_fdir_masks mask;
+ /** Flex payload configuration information */
+ struct rte_eth_fdir_flex_conf flex_conf;
+ uint32_t guarant_spc; /**< Guaranteed spaces.*/
+ uint32_t best_spc; /**< Best effort spaces.*/
+ /** Bit mask for every supported flow type. */
+ uint32_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
+ uint32_t max_flexpayload; /**< Total flex payload in bytes. */
+	/** Flexible payload unit in bytes. Sizes and alignments of all flex
+	    payload segments should be multiples of this value. */
+ uint32_t flex_payload_unit;
+ /** Max number of flexible payload continuous segments.
+ Each segment should be a multiple of flex_payload_unit.*/
+ uint32_t max_flex_payload_segment_num;
+ /** Maximum src_offset in bytes allowed. It indicates that
+ src_offset[i] in struct rte_eth_flex_payload_cfg should be less
+ than this value. */
+ uint16_t flex_payload_limit;
+	/** Flex bitmask unit in bytes. The size of flex bitmasks should be a
+	    multiple of this value. */
+ uint32_t flex_bitmask_unit;
+ /** Max supported size of flex bitmasks in flex_bitmask_unit */
+ uint32_t max_flex_bitmask_num;
+};
+
+/**
+ * A structure used to define the statistics of flow director.
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_STATS operation.
+ */
+struct rte_eth_fdir_stats {
+ uint32_t collision; /**< Number of filters with collision. */
+ uint32_t free; /**< Number of free filters. */
+ uint32_t maxhash;
+ /**< The lookup hash value of the added filter that updated the value
+ of the MAXLEN field */
+ uint32_t maxlen; /**< Longest linked list of filters. */
+ uint64_t add; /**< Number of added filters. */
+ uint64_t remove; /**< Number of removed filters. */
+ uint64_t f_add; /**< Number of failed added filters. */
+ uint64_t f_remove; /**< Number of failed removed filters. */
+ uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
+ uint32_t best_cnt; /**< Number of filters in best effort spaces. */
+};
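
Both structures are read back through the same control path; a short sketch,
assuming an initialized port_id with flow director enabled:

    struct rte_eth_fdir_info fdir_info;
    struct rte_eth_fdir_stats fdir_stat;

    memset(&fdir_info, 0, sizeof(fdir_info));
    memset(&fdir_stat, 0, sizeof(fdir_stat));
    rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                            RTE_ETH_FILTER_INFO, &fdir_info);
    rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                            RTE_ETH_FILTER_STATS, &fdir_stat);
    printf("fdir: mode=%d guaranteed=%u/%u best-effort=%u/%u\n",
           fdir_info.mode, fdir_stat.guarant_cnt, fdir_info.guarant_spc,
           fdir_stat.best_cnt, fdir_info.best_spc);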
+
+/**
+ * Flow Director filter information types.
+ */
+enum rte_eth_fdir_filter_info_type {
+ RTE_ETH_FDIR_FILTER_INFO_TYPE_UNKNOWN = 0,
+ /** Flow Director filter input set configuration */
+ RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT,
+ RTE_ETH_FDIR_FILTER_INFO_TYPE_MAX,
+};
+
+/**
+ * A structure used to set FDIR filter information, to support the
+ * RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT operation of the
+ * 'RTE_ETH_FILTER_FDIR' filter type.
+ */
+struct rte_eth_fdir_filter_info {
+ enum rte_eth_fdir_filter_info_type info_type; /**< Information type */
+ /** Details of fdir filter information */
+ union {
+ /** Flow Director input set configuration per port */
+ struct rte_eth_input_set_conf input_set_conf;
+ } info;
+};
+
+/**
+ * Hash filter information types.
+ * - RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT is for getting/setting the
+ * information/configuration of 'symmetric hash enable' per port.
+ * - RTE_ETH_HASH_FILTER_GLOBAL_CONFIG is for getting/setting the global
+ * configurations of hash filters. Those global configurations are valid
+ * for all ports of the same NIC.
+ * - RTE_ETH_HASH_FILTER_INPUT_SET_SELECT is for setting the global
+ * hash input set fields
+ */
+enum rte_eth_hash_filter_info_type {
+ RTE_ETH_HASH_FILTER_INFO_TYPE_UNKNOWN = 0,
+ /** Symmetric hash enable per port */
+ RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT,
+ /** Configure globally for hash filter */
+ RTE_ETH_HASH_FILTER_GLOBAL_CONFIG,
+ /** Global Hash filter input set configuration */
+ RTE_ETH_HASH_FILTER_INPUT_SET_SELECT,
+ RTE_ETH_HASH_FILTER_INFO_TYPE_MAX,
+};
+
+/**
+ * Hash function types.
+ */
+enum rte_eth_hash_function {
+ RTE_ETH_HASH_FUNCTION_DEFAULT = 0,
+ RTE_ETH_HASH_FUNCTION_TOEPLITZ, /**< Toeplitz */
+ RTE_ETH_HASH_FUNCTION_SIMPLE_XOR, /**< Simple XOR */
+ RTE_ETH_HASH_FUNCTION_MAX,
+};
+
+#define RTE_SYM_HASH_MASK_ARRAY_SIZE \
+ (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
+/**
+ * A structure used to set or get global hash function configurations which
+ * include symmetric hash enable per flow type and hash function type.
+ * Each bit in sym_hash_enable_mask[] indicates if the symmetric hash of the
+ * corresponding flow type is enabled or not.
+ * Each bit in valid_bit_mask[] indicates if the corresponding bit in
+ * sym_hash_enable_mask[] is valid or not. When the configuration is read
+ * back, it also indicates whether the flow type is supported by the hardware.
+ */
+struct rte_eth_hash_global_conf {
+ enum rte_eth_hash_function hash_func; /**< Hash function type */
+ /** Bit mask for symmetric hash enable per flow type */
+ uint32_t sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
+ /** Bit mask indicates if the corresponding bit is valid */
+ uint32_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
+};
+
+/**
+ * A structure used to set or get hash filter information, to support filter
+ * type of 'RTE_ETH_FILTER_HASH' and its operations.
+ */
+struct rte_eth_hash_filter_info {
+ enum rte_eth_hash_filter_info_type info_type; /**< Information type */
+ /** Details of hash filter information */
+ union {
+ /** For RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT */
+ uint8_t enable;
+ /** Global configurations of hash filter */
+ struct rte_eth_hash_global_conf global_conf;
+ /** Global configurations of hash filter input set */
+ struct rte_eth_input_set_conf input_set_conf;
+ } info;
+};
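
A sketch that applies the bit-mask convention described above: enable symmetric
Toeplitz hashing for the IPv4/UDP flow type. Assumes an initialized port_id on
hardware whose PMD supports RTE_ETH_FILTER_HASH (e.g. i40e):

    struct rte_eth_hash_filter_info info;
    uint32_t ftype = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
    uint32_t idx = ftype / UINT32_BIT;
    uint32_t offset = ftype % UINT32_BIT;
    int ret;

    memset(&info, 0, sizeof(info));
    info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
    info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
    info.info.global_conf.sym_hash_enable_mask[idx] |= (1U << offset);
    info.info.global_conf.valid_bit_mask[idx] |= (1U << offset); /* bit is meaningful */
    ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                  RTE_ETH_FILTER_SET, &info);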
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ETH_CTRL_H_ */
diff --git a/src/dpdk22/lib/librte_ether/rte_ethdev.c b/src/dpdk22/lib/librte_ether/rte_ethdev.c
new file mode 100644
index 00000000..ed971b49
--- /dev/null
+++ b/src/dpdk22/lib/librte_ether/rte_ethdev.c
@@ -0,0 +1,3241 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_errno.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "rte_ether.h"
+#include "rte_ethdev.h"
+
+static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
+struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
+static struct rte_eth_dev_data *rte_eth_dev_data;
+static uint8_t nb_ports;
+
+/* spinlock for eth device callbacks */
+static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
+
+/* store statistics names and their offsets in the stats structure */
+struct rte_eth_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
+ {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
+ {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
+ {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
+ {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
+ {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
+ {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
+ {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
+ rx_nombuf)},
+};
+
+#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
+
+static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
+ {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
+ {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
+ {"errors", offsetof(struct rte_eth_stats, q_errors)},
+};
+
+#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \
+ sizeof(rte_rxq_stats_strings[0]))
+
+static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
+ {"packets", offsetof(struct rte_eth_stats, q_opackets)},
+ {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
+};
+#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
+ sizeof(rte_txq_stats_strings[0]))
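
These name/offset tables drive the generic xstats path: a statistic is read by
adding the recorded offset to the base address of a struct rte_eth_stats. A
sketch of the lookup technique used further down in this file:

    struct rte_eth_stats stats;
    unsigned i;

    rte_eth_stats_get(port_id, &stats);     /* fill the base struct */
    for (i = 0; i < RTE_NB_STATS; i++) {
        uint64_t val = *(uint64_t *)(((char *)&stats) +
                                     rte_stats_strings[i].offset);
        printf("%s: %" PRIu64 "\n", rte_stats_strings[i].name, val);
    }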
+
+
+/**
+ * The user application callback description.
+ *
+ * It contains the callback address registered by the user application,
+ * a pointer to the callback parameters, and the event type.
+ */
+struct rte_eth_dev_callback {
+ TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
+ rte_eth_dev_cb_fn cb_fn; /**< Callback address */
+ void *cb_arg; /**< Parameter for callback */
+ enum rte_eth_event_type event; /**< Interrupt event type */
+ uint32_t active; /**< Callback is executing */
+};
+
+enum {
+ STAT_QMAP_TX = 0,
+ STAT_QMAP_RX
+};
+
+enum {
+ DEV_DETACHED = 0,
+ DEV_ATTACHED
+};
+
+static void
+rte_eth_dev_data_alloc(void)
+{
+ const unsigned flags = 0;
+ const struct rte_memzone *mz;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
+ RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
+ rte_socket_id(), flags);
+ } else
+ mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
+ if (mz == NULL)
+ rte_panic("Cannot allocate memzone for ethernet port data\n");
+
+ rte_eth_dev_data = mz->addr;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ memset(rte_eth_dev_data, 0,
+ RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
+}
+
+struct rte_eth_dev *
+rte_eth_dev_allocated(const char *name)
+{
+ unsigned i;
+
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+ if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
+ strcmp(rte_eth_devices[i].data->name, name) == 0)
+ return &rte_eth_devices[i];
+ }
+ return NULL;
+}
+
+static uint8_t
+rte_eth_dev_find_free_port(void)
+{
+ unsigned i;
+
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+ if (rte_eth_devices[i].attached == DEV_DETACHED)
+ return i;
+ }
+ return RTE_MAX_ETHPORTS;
+}
+
+struct rte_eth_dev *
+rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
+{
+ uint8_t port_id;
+ struct rte_eth_dev *eth_dev;
+
+ port_id = rte_eth_dev_find_free_port();
+ if (port_id == RTE_MAX_ETHPORTS) {
+ RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
+ return NULL;
+ }
+
+ if (rte_eth_dev_data == NULL)
+ rte_eth_dev_data_alloc();
+
+ if (rte_eth_dev_allocated(name) != NULL) {
+ RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
+ name);
+ return NULL;
+ }
+
+ eth_dev = &rte_eth_devices[port_id];
+ eth_dev->data = &rte_eth_dev_data[port_id];
+ snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
+ eth_dev->data->port_id = port_id;
+ eth_dev->attached = DEV_ATTACHED;
+ eth_dev->dev_type = type;
+ nb_ports++;
+ return eth_dev;
+}
+
+static int
+rte_eth_dev_create_unique_device_name(char *name, size_t size,
+ struct rte_pci_device *pci_dev)
+{
+ int ret;
+
+ if ((name == NULL) || (pci_dev == NULL))
+ return -EINVAL;
+
+ ret = snprintf(name, size, "%d:%d.%d",
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+int
+rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
+{
+ if (eth_dev == NULL)
+ return -EINVAL;
+
+ eth_dev->attached = DEV_DETACHED;
+ nb_ports--;
+ return 0;
+}
+
+static int
+rte_eth_dev_init(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ struct eth_driver *eth_drv;
+ struct rte_eth_dev *eth_dev;
+ char ethdev_name[RTE_ETH_NAME_MAX_LEN];
+
+ int diag;
+
+ eth_drv = (struct eth_driver *)pci_drv;
+
+ /* Create unique Ethernet device name using PCI address */
+ rte_eth_dev_create_unique_device_name(ethdev_name,
+ sizeof(ethdev_name), pci_dev);
+
+ eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
+ eth_drv->dev_private_size,
+ RTE_CACHE_LINE_SIZE);
+ if (eth_dev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private port data\n");
+ }
+ eth_dev->pci_dev = pci_dev;
+ eth_dev->driver = eth_drv;
+ eth_dev->data->rx_mbuf_alloc_failed = 0;
+
+ /* init user callbacks */
+ TAILQ_INIT(&(eth_dev->link_intr_cbs));
+
+ /*
+ * Set the default MTU.
+ */
+ eth_dev->data->mtu = ETHER_MTU;
+
+ /* Invoke PMD device initialization function */
+ diag = (*eth_drv->eth_dev_init)(eth_dev);
+ if (diag == 0)
+ return 0;
+
+ RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x) failed\n",
+ pci_drv->name,
+ (unsigned) pci_dev->id.vendor_id,
+ (unsigned) pci_dev->id.device_id);
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+ rte_eth_dev_release_port(eth_dev);
+ return diag;
+}
+
+static int
+rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
+{
+ const struct eth_driver *eth_drv;
+ struct rte_eth_dev *eth_dev;
+ char ethdev_name[RTE_ETH_NAME_MAX_LEN];
+ int ret;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ /* Create unique Ethernet device name using PCI address */
+ rte_eth_dev_create_unique_device_name(ethdev_name,
+ sizeof(ethdev_name), pci_dev);
+
+ eth_dev = rte_eth_dev_allocated(ethdev_name);
+ if (eth_dev == NULL)
+ return -ENODEV;
+
+ eth_drv = (const struct eth_driver *)pci_dev->driver;
+
+ /* Invoke PMD device uninit function */
+ if (*eth_drv->eth_dev_uninit) {
+ ret = (*eth_drv->eth_dev_uninit)(eth_dev);
+ if (ret)
+ return ret;
+ }
+
+ /* free ether device */
+ rte_eth_dev_release_port(eth_dev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+
+ eth_dev->pci_dev = NULL;
+ eth_dev->driver = NULL;
+ eth_dev->data = NULL;
+
+ return 0;
+}
+
+/**
+ * Register an Ethernet [Poll Mode] driver.
+ *
+ * Function invoked by the initialization function of an Ethernet driver
+ * to simultaneously register itself as a PCI driver and as an Ethernet
+ * Poll Mode Driver.
+ * Invokes the rte_eal_pci_register() function to register the *pci_drv*
+ * structure embedded in the *eth_drv* structure, after having stored the
+ * address of the rte_eth_dev_init() function in the *devinit* field of
+ * the *pci_drv* structure.
+ * During the PCI probing phase, the rte_eth_dev_init() function is
+ * invoked for each PCI [Ethernet device] matching the embedded PCI
+ * identifiers provided by the driver.
+ */
+void
+rte_eth_driver_register(struct eth_driver *eth_drv)
+{
+ eth_drv->pci_drv.devinit = rte_eth_dev_init;
+ eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
+ rte_eal_pci_register(&eth_drv->pci_drv);
+}
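
For illustration, a skeleton of how a PMD typically uses this hook in DPDK 2.2.
The names (rte_foo_pmd, eth_foo_dev_init, pci_id_foo_map, struct foo_adapter)
are hypothetical:

    static struct eth_driver rte_foo_pmd = {
        .pci_drv = {
            .name = "rte_foo_pmd",
            .id_table = pci_id_foo_map,     /* hypothetical PCI ID table */
            .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
        },
        .eth_dev_init = eth_foo_dev_init,       /* hypothetical init callback */
        .eth_dev_uninit = eth_foo_dev_uninit,   /* hypothetical uninit callback */
        .dev_private_size = sizeof(struct foo_adapter),
    };

    static int
    rte_foo_pmd_init(const char *name __rte_unused,
                     const char *params __rte_unused)
    {
        rte_eth_driver_register(&rte_foo_pmd);
        return 0;
    }

    static struct rte_driver foo_pmd_drv = {
        .type = PMD_PDEV,
        .init = rte_foo_pmd_init,
    };

    PMD_REGISTER_DRIVER(foo_pmd_drv);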
+
+int
+rte_eth_dev_is_valid_port(uint8_t port_id)
+{
+ if (port_id >= RTE_MAX_ETHPORTS ||
+ rte_eth_devices[port_id].attached != DEV_ATTACHED)
+ return 0;
+ else
+ return 1;
+}
+
+int
+rte_eth_dev_socket_id(uint8_t port_id)
+{
+ if (!rte_eth_dev_is_valid_port(port_id))
+ return -1;
+ return rte_eth_devices[port_id].data->numa_node;
+}
+
+uint8_t
+rte_eth_dev_count(void)
+{
+ return nb_ports;
+}
+
+static enum rte_eth_dev_type
+rte_eth_dev_get_device_type(uint8_t port_id)
+{
+ if (!rte_eth_dev_is_valid_port(port_id))
+ return RTE_ETH_DEV_UNKNOWN;
+ return rte_eth_devices[port_id].dev_type;
+}
+
+static int
+rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
+{
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ if (addr == NULL) {
+ RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
+ return -EINVAL;
+ }
+
+ *addr = rte_eth_devices[port_id].pci_dev->addr;
+ return 0;
+}
+
+static int
+rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
+{
+ char *tmp;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ if (name == NULL) {
+ RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
+ return -EINVAL;
+ }
+
+	/* shouldn't check 'rte_eth_devices[i].data',
+	 * because it might be overwritten by a VDEV PMD */
+ tmp = rte_eth_dev_data[port_id].name;
+ strcpy(name, tmp);
+ return 0;
+}
+
+static int
+rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
+{
+ int i;
+
+ if (name == NULL) {
+ RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
+ return -EINVAL;
+ }
+
+ *port_id = RTE_MAX_ETHPORTS;
+
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+
+ if (!strncmp(name,
+ rte_eth_dev_data[i].name, strlen(name))) {
+
+ *port_id = i;
+
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+static int
+rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id)
+{
+ int i;
+ struct rte_pci_device *pci_dev = NULL;
+
+ if (addr == NULL) {
+ RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
+ return -EINVAL;
+ }
+
+ *port_id = RTE_MAX_ETHPORTS;
+
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+
+ pci_dev = rte_eth_devices[i].pci_dev;
+
+ if (pci_dev &&
+ !rte_eal_compare_pci_addr(&pci_dev->addr, addr)) {
+
+ *port_id = i;
+
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+static int
+rte_eth_dev_is_detachable(uint8_t port_id)
+{
+ uint32_t dev_flags;
+
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -EINVAL;
+ }
+
+ switch (rte_eth_devices[port_id].data->kdrv) {
+ case RTE_KDRV_IGB_UIO:
+ case RTE_KDRV_UIO_GENERIC:
+ case RTE_KDRV_NIC_UIO:
+ case RTE_KDRV_NONE:
+ break;
+ case RTE_KDRV_VFIO:
+ default:
+ return -ENOTSUP;
+ }
+ dev_flags = rte_eth_devices[port_id].data->dev_flags;
+ return !(dev_flags & RTE_ETH_DEV_DETACHABLE);
+}
+
+/* attach the new physical device, then store port_id of the device */
+static int
+rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
+{
+ if ((addr == NULL) || (port_id == NULL))
+ goto err;
+
+ /* re-construct pci_device_list */
+ if (rte_eal_pci_scan())
+ goto err;
+	/* Invoke the probe function of the driver that can handle the new device. */
+ if (rte_eal_pci_probe_one(addr))
+ goto err;
+
+ if (rte_eth_dev_get_port_by_addr(addr, port_id))
+ goto err;
+
+ return 0;
+err:
+ RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
+ return -1;
+}
+
+/* detach a physical device, then store the pci_addr of the device */
+static int
+rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
+{
+ struct rte_pci_addr freed_addr;
+ struct rte_pci_addr vp;
+
+ if (addr == NULL)
+ goto err;
+
+ /* check whether the driver supports detach feature, or not */
+ if (rte_eth_dev_is_detachable(port_id))
+ goto err;
+
+ /* get pci address by port id */
+ if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
+ goto err;
+
+ /* Zeroed pci addr means the port comes from virtual device */
+ vp.domain = vp.bus = vp.devid = vp.function = 0;
+ if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
+ goto err;
+
+ /* invoke devuninit func of the pci driver,
+ * also remove the device from pci_device_list */
+ if (rte_eal_pci_detach(&freed_addr))
+ goto err;
+
+ *addr = freed_addr;
+ return 0;
+err:
+ RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
+ return -1;
+}
+
+/* attach the new virtual device, then store port_id of the device */
+static int
+rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
+{
+ char *name = NULL, *args = NULL;
+ int ret = -1;
+
+ if ((vdevargs == NULL) || (port_id == NULL))
+ goto end;
+
+ /* parse vdevargs, then retrieve device name and args */
+ if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
+ goto end;
+
+	/* walk through dev_driver_list to find the driver for the device,
+	 * then invoke the probe function of that driver.
+	 * rte_eal_vdev_init() updates port_id with the value allocated
+	 * during initialization.
+	 */
+ if (rte_eal_vdev_init(name, args))
+ goto end;
+
+ if (rte_eth_dev_get_port_by_name(name, port_id))
+ goto end;
+
+ ret = 0;
+end:
+ if (name)
+ free(name);
+ if (args)
+ free(args);
+
+ if (ret < 0)
+ RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
+ return ret;
+}
+
+/* detach a virtual device, then store the name of the device */
+static int
+rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
+{
+ char name[RTE_ETH_NAME_MAX_LEN];
+
+ if (vdevname == NULL)
+ goto err;
+
+ /* check whether the driver supports detach feature, or not */
+ if (rte_eth_dev_is_detachable(port_id))
+ goto err;
+
+ /* get device name by port id */
+ if (rte_eth_dev_get_name_by_port(port_id, name))
+ goto err;
+	/* walk through dev_driver_list to find the driver for the device,
+	 * then invoke the uninit function of that driver */
+ if (rte_eal_vdev_uninit(name))
+ goto err;
+
+ strncpy(vdevname, name, sizeof(name));
+ return 0;
+err:
+ RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
+ return -1;
+}
+
+/* attach the new device, then store port_id of the device */
+int
+rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
+{
+ struct rte_pci_addr addr;
+
+ if ((devargs == NULL) || (port_id == NULL))
+ return -EINVAL;
+
+ if (eal_parse_pci_DomBDF(devargs, &addr) == 0)
+ return rte_eth_dev_attach_pdev(&addr, port_id);
+ else
+ return rte_eth_dev_attach_vdev(devargs, port_id);
+}
+
+/* detach the device, then store the name of the device */
+int
+rte_eth_dev_detach(uint8_t port_id, char *name)
+{
+ struct rte_pci_addr addr;
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
+ ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
+ if (ret < 0)
+ return ret;
+
+ ret = rte_eth_dev_detach_pdev(port_id, &addr);
+ if (ret == 0)
+ snprintf(name, RTE_ETH_NAME_MAX_LEN,
+ "%04x:%02x:%02x.%d",
+ addr.domain, addr.bus,
+ addr.devid, addr.function);
+
+ return ret;
+ } else
+ return rte_eth_dev_detach_vdev(port_id, name);
+}
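
A usage sketch for hotplug built on the pair above; the PCI address and ring
vdev name are made-up examples:

    uint8_t port_id;
    char name[RTE_ETH_NAME_MAX_LEN];

    /* attach a physical device by PCI address ... */
    if (rte_eth_dev_attach("0000:02:00.0", &port_id) == 0)
        printf("attached as port %u\n", port_id);

    /* ... or a virtual device by devargs */
    if (rte_eth_dev_attach("eth_ring0", &port_id) == 0)
        printf("attached as port %u\n", port_id);

    /* stop and close the port before detaching it */
    rte_eth_dev_stop(port_id);
    rte_eth_dev_close(port_id);
    if (rte_eth_dev_detach(port_id, name) == 0)
        printf("detached %s\n", name);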
+
+static int
+rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+ uint16_t old_nb_queues = dev->data->nb_rx_queues;
+ void **rxq;
+ unsigned i;
+
+ if (dev->data->rx_queues == NULL) { /* first time configuration */
+ dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
+ sizeof(dev->data->rx_queues[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (dev->data->rx_queues == NULL) {
+ dev->data->nb_rx_queues = 0;
+ return -(ENOMEM);
+ }
+ } else { /* re-configure */
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
+
+ rxq = dev->data->rx_queues;
+
+ for (i = nb_queues; i < old_nb_queues; i++)
+ (*dev->dev_ops->rx_queue_release)(rxq[i]);
+ rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (rxq == NULL)
+ return -(ENOMEM);
+ if (nb_queues > old_nb_queues) {
+ uint16_t new_qs = nb_queues - old_nb_queues;
+
+ memset(rxq + old_nb_queues, 0,
+ sizeof(rxq[0]) * new_qs);
+ }
+
+ dev->data->rx_queues = rxq;
+
+ }
+ dev->data->nb_rx_queues = nb_queues;
+ return 0;
+}
+
+int
+rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+	 * in a multi-process setup */
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
+
+ if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
+ RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
+ " already started\n",
+ rx_queue_id, port_id);
+ return 0;
+ }
+
+ return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
+
+}
+
+int
+rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+	 * in a multi-process setup */
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
+
+ if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
+ RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
+ " already stopped\n",
+ rx_queue_id, port_id);
+ return 0;
+ }
+
+ return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
+
+}
+
+int
+rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+	 * in a multi-process setup */
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+ if (tx_queue_id >= dev->data->nb_tx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
+
+ if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
+ RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
+ " already started\n",
+ tx_queue_id, port_id);
+ return 0;
+ }
+
+ return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
+
+}
+
+int
+rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+	 * in a multi-process setup */
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+ if (tx_queue_id >= dev->data->nb_tx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
+
+ if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
+ RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
+ " already stopped\n",
+ tx_queue_id, port_id);
+ return 0;
+ }
+
+ return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
+
+}
+
+static int
+rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+ uint16_t old_nb_queues = dev->data->nb_tx_queues;
+ void **txq;
+ unsigned i;
+
+ if (dev->data->tx_queues == NULL) { /* first time configuration */
+ dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
+ sizeof(dev->data->tx_queues[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (dev->data->tx_queues == NULL) {
+ dev->data->nb_tx_queues = 0;
+ return -(ENOMEM);
+ }
+ } else { /* re-configure */
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
+
+ txq = dev->data->tx_queues;
+
+ for (i = nb_queues; i < old_nb_queues; i++)
+ (*dev->dev_ops->tx_queue_release)(txq[i]);
+ txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (txq == NULL)
+ return -ENOMEM;
+ if (nb_queues > old_nb_queues) {
+ uint16_t new_qs = nb_queues - old_nb_queues;
+
+ memset(txq + old_nb_queues, 0,
+ sizeof(txq[0]) * new_qs);
+ }
+
+ dev->data->tx_queues = txq;
+
+ }
+ dev->data->nb_tx_queues = nb_queues;
+ return 0;
+}
+
+int
+rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
+ const struct rte_eth_conf *dev_conf)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ int diag;
+
+ /* This function is only safe when called from the primary process
+	 * in a multi-process setup */
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
+ RTE_PMD_DEBUG_TRACE(
+ "Number of RX queues requested (%u) is greater than max supported(%d)\n",
+ nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
+ return -EINVAL;
+ }
+
+ if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
+ RTE_PMD_DEBUG_TRACE(
+ "Number of TX queues requested (%u) is greater than max supported(%d)\n",
+ nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+
+ if (dev->data->dev_started) {
+ RTE_PMD_DEBUG_TRACE(
+ "port %d must be stopped to allow configuration\n", port_id);
+ return -EBUSY;
+ }
+
+ /*
+ * Check that the numbers of RX and TX queues are not greater
+ * than the maximum number of RX and TX queues supported by the
+ * configured device.
+ */
+ (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+ if (nb_rx_q > dev_info.max_rx_queues) {
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
+ port_id, nb_rx_q, dev_info.max_rx_queues);
+ return -EINVAL;
+ }
+ if (nb_rx_q == 0) {
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
+ return -EINVAL;
+ }
+
+ if (nb_tx_q > dev_info.max_tx_queues) {
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
+ port_id, nb_tx_q, dev_info.max_tx_queues);
+ return -EINVAL;
+ }
+ if (nb_tx_q == 0) {
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
+ return -EINVAL;
+ }
+
+ /* Copy the dev_conf parameter into the dev structure */
+ memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+
+ /*
+ * If link state interrupt is enabled, check that the
+ * device supports it.
+ */
+ if ((dev_conf->intr_conf.lsc == 1) &&
+ (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
+ RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
+ dev->data->drv_name);
+ return -EINVAL;
+ }
+
+ /*
+ * If jumbo frames are enabled, check that the maximum RX packet
+ * length is supported by the configured device.
+ */
+ if (dev_conf->rxmode.jumbo_frame == 1) {
+ if (dev_conf->rxmode.max_rx_pkt_len >
+ dev_info.max_rx_pktlen) {
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
+ " > max valid value %u\n",
+ port_id,
+ (unsigned)dev_conf->rxmode.max_rx_pkt_len,
+ (unsigned)dev_info.max_rx_pktlen);
+ return -EINVAL;
+ } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
+ " < min valid value %u\n",
+ port_id,
+ (unsigned)dev_conf->rxmode.max_rx_pkt_len,
+ (unsigned)ETHER_MIN_LEN);
+ return -EINVAL;
+ }
+ } else {
+ if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
+ dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
+ /* Use default value */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len =
+ ETHER_MAX_LEN;
+ }
+
+ /*
+ * Setup new number of RX/TX queues and reconfigure device.
+ */
+ diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
+ if (diag != 0) {
+ RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
+ port_id, diag);
+ return diag;
+ }
+
+ diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
+ if (diag != 0) {
+ RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
+ port_id, diag);
+ rte_eth_dev_rx_queue_config(dev, 0);
+ return diag;
+ }
+
+ diag = (*dev->dev_ops->dev_configure)(dev);
+ if (diag != 0) {
+ RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
+ port_id, diag);
+ rte_eth_dev_rx_queue_config(dev, 0);
+ rte_eth_dev_tx_queue_config(dev, 0);
+ return diag;
+ }
+
+ return 0;
+}
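
The canonical init sequence built on this function: configure the port, set up
one RX and one TX queue (rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup(),
defined later in this file), then start it. A minimal sketch, assuming
mbuf_pool was created earlier with rte_pktmbuf_pool_create():

    struct rte_eth_conf port_conf;
    int ret;

    memset(&port_conf, 0, sizeof(port_conf));
    port_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;

    ret = rte_eth_dev_configure(port_id, 1 /* rx */, 1 /* tx */, &port_conf);
    if (ret == 0)
        ret = rte_eth_rx_queue_setup(port_id, 0, 128,
                                     rte_eth_dev_socket_id(port_id),
                                     NULL /* default rxconf */, mbuf_pool);
    if (ret == 0)
        ret = rte_eth_tx_queue_setup(port_id, 0, 512,
                                     rte_eth_dev_socket_id(port_id),
                                     NULL /* default txconf */);
    if (ret == 0)
        ret = rte_eth_dev_start(port_id);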
+
+static void
+rte_eth_dev_config_restore(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct ether_addr addr;
+ uint16_t i;
+ uint32_t pool = 0;
+
+ dev = &rte_eth_devices[port_id];
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ if (RTE_ETH_DEV_SRIOV(dev).active)
+ pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
+
+ /* replay MAC address configuration */
+ for (i = 0; i < dev_info.max_mac_addrs; i++) {
+ addr = dev->data->mac_addrs[i];
+
+ /* skip zero address */
+ if (is_zero_ether_addr(&addr))
+ continue;
+
+ /* add address to the hardware */
+ if (*dev->dev_ops->mac_addr_add &&
+ (dev->data->mac_pool_sel[i] & (1ULL << pool)))
+ (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
+ else {
+ RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
+ port_id);
+ /* exit the loop but not return an error */
+ break;
+ }
+ }
+
+ /* replay promiscuous configuration */
+ if (rte_eth_promiscuous_get(port_id) == 1)
+ rte_eth_promiscuous_enable(port_id);
+ else if (rte_eth_promiscuous_get(port_id) == 0)
+ rte_eth_promiscuous_disable(port_id);
+
+ /* replay all multicast configuration */
+ if (rte_eth_allmulticast_get(port_id) == 1)
+ rte_eth_allmulticast_enable(port_id);
+ else if (rte_eth_allmulticast_get(port_id) == 0)
+ rte_eth_allmulticast_disable(port_id);
+}
+
+int
+rte_eth_dev_start(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+ int diag;
+
+ /* This function is only safe when called from the primary process
+	 * in a multi-process setup */
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
+
+ if (dev->data->dev_started != 0) {
+ RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
+ " already started\n",
+ port_id);
+ return 0;
+ }
+
+ diag = (*dev->dev_ops->dev_start)(dev);
+ if (diag == 0)
+ dev->data->dev_started = 1;
+ else
+ return diag;
+
+ rte_eth_dev_config_restore(port_id);
+
+ if (dev->data->dev_conf.intr_conf.lsc == 0) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
+ (*dev->dev_ops->link_update)(dev, 0);
+ }
+ return 0;
+}
+
+void
+rte_eth_dev_stop(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+	 * in a multi-process setup */
+ RTE_PROC_PRIMARY_OR_RET();
+
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
+
+ if (dev->data->dev_started == 0) {
+ RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
+ " already stopped\n",
+ port_id);
+ return;
+ }
+
+ dev->data->dev_started = 0;
+ (*dev->dev_ops->dev_stop)(dev);
+}
+
+int
+rte_eth_dev_set_link_up(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+	 * in a multi-process setup */
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
+ return (*dev->dev_ops->dev_set_link_up)(dev);
+}
+
+int
+rte_eth_dev_set_link_down(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup. */
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
+ return (*dev->dev_ops->dev_set_link_down)(dev);
+}
+
+void
+rte_eth_dev_close(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup. */
+ RTE_PROC_PRIMARY_OR_RET();
+
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
+ dev->data->dev_started = 0;
+ (*dev->dev_ops->dev_close)(dev);
+
+ rte_free(dev->data->rx_queues);
+ dev->data->rx_queues = NULL;
+ rte_free(dev->data->tx_queues);
+ dev->data->tx_queues = NULL;
+}
+
+int
+rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ int ret;
+ uint32_t mbp_buf_size;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup. */
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_started) {
+ RTE_PMD_DEBUG_TRACE(
+ "port %d must be stopped to allow configuration\n", port_id);
+ return -EBUSY;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
+
+ /*
+ * Check the size of the mbuf data buffer.
+ * This value must be provided in the private data of the memory pool.
+ * First check that the memory pool has valid private data.
+ */
+ rte_eth_dev_info_get(port_id, &dev_info);
+ if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
+ RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
+ mp->name, (int) mp->private_data_size,
+ (int) sizeof(struct rte_pktmbuf_pool_private));
+ return -ENOSPC;
+ }
+ mbp_buf_size = rte_pktmbuf_data_room_size(mp);
+
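+ /* The data room must cover the mbuf headroom plus the smallest RX
+ * buffer the device can fill. */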
+ if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
+ RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
+ "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
+ "=%d)\n",
+ mp->name,
+ (int)mbp_buf_size,
+ (int)(RTE_PKTMBUF_HEADROOM +
+ dev_info.min_rx_bufsize),
+ (int)RTE_PKTMBUF_HEADROOM,
+ (int)dev_info.min_rx_bufsize);
+ return -EINVAL;
+ }
+
+ if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
+ nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
+ nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
+
+ RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
+ "should be: <= %hu, = %hu, and a product of %hu\n",
+ nb_rx_desc,
+ dev_info.rx_desc_lim.nb_max,
+ dev_info.rx_desc_lim.nb_min,
+ dev_info.rx_desc_lim.nb_align);
+ return -EINVAL;
+ }
+
+ if (rx_conf == NULL)
+ rx_conf = &dev_info.default_rxconf;
+
+ ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
+ socket_id, rx_conf, mp);
+ if (!ret) {
+ if (!dev->data->min_rx_buf_size ||
+ dev->data->min_rx_buf_size > mbp_buf_size)
+ dev->data->min_rx_buf_size = mbp_buf_size;
+ }
+
+ return ret;
+}
+
+int
+rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup. */
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+ if (tx_queue_id >= dev->data->nb_tx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_started) {
+ RTE_PMD_DEBUG_TRACE(
+ "port %d must be stopped to allow configuration\n", port_id);
+ return -EBUSY;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
+ nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
+ nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
+ RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
+ "should be: <= %hu, = %hu, and a product of %hu\n",
+ nb_tx_desc,
+ dev_info.tx_desc_lim.nb_max,
+ dev_info.tx_desc_lim.nb_min,
+ dev_info.tx_desc_lim.nb_align);
+ return -EINVAL;
+ }
+
+ if (tx_conf == NULL)
+ tx_conf = &dev_info.default_txconf;
+
+ return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
+ socket_id, tx_conf);
+}
+
+void
+rte_eth_promiscuous_enable(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
+ (*dev->dev_ops->promiscuous_enable)(dev);
+ dev->data->promiscuous = 1;
+}
+
+void
+rte_eth_promiscuous_disable(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
+ dev->data->promiscuous = 0;
+ (*dev->dev_ops->promiscuous_disable)(dev);
+}
+
+int
+rte_eth_promiscuous_get(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+ return dev->data->promiscuous;
+}
+
+void
+rte_eth_allmulticast_enable(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
+ (*dev->dev_ops->allmulticast_enable)(dev);
+ dev->data->all_multicast = 1;
+}
+
+void
+rte_eth_allmulticast_disable(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
+ dev->data->all_multicast = 0;
+ (*dev->dev_ops->allmulticast_disable)(dev);
+}
+
+int
+rte_eth_allmulticast_get(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+ return dev->data->all_multicast;
+}
+
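+/*
+ * Copy the 64-bit link status word atomically: the compare-and-set only
+ * succeeds if the destination still holds the value read just before it,
+ * so a racing update makes the helper return -1 instead of a torn copy.
+ */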
+static inline int
+rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &(dev->data->dev_link);
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+void
+rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ dev = &rte_eth_devices[port_id];
+
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ rte_eth_dev_atomic_read_link_status(dev, eth_link);
+ else {
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
+ (*dev->dev_ops->link_update)(dev, 1);
+ *eth_link = dev->data->dev_link;
+ }
+}
+
+void
+rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ dev = &rte_eth_devices[port_id];
+
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ rte_eth_dev_atomic_read_link_status(dev, eth_link);
+ else {
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
+ (*dev->dev_ops->link_update)(dev, 0);
+ *eth_link = dev->data->dev_link;
+ }
+}
+
+int
+rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+ memset(stats, 0, sizeof(*stats));
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+ (*dev->dev_ops->stats_get)(dev, stats);
+ stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+ return 0;
+}
+
+void
+rte_eth_stats_reset(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
+ (*dev->dev_ops->stats_reset)(dev);
+ dev->data->rx_mbuf_alloc_failed = 0;
+}
+
+/* retrieve ethdev extended statistics */
+int
+rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
+ unsigned n)
+{
+ struct rte_eth_stats eth_stats;
+ struct rte_eth_dev *dev;
+ unsigned count = 0, i, q;
+ signed xcount = 0;
+ uint64_t val, *stats_ptr;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+
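+ /* Layout of the returned array: generic stats first, then per-RX-queue
+ * and per-TX-queue stats, with any driver-specific xstats appended at
+ * the end. If n is too small, the required count is returned instead. */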
+ /* Return generic statistics */
+ count = RTE_NB_STATS + (dev->data->nb_rx_queues * RTE_NB_RXQ_STATS) +
+ (dev->data->nb_tx_queues * RTE_NB_TXQ_STATS);
+
+ /* implemented by the driver */
+ if (dev->dev_ops->xstats_get != NULL) {
+ /* Retrieve the driver-specific xstats, which are placed at the
+ * end of the xstats array.
+ */
+ xcount = (*dev->dev_ops->xstats_get)(dev, &xstats[count],
+ (n > count) ? n - count : 0);
+
+ if (xcount < 0)
+ return xcount;
+ }
+
+ if (n < count + xcount)
+ return count + xcount;
+
+ /* now fill the xstats structure */
+ count = 0;
+ rte_eth_stats_get(port_id, &eth_stats);
+
+ /* global stats */
+ for (i = 0; i < RTE_NB_STATS; i++) {
+ stats_ptr = RTE_PTR_ADD(&eth_stats,
+ rte_stats_strings[i].offset);
+ val = *stats_ptr;
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "%s", rte_stats_strings[i].name);
+ xstats[count++].value = val;
+ }
+
+ /* per-rxq stats */
+ for (q = 0; q < dev->data->nb_rx_queues; q++) {
+ for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
+ stats_ptr = RTE_PTR_ADD(&eth_stats,
+ rte_rxq_stats_strings[i].offset +
+ q * sizeof(uint64_t));
+ val = *stats_ptr;
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "rx_q%u_%s", q,
+ rte_rxq_stats_strings[i].name);
+ xstats[count++].value = val;
+ }
+ }
+
+ /* per-txq stats */
+ for (q = 0; q < dev->data->nb_tx_queues; q++) {
+ for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
+ stats_ptr = RTE_PTR_ADD(&eth_stats,
+ rte_txq_stats_strings[i].offset +
+ q * sizeof(uint64_t));
+ val = *stats_ptr;
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "tx_q%u_%s", q,
+ rte_txq_stats_strings[i].name);
+ xstats[count++].value = val;
+ }
+ }
+
+ return count + xcount;
+}
+
+/* reset ethdev extended statistics */
+void
+rte_eth_xstats_reset(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ dev = &rte_eth_devices[port_id];
+
+ /* implemented by the driver */
+ if (dev->dev_ops->xstats_reset != NULL) {
+ (*dev->dev_ops->xstats_reset)(dev);
+ return;
+ }
+
+ /* fallback to default */
+ rte_eth_stats_reset(port_id);
+}
+
+static int
+set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
+ uint8_t is_rx)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
+ return (*dev->dev_ops->queue_stats_mapping_set)
+ (dev, queue_id, stat_idx, is_rx);
+}
+
+
+int
+rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
+ uint8_t stat_idx)
+{
+ return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
+ STAT_QMAP_TX);
+}
+
+
+int
+rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
+ uint8_t stat_idx)
+{
+ return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
+ STAT_QMAP_RX);
+}
+
+
+void
+rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
+{
+ struct rte_eth_dev *dev;
+ const struct rte_eth_desc_lim lim = {
+ .nb_max = UINT16_MAX,
+ .nb_min = 0,
+ .nb_align = 1,
+ };
+
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ dev = &rte_eth_devices[port_id];
+
+ memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
+ dev_info->rx_desc_lim = lim;
+ dev_info->tx_desc_lim = lim;
+
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
+ (*dev->dev_ops->dev_infos_get)(dev, dev_info);
+ dev_info->pci_dev = dev->pci_dev;
+ dev_info->driver_name = dev->data->drv_name;
+}
+
+void
+rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ dev = &rte_eth_devices[port_id];
+ ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
+}
+
+
+int
+rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ *mtu = dev->data->mtu;
+ return 0;
+}
+
+int
+rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
+{
+ int ret;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
+
+ ret = (*dev->dev_ops->mtu_set)(dev, mtu);
+ if (!ret)
+ dev->data->mtu = mtu;
+
+ return ret;
+}
+
+int
+rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+ if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
+ RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
+ return -ENOSYS;
+ }
+
+ if (vlan_id > 4095) {
+ RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
+ port_id, (unsigned) vlan_id);
+ return -EINVAL;
+ }
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
+
+ return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
+}
+
+int
+rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
+ (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
+
+ return 0;
+}
+
+int
+rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
+ (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
+
+ return 0;
+}
+
+int
+rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
+{
+ struct rte_eth_dev *dev;
+ int ret = 0;
+ int mask = 0;
+ int cur, org = 0;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ /* Determine which offload options the application changed;
+ * only the changed bits are passed down to the PMD. */
+ cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
+ org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
+ if (cur != org) {
+ dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
+ mask |= ETH_VLAN_STRIP_MASK;
+ }
+
+ cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
+ org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
+ if (cur != org) {
+ dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
+ mask |= ETH_VLAN_FILTER_MASK;
+ }
+
+ cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
+ org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
+ if (cur != org) {
+ dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
+ mask |= ETH_VLAN_EXTEND_MASK;
+ }
+
+ /* no change */
+ if (mask == 0)
+ return ret;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
+ (*dev->dev_ops->vlan_offload_set)(dev, mask);
+
+ return ret;
+}
+
+int
+rte_eth_dev_get_vlan_offload(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+ int ret = 0;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ ret |= ETH_VLAN_STRIP_OFFLOAD;
+
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ ret |= ETH_VLAN_FILTER_OFFLOAD;
+
+ if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ ret |= ETH_VLAN_EXTEND_OFFLOAD;
+
+ return ret;
+}
+
+int
+rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
+ (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
+
+ return 0;
+}
+
+int
+rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
+ memset(fc_conf, 0, sizeof(*fc_conf));
+ return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
+}
+
+int
+rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
+ RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
+ return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
+}
+
+int
+rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
+ RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ /* High water / low water validation is device-specific */
+ if (*dev->dev_ops->priority_flow_ctrl_set)
+ return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
+ return -ENOTSUP;
+}
+
+static int
+rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint16_t i, num;
+
+ if (!reta_conf)
+ return -EINVAL;
+
+ if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
+ RTE_PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
+ RTE_RETA_GROUP_SIZE);
+ return -EINVAL;
+ }
+
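+ /* At least one 64-entry group must have a non-zero mask, otherwise
+ * the requested update/query would be a no-op. */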
+ num = reta_size / RTE_RETA_GROUP_SIZE;
+ for (i = 0; i < num; i++) {
+ if (reta_conf[i].mask)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int
+rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size,
+ uint8_t max_rxq)
+{
+ uint16_t i, idx, shift;
+
+ if (!reta_conf)
+ return -EINVAL;
+
+ if (max_rxq == 0) {
+ RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if ((reta_conf[idx].mask & (1ULL << shift)) &&
+ (reta_conf[idx].reta[shift] >= max_rxq)) {
+ RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
+ "the maximum rxq index: %u\n", idx, shift,
+ reta_conf[idx].reta[shift], max_rxq);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int
+rte_eth_dev_rss_reta_update(uint8_t port_id,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct rte_eth_dev *dev;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ /* Check mask bits */
+ ret = rte_eth_check_reta_mask(reta_conf, reta_size);
+ if (ret < 0)
+ return ret;
+
+ dev = &rte_eth_devices[port_id];
+
+ /* Check entry value */
+ ret = rte_eth_check_reta_entry(reta_conf, reta_size,
+ dev->data->nb_rx_queues);
+ if (ret < 0)
+ return ret;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
+ return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
+}
+
+int
+rte_eth_dev_rss_reta_query(uint8_t port_id,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct rte_eth_dev *dev;
+ int ret;
+
+ if (port_id >= nb_ports) {
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ /* Check mask bits */
+ ret = rte_eth_check_reta_mask(reta_conf, reta_size);
+ if (ret < 0)
+ return ret;
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
+ return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
+}
+
+int
+rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev *dev;
+ uint16_t rss_hash_protos;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ rss_hash_protos = rss_conf->rss_hf;
+ if ((rss_hash_protos != 0) &&
+ ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
+ RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
+ rss_hash_protos);
+ return -EINVAL;
+ }
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
+ return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
+}
+
+int
+rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
+ return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
+}
+
+int
+rte_eth_dev_udp_tunnel_add(uint8_t port_id,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ if (udp_tunnel == NULL) {
+ RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
+ return -EINVAL;
+ }
+
+ if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+ RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
+ return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
+}
+
+int
+rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (udp_tunnel == NULL) {
+ RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
+ return -EINVAL;
+ }
+
+ if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+ RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
+ return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
+}
+
+int
+rte_eth_led_on(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
+ return (*dev->dev_ops->dev_led_on)(dev);
+}
+
+int
+rte_eth_led_off(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
+ return (*dev->dev_ops->dev_led_off)(dev);
+}
+
+/*
+ * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
+ * an empty spot.
+ */
+static int
+get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ unsigned i;
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ for (i = 0; i < dev_info.max_mac_addrs; i++)
+ if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
+ return i;
+
+ return -1;
+}
+
+static const struct ether_addr null_mac_addr;
+
+int
+rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
+ uint32_t pool)
+{
+ struct rte_eth_dev *dev;
+ int index;
+ uint64_t pool_mask;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
+
+ if (is_zero_ether_addr(addr)) {
+ RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
+ port_id);
+ return -EINVAL;
+ }
+ if (pool >= ETH_64_POOLS) {
+ RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
+ return -EINVAL;
+ }
+
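+ /* Re-use the slot if the address is already programmed, otherwise
+ * claim the first free (all-zero) entry. */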
+ index = get_mac_addr_index(port_id, addr);
+ if (index < 0) {
+ index = get_mac_addr_index(port_id, &null_mac_addr);
+ if (index < 0) {
+ RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
+ port_id);
+ return -ENOSPC;
+ }
+ } else {
+ pool_mask = dev->data->mac_pool_sel[index];
+
+ /* If both the MAC address and the pool are already there, do nothing */
+ if (pool_mask & (1ULL << pool))
+ return 0;
+ }
+
+ /* Update NIC */
+ (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
+
+ /* Update address in NIC data structure */
+ ether_addr_copy(addr, &dev->data->mac_addrs[index]);
+
+ /* Update pool bitmap in NIC data structure */
+ dev->data->mac_pool_sel[index] |= (1ULL << pool);
+
+ return 0;
+}
+
+int
+rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
+{
+ struct rte_eth_dev *dev;
+ int index;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
+
+ index = get_mac_addr_index(port_id, addr);
+ if (index == 0) {
+ RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
+ return -EADDRINUSE;
+ } else if (index < 0)
+ return 0; /* Do nothing if address wasn't found */
+
+ /* Update NIC */
+ (*dev->dev_ops->mac_addr_remove)(dev, index);
+
+ /* Update address in NIC data structure */
+ ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
+
+ /* reset pool bitmap */
+ dev->data->mac_pool_sel[index] = 0;
+
+ return 0;
+}
+
+int
+rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ if (!is_valid_assigned_ether_addr(addr))
+ return -EINVAL;
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
+
+ /* Update default address in NIC data structure */
+ ether_addr_copy(addr, &dev->data->mac_addrs[0]);
+
+ (*dev->dev_ops->mac_addr_set)(dev, addr);
+
+ return 0;
+}
+
+int
+rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf,
+ uint16_t rx_mode, uint8_t on)
+{
+ uint16_t num_vfs;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ num_vfs = dev_info.max_vfs;
+ if (vf > num_vfs) {
+ RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
+ return -EINVAL;
+ }
+
+ if (rx_mode == 0) {
+ RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
+ return -EINVAL;
+ }
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
+ return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
+}
+
+/*
+ * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
+ * an empty spot.
+ */
+static int
+get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ unsigned i;
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ if (!dev->data->hash_mac_addrs)
+ return -1;
+
+ for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
+ if (memcmp(addr, &dev->data->hash_mac_addrs[i],
+ ETHER_ADDR_LEN) == 0)
+ return i;
+
+ return -1;
+}
+
+int
+rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
+ uint8_t on)
+{
+ int index;
+ int ret;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (is_zero_ether_addr(addr)) {
+ RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ index = get_hash_mac_addr_index(port_id, addr);
+ /* Check if it's already there, and do nothing */
+ if ((index >= 0) && (on))
+ return 0;
+
+ if (index < 0) {
+ if (!on) {
+ RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
+ "set in UTA\n", port_id);
+ return -EINVAL;
+ }
+
+ index = get_hash_mac_addr_index(port_id, &null_mac_addr);
+ if (index < 0) {
+ RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
+ port_id);
+ return -ENOSPC;
+ }
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
+ ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
+ if (ret == 0) {
+ /* Update address in NIC data structure */
+ if (on)
+ ether_addr_copy(addr,
+ &dev->data->hash_mac_addrs[index]);
+ else
+ ether_addr_copy(&null_mac_addr,
+ &dev->data->hash_mac_addrs[index]);
+ }
+
+ return ret;
+}
+
+int
+rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
+ return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
+}
+
+int
+rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
+{
+ uint16_t num_vfs;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ num_vfs = dev_info.max_vfs;
+ if (vf > num_vfs) {
+ RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
+ return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
+}
+
+int
+rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
+{
+ uint16_t num_vfs;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ num_vfs = dev_info.max_vfs;
+ if (vf > num_vfs) {
+ RTE_PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
+ return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
+}
+
+int
+rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
+ uint64_t vf_mask, uint8_t vlan_on)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+
+ if (vlan_id > ETHER_MAX_VLAN_ID) {
+ RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
+ vlan_id);
+ return -EINVAL;
+ }
+
+ if (vf_mask == 0) {
+ RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
+ return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
+ vf_mask, vlan_on);
+}
+
+int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
+ uint16_t tx_rate)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_link link;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ rte_eth_dev_info_get(port_id, &dev_info);
+ link = dev->data->dev_link;
+
+ if (queue_idx > dev_info.max_tx_queues) {
+ RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
+ "invalid queue id=%d\n", port_id, queue_idx);
+ return -EINVAL;
+ }
+
+ if (tx_rate > link.link_speed) {
+ RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
+ "bigger than link speed= %d\n",
+ tx_rate, link.link_speed);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
+ return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
+}
+
+int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
+ uint64_t q_msk)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_link link;
+
+ if (q_msk == 0)
+ return 0;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ rte_eth_dev_info_get(port_id, &dev_info);
+ link = dev->data->dev_link;
+
+ if (vf > dev_info.max_vfs) {
+ RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: "
+ "invalid vf id=%d\n", port_id, vf);
+ return -EINVAL;
+ }
+
+ if (tx_rate > link.link_speed) {
+ RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
+ "bigger than link speed= %d\n",
+ tx_rate, link.link_speed);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
+ return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
+}
+
+int
+rte_eth_mirror_rule_set(uint8_t port_id,
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t rule_id, uint8_t on)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ if (mirror_conf->rule_type == 0) {
+ RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
+ return -EINVAL;
+ }
+
+ if (mirror_conf->dst_pool >= ETH_64_POOLS) {
+ RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
+ ETH_64_POOLS - 1);
+ return -EINVAL;
+ }
+
+ if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
+ ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
+ (mirror_conf->pool_mask == 0)) {
+ RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
+ return -EINVAL;
+ }
+
+ if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
+ mirror_conf->vlan.vlan_mask == 0) {
+ RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
+
+ return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
+}
+
+int
+rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
+
+ return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
+}
+
+int
+rte_eth_dev_callback_register(uint8_t port_id,
+ enum rte_eth_event_type event,
+ rte_eth_dev_cb_fn cb_fn, void *cb_arg)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_callback *user_cb;
+
+ if (!cb_fn)
+ return -EINVAL;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+ rte_spinlock_lock(&rte_eth_dev_cb_lock);
+
+ TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
+ if (user_cb->cb_fn == cb_fn &&
+ user_cb->cb_arg == cb_arg &&
+ user_cb->event == event) {
+ break;
+ }
+ }
+
+ /* create a new callback only if no matching one was found above;
+ * re-inserting an entry that is already linked would corrupt the
+ * list. */
+ if (user_cb == NULL) {
+ user_cb = rte_zmalloc("INTR_USER_CALLBACK",
+ sizeof(struct rte_eth_dev_callback), 0);
+ if (user_cb != NULL) {
+ user_cb->cb_fn = cb_fn;
+ user_cb->cb_arg = cb_arg;
+ user_cb->event = event;
+ TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
+ }
+ }
+
+ rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+ return (user_cb == NULL) ? -ENOMEM : 0;
+}
+
+int
+rte_eth_dev_callback_unregister(uint8_t port_id,
+ enum rte_eth_event_type event,
+ rte_eth_dev_cb_fn cb_fn, void *cb_arg)
+{
+ int ret;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_callback *cb, *next;
+
+ if (!cb_fn)
+ return -EINVAL;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+ rte_spinlock_lock(&rte_eth_dev_cb_lock);
+
+ ret = 0;
+ for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
+
+ next = TAILQ_NEXT(cb, next);
+
+ if (cb->cb_fn != cb_fn || cb->event != event ||
+ (cb->cb_arg != (void *)-1 &&
+ cb->cb_arg != cb_arg))
+ continue;
+
+ /*
+ * if this callback is not executing right now,
+ * then remove it.
+ */
+ if (cb->active == 0) {
+ TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
+ rte_free(cb);
+ } else {
+ ret = -EAGAIN;
+ }
+ }
+
+ rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+ return ret;
+}
+
+void
+_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
+ enum rte_eth_event_type event)
+{
+ struct rte_eth_dev_callback *cb_lst;
+ struct rte_eth_dev_callback dev_cb;
+
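+ /* Each matching callback is copied and flagged active, and the lock is
+ * dropped around the user function so that the callback itself may
+ * call back into ethdev (e.g. to unregister) without deadlocking. */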
+ rte_spinlock_lock(&rte_eth_dev_cb_lock);
+ TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
+ if (cb_lst->cb_fn == NULL || cb_lst->event != event)
+ continue;
+ dev_cb = *cb_lst;
+ cb_lst->active = 1;
+ rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+ dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
+ dev_cb.cb_arg);
+ rte_spinlock_lock(&rte_eth_dev_cb_lock);
+ cb_lst->active = 0;
+ }
+ rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+}
+
+int
+rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
+{
+ uint32_t vec;
+ struct rte_eth_dev *dev;
+ struct rte_intr_handle *intr_handle;
+ uint16_t qid;
+ int rc;
+
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ intr_handle = &dev->pci_dev->intr_handle;
+ if (!intr_handle->intr_vec) {
+ RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
+ return -EPERM;
+ }
+
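+ /* Apply the epoll control op to every RX queue vector; failures are
+ * only logged, and an already registered vector (-EEXIST) is ignored. */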
+ for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
+ vec = intr_handle->intr_vec[qid];
+ rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
+ if (rc && rc != -EEXIST) {
+ RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
+ " op %d epfd %d vec %u\n",
+ port_id, qid, op, epfd, vec);
+ }
+ }
+
+ return 0;
+}
+
+const struct rte_memzone *
+rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
+ uint16_t queue_id, size_t size, unsigned align,
+ int socket_id)
+{
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
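+ /* The zone name encodes driver, ring, port and queue, so re-configuring
+ * the same queue finds and re-uses the existing zone instead of leaking
+ * a new reservation. */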
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ dev->driver->pci_drv.name, ring_name,
+ dev->data->port_id, queue_id);
+
+ mz = rte_memzone_lookup(z_name);
+ if (mz)
+ return mz;
+
+ if (rte_xen_dom0_supported())
+ return rte_memzone_reserve_bounded(z_name, size, socket_id,
+ 0, align, RTE_PGSIZE_2M);
+ else
+ return rte_memzone_reserve_aligned(z_name, size, socket_id,
+ 0, align);
+}
+
+int
+rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
+ int epfd, int op, void *data)
+{
+ uint32_t vec;
+ struct rte_eth_dev *dev;
+ struct rte_intr_handle *intr_handle;
+ int rc;
+
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ if (queue_id >= dev->data->nb_rx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
+ return -EINVAL;
+ }
+
+ intr_handle = &dev->pci_dev->intr_handle;
+ if (!intr_handle->intr_vec) {
+ RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
+ return -EPERM;
+ }
+
+ vec = intr_handle->intr_vec[queue_id];
+ rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
+ if (rc && rc != -EEXIST) {
+ RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
+ " op %d epfd %d vec %u\n",
+ port_id, queue_id, op, epfd, vec);
+ return rc;
+ }
+
+ return 0;
+}
+
+int
+rte_eth_dev_rx_intr_enable(uint8_t port_id,
+ uint16_t queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
+ return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
+}
+
+int
+rte_eth_dev_rx_intr_disable(uint8_t port_id,
+ uint16_t queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
+ return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
+}
+
+#ifdef RTE_NIC_BYPASS
+int rte_eth_dev_bypass_init(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
+ (*dev->dev_ops->bypass_init)(dev);
+ return 0;
+}
+
+int
+rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
+ (*dev->dev_ops->bypass_state_show)(dev, state);
+ return 0;
+}
+
+int
+rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
+ (*dev->dev_ops->bypass_state_set)(dev, new_state);
+ return 0;
+}
+
+int
+rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
+ (*dev->dev_ops->bypass_event_show)(dev, event, state);
+ return 0;
+}
+
+int
+rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
+ (*dev->dev_ops->bypass_event_set)(dev, event, state);
+ return 0;
+}
+
+int
+rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
+ (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
+ return 0;
+}
+
+int
+rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
+ (*dev->dev_ops->bypass_ver_show)(dev, ver);
+ return 0;
+}
+
+int
+rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
+ (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
+ return 0;
+}
+
+int
+rte_eth_dev_bypass_wd_reset(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
+ (*dev->dev_ops->bypass_wd_reset)(dev);
+ return 0;
+}
+#endif
+
+int
+rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
+ return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
+ RTE_ETH_FILTER_NOP, NULL);
+}
+
+int
+rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
+ return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
+}
+
+void *
+rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
+ rte_rx_callback_fn fn, void *user_param)
+{
+#ifndef RTE_ETHDEV_RXTX_CALLBACKS
+ rte_errno = ENOTSUP;
+ return NULL;
+#endif
+ /* check input parameters */
+ if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
+ queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
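+ /* Note: the callback list is manipulated without locking here, so
+ * synchronizing against rx_burst calls running on the same queue is
+ * left to the caller. */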
+ struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
+
+ if (cb == NULL) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ cb->fn.rx = fn;
+ cb->param = user_param;
+
+ /* Add the callbacks in FIFO order. */
+ struct rte_eth_rxtx_callback *tail =
+ rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
+
+ if (!tail) {
+ rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
+
+ } else {
+ while (tail->next)
+ tail = tail->next;
+ tail->next = cb;
+ }
+
+ return cb;
+}
+
+void *
+rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
+ rte_tx_callback_fn fn, void *user_param)
+{
+#ifndef RTE_ETHDEV_RXTX_CALLBACKS
+ rte_errno = ENOTSUP;
+ return NULL;
+#endif
+ /* check input parameters */
+ if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
+ queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
+
+ if (cb == NULL) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ cb->fn.tx = fn;
+ cb->param = user_param;
+
+ /* Add the callbacks in FIFO order. */
+ struct rte_eth_rxtx_callback *tail =
+ rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
+
+ if (!tail) {
+ rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
+
+ } else {
+ while (tail->next)
+ tail = tail->next;
+ tail->next = cb;
+ }
+
+ return cb;
+}
+
+int
+rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
+ struct rte_eth_rxtx_callback *user_cb)
+{
+#ifndef RTE_ETHDEV_RXTX_CALLBACKS
+ return -ENOTSUP;
+#endif
+ /* Check input parameters. */
+ if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
+ queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
+ return -EINVAL;
+ }
+
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
+ struct rte_eth_rxtx_callback *prev_cb;
+
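+ /* The callback is only unlinked here, never freed: a data-plane thread
+ * may still be executing it, so releasing its memory is left to the
+ * caller. */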
+ /* Reset head pointer and remove user cb if first in the list. */
+ if (cb == user_cb) {
+ dev->post_rx_burst_cbs[queue_id] = user_cb->next;
+ return 0;
+ }
+
+ /* Remove the user cb from the callback list. */
+ do {
+ prev_cb = cb;
+ cb = cb->next;
+
+ if (cb == user_cb) {
+ prev_cb->next = user_cb->next;
+ return 0;
+ }
+
+ } while (cb != NULL);
+
+ /* Callback wasn't found. */
+ return -EINVAL;
+}
+
+int
+rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
+ struct rte_eth_rxtx_callback *user_cb)
+{
+#ifndef RTE_ETHDEV_RXTX_CALLBACKS
+ return -ENOTSUP;
+#endif
+ /* Check input parameters. */
+ if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
+ queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
+ return -EINVAL;
+ }
+
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
+ struct rte_eth_rxtx_callback *prev_cb;
+
+ /* Reset head pointer and remove user cb if first in the list. */
+ if (cb == user_cb) {
+ dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
+ return 0;
+ }
+
+ /* Remove the user cb from the callback list. */
+ do {
+ prev_cb = cb;
+ cb = cb->next;
+
+ if (cb == user_cb) {
+ prev_cb->next = user_cb->next;
+ return 0;
+ }
+
+ } while (cb != NULL);
+
+ /* Callback wasn't found. */
+ return -EINVAL;
+}
+
+int
+rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ if (qinfo == NULL)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[port_id];
+ if (queue_id >= dev->data->nb_rx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
+
+ memset(qinfo, 0, sizeof(*qinfo));
+ dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
+ return 0;
+}
+
+int
+rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ if (qinfo == NULL)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[port_id];
+ if (queue_id >= dev->data->nb_tx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
+
+ memset(qinfo, 0, sizeof(*qinfo));
+ dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
+ return 0;
+}
+
+int
+rte_eth_dev_set_mc_addr_list(uint8_t port_id,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
+ return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
+}
+
+int
+rte_eth_timesync_enable(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
+ return (*dev->dev_ops->timesync_enable)(dev);
+}
+
+int
+rte_eth_timesync_disable(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
+ return (*dev->dev_ops->timesync_disable)(dev);
+}
+
+int
+rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
+ uint32_t flags)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
+ return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
+}
+
+int
+rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
+ return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
+}
+
+int
+rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
+ return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
+}
+
+int
+rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
+ return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
+}
+
+int
+rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
+ return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
+}
+
+int
+rte_eth_dev_get_reg_length(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP);
+ return (*dev->dev_ops->get_reg_length)(dev);
+}
+
+int
+rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
+ return (*dev->dev_ops->get_reg)(dev, info);
+}
+
+int
+rte_eth_dev_get_eeprom_length(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
+ return (*dev->dev_ops->get_eeprom_length)(dev);
+}
+
+int
+rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
+ return (*dev->dev_ops->get_eeprom)(dev, info);
+}
+
+int
+rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
+ return (*dev->dev_ops->set_eeprom)(dev, info);
+}
+
+int
+rte_eth_dev_get_dcb_info(uint8_t port_id,
+ struct rte_eth_dcb_info *dcb_info)
+{
+ struct rte_eth_dev *dev;
+
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
+ return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
+}
+
+void
+rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
+{
+ if ((eth_dev == NULL) || (pci_dev == NULL)) {
+ RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
+ eth_dev, pci_dev);
+ return;
+ }
+
+ eth_dev->data->dev_flags = 0;
+ if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
+ eth_dev->data->kdrv = pci_dev->kdrv;
+ eth_dev->data->numa_node = pci_dev->numa_node;
+ eth_dev->data->drv_name = pci_dev->driver->name;
+}
diff --git a/src/dpdk_lib18/librte_ether/rte_ethdev.h b/src/dpdk22/lib/librte_ether/rte_ethdev.h
index ce0528f5..bada8ade 100755..100644
--- a/src/dpdk_lib18/librte_ether/rte_ethdev.h
+++ b/src/dpdk22/lib/librte_ether/rte_ethdev.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -172,12 +172,21 @@ extern "C" {
#include <stdint.h>
+#include <rte_dev.h>
+
+/* Use this macro to check if LRO API is supported */
+#define RTE_ETHDEV_HAS_LRO_SUPPORT
+
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
-#include <rte_mbuf.h>
+#include <rte_dev.h>
+#include <rte_devargs.h>
#include "rte_ether.h"
#include "rte_eth_ctrl.h"
+#include "rte_dev_info.h"
+
+struct rte_mbuf;
/**
* A structure used to retrieve statistics for an Ethernet port.
@@ -187,19 +196,31 @@ struct rte_eth_stats {
uint64_t opackets; /**< Total number of successfully transmitted packets.*/
uint64_t ibytes; /**< Total number of successfully received bytes. */
uint64_t obytes; /**< Total number of successfully transmitted bytes. */
- uint64_t imissed; /**< Total of RX missed packets (e.g full FIFO). */
- uint64_t ibadcrc; /**< Total of RX packets with CRC error. */
- uint64_t ibadlen; /**< Total of RX packets with bad length. */
+ uint64_t imissed;
+ /**< Total of RX packets dropped by the HW,
+ * because there are no available mbufs (i.e. RX queues are full).
+ */
+ uint64_t ibadcrc __rte_deprecated;
+ /**< Deprecated; Total of RX packets with CRC error. */
+ uint64_t ibadlen __rte_deprecated;
+ /**< Deprecated; Total of RX packets with bad length. */
uint64_t ierrors; /**< Total number of erroneous received packets. */
uint64_t oerrors; /**< Total number of failed transmitted packets. */
- uint64_t imcasts; /**< Total number of multicast received packets. */
+ uint64_t imcasts;
+ /**< Deprecated; Total number of multicast received packets. */
uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */
- uint64_t fdirmatch; /**< Total number of RX packets matching a filter. */
- uint64_t fdirmiss; /**< Total number of RX packets not matching any filter. */
- uint64_t tx_pause_xon; /**< Total nb. of XON pause frame sent. */
- uint64_t rx_pause_xon; /**< Total nb. of XON pause frame received. */
- uint64_t tx_pause_xoff; /**< Total nb. of XOFF pause frame sent. */
- uint64_t rx_pause_xoff; /**< Total nb. of XOFF pause frame received. */
+ uint64_t fdirmatch __rte_deprecated;
+ /**< Deprecated; Total number of RX packets matching a filter. */
+ uint64_t fdirmiss __rte_deprecated;
+ /**< Deprecated; Total number of RX packets not matching any filter. */
+ uint64_t tx_pause_xon __rte_deprecated;
+ /**< Deprecated; Total nb. of XON pause frame sent. */
+ uint64_t rx_pause_xon __rte_deprecated;
+ /**< Deprecated; Total nb. of XON pause frame received. */
+ uint64_t tx_pause_xoff __rte_deprecated;
+ /**< Deprecated; Total nb. of XOFF pause frame sent. */
+ uint64_t rx_pause_xoff __rte_deprecated;
+ /**< Deprecated; Total nb. of XOFF pause frame received. */
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
/**< Total number of queue RX packets. */
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
@@ -318,14 +339,15 @@ struct rte_eth_rxmode {
enum rte_eth_rx_mq_mode mq_mode;
uint32_t max_rx_pkt_len; /**< Only used if jumbo_frame enabled. */
uint16_t split_hdr_size; /**< hdr buf size (header_split enabled).*/
- uint8_t header_split : 1, /**< Header Split enable. */
+ uint16_t header_split : 1, /**< Header Split enable. */
hw_ip_checksum : 1, /**< IP/UDP/TCP checksum offload enable. */
hw_vlan_filter : 1, /**< VLAN filter enable. */
hw_vlan_strip : 1, /**< VLAN strip enable. */
hw_vlan_extend : 1, /**< Extended VLAN enable. */
jumbo_frame : 1, /**< Jumbo Frame Receipt enable. */
hw_strip_crc : 1, /**< Enable CRC stripping by hardware. */
- enable_scatter : 1; /**< Enable scatter packets rx handler */
+ enable_scatter : 1, /**< Enable scatter packets rx handler */
+ enable_lro : 1; /**< Enable LRO */
};
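[Editor's note] A sketch of how the new enable_lro bit and the RTE_ETHDEV_HAS_LRO_SUPPORT guard are meant to work together; the helper name is hypothetical:

#include <rte_ethdev.h>

/* Sketch: set the new rxmode bit only when building against an LRO-aware
 * header; older headers lack both the macro and the bitfield. */
static void
enable_lro_if_available(struct rte_eth_conf *conf)
{
#ifdef RTE_ETHDEV_HAS_LRO_SUPPORT
	conf->rxmode.enable_lro = 1; /* device must also offer DEV_RX_OFFLOAD_TCP_LRO */
#else
	(void)conf;
#endif
}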
/**
@@ -351,97 +373,70 @@ struct rte_eth_rss_conf {
uint64_t rss_hf; /**< Hash functions to apply - see below. */
};
-/* Supported RSS offloads */
-/* for 1G & 10G */
-#define ETH_RSS_IPV4_SHIFT 0
-#define ETH_RSS_IPV4_TCP_SHIFT 1
-#define ETH_RSS_IPV6_SHIFT 2
-#define ETH_RSS_IPV6_EX_SHIFT 3
-#define ETH_RSS_IPV6_TCP_SHIFT 4
-#define ETH_RSS_IPV6_TCP_EX_SHIFT 5
-#define ETH_RSS_IPV4_UDP_SHIFT 6
-#define ETH_RSS_IPV6_UDP_SHIFT 7
-#define ETH_RSS_IPV6_UDP_EX_SHIFT 8
-/* for 40G only */
-#define ETH_RSS_NONF_IPV4_UDP_SHIFT 31
-#define ETH_RSS_NONF_IPV4_TCP_SHIFT 33
-#define ETH_RSS_NONF_IPV4_SCTP_SHIFT 34
-#define ETH_RSS_NONF_IPV4_OTHER_SHIFT 35
-#define ETH_RSS_FRAG_IPV4_SHIFT 36
-#define ETH_RSS_NONF_IPV6_UDP_SHIFT 41
-#define ETH_RSS_NONF_IPV6_TCP_SHIFT 43
-#define ETH_RSS_NONF_IPV6_SCTP_SHIFT 44
-#define ETH_RSS_NONF_IPV6_OTHER_SHIFT 45
-#define ETH_RSS_FRAG_IPV6_SHIFT 46
-#define ETH_RSS_FCOE_OX_SHIFT 48
-#define ETH_RSS_FCOE_RX_SHIFT 49
-#define ETH_RSS_FCOE_OTHER_SHIFT 50
-#define ETH_RSS_L2_PAYLOAD_SHIFT 63
-
-/* for 1G & 10G */
-#define ETH_RSS_IPV4 (1 << ETH_RSS_IPV4_SHIFT)
-#define ETH_RSS_IPV4_TCP (1 << ETH_RSS_IPV4_TCP_SHIFT)
-#define ETH_RSS_IPV6 (1 << ETH_RSS_IPV6_SHIFT)
-#define ETH_RSS_IPV6_EX (1 << ETH_RSS_IPV6_EX_SHIFT)
-#define ETH_RSS_IPV6_TCP (1 << ETH_RSS_IPV6_TCP_SHIFT)
-#define ETH_RSS_IPV6_TCP_EX (1 << ETH_RSS_IPV6_TCP_EX_SHIFT)
-#define ETH_RSS_IPV4_UDP (1 << ETH_RSS_IPV4_UDP_SHIFT)
-#define ETH_RSS_IPV6_UDP (1 << ETH_RSS_IPV6_UDP_SHIFT)
-#define ETH_RSS_IPV6_UDP_EX (1 << ETH_RSS_IPV6_UDP_EX_SHIFT)
-/* for 40G only */
-#define ETH_RSS_NONF_IPV4_UDP (1ULL << ETH_RSS_NONF_IPV4_UDP_SHIFT)
-#define ETH_RSS_NONF_IPV4_TCP (1ULL << ETH_RSS_NONF_IPV4_TCP_SHIFT)
-#define ETH_RSS_NONF_IPV4_SCTP (1ULL << ETH_RSS_NONF_IPV4_SCTP_SHIFT)
-#define ETH_RSS_NONF_IPV4_OTHER (1ULL << ETH_RSS_NONF_IPV4_OTHER_SHIFT)
-#define ETH_RSS_FRAG_IPV4 (1ULL << ETH_RSS_FRAG_IPV4_SHIFT)
-#define ETH_RSS_NONF_IPV6_UDP (1ULL << ETH_RSS_NONF_IPV6_UDP_SHIFT)
-#define ETH_RSS_NONF_IPV6_TCP (1ULL << ETH_RSS_NONF_IPV6_TCP_SHIFT)
-#define ETH_RSS_NONF_IPV6_SCTP (1ULL << ETH_RSS_NONF_IPV6_SCTP_SHIFT)
-#define ETH_RSS_NONF_IPV6_OTHER (1ULL << ETH_RSS_NONF_IPV6_OTHER_SHIFT)
-#define ETH_RSS_FRAG_IPV6 (1ULL << ETH_RSS_FRAG_IPV6_SHIFT)
-/* FCOE relevant should not be used */
-#define ETH_RSS_FCOE_OX (1ULL << ETH_RSS_FCOE_OX_SHIFT)
-#define ETH_RSS_FCOE_RX (1ULL << ETH_RSS_FCOE_RX_SHIFT)
-#define ETH_RSS_FCOE_OTHER (1ULL << ETH_RSS_FCOE_OTHER_SHIFT)
-#define ETH_RSS_L2_PAYLOAD (1ULL << ETH_RSS_L2_PAYLOAD_SHIFT)
+/*
+ * The RSS offload types are defined based on flow types which are defined
+ * in rte_eth_ctrl.h. Different NIC hardware may support different RSS offload
+ * types. The supported flow types or RSS offload types can be queried by
+ * rte_eth_dev_info_get().
+ */
+#define ETH_RSS_IPV4 (1ULL << RTE_ETH_FLOW_IPV4)
+#define ETH_RSS_FRAG_IPV4 (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
+#define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
+#define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
+#define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
+#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
+#define ETH_RSS_IPV6 (1ULL << RTE_ETH_FLOW_IPV6)
+#define ETH_RSS_FRAG_IPV6 (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
+#define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
+#define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
+#define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
+#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
+#define ETH_RSS_L2_PAYLOAD (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
+#define ETH_RSS_IPV6_EX (1ULL << RTE_ETH_FLOW_IPV6_EX)
+#define ETH_RSS_IPV6_TCP_EX (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
+#define ETH_RSS_IPV6_UDP_EX (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
#define ETH_RSS_IP ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_IPV6 | \
- ETH_RSS_NONF_IPV4_OTHER | \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONF_IPV6_OTHER | \
- ETH_RSS_FRAG_IPV6)
+ ETH_RSS_IPV4 | \
+ ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_OTHER | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_OTHER | \
+ ETH_RSS_IPV6_EX)
+
#define ETH_RSS_UDP ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_IPV6 | \
- ETH_RSS_IPV4_UDP | \
- ETH_RSS_IPV6_UDP | \
- ETH_RSS_IPV6_UDP_EX | \
- ETH_RSS_NONF_IPV4_UDP | \
- ETH_RSS_NONF_IPV6_UDP)
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_IPV6_UDP_EX)
+
+#define ETH_RSS_TCP ( \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_IPV6_TCP_EX)
+
+#define ETH_RSS_SCTP ( \
+ ETH_RSS_NONFRAG_IPV4_SCTP | \
+ ETH_RSS_NONFRAG_IPV6_SCTP)
+
/**< Mask of valid RSS hash protocols */
#define ETH_RSS_PROTO_MASK ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_IPV4_TCP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV4_UDP | \
- ETH_RSS_IPV6_UDP | \
- ETH_RSS_IPV6_UDP_EX | \
- ETH_RSS_NONF_IPV4_UDP | \
- ETH_RSS_NONF_IPV4_TCP | \
- ETH_RSS_NONF_IPV4_SCTP | \
- ETH_RSS_NONF_IPV4_OTHER | \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONF_IPV6_UDP | \
- ETH_RSS_NONF_IPV6_TCP | \
- ETH_RSS_NONF_IPV6_SCTP | \
- ETH_RSS_NONF_IPV6_OTHER | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_L2_PAYLOAD)
+ ETH_RSS_IPV4 | \
+ ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_NONFRAG_IPV4_SCTP | \
+ ETH_RSS_NONFRAG_IPV4_OTHER | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_NONFRAG_IPV6_SCTP | \
+ ETH_RSS_NONFRAG_IPV6_OTHER | \
+ ETH_RSS_L2_PAYLOAD | \
+ ETH_RSS_IPV6_EX | \
+ ETH_RSS_IPV6_TCP_EX | \
+ ETH_RSS_IPV6_UDP_EX)
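[Editor's note] A sketch of the new flow-type based macros in use; the configuration shown is hypothetical, and a real application should intersect rss_hf with the flow_type_rss_offloads the device reports:

#include <rte_ethdev.h>

/* Sketch: request hashing on all IP headers plus the TCP/UDP 4-tuples. */
static void
set_rss(struct rte_eth_conf *conf)
{
	conf->rxmode.mq_mode = ETH_MQ_RX_RSS;
	conf->rx_adv_conf.rss_conf.rss_key = NULL; /* keep the default key */
	conf->rx_adv_conf.rss_conf.rss_hf =
		ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP;
}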
/*
* Definitions used for redirection table entry size.
@@ -487,31 +482,33 @@ struct rte_eth_rss_conf {
#define ETH_VMDQ_ACCEPT_BROADCAST 0x0008 /**< accept broadcast packets. */
#define ETH_VMDQ_ACCEPT_MULTICAST 0x0010 /**< multicast promiscuous. */
-/* Definitions used for VMDQ mirror rules setting */
-#define ETH_VMDQ_NUM_MIRROR_RULE 4 /**< Maximum nb. of mirror rules. . */
+/** Maximum nb. of VLANs per mirror rule */
+#define ETH_MIRROR_MAX_VLANS 64
-#define ETH_VMDQ_POOL_MIRROR 0x0001 /**< Virtual Pool Mirroring. */
-#define ETH_VMDQ_UPLINK_MIRROR 0x0002 /**< Uplink Port Mirroring. */
-#define ETH_VMDQ_DOWNLIN_MIRROR 0x0004 /**< Downlink Port Mirroring. */
-#define ETH_VMDQ_VLAN_MIRROR 0x0008 /**< VLAN Mirroring. */
+#define ETH_MIRROR_VIRTUAL_POOL_UP 0x01 /**< Virtual Pool uplink Mirroring. */
+#define ETH_MIRROR_UPLINK_PORT 0x02 /**< Uplink Port Mirroring. */
+#define ETH_MIRROR_DOWNLINK_PORT 0x04 /**< Downlink Port Mirroring. */
+#define ETH_MIRROR_VLAN 0x08 /**< VLAN Mirroring. */
+#define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10 /**< Virtual Pool downlink Mirroring. */
/**
* A structure used to configure VLAN traffic mirror of an Ethernet port.
*/
struct rte_eth_vlan_mirror {
uint64_t vlan_mask; /**< mask for valid VLAN ID. */
- uint16_t vlan_id[ETH_VMDQ_MAX_VLAN_FILTERS];
- /** VLAN ID list for vlan mirror. */
+ /** VLAN ID list for vlan mirroring. */
+ uint16_t vlan_id[ETH_MIRROR_MAX_VLANS];
};
/**
* A structure used to configure traffic mirror of an Ethernet port.
*/
-struct rte_eth_vmdq_mirror_conf {
- uint8_t rule_type_mask; /**< Mirroring rule type mask we want to set */
- uint8_t dst_pool; /**< Destination pool for this mirror rule. */
+struct rte_eth_mirror_conf {
+ uint8_t rule_type; /**< Mirroring rule type */
+ uint8_t dst_pool; /**< Destination pool for this mirror rule. */
uint64_t pool_mask; /**< Bitmap of pool for pool mirroring */
- struct rte_eth_vlan_mirror vlan; /**< VLAN ID setting for VLAN mirroring */
+ /** VLAN ID setting for VLAN mirroring. */
+ struct rte_eth_vlan_mirror vlan;
};
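[Editor's note] A sketch of the renamed structure driving a VLAN mirror rule; rte_eth_mirror_rule_set() is the matching dpdk 2.2 entry point, and the rule/pool numbers are placeholders:

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: mirror traffic of VLAN 100 into pool 0 via mirror rule 0. */
static int
mirror_vlan_100(uint8_t port_id)
{
	struct rte_eth_mirror_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rule_type = ETH_MIRROR_VLAN;
	conf.dst_pool = 0;
	conf.vlan.vlan_mask = 1ULL; /* only vlan_id[0] is valid */
	conf.vlan.vlan_id[0] = 100;
	return rte_eth_mirror_rule_set(port_id, &conf, 0 /* rule_id */, 1 /* on */);
}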
/**
@@ -550,20 +547,20 @@ enum rte_eth_nb_pools {
/* This structure may be extended in future. */
struct rte_eth_dcb_rx_conf {
enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
- uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
- /**< Possible DCB queue,4 or 8. */
+	/** Traffic class each UP is mapped to. */
+ uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
struct rte_eth_vmdq_dcb_tx_conf {
enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
- uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
- /**< Possible DCB queue,4 or 8. */
+	/** Traffic class each UP is mapped to. */
+ uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
struct rte_eth_dcb_tx_conf {
enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
- uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
- /**< Possible DCB queue,4 or 8. */
+	/** Traffic class each UP is mapped to. */
+ uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
struct rte_eth_vmdq_tx_conf {
@@ -590,7 +587,7 @@ struct rte_eth_vmdq_dcb_conf {
uint16_t vlan_id; /**< The vlan id of the received frame */
uint64_t pools; /**< Bitmask of pools for packet rx */
} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
- uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
+ uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
/**< Selects a queue in a pool */
};
@@ -643,18 +640,32 @@ struct rte_eth_rxconf {
#define ETH_TXQ_FLAGS_NOOFFLOADS \
(ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
+#define ETH_TXQ_FLAGS_NOXSUMS \
+ (ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
+ ETH_TXQ_FLAGS_NOXSUMTCP)
/**
* A structure used to configure a TX ring of an Ethernet port.
*/
struct rte_eth_txconf {
struct rte_eth_thresh tx_thresh; /**< TX ring threshold registers. */
uint16_t tx_rs_thresh; /**< Drives the setting of RS bit on TXDs. */
- uint16_t tx_free_thresh; /**< Drives the freeing of TX buffers. */
+ uint16_t tx_free_thresh; /**< Start freeing TX buffers if there are
+	fewer free descriptors than this value. */
+
uint32_t txq_flags; /**< Set flags for the Tx queue */
uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
};
/**
+ * A structure containing information about HW descriptor ring limitations.
+ */
+struct rte_eth_desc_lim {
+ uint16_t nb_max; /**< Max allowed number of descriptors. */
+ uint16_t nb_min; /**< Min allowed number of descriptors. */
+	uint16_t nb_align; /**< Number of descriptors must be a multiple of this value. */
+};
+
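[Editor's note] A sketch of honoring the new limits when sizing a ring; rounding down to the alignment can undershoot nb_min on unusual hardware, so this is illustrative only:

#include <rte_ethdev.h>

/* Sketch: clamp and align a requested RX ring size to rx_desc_lim. */
static uint16_t
clamp_nb_rxd(uint8_t port_id, uint16_t wanted)
{
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);
	if (wanted > info.rx_desc_lim.nb_max)
		wanted = info.rx_desc_lim.nb_max;
	if (wanted < info.rx_desc_lim.nb_min)
		wanted = info.rx_desc_lim.nb_min;
	if (info.rx_desc_lim.nb_align != 0)
		wanted -= wanted % info.rx_desc_lim.nb_align;
	return wanted;
}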
+/**
* This enum indicates the flow control mode
*/
enum rte_eth_fc_mode {
@@ -718,10 +729,9 @@ struct rte_fdir_conf {
enum rte_fdir_mode mode; /**< Flow Director mode. */
enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
enum rte_fdir_status_mode status; /**< How to report FDIR hash. */
- /** Offset of flexbytes field in RX packets (in 16-bit word units). */
- uint8_t flexbytes_offset;
/** RX queue of packets matching a "drop" filter in perfect mode. */
uint8_t drop_queue;
+ struct rte_eth_fdir_masks mask;
struct rte_eth_fdir_flex_conf flex_conf;
/**< Flex payload configuration. */
};
@@ -735,119 +745,13 @@ struct rte_eth_udp_tunnel {
};
/**
- * Possible l4type of FDIR filters.
- */
-enum rte_l4type {
- RTE_FDIR_L4TYPE_NONE = 0, /**< None. */
- RTE_FDIR_L4TYPE_UDP, /**< UDP. */
- RTE_FDIR_L4TYPE_TCP, /**< TCP. */
- RTE_FDIR_L4TYPE_SCTP, /**< SCTP. */
-};
-
-/**
- * Select IPv4 or IPv6 FDIR filters.
- */
-enum rte_iptype {
- RTE_FDIR_IPTYPE_IPV4 = 0, /**< IPv4. */
- RTE_FDIR_IPTYPE_IPV6 , /**< IPv6. */
-};
-
-/**
- * A structure used to define a FDIR packet filter.
- */
-struct rte_fdir_filter {
- uint16_t flex_bytes; /**< Flex bytes value to match. */
- uint16_t vlan_id; /**< VLAN ID value to match, 0 otherwise. */
- uint16_t port_src; /**< Source port to match, 0 otherwise. */
- uint16_t port_dst; /**< Destination port to match, 0 otherwise. */
- union {
- uint32_t ipv4_addr; /**< IPv4 source address to match. */
- uint32_t ipv6_addr[4]; /**< IPv6 source address to match. */
- } ip_src; /**< IPv4/IPv6 source address to match (union of above). */
- union {
- uint32_t ipv4_addr; /**< IPv4 destination address to match. */
- uint32_t ipv6_addr[4]; /**< IPv6 destination address to match */
- } ip_dst; /**< IPv4/IPv6 destination address to match (union of above). */
- enum rte_l4type l4type; /**< l4type to match: NONE/UDP/TCP/SCTP. */
- enum rte_iptype iptype; /**< IP packet type to match: IPv4 or IPv6. */
-};
-
-/**
- * A structure used to configure FDIR masks that are used by the device
- * to match the various fields of RX packet headers.
- * @note The only_ip_flow field has the opposite meaning compared to other
- * masks!
- */
-struct rte_fdir_masks {
- /** When set to 1, packet l4type is \b NOT relevant in filters, and
- source and destination port masks must be set to zero. */
- uint8_t only_ip_flow;
- /** If set to 1, vlan_id is relevant in filters. */
- uint8_t vlan_id;
- /** If set to 1, vlan_prio is relevant in filters. */
- uint8_t vlan_prio;
- /** If set to 1, flexbytes is relevant in filters. */
- uint8_t flexbytes;
- /** If set to 1, set the IPv6 masks. Otherwise set the IPv4 masks. */
- uint8_t set_ipv6_mask;
- /** When set to 1, comparison of destination IPv6 address with IP6AT
- registers is meaningful. */
- uint8_t comp_ipv6_dst;
- /** Mask of Destination IPv4 Address. All bits set to 1 define the
- relevant bits to use in the destination address of an IPv4 packet
- when matching it against FDIR filters. */
- uint32_t dst_ipv4_mask;
- /** Mask of Source IPv4 Address. All bits set to 1 define
- the relevant bits to use in the source address of an IPv4 packet
- when matching it against FDIR filters. */
- uint32_t src_ipv4_mask;
- /** Mask of Source IPv6 Address. All bits set to 1 define the
- relevant BYTES to use in the source address of an IPv6 packet
- when matching it against FDIR filters. */
- uint16_t dst_ipv6_mask;
- /** Mask of Destination IPv6 Address. All bits set to 1 define the
- relevant BYTES to use in the destination address of an IPv6 packet
- when matching it against FDIR filters. */
- uint16_t src_ipv6_mask;
- /** Mask of Source Port. All bits set to 1 define the relevant
- bits to use in the source port of an IP packets when matching it
- against FDIR filters. */
- uint16_t src_port_mask;
- /** Mask of Destination Port. All bits set to 1 define the relevant
- bits to use in the destination port of an IP packet when matching it
- against FDIR filters. */
- uint16_t dst_port_mask;
-};
-
-/**
- * A structure used to report the status of the flow director filters in use.
- */
-struct rte_eth_fdir {
- /** Number of filters with collision indication. */
- uint16_t collision;
- /** Number of free (non programmed) filters. */
- uint16_t free;
- /** The Lookup hash value of the added filter that updated the value
- of the MAXLEN field */
- uint16_t maxhash;
- /** Longest linked list of filters in the table. */
- uint8_t maxlen;
- /** Number of added filters. */
- uint64_t add;
- /** Number of removed filters. */
- uint64_t remove;
- /** Number of failed added filters (no more space in device). */
- uint64_t f_add;
- /** Number of failed removed filters. */
- uint64_t f_remove;
-};
-
-/**
* A structure used to enable/disable specific device interrupts.
*/
struct rte_intr_conf {
/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
uint16_t lsc;
+ /** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
+ uint16_t rxq;
};
/**
@@ -905,6 +809,7 @@ struct rte_eth_conf {
#define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
#define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
#define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
+#define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
/**
* TX offload capabilities of a device.
@@ -916,6 +821,8 @@ struct rte_eth_conf {
#define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
#define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
#define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
+#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
struct rte_eth_dev_info {
struct rte_pci_device *pci_dev; /**< Device PCI information. */
@@ -935,13 +842,38 @@ struct rte_eth_dev_info {
uint32_t tx_offload_capa; /**< Device TX offload capabilities. */
uint16_t reta_size;
/**< Device redirection table size, the total number of entries. */
+ uint8_t hash_key_size; /**< Hash key size in bytes */
+ /** Bit mask of RSS offloads, the bit offset also means flow type */
+ uint64_t flow_type_rss_offloads;
struct rte_eth_rxconf default_rxconf; /**< Default RX configuration */
struct rte_eth_txconf default_txconf; /**< Default TX configuration */
uint16_t vmdq_queue_base; /**< First queue ID for VMDQ pools. */
uint16_t vmdq_queue_num; /**< Queue number for VMDQ pools. */
uint16_t vmdq_pool_base; /**< First ID of VMDQ pools. */
+ struct rte_eth_desc_lim rx_desc_lim; /**< RX descriptors limits */
+ struct rte_eth_desc_lim tx_desc_lim; /**< TX descriptors limits */
};
+/**
+ * Ethernet device RX queue information structure.
+ * Used to retrieve information about a configured queue.
+ */
+struct rte_eth_rxq_info {
+ struct rte_mempool *mp; /**< mempool used by that queue. */
+ struct rte_eth_rxconf conf; /**< queue config parameters. */
+ uint8_t scattered_rx; /**< scattered packets RX supported. */
+ uint16_t nb_desc; /**< configured number of RXDs. */
+} __rte_cache_aligned;
+
+/**
+ * Ethernet device TX queue information structure.
+ * Used to retrieve information about a configured queue.
+ */
+struct rte_eth_txq_info {
+ struct rte_eth_txconf conf; /**< queue config parameters. */
+ uint16_t nb_desc; /**< configured number of TXDs. */
+} __rte_cache_aligned;
+
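[Editor's note] A sketch of querying a configured queue through the new info structures; it assumes the rte_eth_rx_queue_info_get() accessor that dpdk 2.2 pairs with the rxq_info_get driver op defined further down:

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: dump the RX queue parameters recorded at setup time. */
static void
dump_rxq(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
		return;
	printf("rxq %u: %u descriptors, scattered=%u\n",
	       queue_id, qinfo.nb_desc, qinfo.scattered_rx);
}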
/** Maximum name length for extended statistics counters */
#define RTE_ETH_XSTATS_NAME_SIZE 64
@@ -957,78 +889,73 @@ struct rte_eth_xstats {
uint64_t value;
};
-struct rte_eth_dev;
-
-struct rte_eth_dev_callback;
-/** @internal Structure to keep track of registered callbacks */
-TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
-
-#define TCP_UGR_FLAG 0x20
-#define TCP_ACK_FLAG 0x10
-#define TCP_PSH_FLAG 0x08
-#define TCP_RST_FLAG 0x04
-#define TCP_SYN_FLAG 0x02
-#define TCP_FIN_FLAG 0x01
-#define TCP_FLAG_ALL 0x3F
+#define ETH_DCB_NUM_TCS 8
+#define ETH_MAX_VMDQ_POOL 64
/**
- * A structure used to define an ethertype filter.
+ * A structure used to get the information of queue and
+ * TC mapping on both TX and RX paths.
*/
-struct rte_ethertype_filter {
- uint16_t ethertype; /**< little endian. */
- uint8_t priority_en; /**< compare priority enable. */
- uint8_t priority;
+struct rte_eth_dcb_tc_queue_mapping {
+ /** rx queues assigned to tc per Pool */
+ struct {
+ uint8_t base;
+ uint8_t nb_queue;
+ } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
+	/** tx queues assigned to tc per Pool */
+ struct {
+ uint8_t base;
+ uint8_t nb_queue;
+ } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
};
/**
- * A structure used to define an syn filter.
+ * A structure used to get the information of DCB.
+ * It includes TC UP mapping and queue TC mapping.
*/
-struct rte_syn_filter {
- uint8_t hig_pri; /**< 1 means higher pri than 2tuple, 5tupe,
- and flex filter, 0 means lower pri. */
+struct rte_eth_dcb_info {
+ uint8_t nb_tcs; /**< number of TCs */
+ uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
+ uint8_t tc_bws[ETH_DCB_NUM_TCS]; /**< TX BW percentage for each TC */
+ /** rx queues assigned to tc */
+ struct rte_eth_dcb_tc_queue_mapping tc_queue;
};
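[Editor's note] A sketch of walking the new mapping; rte_eth_dev_get_dcb_info() is assumed to be the public accessor added alongside these structures. The UP index must be below ETH_DCB_NUM_USER_PRIORITIES:

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: report which traffic class a user priority lands on. */
static void
show_tc_of_up(uint8_t port_id, uint8_t up)
{
	struct rte_eth_dcb_info info;

	if (rte_eth_dev_get_dcb_info(port_id, &info) == 0)
		printf("UP %u -> TC %u (of %u TCs)\n",
		       up, info.prio_tc[up], info.nb_tcs);
}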
/**
- * A structure used to define a 2tuple filter.
+ * RX/TX queue states
*/
-struct rte_2tuple_filter {
- uint16_t dst_port; /**< big endian. */
- uint8_t protocol;
- uint8_t tcp_flags;
- uint16_t priority; /**< used when more than one filter matches. */
- uint8_t dst_port_mask:1, /**< if mask is 1b, means not compare. */
- protocol_mask:1;
-};
+#define RTE_ETH_QUEUE_STATE_STOPPED 0
+#define RTE_ETH_QUEUE_STATE_STARTED 1
-/**
- * A structure used to define a flex filter.
- */
-struct rte_flex_filter {
- uint16_t len;
- uint32_t dwords[32]; /**< flex bytes in big endian. */
- uint8_t mask[16]; /**< if mask bit is 1b, do not compare
- corresponding byte in dwords. */
- uint8_t priority;
-};
+struct rte_eth_dev;
+
+struct rte_eth_dev_callback;
+/** @internal Structure to keep track of registered callbacks */
+TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
-/**
- * A structure used to define a 5tuple filter.
- */
-struct rte_5tuple_filter {
- uint32_t dst_ip; /**< destination IP address in big endian. */
- uint32_t src_ip; /**< source IP address in big endian. */
- uint16_t dst_port; /**< destination port in big endian. */
- uint16_t src_port; /**< source Port big endian. */
- uint8_t protocol; /**< l4 protocol. */
- uint8_t tcp_flags; /**< tcp flags. */
- uint16_t priority; /**< seven evels (001b-111b), 111b is highest,
- used when more than one filter matches. */
- uint8_t dst_ip_mask:1, /**< if mask is 1b, do not compare dst ip. */
- src_ip_mask:1, /**< if mask is 1b, do not compare src ip. */
- dst_port_mask:1, /**< if mask is 1b, do not compare dst port. */
- src_port_mask:1, /**< if mask is 1b, do not compare src port. */
- protocol_mask:1; /**< if mask is 1b, do not compare protocol. */
-};
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#define RTE_PMD_DEBUG_TRACE(...) \
+ rte_pmd_debug_trace(__func__, __VA_ARGS__)
+#else
+#define RTE_PMD_DEBUG_TRACE(...)
+#endif
+
+
+/* Macros to check for valid port */
+#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
+ if (!rte_eth_dev_is_valid_port(port_id)) { \
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
+ return retval; \
+ } \
+} while (0)
+
+#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
+ if (!rte_eth_dev_is_valid_port(port_id)) { \
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
+ return; \
+ } \
+} while (0)
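[Editor's note] A sketch of the guard pattern these macros establish for ethdev entry points; the wrapper itself is hypothetical:

#include <errno.h>
#include <rte_ethdev.h>

/* Sketch: validate the port before touching per-port state. */
static int
get_link_speed(uint8_t port_id, uint32_t *speed)
{
	struct rte_eth_link link;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	rte_eth_link_get_nowait(port_id, &link);
	*speed = link.link_speed;
	return 0;
}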
/*
* Definitions of all functions exported by an Ethernet driver through the
@@ -1117,15 +1044,29 @@ typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev,
const struct rte_eth_txconf *tx_conf);
/**< @internal Setup a transmit queue of an Ethernet device. */
+typedef int (*eth_rx_enable_intr_t)(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+/**< @internal Enable interrupt of a receive queue of an Ethernet device. */
+
+typedef int (*eth_rx_disable_intr_t)(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+/**< @internal Disable interrupt of a receive queue of an Ethernet device. */
+
typedef void (*eth_queue_release_t)(void *queue);
/**< @internal Release memory resources allocated by given RX/TX queue. */
typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
-/**< @Get number of available descriptors on a receive queue of an Ethernet device. */
+/**< @internal Get number of available descriptors on a receive queue of an Ethernet device. */
typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
-/**< @Check DD bit of specific RX descriptor */
+/**< @internal Check DD bit of specific RX descriptor */
+
+typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id, struct rte_eth_rxq_info *qinfo);
+
+typedef void (*eth_txq_info_get_t)(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id, struct rte_eth_txq_info *qinfo);
typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);
/**< @internal Set MTU. */
@@ -1162,45 +1103,6 @@ typedef uint16_t (*eth_tx_burst_t)(void *txq,
uint16_t nb_pkts);
/**< @internal Send output packets on a transmit queue of an Ethernet device. */
-typedef int (*fdir_add_signature_filter_t)(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_ftr,
- uint8_t rx_queue);
-/**< @internal Setup a new signature filter rule on an Ethernet device */
-
-typedef int (*fdir_update_signature_filter_t)(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_ftr,
- uint8_t rx_queue);
-/**< @internal Update a signature filter rule on an Ethernet device */
-
-typedef int (*fdir_remove_signature_filter_t)(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_ftr);
-/**< @internal Remove a signature filter rule on an Ethernet device */
-
-typedef void (*fdir_infos_get_t)(struct rte_eth_dev *dev,
- struct rte_eth_fdir *fdir);
-/**< @internal Get information about fdir status */
-
-typedef int (*fdir_add_perfect_filter_t)(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_ftr,
- uint16_t soft_id, uint8_t rx_queue,
- uint8_t drop);
-/**< @internal Setup a new perfect filter rule on an Ethernet device */
-
-typedef int (*fdir_update_perfect_filter_t)(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_ftr,
- uint16_t soft_id, uint8_t rx_queue,
- uint8_t drop);
-/**< @internal Update a perfect filter rule on an Ethernet device */
-
-typedef int (*fdir_remove_perfect_filter_t)(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_ftr,
- uint16_t soft_id);
-/**< @internal Remove a perfect filter rule on an Ethernet device */
-
-typedef int (*fdir_set_masks_t)(struct rte_eth_dev *dev,
- struct rte_fdir_masks *fdir_masks);
-/**< @internal Setup flow director masks on an Ethernet device */
-
typedef int (*flow_ctrl_get_t)(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
/**< @internal Get current flow control parameter on an Ethernet device */
@@ -1246,6 +1148,10 @@ typedef void (*eth_mac_addr_add_t)(struct rte_eth_dev *dev,
uint32_t vmdq);
/**< @internal Set a MAC address into Receive Address Register */
+typedef void (*eth_mac_addr_set_t)(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+/**< @internal Set a MAC address into Receive Address Register */
+
typedef int (*eth_uc_hash_table_set_t)(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint8_t on);
@@ -1289,7 +1195,7 @@ typedef int (*eth_set_vf_rate_limit_t)(struct rte_eth_dev *dev,
/**< @internal Set VF TX rate */
typedef int (*eth_mirror_rule_set_t)(struct rte_eth_dev *dev,
- struct rte_eth_vmdq_mirror_conf *mirror_conf,
+ struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id,
uint8_t on);
/**< @internal Add a traffic mirroring rule on an Ethernet device */
@@ -1306,6 +1212,54 @@ typedef int (*eth_udp_tunnel_del_t)(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *tunnel_udp);
/**< @internal Delete tunneling UDP info */
+typedef int (*eth_set_mc_addr_list_t)(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+/**< @internal set the list of multicast addresses on an Ethernet device */
+
+typedef int (*eth_timesync_enable_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to enable IEEE1588/802.1AS timestamping. */
+
+typedef int (*eth_timesync_disable_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to disable IEEE1588/802.1AS timestamping. */
+
+typedef int (*eth_timesync_read_rx_timestamp_t)(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags);
+/**< @internal Function used to read an RX IEEE1588/802.1AS timestamp. */
+
+typedef int (*eth_timesync_read_tx_timestamp_t)(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+/**< @internal Function used to read a TX IEEE1588/802.1AS timestamp. */
+
+typedef int (*eth_timesync_adjust_time)(struct rte_eth_dev *dev, int64_t);
+/**< @internal Function used to adjust the device clock */
+
+typedef int (*eth_timesync_read_time)(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+/**< @internal Function used to get time from the device clock. */
+
+typedef int (*eth_timesync_write_time)(struct rte_eth_dev *dev,
+ const struct timespec *timestamp);
+/**< @internal Function used to set time into the device clock. */
+
+typedef int (*eth_get_reg_length_t)(struct rte_eth_dev *dev);
+/**< @internal Retrieve device register count */
+
+typedef int (*eth_get_reg_t)(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *info);
+/**< @internal Retrieve registers */
+
+typedef int (*eth_get_eeprom_length_t)(struct rte_eth_dev *dev);
+/**< @internal Retrieve eeprom size */
+
+typedef int (*eth_get_eeprom_t)(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
+/**< @internal Retrieve eeprom data */
+
+typedef int (*eth_set_eeprom_t)(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
+/**< @internal Program eeprom data */
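[Editor's note] A sketch of the application-level counterparts to the new timesync ops; rte_eth_timesync_enable() and rte_eth_timesync_read_time() are assumed to be the dpdk 2.2 wrappers over these function pointers:

#include <stdio.h>
#include <time.h>
#include <rte_ethdev.h>

/* Sketch: start IEEE1588 timestamping and sample the device clock once. */
static void
sample_dev_clock(uint8_t port_id)
{
	struct timespec ts;

	if (rte_eth_timesync_enable(port_id) == 0 &&
	    rte_eth_timesync_read_time(port_id, &ts) == 0)
		printf("dev clock: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
}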
#ifdef RTE_NIC_BYPASS
@@ -1361,79 +1315,16 @@ typedef int32_t (*bypass_ver_show_t)(struct rte_eth_dev *dev, uint32_t *ver);
typedef int32_t (*bypass_wd_reset_t)(struct rte_eth_dev *dev);
#endif
-typedef int (*eth_add_syn_filter_t)(struct rte_eth_dev *dev,
- struct rte_syn_filter *filter, uint16_t rx_queue);
-/**< @internal add syn filter rule on an Ethernet device */
-
-typedef int (*eth_remove_syn_filter_t)(struct rte_eth_dev *dev);
-/**< @internal remove syn filter rule on an Ethernet device */
-
-typedef int (*eth_get_syn_filter_t)(struct rte_eth_dev *dev,
- struct rte_syn_filter *filter, uint16_t *rx_queue);
-/**< @internal Get syn filter rule on an Ethernet device */
-
-typedef int (*eth_add_ethertype_filter_t)(struct rte_eth_dev *dev,
- uint16_t index, struct rte_ethertype_filter *filter,
- uint16_t rx_queue);
-/**< @internal Setup a new ethertype filter rule on an Ethernet device */
-
-typedef int (*eth_remove_ethertype_filter_t)(struct rte_eth_dev *dev,
- uint16_t index);
-/**< @internal Remove an ethertype filter rule on an Ethernet device */
-
-typedef int (*eth_get_ethertype_filter_t)(struct rte_eth_dev *dev,
- uint16_t index, struct rte_ethertype_filter *filter,
- uint16_t *rx_queue);
-/**< @internal Get an ethertype filter rule on an Ethernet device */
-
-typedef int (*eth_add_2tuple_filter_t)(struct rte_eth_dev *dev,
- uint16_t index, struct rte_2tuple_filter *filter,
- uint16_t rx_queue);
-/**< @internal Setup a new 2tuple filter rule on an Ethernet device */
-
-typedef int (*eth_remove_2tuple_filter_t)(struct rte_eth_dev *dev,
- uint16_t index);
-/**< @internal Remove a 2tuple filter rule on an Ethernet device */
-
-typedef int (*eth_get_2tuple_filter_t)(struct rte_eth_dev *dev,
- uint16_t index, struct rte_2tuple_filter *filter,
- uint16_t *rx_queue);
-/**< @internal Get a 2tuple filter rule on an Ethernet device */
-
-typedef int (*eth_add_5tuple_filter_t)(struct rte_eth_dev *dev,
- uint16_t index, struct rte_5tuple_filter *filter,
- uint16_t rx_queue);
-/**< @internal Setup a new 5tuple filter rule on an Ethernet device */
-
-typedef int (*eth_remove_5tuple_filter_t)(struct rte_eth_dev *dev,
- uint16_t index);
-/**< @internal Remove a 5tuple filter rule on an Ethernet device */
-
-typedef int (*eth_get_5tuple_filter_t)(struct rte_eth_dev *dev,
- uint16_t index, struct rte_5tuple_filter *filter,
- uint16_t *rx_queue);
-/**< @internal Get a 5tuple filter rule on an Ethernet device */
-
-typedef int (*eth_add_flex_filter_t)(struct rte_eth_dev *dev,
- uint16_t index, struct rte_flex_filter *filter,
- uint16_t rx_queue);
-/**< @internal Setup a new flex filter rule on an Ethernet device */
-
-typedef int (*eth_remove_flex_filter_t)(struct rte_eth_dev *dev,
- uint16_t index);
-/**< @internal Remove a flex filter rule on an Ethernet device */
-
-typedef int (*eth_get_flex_filter_t)(struct rte_eth_dev *dev,
- uint16_t index, struct rte_flex_filter *filter,
- uint16_t *rx_queue);
-/**< @internal Get a flex filter rule on an Ethernet device */
-
typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
/**< @internal Take operations to assigned filter type on an Ethernet device */
+typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info);
+/**< @internal Get dcb information on an Ethernet device */
+
/**
* @internal A structure containing the functions exported by an Ethernet driver.
*/
@@ -1470,6 +1361,10 @@ struct eth_dev_ops {
eth_queue_release_t rx_queue_release;/**< Release RX queue.*/
eth_rx_queue_count_t rx_queue_count; /**< Get Rx queue count. */
eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit */
+ /**< Enable Rx queue interrupt. */
+ eth_rx_enable_intr_t rx_queue_intr_enable;
+ /**< Disable Rx queue interrupt.*/
+ eth_rx_disable_intr_t rx_queue_intr_disable;
eth_tx_queue_setup_t tx_queue_setup;/**< Set up device TX queue.*/
eth_queue_release_t tx_queue_release;/**< Release TX queue.*/
eth_dev_led_on_t dev_led_on; /**< Turn on LED. */
@@ -1479,6 +1374,7 @@ struct eth_dev_ops {
priority_flow_ctrl_set_t priority_flow_ctrl_set; /**< Setup priority flow control.*/
eth_mac_addr_remove_t mac_addr_remove; /**< Remove MAC address */
eth_mac_addr_add_t mac_addr_add; /**< Add a MAC address */
+ eth_mac_addr_set_t mac_addr_set; /**< Set a MAC address */
eth_uc_hash_table_set_t uc_hash_table_set; /**< Set Unicast Table Array */
eth_uc_all_hash_table_set_t uc_all_hash_table_set; /**< Set Unicast hash bitmap */
eth_mirror_rule_set_t mirror_rule_set; /**< Add a traffic mirror rule.*/
@@ -1491,27 +1387,21 @@ struct eth_dev_ops {
eth_udp_tunnel_del_t udp_tunnel_del;
eth_set_queue_rate_limit_t set_queue_rate_limit; /**< Set queue rate limit */
eth_set_vf_rate_limit_t set_vf_rate_limit; /**< Set VF rate limit */
-
- /** Add a signature filter. */
- fdir_add_signature_filter_t fdir_add_signature_filter;
- /** Update a signature filter. */
- fdir_update_signature_filter_t fdir_update_signature_filter;
- /** Remove a signature filter. */
- fdir_remove_signature_filter_t fdir_remove_signature_filter;
- /** Get information about FDIR status. */
- fdir_infos_get_t fdir_infos_get;
- /** Add a perfect filter. */
- fdir_add_perfect_filter_t fdir_add_perfect_filter;
- /** Update a perfect filter. */
- fdir_update_perfect_filter_t fdir_update_perfect_filter;
- /** Remove a perfect filter. */
- fdir_remove_perfect_filter_t fdir_remove_perfect_filter;
- /** Setup masks for FDIR filtering. */
- fdir_set_masks_t fdir_set_masks;
/** Update redirection table. */
reta_update_t reta_update;
/** Query redirection table. */
reta_query_t reta_query;
+
+ eth_get_reg_length_t get_reg_length;
+ /**< Get # of registers */
+ eth_get_reg_t get_reg;
+ /**< Get registers */
+ eth_get_eeprom_length_t get_eeprom_length;
+ /**< Get eeprom length */
+ eth_get_eeprom_t get_eeprom;
+ /**< Get eeprom data */
+ eth_set_eeprom_t set_eeprom;
+ /**< Set eeprom */
/* bypass control */
#ifdef RTE_NIC_BYPASS
bypass_init_t bypass_init;
@@ -1529,22 +1419,104 @@ struct eth_dev_ops {
rss_hash_update_t rss_hash_update;
/** Get current RSS hash configuration. */
rss_hash_conf_get_t rss_hash_conf_get;
- eth_add_syn_filter_t add_syn_filter; /**< add syn filter. */
- eth_remove_syn_filter_t remove_syn_filter; /**< remove syn filter. */
- eth_get_syn_filter_t get_syn_filter; /**< get syn filter. */
- eth_add_ethertype_filter_t add_ethertype_filter; /**< add ethertype filter. */
- eth_remove_ethertype_filter_t remove_ethertype_filter; /**< remove ethertype filter. */
- eth_get_ethertype_filter_t get_ethertype_filter; /**< get ethertype filter. */
- eth_add_2tuple_filter_t add_2tuple_filter; /**< add 2tuple filter. */
- eth_remove_2tuple_filter_t remove_2tuple_filter; /**< remove 2tuple filter. */
- eth_get_2tuple_filter_t get_2tuple_filter; /**< get 2tuple filter. */
- eth_add_5tuple_filter_t add_5tuple_filter; /**< add 5tuple filter. */
- eth_remove_5tuple_filter_t remove_5tuple_filter; /**< remove 5tuple filter. */
- eth_get_5tuple_filter_t get_5tuple_filter; /**< get 5tuple filter. */
- eth_add_flex_filter_t add_flex_filter; /**< add flex filter. */
- eth_remove_flex_filter_t remove_flex_filter; /**< remove flex filter. */
- eth_get_flex_filter_t get_flex_filter; /**< get flex filter. */
- eth_filter_ctrl_t filter_ctrl; /**< common filter control*/
+ eth_filter_ctrl_t filter_ctrl;
+ /**< common filter control. */
+ eth_set_mc_addr_list_t set_mc_addr_list; /**< set list of mcast addrs */
+ eth_rxq_info_get_t rxq_info_get;
+ /**< retrieve RX queue information. */
+ eth_txq_info_get_t txq_info_get;
+ /**< retrieve TX queue information. */
+ /** Turn IEEE1588/802.1AS timestamping on. */
+ eth_timesync_enable_t timesync_enable;
+ /** Turn IEEE1588/802.1AS timestamping off. */
+ eth_timesync_disable_t timesync_disable;
+ /** Read the IEEE1588/802.1AS RX timestamp. */
+ eth_timesync_read_rx_timestamp_t timesync_read_rx_timestamp;
+ /** Read the IEEE1588/802.1AS TX timestamp. */
+ eth_timesync_read_tx_timestamp_t timesync_read_tx_timestamp;
+
+ /** Get DCB information */
+ eth_get_dcb_info get_dcb_info;
+ /** Adjust the device clock.*/
+ eth_timesync_adjust_time timesync_adjust_time;
+ /** Get the device clock time. */
+ eth_timesync_read_time timesync_read_time;
+ /** Set the device clock time. */
+ eth_timesync_write_time timesync_write_time;
+};
+
+/**
+ * Function type used for RX packet processing packet callbacks.
+ *
+ * The callback function is called on RX with a burst of packets that have
+ * been received on the given port and queue.
+ *
+ * @param port
+ * The Ethernet port on which RX is being performed.
+ * @param queue
+ * The queue on the Ethernet port which is being used to receive the packets.
+ * @param pkts
+ * The burst of packets that have just been received.
+ * @param nb_pkts
+ * The number of packets in the burst pointed to by "pkts".
+ * @param max_pkts
+ * The max number of packets that can be stored in the "pkts" array.
+ * @param user_param
+ * The arbitrary user parameter passed in by the application when the callback
+ * was originally configured.
+ * @return
+ * The number of packets returned to the user.
+ */
+typedef uint16_t (*rte_rx_callback_fn)(uint8_t port, uint16_t queue,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
+ void *user_param);
+
+/**
+ * Function type used for TX packet processing packet callbacks.
+ *
+ * The callback function is called on TX with a burst of packets immediately
+ * before the packets are put onto the hardware queue for transmission.
+ *
+ * @param port
+ * The Ethernet port on which TX is being performed.
+ * @param queue
+ * The queue on the Ethernet port which is being used to transmit the packets.
+ * @param pkts
+ * The burst of packets that are about to be transmitted.
+ * @param nb_pkts
+ * The number of packets in the burst pointed to by "pkts".
+ * @param user_param
+ * The arbitrary user parameter passed in by the application when the callback
+ * was originally configured.
+ * @return
+ * The number of packets to be written to the NIC.
+ */
+typedef uint16_t (*rte_tx_callback_fn)(uint8_t port, uint16_t queue,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
+
+/**
+ * @internal
+ * Structure used to hold information about the callbacks to be called for a
+ * queue on RX and TX.
+ */
+struct rte_eth_rxtx_callback {
+ struct rte_eth_rxtx_callback *next;
+	union {
+ rte_rx_callback_fn rx;
+ rte_tx_callback_fn tx;
+ } fn;
+ void *param;
+};
+
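[Editor's note] A sketch of a post-RX callback matching rte_rx_callback_fn above; registration through rte_eth_add_rx_callback() is assumed, and RTE_ETHDEV_RXTX_CALLBACKS must be enabled in the build for the burst paths to invoke it:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch: count received packets without altering the burst. */
static uint16_t
count_rx_cb(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
	    uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	(void)port; (void)queue; (void)pkts; (void)max_pkts;
	*(uint64_t *)user_param += nb_pkts;
	return nb_pkts; /* pass everything through */
}

/* Registration (assumed API):
 * rte_eth_add_rx_callback(port_id, queue_id, count_rx_cb, &counter); */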
+/**
+ * The eth device type.
+ */
+enum rte_eth_dev_type {
+ RTE_ETH_DEV_UNKNOWN, /**< unknown device type */
+ RTE_ETH_DEV_PCI,
+ /**< Physical function and Virtual function of PCI devices */
+ RTE_ETH_DEV_VIRTUAL, /**< non hardware device */
+ RTE_ETH_DEV_MAX /**< max value of this enum */
};
/**
@@ -1562,9 +1534,22 @@ struct rte_eth_dev {
eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */
struct rte_eth_dev_data *data; /**< Pointer to device data */
const struct eth_driver *driver;/**< Driver for this device */
- struct eth_dev_ops *dev_ops; /**< Functions exported by PMD */
+ const struct eth_dev_ops *dev_ops; /**< Functions exported by PMD */
struct rte_pci_device *pci_dev; /**< PCI info. supplied by probing */
- struct rte_eth_dev_cb_list callbacks; /**< User application callbacks */
+ /** User application callbacks for NIC interrupts */
+ struct rte_eth_dev_cb_list link_intr_cbs;
+ /**
+ * User-supplied functions called from rx_burst to post-process
+ * received packets before passing them to the user
+ */
+ struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
+ /**
+ * User-supplied functions called from tx_burst to pre-process
+ * outgoing packets before passing them to the driver for transmission.
+ */
+ struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
+ uint8_t attached; /**< Flag indicating the port is attached */
+ enum rte_eth_dev_type dev_type; /**< Flag indicating the device type */
};
struct rte_eth_dev_sriov {
@@ -1615,9 +1600,23 @@ struct rte_eth_dev_data {
uint8_t promiscuous : 1, /**< RX promiscuous mode ON(1) / OFF(0). */
scattered_rx : 1, /**< RX of scattered packets is ON(1) / OFF(0) */
all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */
- dev_started : 1; /**< Device state: STARTED(1) / STOPPED(0). */
+ dev_started : 1, /**< Device state: STARTED(1) / STOPPED(0). */
+ lro : 1; /**< RX LRO is ON(1) / OFF(0) */
+ uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
+ /** Queues state: STARTED(1) / STOPPED(0) */
+ uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
+ /** Queues state: STARTED(1) / STOPPED(0) */
+ uint32_t dev_flags; /**< Capabilities */
+ enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */
+ int numa_node; /**< NUMA node connection */
+ const char *drv_name; /**< Driver name */
};
+/** Device supports hotplug detach */
+#define RTE_ETH_DEV_DETACHABLE 0x0001
+/** Device supports link state interrupt */
+#define RTE_ETH_DEV_INTR_LSC 0x0002
+
/**
* @internal
* The pool of *rte_eth_dev* structures. The size of the pool
@@ -1629,7 +1628,11 @@ extern struct rte_eth_dev rte_eth_devices[];
* Get the total number of Ethernet devices that have been successfully
* initialized by the [matching] Ethernet driver during the PCI probing phase.
* All devices whose port identifier is in the range
- * [0, rte_eth_dev_count() - 1] can be operated on by network applications.
+ * [0, rte_eth_dev_count() - 1] can be operated on by network applications
+ * immediately after invoking rte_eal_init().
+ * If the application unplugs a port using the hotplug function, the enabled
+ * port numbers may become noncontiguous. In that case, the application must
+ * manage the enabled ports by itself.
*
* @return
* - The total number of usable Ethernet devices.
@@ -1637,16 +1640,67 @@ extern struct rte_eth_dev rte_eth_devices[];
extern uint8_t rte_eth_dev_count(void);
/**
- * Function for internal use by dummy drivers primarily, e.g. ring-based
- * driver.
+ * @internal
+ * Returns an ethdev slot specified by the unique identifier name.
+ *
+ * @param name
+ *  The pointer to the unique identifier name for each Ethernet device.
+ * @return
+ *  - The pointer to the ethdev slot on success, NULL on error.
+ */
+extern struct rte_eth_dev *rte_eth_dev_allocated(const char *name);
+
+/**
+ * @internal
* Allocates a new ethdev slot for an ethernet device and returns the pointer
* to that slot for the driver to use.
*
* @param name Unique identifier name for each Ethernet device
+ * @param type Device type of this Ethernet device
* @return
* - Slot in the rte_dev_devices array for a new device;
*/
-struct rte_eth_dev *rte_eth_dev_allocate(const char *name);
+struct rte_eth_dev *rte_eth_dev_allocate(const char *name,
+ enum rte_eth_dev_type type);
+
+/**
+ * @internal
+ * Release the specified ethdev port.
+ *
+ * @param eth_dev
+ * The *eth_dev* pointer is the address of the *rte_eth_dev* structure.
+ * @return
+ * - 0 on success, negative on error
+ */
+int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev);
+
+/**
+ * Attach a new Ethernet device specified by arguments.
+ *
+ * @param devargs
+ *  A pointer to a string describing the new device
+ *  to be attached. The string should be a PCI address like
+ *  '0000:01:00.0' or a virtual device name like 'eth_pcap0'.
+ * @param port_id
+ * A pointer to a port identifier actually attached.
+ * @return
+ * 0 on success and port_id is filled, negative on error
+ */
+int rte_eth_dev_attach(const char *devargs, uint8_t *port_id);
+
+/**
+ * Detach an Ethernet device specified by its port identifier.
+ * This function must be called when the device is in the
+ * closed state.
+ *
+ * @param port_id
+ * The port identifier of the device to detach.
+ * @param devname
+ * A pointer to a device name actually detached.
+ * @return
+ * 0 on success and devname is filled, negative on error
+ */
+int rte_eth_dev_detach(uint8_t port_id, char *devname);
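[Editor's note] A sketch of the attach/detach life cycle documented above; the PCI address is a placeholder, and the 32-byte name buffer mirrors the assumed RTE_ETH_NAME_MAX_LEN:

#include <rte_ethdev.h>

/* Sketch: hot-plug a port, use it, then detach it again. */
static int
hotplug_cycle(void)
{
	uint8_t port_id;
	char devname[32]; /* assumed >= RTE_ETH_NAME_MAX_LEN */

	if (rte_eth_dev_attach("0000:01:00.0", &port_id) != 0)
		return -1;
	/* ... configure, start, run, stop and close the port here ... */
	return rte_eth_dev_detach(port_id, devname);
}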
struct eth_driver;
/**
@@ -1654,9 +1708,6 @@ struct eth_driver;
* Initialization function of an Ethernet driver invoked for each matching
* Ethernet PCI device detected during the PCI probing phase.
*
- * @param eth_drv
- * The pointer to the [matching] Ethernet driver structure supplied by
- * the PMD when it registered itself.
* @param eth_dev
* The *eth_dev* pointer is the address of the *rte_eth_dev* structure
* associated with the matching device and which have been [automatically]
@@ -1667,6 +1718,8 @@ struct eth_driver;
* - *pci_dev*: Holds the pointers to the *rte_pci_device* structure which
* contains the generic PCI information of the matching device.
*
+ * - *driver*: Holds the pointer to the *eth_driver* structure.
+ *
* - *dev_private*: Holds a pointer to the device private data structure.
*
* - *mtu*: Contains the default Ethernet maximum frame length (1500).
@@ -1680,8 +1733,24 @@ struct eth_driver;
* of the *eth_dev* structure.
* - <0: Error code of the device initialization failure.
*/
-typedef int (*eth_dev_init_t)(struct eth_driver *eth_drv,
- struct rte_eth_dev *eth_dev);
+typedef int (*eth_dev_init_t)(struct rte_eth_dev *eth_dev);
+
+/**
+ * @internal
+ * Finalization function of an Ethernet driver invoked for each matching
+ * Ethernet PCI device detected during the PCI closing phase.
+ *
+ * @param eth_dev
+ * The *eth_dev* pointer is the address of the *rte_eth_dev* structure
+ * associated with the matching device and which have been [automatically]
+ * allocated in the *rte_eth_devices* array.
+ * @return
+ * - 0: Success, the device is properly finalized by the driver.
+ * In particular, the driver MUST free the *dev_ops* pointer
+ * of the *eth_dev* structure.
+ * - <0: Error code of the device initialization failure.
+ */
+typedef int (*eth_dev_uninit_t)(struct rte_eth_dev *eth_dev);
/**
* @internal
@@ -1694,11 +1763,14 @@ typedef int (*eth_dev_init_t)(struct eth_driver *eth_drv,
*
* - The *eth_dev_init* function invoked for each matching PCI device.
*
+ * - The *eth_dev_uninit* function invoked for each matching PCI device.
+ *
* - The size of the private data to allocate for each matching device.
*/
struct eth_driver {
struct rte_pci_driver pci_drv; /**< The PMD is also a PCI driver. */
eth_dev_init_t eth_dev_init; /**< Device init function. */
+ eth_dev_uninit_t eth_dev_uninit; /**< Device uninit function. */
unsigned int dev_private_size; /**< Size of device private data. */
};
@@ -1852,6 +1924,17 @@ extern int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
extern int rte_eth_dev_socket_id(uint8_t port_id);
/*
+ * Check if the port_id of the device is attached
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device
+ * @return
+ * - 0 if port is out of range or not attached
+ * - 1 if device is attached
+ */
+extern int rte_eth_dev_is_valid_port(uint8_t port_id);
+
+/*
* Allocate mbuf from mempool, setup the DMA physical address
* and then start RX for specified queue of a port. It is used
* when rx_deferred_start flag of the specified queue is true.
@@ -1973,7 +2056,9 @@ extern int rte_eth_dev_set_link_up(uint8_t port_id);
extern int rte_eth_dev_set_link_down(uint8_t port_id);
/**
- * Close an Ethernet device. The device cannot be restarted!
+ * Close a stopped Ethernet device. The device cannot be restarted!
+ * The function frees all resources except for those needed by the
+ * closed state. To free these resources, call rte_eth_dev_detach().
*
* @param port_id
* The port identifier of the Ethernet device.
@@ -2077,8 +2162,10 @@ extern void rte_eth_link_get_nowait(uint8_t port_id,
* - *obytes* with the total of successfully transmitted bytes.
* - *ierrors* with the total of erroneous received packets.
* - *oerrors* with the total of failed transmitted packets.
+ * @return
+ * Zero if successful. Non-zero otherwise.
*/
-extern void rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats);
+extern int rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats);
/**
* Reset the general I/O statistics of an Ethernet device.
@@ -2405,20 +2492,38 @@ extern int rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on);
* of pointers to *rte_mbuf* structures effectively supplied to the
* *rx_pkts* array.
*/
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-extern uint16_t rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
- struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
-#else
static inline uint16_t
rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
- struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+ struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
- struct rte_eth_dev *dev;
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- dev = &rte_eth_devices[port_id];
- return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id], rx_pkts, nb_pkts);
-}
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
+
+ if (queue_id >= dev->data->nb_rx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+ return 0;
+ }
#endif
+ int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
+ rx_pkts, nb_pkts);
+
+#ifdef RTE_ETHDEV_RXTX_CALLBACKS
+ struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
+
+ if (unlikely(cb != NULL)) {
+ do {
+ nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
+ nb_pkts, cb->param);
+ cb = cb->next;
+ } while (cb != NULL);
+ }
+#endif
+
+ return nb_rx;
+}
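[Editor's note] A sketch of the polling loop the inline above serves; the burst size is a local choice, and mbufs would normally be processed rather than freed immediately:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32 /* local choice, not an API constant */

/* Sketch: drain one burst from an RX queue. */
static void
poll_rxq_once(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[BURST_SIZE];
	uint16_t i, nb_rx;

	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);
}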
/**
* Get the number of used descriptors in a specific queue
@@ -2428,20 +2533,18 @@ rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
* @param queue_id
* The queue id on the specific port.
* @return
- * The number of used descriptors in the specific queue.
+ * The number of used descriptors in the specific queue, or:
+ * (-EINVAL) if *port_id* is invalid
+ * (-ENOTSUP) if the device does not support this function
*/
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-extern uint32_t rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id);
-#else
-static inline uint32_t
+static inline int
rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
{
- struct rte_eth_dev *dev;
-
- dev = &rte_eth_devices[port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
}
-#endif
/**
* Check if the DD bit of the specific RX descriptor in the queue has been set
@@ -2450,28 +2553,23 @@ rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
* The port identifier of the Ethernet device.
* @param queue_id
* The queue id on the specific port.
- * @offset
+ * @param offset
* The offset of the descriptor ID from tail.
* @return
* - (1) if the specific DD bit is set.
* - (0) if the specific DD bit is not set.
* - (-ENODEV) if *port_id* invalid.
+ * - (-ENOTSUP) if the device does not support this function
*/
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-extern int rte_eth_rx_descriptor_done(uint8_t port_id,
- uint16_t queue_id,
- uint16_t offset);
-#else
static inline int
rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
{
- struct rte_eth_dev *dev;
-
- dev = &rte_eth_devices[port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
return (*dev->dev_ops->rx_descriptor_done)( \
dev->data->rx_queues[queue_id], offset);
}
-#endif
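[Editor's note] A sketch of the now-inline descriptor probe; offset 0 asks about the next descriptor the hardware would complete:

#include <rte_ethdev.h>

/* Sketch: treat error codes (-ENODEV/-ENOTSUP) the same as "not done". */
static int
next_rxd_done(uint8_t port_id, uint16_t queue_id)
{
	return rte_eth_rx_descriptor_done(port_id, queue_id, 0) == 1;
}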
/**
* Send a burst of output packets on a transmit queue of an Ethernet device.
@@ -2510,10 +2608,9 @@ rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
* transparently free the memory buffers of packets previously sent.
* This feature is driven by the *tx_free_thresh* value supplied to the
* rte_eth_dev_configure() function at device configuration time.
- * When the number of previously sent packets reached the "minimum transmit
- * packets to free" threshold, the rte_eth_tx_burst() function must
- * [attempt to] free the *rte_mbuf* buffers of those packets whose
- * transmission was effectively completed.
+ * When the number of free TX descriptors drops below this threshold, the
+ * rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf* buffers
+ * of those packets whose transmission was effectively completed.
*
* @param port_id
* The port identifier of the Ethernet device.
@@ -2532,236 +2629,36 @@ rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
* the transmit ring. The return value can be less than the value of the
* *tx_pkts* parameter when the transmit ring is full or has been filled up.
*/
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-extern uint16_t rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
- struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
-#else
static inline uint16_t
rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
- struct rte_eth_dev *dev;
-
- dev = &rte_eth_devices[port_id];
- return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
-}
-#endif
-
-/**
- * Setup a new signature filter rule on an Ethernet device
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param fdir_filter
- * The pointer to the fdir filter structure describing the signature filter
- * rule.
- * The *rte_fdir_filter* structure includes the values of the different fields
- * to match: source and destination IP addresses, vlan id, flexbytes, source
- * and destination ports, and so on.
- * @param rx_queue
- * The index of the RX queue where to store RX packets matching the added
- * signature filter defined in fdir_filter.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support flow director mode.
- * - (-ENODEV) if *port_id* invalid.
- * - (-ENOSYS) if the FDIR mode is not configured in signature mode
- * on *port_id*.
- * - (-EINVAL) if the fdir_filter information is not correct.
- */
-int rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
- struct rte_fdir_filter *fdir_filter,
- uint8_t rx_queue);
-
-/**
- * Update a signature filter rule on an Ethernet device.
- * If the rule doesn't exits, it is created.
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param fdir_ftr
- * The pointer to the structure describing the signature filter rule.
- * The *rte_fdir_filter* structure includes the values of the different fields
- * to match: source and destination IP addresses, vlan id, flexbytes, source
- * and destination ports, and so on.
- * @param rx_queue
- * The index of the RX queue where to store RX packets matching the added
- * signature filter defined in fdir_ftr.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support flow director mode.
- * - (-ENODEV) if *port_id* invalid.
- * - (-ENOSYS) if the flow director mode is not configured in signature mode
- * on *port_id*.
- * - (-EINVAL) if the fdir_filter information is not correct.
- */
-int rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
- struct rte_fdir_filter *fdir_ftr,
- uint8_t rx_queue);
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-/**
- * Remove a signature filter rule on an Ethernet device.
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param fdir_ftr
- * The pointer to the structure describing the signature filter rule.
- * The *rte_fdir_filter* structure includes the values of the different fields
- * to match: source and destination IP addresses, vlan id, flexbytes, source
- * and destination ports, and so on.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support flow director mode.
- * - (-ENODEV) if *port_id* invalid.
- * - (-ENOSYS) if the flow director mode is not configured in signature mode
- * on *port_id*.
- * - (-EINVAL) if the fdir_filter information is not correct.
- */
-int rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
- struct rte_fdir_filter *fdir_ftr);
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
-/**
- * Retrieve the flow director information of an Ethernet device.
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param fdir
- * A pointer to a structure of type *rte_eth_dev_fdir* to be filled with
- * the flow director information of the Ethernet device.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support flow director mode.
- * - (-ENODEV) if *port_id* invalid.
- * - (-ENOSYS) if the flow director mode is not configured on *port_id*.
- */
-int rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir);
+ if (queue_id >= dev->data->nb_tx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+ return 0;
+ }
+#endif
-/**
- * Add a new perfect filter rule on an Ethernet device.
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param fdir_filter
- * The pointer to the structure describing the perfect filter rule.
- * The *rte_fdir_filter* structure includes the values of the different fields
- * to match: source and destination IP addresses, vlan id, flexbytes, source
- * and destination ports, and so on.
- * IPv6 are not supported.
- * @param soft_id
- * The 16-bit value supplied in the field hash.fdir.id of mbuf for RX
- * packets matching the perfect filter.
- * @param rx_queue
- * The index of the RX queue where to store RX packets matching the added
- * perfect filter defined in fdir_filter.
- * @param drop
- * If drop is set to 1, matching RX packets are stored into the RX drop
- * queue defined in the rte_fdir_conf.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support flow director mode.
- * - (-ENODEV) if *port_id* invalid.
- * - (-ENOSYS) if the flow director mode is not configured in perfect mode
- * on *port_id*.
- * - (-EINVAL) if the fdir_filter information is not correct.
- */
-int rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
- struct rte_fdir_filter *fdir_filter,
- uint16_t soft_id, uint8_t rx_queue,
- uint8_t drop);
+#ifdef RTE_ETHDEV_RXTX_CALLBACKS
+ struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
-/**
- * Update a perfect filter rule on an Ethernet device.
- * If the rule doesn't exits, it is created.
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param fdir_filter
- * The pointer to the structure describing the perfect filter rule.
- * The *rte_fdir_filter* structure includes the values of the different fields
- * to match: source and destination IP addresses, vlan id, flexbytes, source
- * and destination ports, and so on.
- * IPv6 are not supported.
- * @param soft_id
- * The 16-bit value supplied in the field hash.fdir.id of mbuf for RX
- * packets matching the perfect filter.
- * @param rx_queue
- * The index of the RX queue where to store RX packets matching the added
- * perfect filter defined in fdir_filter.
- * @param drop
- * If drop is set to 1, matching RX packets are stored into the RX drop
- * queue defined in the rte_fdir_conf.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support flow director mode.
- * - (-ENODEV) if *port_id* invalid.
- * - (-ENOSYS) if the flow director mode is not configured in perfect mode
- * on *port_id*.
- * - (-EINVAL) if the fdir_filter information is not correct.
- */
-int rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
- struct rte_fdir_filter *fdir_filter,
- uint16_t soft_id, uint8_t rx_queue,
- uint8_t drop);
+ if (unlikely(cb != NULL)) {
+ do {
+ nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
+ cb->param);
+ cb = cb->next;
+ } while (cb != NULL);
+ }
+#endif
-/**
- * Remove a perfect filter rule on an Ethernet device.
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param fdir_filter
- * The pointer to the structure describing the perfect filter rule.
- * The *rte_fdir_filter* structure includes the values of the different fields
- * to match: source and destination IP addresses, vlan id, flexbytes, source
- * and destination ports, and so on.
- * IPv6 are not supported.
- * @param soft_id
- * The soft_id value provided when adding/updating the removed filter.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support flow director mode.
- * - (-ENODEV) if *port_id* invalid.
- * - (-ENOSYS) if the flow director mode is not configured in perfect mode
- * on *port_id*.
- * - (-EINVAL) if the fdir_filter information is not correct.
- */
-int rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
- struct rte_fdir_filter *fdir_filter,
- uint16_t soft_id);
-/**
- * Configure globally the masks for flow director mode for an Ethernet device.
- * For example, the device can match packets with only the first 24 bits of
- * the IPv4 source address.
- *
- * The following fields can be masked: IPv4 addresses and L4 port numbers.
- * The following fields can be either enabled or disabled completely for the
- * matching functionality: VLAN ID tag; VLAN Priority + CFI bit; Flexible 2-byte
- * tuple.
- * IPv6 masks are not supported.
- *
- * All filters must comply with the masks previously configured.
- * For example, with a mask equal to 255.255.255.0 for the source IPv4 address,
- * all IPv4 filters must be created with a source IPv4 address that fits the
- * "X.X.X.0" format.
- *
- * This function flushes all filters that have been previously added in
- * the device.
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param fdir_mask
- * The pointer to the fdir mask structure describing relevant headers fields
- * and relevant bits to use when matching packets addresses and ports.
- * IPv6 masks are not supported.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support flow director mode.
- * - (-ENODEV) if *port_id* invalid.
- * - (-ENOSYS) if the flow director mode is not configured in perfect
- * mode on *port_id*.
- * - (-EINVAL) if the fdir_filter information is not correct
- */
-int rte_eth_dev_fdir_set_masks(uint8_t port_id,
- struct rte_fdir_masks *fdir_mask);
+ return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
+}
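
A minimal usage sketch for the inline TX path above (not part of the patch): the burst size and port/queue 0 are illustrative assumptions. rte_eth_tx_burst() may accept fewer packets than requested, so callers typically retry with the unsent tail:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative TX retry loop; BURST size and port/queue ids are assumptions. */
static void
send_all(struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t sent = 0;

	while (sent < n)
		sent += rte_eth_tx_burst(0 /* port */, 0 /* queue */,
					 &pkts[sent], n - sent);
}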
/**
* The eth device event type for interrupt, and maybe others in the future.
@@ -2836,6 +2733,92 @@ void _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
enum rte_eth_event_type event);
/**
+ * When no RX packets arrive on an RX queue for a long time, the lcore
+ * serving that queue can be put to sleep to save power, with an RX
+ * interrupt armed so it is woken when the next packet arrives.
+ *
+ * The rte_eth_dev_rx_intr_enable() function enables rx queue
+ * interrupt on specific rx queue of a port.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the receive queue from which to retrieve input packets.
+ * The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if underlying hardware OR driver doesn't support
+ * that operation.
+ * - (-ENODEV) if *port_id* invalid.
+ */
+int rte_eth_dev_rx_intr_enable(uint8_t port_id, uint16_t queue_id);
+
+/**
+ * When an lcore wakes up from an RX interrupt indicating packet arrival,
+ * it can disable the RX interrupt and return to polling mode.
+ *
+ * The rte_eth_dev_rx_intr_disable() function disables rx queue
+ * interrupt on specific rx queue of a port.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the receive queue from which to retrieve input packets.
+ * The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if underlying hardware OR driver doesn't support
+ * that operation.
+ * - (-ENODEV) if *port_id* invalid.
+ */
+int rte_eth_dev_rx_intr_disable(uint8_t port_id, uint16_t queue_id);
+
+/**
+ * RX Interrupt control per port.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param epfd
+ * Epoll instance fd to which the interrupt vector is associated.
+ * Use RTE_EPOLL_PER_THREAD for a per-thread epoll instance.
+ * @param op
+ * The operation to perform on the vector, one of
+ * {RTE_INTR_EVENT_ADD, RTE_INTR_EVENT_DEL}.
+ * @param data
+ * User raw data.
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data);
+
+/**
+ * RX Interrupt control per queue.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the receive queue from which to retrieve input packets.
+ * The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @param epfd
+ * Epoll instance fd to which the interrupt vector is associated.
+ * Use RTE_EPOLL_PER_THREAD for a per-thread epoll instance.
+ * @param op
+ * The operation to perform on the vector, one of
+ * {RTE_INTR_EVENT_ADD, RTE_INTR_EVENT_DEL}.
+ * @param data
+ * User raw data.
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
+ int epfd, int op, void *data);
+
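
Taken together, the four functions above support an interrupt-driven RX loop in the style of the l3fwd-power example. A hedged sketch; port 0, queue 0, the burst size, and the use of rte_epoll_wait() with RTE_EPOLL_PER_THREAD are illustrative assumptions:

#include <rte_ethdev.h>
#include <rte_interrupts.h>

static void
rx_poll_or_sleep(void)
{
	struct rte_epoll_event ev;
	struct rte_mbuf *pkts[32];

	/* Bind queue 0's interrupt vector to this thread's epoll instance. */
	rte_eth_dev_rx_intr_ctl_q(0, 0, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);
	for (;;) {
		if (rte_eth_rx_burst(0, 0, pkts, 32) == 0) {
			/* Nothing received: arm the interrupt and sleep.
			 * (Production code re-polls once after enabling to
			 * avoid racing a packet that just arrived.) */
			rte_eth_dev_rx_intr_enable(0, 0);
			rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
			rte_eth_dev_rx_intr_disable(0, 0);
			continue;
		}
		/* ... process and free the received mbufs ... */
	}
}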
+/**
* Turn on the LED on the Ethernet device.
* This function turns on the LED on the Ethernet device.
*
@@ -2950,6 +2933,22 @@ int rte_eth_dev_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
int rte_eth_dev_mac_addr_remove(uint8_t port, struct ether_addr *mac_addr);
/**
+ * Set the default MAC address.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac_addr
+ * New default MAC address.
+ * @return
+ * - (0) if successful, or *mac_addr* didn't exist.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if MAC address is invalid.
+ */
+int rte_eth_dev_default_mac_addr_set(uint8_t port, struct ether_addr *mac_addr);
+
+
+/**
* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device.
*
* @param port
@@ -3136,7 +3135,7 @@ rte_eth_dev_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
* - (-EINVAL) if the mr_conf information is not correct.
*/
int rte_eth_mirror_rule_set(uint8_t port_id,
- struct rte_eth_vmdq_mirror_conf *mirror_conf,
+ struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id,
uint8_t on);
@@ -3164,7 +3163,7 @@ int rte_eth_mirror_rule_reset(uint8_t port_id,
* @param queue_idx
* The queue id.
* @param tx_rate
- * The tx rate allocated from the total link speed for this queue.
+ * The tx rate in Mbps. Allocated from the total port link speed.
* @return
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support this feature.
@@ -3229,7 +3228,7 @@ int rte_eth_dev_bypass_state_show(uint8_t port, uint32_t *state);
*
* @param port
* The port identifier of the Ethernet device.
- * @param state
+ * @param new_state
* The current bypass state.
* - (1) Normal mode
* - (2) Bypass mode
@@ -3294,7 +3293,7 @@ int rte_eth_dev_bypass_event_store(uint8_t port, uint32_t event, uint32_t state)
*
* @param port
* The port identifier of the Ethernet device.
- * @param state
+ * @param timeout
* The timeout to be set.
* - (0) 0 seconds (timer is off)
* - (1) 1.5 seconds
@@ -3428,329 +3427,465 @@ rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
struct rte_eth_udp_tunnel *tunnel_udp);
/**
- * add syn filter
+ * Check whether the filter type is supported on an Ethernet device.
+ * All the supported filter types are defined in 'rte_eth_ctrl.h'.
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param rx_queue
- * The index of RX queue where to store RX packets matching the syn filter.
- * @param filter
- * The pointer to the structure describing the syn filter rule.
+ * @param filter_type
+ * Filter type.
* @return
* - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) if hardware doesn't support this filter type.
+ * - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_add_syn_filter(uint8_t port_id,
- struct rte_syn_filter *filter, uint16_t rx_queue);
+int rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type);
/**
- * remove syn filter
+ * Perform an operation on the specified filter type of an Ethernet device.
+ * All the supported operations and filter types are defined in 'rte_eth_ctrl.h'.
*
* @param port_id
* The port identifier of the Ethernet device.
+ * @param filter_type
+ * Filter type.
+ * @param filter_op
+ * Type of operation.
+ * @param arg
+ * A pointer to arguments defined specifically for the operation.
* @return
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
+ * - (-ENODEV) if *port_id* invalid.
+ * - other values depend on the implementation of the specific operation.
*/
-int rte_eth_dev_remove_syn_filter(uint8_t port_id);
+int rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg);
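
As an illustration of this generic filter API, the hedged sketch below adds an ethertype filter steering ARP frames to RX queue 1; the struct rte_eth_ethertype_filter layout and the RTE_ETH_FILTER_* constants follow rte_eth_ctrl.h, and the port/queue numbers are assumptions:

#include <string.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>

static int
add_arp_filter(uint8_t port_id)
{
	struct rte_eth_ethertype_filter f;

	memset(&f, 0, sizeof(f));
	f.ether_type = 0x0806;	/* ARP */
	f.queue = 1;		/* steer matches to RX queue 1 */

	if (rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_ETHERTYPE) < 0)
		return -1;	/* PMD has no ethertype filter support */
	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
				       RTE_ETH_FILTER_ADD, &f);
}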
/**
- * get syn filter
+ * Get DCB information on an Ethernet device.
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param filter
- * The pointer to the structure describing the syn filter.
- * @param rx_queue
- * A pointer to get the queue index of syn filter.
+ * @param dcb_info
+ * A pointer to a structure of type *rte_eth_dcb_info* to be filled with
+ * the DCB information of the Ethernet device.
* @return
* - (0) if successful.
+ * - (-ENODEV) if port identifier is invalid.
* - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
*/
-int rte_eth_dev_get_syn_filter(uint8_t port_id,
- struct rte_syn_filter *filter, uint16_t *rx_queue);
+int rte_eth_dev_get_dcb_info(uint8_t port_id,
+ struct rte_eth_dcb_info *dcb_info);
/**
- * Add a new ethertype filter rule on an Ethernet device.
+ * Add a callback to be called on packet RX on a given port and queue.
+ *
+ * This API configures a function to be called for each burst of
+ * packets received on a given NIC port queue. The return value is a pointer
+ * that can be used to later remove the callback using
+ * rte_eth_remove_rx_callback().
+ *
+ * Multiple functions are called in the order that they are added.
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param index
- * The identifier of ethertype filter.
- * @param filter
- * The pointer to the structure describing the ethertype filter rule.
- * The *rte_ethertype_filter* structure includes the values of the different
- * fields to match: ethertype and priority in vlan tag.
- * priority in vlan tag is not supported for E1000 dev.
- * @param rx_queue
- * The index of the RX queue where to store RX packets matching the added
- * ethertype filter.
+ * @param queue_id
+ * The queue on the Ethernet device on which the callback is to be added.
+ * @param fn
+ * The callback function
+ * @param user_param
+ * A generic pointer parameter which will be passed to each invocation of the
+ * callback function on this port and queue.
+ *
* @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support ethertype filter.
- * - (-ENODEV) if *port_id* invalid.
- * - (-EINVAL) if the filter information is not correct.
+ * NULL on error.
+ * On success, a pointer value which can later be used to remove the callback.
*/
-int rte_eth_dev_add_ethertype_filter(uint8_t port_id, uint16_t index,
- struct rte_ethertype_filter *filter, uint16_t rx_queue);
+void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
+ rte_rx_callback_fn fn, void *user_param);
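
A hedged sketch of an RX callback: a pass-through packet counter attached to port 0, queue 0. The callback signature follows the rte_rx_callback_fn typedef in this header; the counter passed through user_param is an assumption:

#include <rte_ethdev.h>

static uint64_t rx_total;	/* illustrative per-queue counter */

static uint16_t
count_rx_cb(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
	    uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	(void)port; (void)queue; (void)max_pkts;
	*(uint64_t *)user_param += nb_pkts;
	return nb_pkts;		/* pass every packet through unchanged */
}

/* Keep the returned handle for rte_eth_remove_rx_callback() later. */
static void *cb_handle;
static void
install_cb(void)
{
	cb_handle = rte_eth_add_rx_callback(0, 0, count_rx_cb, &rx_total);
}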
/**
- * remove an ethertype filter rule on an Ethernet device.
+ * Add a callback to be called on packet TX on a given port and queue.
+ *
+ * This API configures a function to be called for each burst of
+ * packets sent on a given NIC port queue. The return value is a pointer
+ * that can be used to later remove the callback using
+ * rte_eth_remove_tx_callback().
+ *
+ * Multiple functions are called in the order that they are added.
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param index
- * The identifier of ethertype filter.
+ * @param queue_id
+ * The queue on the Ethernet device on which the callback is to be added.
+ * @param fn
+ * The callback function
+ * @param user_param
+ * A generic pointer parameter which will be passed to each invocation of the
+ * callback function on this port and queue.
+ *
* @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support ethertype filter.
- * - (-ENODEV) if *port_id* invalid.
- * - (-EINVAL) if the filter information is not correct.
+ * NULL on error.
+ * On success, a pointer value which can later be used to remove the callback.
*/
-int rte_eth_dev_remove_ethertype_filter(uint8_t port_id,
- uint16_t index);
+void *rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
+ rte_tx_callback_fn fn, void *user_param);
/**
- * Get an ethertype filter rule on an Ethernet device.
+ * Remove an RX packet callback from a given port and queue.
+ *
+ * This function is used to remove callbacks that were added to a NIC port
+ * queue using rte_eth_add_rx_callback().
+ *
+ * Note: the callback is removed from the callback list but it isn't freed
+ * since it may still be in use. The memory for the callback can be
+ * subsequently freed back by the application by calling rte_free():
+ *
+ * - Immediately - if the port is stopped, or the user knows that no
+ * callbacks are in flight e.g. if called from the thread doing RX/TX
+ * on that queue.
+ *
+ * - After a short delay - where the delay is sufficient to allow any
+ * in-flight callbacks to complete.
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param index
- * The identifier of ethertype filter.
- * @param filter
- * A pointer to a structure of type *rte_ethertype_filter* to be filled with
- * the information of the Ethertype filter.
- * @param rx_queue
- * A pointer to get the queue index.
+ * @param queue_id
+ * The queue on the Ethernet device from which the callback is to be removed.
+ * @param user_cb
+ * User supplied callback created via rte_eth_add_rx_callback().
+ *
* @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support ethertype filter.
- * - (-ENODEV) if *port_id* invalid.
- * - (-EINVAL) if the filter information is not correct.
- * - (-ENOENT) if no enabled filter in this index.
+ * - 0: Success. Callback was removed.
+ * - -ENOTSUP: Callback support is not available.
+ * - -EINVAL: The port_id or the queue_id is out of range, or the callback
+ * is NULL or not found for the port/queue.
*/
-int rte_eth_dev_get_ethertype_filter(uint8_t port_id, uint16_t index,
- struct rte_ethertype_filter *filter, uint16_t *rx_queue);
+int rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
+ struct rte_eth_rxtx_callback *user_cb);
/**
- * Add a new 2tuple filter rule on an Ethernet device.
+ * Remove a TX packet callback from a given port and queue.
+ *
+ * This function is used to remove callbacks that were added to a NIC port
+ * queue using rte_eth_add_tx_callback().
+ *
+ * Note: the callback is removed from the callback list but it isn't freed
+ * since it may still be in use. The memory for the callback can be
+ * subsequently freed back by the application by calling rte_free():
+ *
+ * - Immediately - if the port is stopped, or the user knows that no
+ * callbacks are in flight e.g. if called from the thread doing RX/TX
+ * on that queue.
+ *
+ * - After a short delay - where the delay is sufficient to allow any
+ * in-flight callbacks to complete.
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param index
- * The identifier of 2tuple filter.
- * @param filter
- * The pointer to the structure describing the 2tuple filter rule.
- * The *rte_2tuple_filter* structure includes the values of the different
- * fields to match: protocol, dst_port and
- * tcp_flags if the protocol is tcp type.
- * @param rx_queue
- * The index of the RX queue where to store RX packets matching the added
- * 2tuple filter.
+ * @param queue_id
+ * The queue on the Ethernet device from which the callback is to be removed.
+ * @param user_cb
+ * User supplied callback created via rte_eth_add_tx_callback().
+ *
* @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support 2tuple filter.
- * - (-ENODEV) if *port_id* invalid.
- * - (-EINVAL) if the filter information is not correct.
+ * - 0: Success. Callback was removed.
+ * - -ENOTSUP: Callback support is not available.
+ * - -EINVAL: The port_id or the queue_id is out of range, or the callback
+ * is NULL or not found for the port/queue.
*/
-int rte_eth_dev_add_2tuple_filter(uint8_t port_id, uint16_t index,
- struct rte_2tuple_filter *filter, uint16_t rx_queue);
+int rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
+ struct rte_eth_rxtx_callback *user_cb);
/**
- * remove a 2tuple filter rule on an Ethernet device.
+ * Retrieve information about given port's RX queue.
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param index
- * The identifier of 2tuple filter.
+ * @param queue_id
+ * The RX queue on the Ethernet device for which information
+ * will be retrieved.
+ * @param qinfo
+ * A pointer to a structure of type *rte_eth_rxq_info* to be filled with
+ * the information of the given RX queue.
+ *
* @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support 2tuple filter.
- * - (-ENODEV) if *port_id* invalid.
- * - (-EINVAL) if the filter information is not correct.
+ * - 0: Success
+ * - -ENOTSUP: routine is not supported by the device PMD.
+ * - -EINVAL: The port_id or the queue_id is out of range.
*/
-int rte_eth_dev_remove_2tuple_filter(uint8_t port_id, uint16_t index);
+int rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
/**
- * Get an 2tuple filter rule on an Ethernet device.
+ * Retrieve information about given port's TX queue.
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param index
- * The identifier of 2tuple filter.
- * @param filter
- * A pointer to a structure of type *rte_2tuple_filter* to be filled with
- * the information of the 2tuple filter.
- * @param rx_queue
- * A pointer to get the queue index.
+ * @param queue_id
+ * The TX queue on the Ethernet device for which information
+ * will be retrieved.
+ * @param qinfo
+ * A pointer to a structure of type *rte_eth_txq_info* to be filled with
+ * the information of the given TX queue.
+ *
* @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support 2tuple filter.
+ * - 0: Success
+ * - -ENOTSUP: routine is not supported by the device PMD.
+ * - -EINVAL: The port_id or the queue_id is out of range.
+ */
+int rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+
+/**
+ * Retrieve the number of device registers available for access.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (>=0) number of registers if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
* - (-ENODEV) if *port_id* invalid.
- * - (-EINVAL) if the filter information is not correct.
- * - (-ENOENT) if no enabled filter in this index.
+ * - other values depend on the implementation of the specific operation.
*/
-int rte_eth_dev_get_2tuple_filter(uint8_t port_id, uint16_t index,
- struct rte_2tuple_filter *filter, uint16_t *rx_queue);
+int rte_eth_dev_get_reg_length(uint8_t port_id);
/**
- * Add a new 5tuple filter rule on an Ethernet device.
+ * Retrieve device registers and register attributes
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param index
- * The identifier of 5tuple filter.
- * @param filter
- * The pointer to the structure describing the 5tuple filter rule.
- * The *rte_5tuple_filter* structure includes the values of the different
- * fields to match: dst src IP, dst src port, protocol and relative masks
- * @param rx_queue
- * The index of the RX queue where to store RX packets matching the added
- * 5tuple filter.
+ * @param info
+ * The template structure includes the buffer to be filled with register data
+ * and the register attributes.
* @return
* - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support 5tuple filter.
+ * - (-ENOTSUP) if hardware doesn't support.
* - (-ENODEV) if *port_id* invalid.
- * - (-EINVAL) if the filter information is not correct.
+ * - other values depend on the implementation of the specific operation.
*/
-int rte_eth_dev_add_5tuple_filter(uint8_t port_id, uint16_t index,
- struct rte_5tuple_filter *filter, uint16_t rx_queue);
+int rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info);
/**
- * remove a 5tuple filter rule on an Ethernet device.
+ * Retrieve size of device EEPROM
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param index
- * The identifier of 5tuple filter.
* @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support 5tuple filter.
+ * - (>=0) EEPROM size if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
* - (-ENODEV) if *port_id* invalid.
- * - (-EINVAL) if the filter information is not correct.
+ * - other values depend on the implementation of the specific operation.
*/
-int rte_eth_dev_remove_5tuple_filter(uint8_t port_id, uint16_t index);
+int rte_eth_dev_get_eeprom_length(uint8_t port_id);
/**
- * Get an 5tuple filter rule on an Ethernet device.
+ * Retrieve EEPROM and EEPROM attribute
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param index
- * The identifier of 5tuple filter.
- * @param filter
- * A pointer to a structure of type *rte_5tuple_filter* to be filled with
- * the information of the 5tuple filter.
- * @param rx_queue
- * A pointer to get the queue index.
+ * @param info
+ * The template structure includes the buffer to be filled with EEPROM data
+ * and the EEPROM attributes.
* @return
* - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support 5tuple filter.
+ * - (-ENOTSUP) if hardware doesn't support.
* - (-ENODEV) if *port_id* invalid.
- * - (-EINVAL) if the filter information is not correct.
+ * - other values depend on the implementation of the specific operation.
*/
-int rte_eth_dev_get_5tuple_filter(uint8_t port_id, uint16_t index,
- struct rte_5tuple_filter *filter, uint16_t *rx_queue);
+int rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
/**
- * Add a new flex filter rule on an Ethernet device.
+ * Program EEPROM with provided data
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param index
- * The identifier of flex filter.
- * @param filter
- * The pointer to the structure describing the flex filter rule.
- * The *rte_flex_filter* structure includes the values of the different fields
- * to match: the dwords (first len bytes of packet ) and relative masks.
- * @param rx_queue
- * The index of the RX queue where to store RX packets matching the added
- * flex filter.
+ * @param info
+ * The template structure includes the EEPROM data to program and the
+ * EEPROM attributes to be filled in.
* @return
* - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support flex filter.
+ * - (-ENOTSUP) if hardware doesn't support.
* - (-ENODEV) if *port_id* invalid.
- * - (-EINVAL) if the filter information is not correct.
- * - (-ENOENT) if no enabled filter in this index.
+ * - other values depend on the implementation of the specific operation.
*/
-int rte_eth_dev_add_flex_filter(uint8_t port_id, uint16_t index,
- struct rte_flex_filter *filter, uint16_t rx_queue);
+int rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
/**
- * remove a flex filter rule on an Ethernet device.
+ * Set the list of multicast addresses to filter on an Ethernet device.
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param index
- * The identifier of flex filter.
+ * @param mc_addr_set
+ * The array of multicast addresses to set. Equal to NULL when the function
+ * is invoked to flush the set of filtered addresses.
+ * @param nb_mc_addr
+ * The number of multicast addresses in the *mc_addr_set* array. Equal to 0
+ * when the function is invoked to flush the set of filtered addresses.
* @return
* - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support flex filter.
* - (-ENODEV) if *port_id* invalid.
- * - (-EINVAL) if the filter information is not correct.
+ * - (-ENOTSUP) if PMD of *port_id* doesn't support multicast filtering.
+ * - (-ENOSPC) if *port_id* has not enough multicast filtering resources.
*/
-int rte_eth_dev_remove_flex_filter(uint8_t port_id, uint16_t index);
+int rte_eth_dev_set_mc_addr_list(uint8_t port_id,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
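
A hedged usage sketch: subscribe port 0 to two illustrative IPv4 multicast MAC groups, then flush the list again using the NULL/0 convention described above:

#include <rte_ethdev.h>
#include <rte_ether.h>

static void
mc_filter_demo(void)
{
	struct ether_addr mc[2] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
	};

	rte_eth_dev_set_mc_addr_list(0, mc, 2);		/* install both */
	rte_eth_dev_set_mc_addr_list(0, NULL, 0);	/* flush everything */
}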
/**
- * Get an flex filter rule on an Ethernet device.
+ * Enable IEEE1588/802.1AS timestamping for an Ethernet device.
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param index
- * The identifier of flex filter.
- * @param filter
- * A pointer to a structure of type *rte_flex_filter* to be filled with
- * the information of the flex filter.
- * @param rx_queue
- * A pointer to get the queue index.
+ *
* @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support flex filter.
- * - (-ENODEV) if *port_id* invalid.
- * - (-EINVAL) if the filter information is not correct.
- * - (-ENOENT) if no enabled filter in this index.
+ * - 0: Success.
+ * - -ENODEV: The port ID is invalid.
+ * - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
-int rte_eth_dev_get_flex_filter(uint8_t port_id, uint16_t index,
- struct rte_flex_filter *filter, uint16_t *rx_queue);
+extern int rte_eth_timesync_enable(uint8_t port_id);
/**
- * Check whether the filter type is supported on an Ethernet device.
- * All the supported filter types are defined in 'rte_eth_ctrl.h'.
+ * Disable IEEE1588/802.1AS timestamping for an Ethernet device.
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param filter_type
- * Filter type.
+ *
* @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support this filter type.
- * - (-ENODEV) if *port_id* invalid.
+ * - 0: Success.
+ * - -ENODEV: The port ID is invalid.
+ * - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
-int rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type);
+extern int rte_eth_timesync_disable(uint8_t port_id);
/**
- * Take operations to assigned filter type on an Ethernet device.
- * All the supported operations and filter types are defined in 'rte_eth_ctrl.h'.
+ * Read an IEEE1588/802.1AS RX timestamp from an Ethernet device.
*
* @param port_id
* The port identifier of the Ethernet device.
- * @param filter_type
- * Filter type.
- * @param filter_op
- * Type of operation.
- * @param arg
- * A pointer to arguments defined specifically for the operation.
+ * @param timestamp
+ * Pointer to the timestamp struct.
+ * @param flags
+ * Device specific flags. Used to pass the RX timesync register index to
+ * i40e. Unused in igb/ixgbe, pass 0 instead.
+ *
* @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-ENODEV) if *port_id* invalid.
- * - others depends on the specific operations implementation.
+ * - 0: Success.
+ * - -EINVAL: No timestamp is available.
+ * - -ENODEV: The port ID is invalid.
+ * - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
-int rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg);
+extern int rte_eth_timesync_read_rx_timestamp(uint8_t port_id,
+ struct timespec *timestamp,
+ uint32_t flags);
+
+/**
+ * Read an IEEE1588/802.1AS TX timestamp from an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param timestamp
+ * Pointer to the timestamp struct.
+ *
+ * @return
+ * - 0: Success.
+ * - -EINVAL: No timestamp is available.
+ * - -ENODEV: The port ID is invalid.
+ * - -ENOTSUP: The function is not supported by the Ethernet driver.
+ */
+extern int rte_eth_timesync_read_tx_timestamp(uint8_t port_id,
+ struct timespec *timestamp);
+
+/**
+ * Adjust the timesync clock on an Ethernet device.
+ *
+ * This is usually used in conjunction with other Ethdev timesync functions to
+ * synchronize the device time using the IEEE1588/802.1AS protocol.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param delta
+ * The adjustment in nanoseconds.
+ *
+ * @return
+ * - 0: Success.
+ * - -ENODEV: The port ID is invalid.
+ * - -ENOTSUP: The function is not supported by the Ethernet driver.
+ */
+extern int rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta);
+
+/**
+ * Read the time from the timesync clock on an Ethernet device.
+ *
+ * This is usually used in conjunction with other Ethdev timesync functions to
+ * synchronize the device time using the IEEE1588/802.1AS protocol.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param time
+ * Pointer to the timespec struct that holds the time.
+ *
+ * @return
+ * - 0: Success.
+ */
+extern int rte_eth_timesync_read_time(uint8_t port_id, struct timespec *time);
+
+/**
+ * Set the time of the timesync clock on an Ethernet device.
+ *
+ * This is usually used in conjunction with other Ethdev timesync functions to
+ * synchronize the device time using the IEEE1588/802.1AS protocol.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param time
+ * Pointer to the timespec struct that holds the time.
+ *
+ * @return
+ * - 0: Success.
+ * - -EINVAL: No timestamp is available.
+ * - -ENODEV: The port ID is invalid.
+ * - -ENOTSUP: The function is not supported by the Ethernet driver.
+ */
+extern int rte_eth_timesync_write_time(uint8_t port_id,
+ const struct timespec *time);
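
A hedged sketch of the timesync call sequence above for a PTP-style application; error handling and the actual clock-servo math are omitted, and the +500 ns adjustment is an arbitrary illustration:

#include <time.h>
#include <rte_ethdev.h>

static void
timesync_demo(uint8_t port)
{
	struct timespec now;

	rte_eth_timesync_enable(port);

	/* Coarse-set the device clock from the system clock ... */
	clock_gettime(CLOCK_REALTIME, &now);
	rte_eth_timesync_write_time(port, &now);
	/* ... then apply a fine offset, as a clock servo would. */
	rte_eth_timesync_adjust_time(port, 500);

	rte_eth_timesync_read_time(port, &now);
	rte_eth_timesync_disable(port);
}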
+
+/**
+ * Copy pci device info to the Ethernet device data.
+ *
+ * @param eth_dev
+ * The *eth_dev* pointer is the address of the *rte_eth_dev* structure.
+ * @param pci_dev
+ * The *pci_dev* pointer is the address of the *rte_pci_device* structure.
+ */
+extern void rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev);
+
+
+/**
+ * Create memzone for HW rings.
+ * malloc can't be used as the physical address is needed.
+ * If the memzone was already created, this function returns a pointer
+ * to the existing one.
+ *
+ * @param eth_dev
+ * The *eth_dev* pointer is the address of the *rte_eth_dev* structure
+ * @param name
+ * The name of the memory zone
+ * @param queue_id
+ * The index of the queue to add to name
+ * @param size
+ * The size of the memory area
+ * @param align
+ * Alignment for resulting memzone. Must be a power of 2.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in case of NUMA.
+ */
+const struct rte_memzone *
+rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
+ uint16_t queue_id, size_t size,
+ unsigned align, int socket_id);
#ifdef __cplusplus
}
diff --git a/src/dpdk_lib18/librte_ether/rte_ether.h b/src/dpdk22/lib/librte_ether/rte_ether.h
index 7e7d22cc..07c17d7e 100755..100644
--- a/src/dpdk_lib18/librte_ether/rte_ether.h
+++ b/src/dpdk22/lib/librte_ether/rte_ether.h
@@ -49,6 +49,8 @@ extern "C" {
#include <rte_memcpy.h>
#include <rte_random.h>
+#include <rte_mbuf.h>
+#include <rte_byteorder.h>
#define ETHER_ADDR_LEN 6 /**< Length of Ethernet address. */
#define ETHER_TYPE_LEN 2 /**< Length of Ethernet type field. */
@@ -173,7 +175,7 @@ static inline int is_multicast_ether_addr(const struct ether_addr *ea)
*/
static inline int is_broadcast_ether_addr(const struct ether_addr *ea)
{
- const uint16_t *ea_words = (const uint16_t *)ea;
+ const unaligned_uint16_t *ea_words = (const unaligned_uint16_t *)ea;
return (ea_words[0] == 0xFFFF && ea_words[1] == 0xFFFF &&
ea_words[2] == 0xFFFF);
@@ -275,7 +277,7 @@ static inline void ether_addr_copy(const struct ether_addr *ea_from,
* A pointer to buffer contains the formatted MAC address.
* @param size
* The format buffer size.
- * @param ea_to
+ * @param eth_addr
* A pointer to a ether_addr structure.
*/
static inline void
@@ -329,10 +331,84 @@ struct vxlan_hdr {
#define ETHER_TYPE_VLAN 0x8100 /**< IEEE 802.1Q VLAN tagging. */
#define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */
#define ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */
+#define ETHER_TYPE_TEB 0x6558 /**< Transparent Ethernet Bridging. */
#define ETHER_VXLAN_HLEN (sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr))
/**< VXLAN tunnel header length. */
+/**
+ * Extract VLAN tag information into mbuf
+ *
+ * Software version of VLAN stripping
+ *
+ * @param m
+ * The packet mbuf.
+ * @return
+ * - 0: Success
+ * - -1: not a VLAN packet
+ */
+static inline int rte_vlan_strip(struct rte_mbuf *m)
+{
+ struct ether_hdr *eh
+ = rte_pktmbuf_mtod(m, struct ether_hdr *);
+
+ if (eh->ether_type != rte_cpu_to_be_16(ETHER_TYPE_VLAN))
+ return -1;
+
+ struct vlan_hdr *vh = (struct vlan_hdr *)(eh + 1);
+ m->ol_flags |= PKT_RX_VLAN_PKT;
+ m->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci);
+
+ /* Copy ether header over rather than moving whole packet */
+ memmove(rte_pktmbuf_adj(m, sizeof(struct vlan_hdr)),
+ eh, 2 * ETHER_ADDR_LEN);
+
+ return 0;
+}
+
+/**
+ * Insert VLAN tag into mbuf.
+ *
+ * Software version of VLAN unstripping
+ *
+ * @param m
+ * The packet mbuf.
+ * @return
+ * - 0: On success
+ * - -ENOMEM: mbuf is shared and a private copy could not be allocated
+ * - -ENOSPC: not enough headroom in mbuf
+ */
+static inline int rte_vlan_insert(struct rte_mbuf **m)
+{
+ struct ether_hdr *oh, *nh;
+ struct vlan_hdr *vh;
+
+ /* Can't insert header if mbuf is shared */
+ if (rte_mbuf_refcnt_read(*m) > 1) {
+ struct rte_mbuf *copy;
+
+ copy = rte_pktmbuf_clone(*m, (*m)->pool);
+ if (unlikely(copy == NULL))
+ return -ENOMEM;
+ rte_pktmbuf_free(*m);
+ *m = copy;
+ }
+
+ oh = rte_pktmbuf_mtod(*m, struct ether_hdr *);
+ nh = (struct ether_hdr *)
+ rte_pktmbuf_prepend(*m, sizeof(struct vlan_hdr));
+ if (nh == NULL)
+ return -ENOSPC;
+
+ memmove(nh, oh, 2 * ETHER_ADDR_LEN);
+ nh->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+
+ vh = (struct vlan_hdr *) (nh + 1);
+ vh->vlan_tci = rte_cpu_to_be_16((*m)->vlan_tci);
+
+ return 0;
+}
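
A hedged round-trip sketch for the two helpers above: tag an mbuf with VLAN 100, then strip the tag back out. The VLAN id is arbitrary and the mbuf is assumed to have prepend headroom:

static int
vlan_roundtrip(struct rte_mbuf *m)
{
	struct rte_mbuf *pkt = m;

	pkt->vlan_tci = 100;
	if (rte_vlan_insert(&pkt) != 0)	/* may substitute a private copy */
		return -1;
	return rte_vlan_strip(pkt);	/* restores vlan_tci from the header */
}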
+
#ifdef __cplusplus
}
#endif
diff --git a/src/dpdk22/lib/librte_hash/rte_cmp_arm64.h b/src/dpdk22/lib/librte_hash/rte_cmp_arm64.h
new file mode 100644
index 00000000..6fd937b1
--- /dev/null
+++ b/src/dpdk22/lib/librte_hash/rte_cmp_arm64.h
@@ -0,0 +1,114 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Cavium networks. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Functions to compare multiple of 16 byte keys (up to 128 bytes) */
+static int
+rte_hash_k16_cmp_eq(const void *key1, const void *key2,
+ size_t key_len __rte_unused)
+{
+ uint64_t x0, x1, y0, y1;
+
+ asm volatile(
+ "ldp %x[x1], %x[x0], [%x[p1]]"
+ : [x1]"=r"(x1), [x0]"=r"(x0)
+ : [p1]"r"(key1)
+ );
+ asm volatile(
+ "ldp %x[y1], %x[y0], [%x[p2]]"
+ : [y1]"=r"(y1), [y0]"=r"(y0)
+ : [p2]"r"(key2)
+ );
+ x0 ^= y0;
+ x1 ^= y1;
+ return !(x0 == 0 && x1 == 0);
+}
+
+static int
+rte_hash_k32_cmp_eq(const void *key1, const void *key2, size_t key_len)
+{
+ return rte_hash_k16_cmp_eq(key1, key2, key_len) ||
+ rte_hash_k16_cmp_eq((const char *) key1 + 16,
+ (const char *) key2 + 16, key_len);
+}
+
+static int
+rte_hash_k48_cmp_eq(const void *key1, const void *key2, size_t key_len)
+{
+ return rte_hash_k16_cmp_eq(key1, key2, key_len) ||
+ rte_hash_k16_cmp_eq((const char *) key1 + 16,
+ (const char *) key2 + 16, key_len) ||
+ rte_hash_k16_cmp_eq((const char *) key1 + 32,
+ (const char *) key2 + 32, key_len);
+}
+
+static int
+rte_hash_k64_cmp_eq(const void *key1, const void *key2, size_t key_len)
+{
+ return rte_hash_k32_cmp_eq(key1, key2, key_len) ||
+ rte_hash_k32_cmp_eq((const char *) key1 + 32,
+ (const char *) key2 + 32, key_len);
+}
+
+static int
+rte_hash_k80_cmp_eq(const void *key1, const void *key2, size_t key_len)
+{
+ return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
+ rte_hash_k16_cmp_eq((const char *) key1 + 64,
+ (const char *) key2 + 64, key_len);
+}
+
+static int
+rte_hash_k96_cmp_eq(const void *key1, const void *key2, size_t key_len)
+{
+ return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
+ rte_hash_k32_cmp_eq((const char *) key1 + 64,
+ (const char *) key2 + 64, key_len);
+}
+
+static int
+rte_hash_k112_cmp_eq(const void *key1, const void *key2, size_t key_len)
+{
+ return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
+ rte_hash_k32_cmp_eq((const char *) key1 + 64,
+ (const char *) key2 + 64, key_len) ||
+ rte_hash_k16_cmp_eq((const char *) key1 + 96,
+ (const char *) key2 + 96, key_len);
+}
+
+static int
+rte_hash_k128_cmp_eq(const void *key1, const void *key2, size_t key_len)
+{
+ return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
+ rte_hash_k64_cmp_eq((const char *) key1 + 64,
+ (const char *) key2 + 64, key_len);
+}
diff --git a/src/dpdk22/lib/librte_hash/rte_cmp_x86.h b/src/dpdk22/lib/librte_hash/rte_cmp_x86.h
new file mode 100644
index 00000000..7f79bace
--- /dev/null
+++ b/src/dpdk22/lib/librte_hash/rte_cmp_x86.h
@@ -0,0 +1,109 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Functions to compare multiple of 16 byte keys (up to 128 bytes) */
+static int
+rte_hash_k16_cmp_eq(const void *key1, const void *key2, size_t key_len __rte_unused)
+{
+ const __m128i k1 = _mm_loadu_si128((const __m128i *) key1);
+ const __m128i k2 = _mm_loadu_si128((const __m128i *) key2);
+#ifdef RTE_MACHINE_CPUFLAG_SSE4_1
+ const __m128i x = _mm_xor_si128(k1, k2);
+
+ return !_mm_test_all_zeros(x, x);
+#else
+ const __m128i x = _mm_cmpeq_epi32(k1, k2);
+
+ return (_mm_movemask_epi8(x) != 0xffff);
+#endif
+}
+
+static int
+rte_hash_k32_cmp_eq(const void *key1, const void *key2, size_t key_len)
+{
+ return rte_hash_k16_cmp_eq(key1, key2, key_len) ||
+ rte_hash_k16_cmp_eq((const char *) key1 + 16,
+ (const char *) key2 + 16, key_len);
+}
+
+static int
+rte_hash_k48_cmp_eq(const void *key1, const void *key2, size_t key_len)
+{
+ return rte_hash_k16_cmp_eq(key1, key2, key_len) ||
+ rte_hash_k16_cmp_eq((const char *) key1 + 16,
+ (const char *) key2 + 16, key_len) ||
+ rte_hash_k16_cmp_eq((const char *) key1 + 32,
+ (const char *) key2 + 32, key_len);
+}
+
+static int
+rte_hash_k64_cmp_eq(const void *key1, const void *key2, size_t key_len)
+{
+ return rte_hash_k32_cmp_eq(key1, key2, key_len) ||
+ rte_hash_k32_cmp_eq((const char *) key1 + 32,
+ (const char *) key2 + 32, key_len);
+}
+
+static int
+rte_hash_k80_cmp_eq(const void *key1, const void *key2, size_t key_len)
+{
+ return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
+ rte_hash_k16_cmp_eq((const char *) key1 + 64,
+ (const char *) key2 + 64, key_len);
+}
+
+static int
+rte_hash_k96_cmp_eq(const void *key1, const void *key2, size_t key_len)
+{
+ return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
+ rte_hash_k32_cmp_eq((const char *) key1 + 64,
+ (const char *) key2 + 64, key_len);
+}
+
+static int
+rte_hash_k112_cmp_eq(const void *key1, const void *key2, size_t key_len)
+{
+ return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
+ rte_hash_k32_cmp_eq((const char *) key1 + 64,
+ (const char *) key2 + 64, key_len) ||
+ rte_hash_k16_cmp_eq((const char *) key1 + 96,
+ (const char *) key2 + 96, key_len);
+}
+
+static int
+rte_hash_k128_cmp_eq(const void *key1, const void *key2, size_t key_len)
+{
+ return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
+ rte_hash_k64_cmp_eq((const char *) key1 + 64,
+ (const char *) key2 + 64, key_len);
+}
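
The larger comparators are built by OR-chaining the 16-byte primitive, so they short-circuit on the first differing block; as with memcmp, 0 means equal and nonzero means different. A hedged illustration of the calling convention:

/* Illustrative wrapper (not part of the library): nonzero return from the
 * cmp_eq family means "not equal", mirroring memcmp semantics. */
static int
keys_match_32(const void *a, const void *b)
{
	return rte_hash_k32_cmp_eq(a, b, 32) == 0;
}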
diff --git a/src/dpdk22/lib/librte_hash/rte_crc_arm64.h b/src/dpdk22/lib/librte_hash/rte_crc_arm64.h
new file mode 100644
index 00000000..02e26bca
--- /dev/null
+++ b/src/dpdk22/lib/librte_hash/rte_crc_arm64.h
@@ -0,0 +1,151 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Cavium networks. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRC_ARM64_H_
+#define _RTE_CRC_ARM64_H_
+
+/**
+ * @file
+ *
+ * RTE CRC arm64 Hash
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <rte_cpuflags.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+
+static inline uint32_t
+crc32c_arm64_u32(uint32_t data, uint32_t init_val)
+{
+ asm(".arch armv8-a+crc");
+ __asm__ volatile(
+ "crc32cw %w[crc], %w[crc], %w[value]"
+ : [crc] "+r" (init_val)
+ : [value] "r" (data));
+ return init_val;
+}
+
+static inline uint32_t
+crc32c_arm64_u64(uint64_t data, uint32_t init_val)
+{
+ asm(".arch armv8-a+crc");
+ __asm__ volatile(
+ "crc32cx %w[crc], %w[crc], %x[value]"
+ : [crc] "+r" (init_val)
+ : [value] "r" (data));
+ return init_val;
+}
+
+/**
+ * Allow or disallow use of arm64 SIMD intrinsics for CRC32 hash
+ * calculation.
+ *
+ * @param alg
+ * An OR of following flags:
+ * - (CRC32_SW) Don't use arm64 crc intrinsics
+ * - (CRC32_ARM64) Use ARMv8 CRC intrinsic if available
+ *
+ */
+static inline void
+rte_hash_crc_set_alg(uint8_t alg)
+{
+ switch (alg) {
+ case CRC32_ARM64:
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
+ alg = CRC32_SW;
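+		/* fall-through: record the (possibly downgraded) algorithm */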
+ case CRC32_SW:
+ crc32_alg = alg;
+ default:
+ break;
+ }
+}
+
+/* Setting the best available algorithm */
+static inline void __attribute__((constructor))
+rte_hash_crc_init_alg(void)
+{
+ rte_hash_crc_set_alg(CRC32_ARM64);
+}
+
+/**
+ * Use a single crc32 instruction to perform a hash on a 4 byte value.
+ * Falls back to the software crc32 implementation if the arm64 CRC
+ * intrinsics are not supported.
+ *
+ * @param data
+ * Data to perform hash on.
+ * @param init_val
+ * Value to initialise hash generator.
+ * @return
+ * 32bit calculated hash value.
+ */
+static inline uint32_t
+rte_hash_crc_4byte(uint32_t data, uint32_t init_val)
+{
+ if (likely(crc32_alg & CRC32_ARM64))
+ return crc32c_arm64_u32(data, init_val);
+
+ return crc32c_1word(data, init_val);
+}
+
+/**
+ * Use a single crc32 instruction to perform a hash on an 8 byte value.
+ * Falls back to the software crc32 implementation if the arm64 CRC
+ * intrinsics are not supported.
+ *
+ * @param data
+ * Data to perform hash on.
+ * @param init_val
+ * Value to initialise hash generator.
+ * @return
+ * 32bit calculated hash value.
+ */
+static inline uint32_t
+rte_hash_crc_8byte(uint64_t data, uint32_t init_val)
+{
+ if (likely(crc32_alg == CRC32_ARM64))
+ return crc32c_arm64_u64(data, init_val);
+
+ return crc32c_2words(data, init_val);
+}
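
A hedged sketch of chaining the two primitives above to hash an IPv4 flow key; the key layout is an illustrative assumption:

struct flow_key {
	uint32_t src_ip;
	uint32_t dst_ip;
	uint32_t ports;		/* src/dst L4 ports packed together */
};

static inline uint32_t
flow_hash(const struct flow_key *k, uint32_t seed)
{
	/* Fold the 8 address bytes first, then the 4 port bytes. */
	uint32_t h = rte_hash_crc_8byte(((uint64_t)k->dst_ip << 32) | k->src_ip,
					seed);
	return rte_hash_crc_4byte(k->ports, h);
}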
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRC_ARM64_H_ */
diff --git a/src/dpdk22/lib/librte_hash/rte_cuckoo_hash.c b/src/dpdk22/lib/librte_hash/rte_cuckoo_hash.c
new file mode 100644
index 00000000..3e3167c5
--- /dev/null
+++ b/src/dpdk22/lib/librte_hash/rte_cuckoo_hash.c
@@ -0,0 +1,1243 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
+#include <rte_log.h>
+#include <rte_memcpy.h>
+#include <rte_prefetch.h>
+#include <rte_branch_prediction.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+#include <rte_cpuflags.h>
+#include <rte_log.h>
+#include <rte_rwlock.h>
+#include <rte_spinlock.h>
+#include <rte_ring.h>
+#include <rte_compat.h>
+
+#include "rte_hash.h"
+#if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_I686) || defined(RTE_ARCH_X86_X32)
+#include "rte_cmp_x86.h"
+#endif
+
+#if defined(RTE_ARCH_ARM64)
+#include "rte_cmp_arm64.h"
+#endif
+
+TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
+
+static struct rte_tailq_elem rte_hash_tailq = {
+ .name = "RTE_HASH",
+};
+EAL_REGISTER_TAILQ(rte_hash_tailq)
+
+/* Macro to enable/disable run-time checking of function parameters */
+#if defined(RTE_LIBRTE_HASH_DEBUG)
+#define RETURN_IF_TRUE(cond, retval) do { \
+ if (cond) \
+ return retval; \
+} while (0)
+#else
+#define RETURN_IF_TRUE(cond, retval)
+#endif
+
+/* Hash function used if none is specified */
+#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
+#include <rte_hash_crc.h>
+#define DEFAULT_HASH_FUNC rte_hash_crc
+#else
+#include <rte_jhash.h>
+#define DEFAULT_HASH_FUNC rte_jhash
+#endif
+
+/** Number of items per bucket. */
+#define RTE_HASH_BUCKET_ENTRIES 4
+
+#define NULL_SIGNATURE 0
+
+#define KEY_ALIGNMENT 16
+
+#define LCORE_CACHE_SIZE 8
+
+struct lcore_cache {
+ unsigned len; /**< Cache len */
+ void *objs[LCORE_CACHE_SIZE]; /**< Cache objects */
+} __rte_cache_aligned;
+
+/** A hash table structure. */
+struct rte_hash {
+ char name[RTE_HASH_NAMESIZE]; /**< Name of the hash. */
+ uint32_t entries; /**< Total table entries. */
+ uint32_t num_buckets; /**< Number of buckets in table. */
+ uint32_t key_len; /**< Length of hash key. */
+ rte_hash_function hash_func; /**< Function used to calculate hash. */
+ uint32_t hash_func_init_val; /**< Init value used by hash_func. */
+ rte_hash_cmp_eq_t rte_hash_cmp_eq; /**< Function used to compare keys. */
+ uint32_t bucket_bitmask; /**< Bitmask for getting bucket index
+ from hash signature. */
+ uint32_t key_entry_size; /**< Size of each key entry. */
+
+ struct rte_ring *free_slots; /**< Ring that stores all indexes
+ of the free slots in the key table */
+ void *key_store; /**< Table storing all keys and data */
+ struct rte_hash_bucket *buckets; /**< Table with buckets storing all the
+ hash values and key indexes
+ to the key table*/
+ uint8_t hw_trans_mem_support; /**< Hardware transactional
+ memory support */
+ struct lcore_cache *local_free_slots;
+ /**< Local cache per lcore, storing some indexes of the free slots */
+} __rte_cache_aligned;
+
+/* Structure storing both primary and secondary hashes */
+struct rte_hash_signatures {
+ union {
+ struct {
+ hash_sig_t current;
+ hash_sig_t alt;
+ };
+ uint64_t sig;
+ };
+};
+
+/* Structure that stores key-value pair */
+struct rte_hash_key {
+ union {
+ uintptr_t idata;
+ void *pdata;
+ };
+ /* Variable key size */
+ char key[0];
+} __attribute__((aligned(KEY_ALIGNMENT)));
+
+/** Bucket structure */
+struct rte_hash_bucket {
+ struct rte_hash_signatures signatures[RTE_HASH_BUCKET_ENTRIES];
+ /* Includes dummy key index that always contains index 0 */
+ uint32_t key_idx[RTE_HASH_BUCKET_ENTRIES + 1];
+ uint8_t flag[RTE_HASH_BUCKET_ENTRIES];
+} __rte_cache_aligned;
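
Each key owns a primary and an alternative (cuckoo) bucket; both indexes are derived from the two signatures using bucket_bitmask. A hedged sketch of the index computation implied by the structures above:

/* Illustrative only: how the two candidate buckets of a key are located. */
static inline void
candidate_buckets(const struct rte_hash *h, hash_sig_t sig, hash_sig_t alt,
		  uint32_t *prim_idx, uint32_t *sec_idx)
{
	*prim_idx = sig & h->bucket_bitmask;	/* primary bucket */
	*sec_idx = alt & h->bucket_bitmask;	/* cuckoo displacement target */
}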
+
+struct rte_hash *
+rte_hash_find_existing(const char *name)
+{
+ struct rte_hash *h = NULL;
+ struct rte_tailq_entry *te;
+ struct rte_hash_list *hash_list;
+
+ hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ TAILQ_FOREACH(te, hash_list, next) {
+ h = (struct rte_hash *) te->data;
+ if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
+ break;
+ }
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+ return h;
+}
+
+void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
+{
+ h->rte_hash_cmp_eq = func;
+}
+
+struct rte_hash *
+rte_hash_create(const struct rte_hash_parameters *params)
+{
+ struct rte_hash *h = NULL;
+ struct rte_tailq_entry *te = NULL;
+ struct rte_hash_list *hash_list;
+ struct rte_ring *r = NULL;
+ char hash_name[RTE_HASH_NAMESIZE];
+ void *k = NULL;
+ void *buckets = NULL;
+ char ring_name[RTE_RING_NAMESIZE];
+ unsigned num_key_slots;
+ unsigned hw_trans_mem_support = 0;
+ unsigned i;
+
+ hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
+
+ if (params == NULL) {
+ RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
+ return NULL;
+ }
+
+ /* Check for valid parameters */
+ if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
+ (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
+ !rte_is_power_of_2(RTE_HASH_BUCKET_ENTRIES) ||
+ (params->key_len == 0)) {
+ rte_errno = EINVAL;
+ RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
+ return NULL;
+ }
+
+ /* Check the extra_flag field for additional options. */
+ if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
+ hw_trans_mem_support = 1;
+
+ snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
+
+ /* Guarantee there's no existing table with the same name */
+ h = rte_hash_find_existing(params->name);
+ if (h != NULL)
+ return h;
+
+ te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
+ goto err;
+ }
+
+ h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
+ RTE_CACHE_LINE_SIZE, params->socket_id);
+
+ if (h == NULL) {
+ RTE_LOG(ERR, HASH, "memory allocation failed\n");
+ goto err;
+ }
+
+ const uint32_t num_buckets = rte_align32pow2(params->entries)
+ / RTE_HASH_BUCKET_ENTRIES;
+
+ buckets = rte_zmalloc_socket(NULL,
+ num_buckets * sizeof(struct rte_hash_bucket),
+ RTE_CACHE_LINE_SIZE, params->socket_id);
+
+ if (buckets == NULL) {
+ RTE_LOG(ERR, HASH, "memory allocation failed\n");
+ goto err;
+ }
+
+ const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;
+
+ /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
+ if (hw_trans_mem_support)
+ /*
+ * Increase number of slots by total number of indices
+ * that can be stored in the lcore caches
+ * except for the first cache
+ */
+ num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
+ LCORE_CACHE_SIZE + 1;
+ else
+ num_key_slots = params->entries + 1;
+
+ const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
+
+ k = rte_zmalloc_socket(NULL, key_tbl_size,
+ RTE_CACHE_LINE_SIZE, params->socket_id);
+
+ if (k == NULL) {
+ RTE_LOG(ERR, HASH, "memory allocation failed\n");
+ goto err;
+ }
+
+/*
+ * On x86 or ARM64, select an appropriate compare function, which may
+ * use architecture-specific intrinsics; otherwise fall back to memcmp
+ */
+#if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_I686) ||\
+ defined(RTE_ARCH_X86_X32) || defined(RTE_ARCH_ARM64)
+ /* Select function to compare keys */
+ switch (params->key_len) {
+ case 16:
+ h->rte_hash_cmp_eq = rte_hash_k16_cmp_eq;
+ break;
+ case 32:
+ h->rte_hash_cmp_eq = rte_hash_k32_cmp_eq;
+ break;
+ case 48:
+ h->rte_hash_cmp_eq = rte_hash_k48_cmp_eq;
+ break;
+ case 64:
+ h->rte_hash_cmp_eq = rte_hash_k64_cmp_eq;
+ break;
+ case 80:
+ h->rte_hash_cmp_eq = rte_hash_k80_cmp_eq;
+ break;
+ case 96:
+ h->rte_hash_cmp_eq = rte_hash_k96_cmp_eq;
+ break;
+ case 112:
+ h->rte_hash_cmp_eq = rte_hash_k112_cmp_eq;
+ break;
+ case 128:
+ h->rte_hash_cmp_eq = rte_hash_k128_cmp_eq;
+ break;
+ default:
+ /* If key is not a multiple of 16, use generic memcmp */
+ h->rte_hash_cmp_eq = memcmp;
+ }
+#else
+ h->rte_hash_cmp_eq = memcmp;
+#endif
+
+ snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
+ r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
+ params->socket_id, 0);
+ if (r == NULL) {
+ RTE_LOG(ERR, HASH, "memory allocation failed\n");
+ goto err;
+ }
+
+ if (hw_trans_mem_support) {
+ h->local_free_slots = rte_zmalloc_socket(NULL,
+ sizeof(struct lcore_cache) * RTE_MAX_LCORE,
+ RTE_CACHE_LINE_SIZE, params->socket_id);
+ }
+
+ /* Setup hash context */
+ snprintf(h->name, sizeof(h->name), "%s", params->name);
+ h->entries = params->entries;
+ h->key_len = params->key_len;
+ h->key_entry_size = key_entry_size;
+ h->hash_func_init_val = params->hash_func_init_val;
+
+ h->num_buckets = num_buckets;
+ h->bucket_bitmask = h->num_buckets - 1;
+ h->buckets = buckets;
+ h->hash_func = (params->hash_func == NULL) ?
+ DEFAULT_HASH_FUNC : params->hash_func;
+ h->key_store = k;
+ h->free_slots = r;
+ h->hw_trans_mem_support = hw_trans_mem_support;
+
+ /* populate the free slots ring. Entry zero is reserved for key misses */
+ for (i = 1; i < params->entries + 1; i++)
+ rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ te->data = (void *) h;
+ TAILQ_INSERT_TAIL(hash_list, te, next);
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ return h;
+err:
+ rte_free(te);
+ rte_free(h);
+ rte_free(buckets);
+ rte_free(k);
+ return NULL;
+}
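+
+/*
+ * A minimal usage sketch of the create/add/lookup/free cycle implemented
+ * above (illustrative only, not part of the upstream file; error handling
+ * is trimmed and the EAL is assumed to be initialized already).
+ */
+static inline void
+example_hash_usage(void)
+{
+ struct rte_hash_parameters params = {
+ .name = "example_ht",
+ .entries = 1024, /* total key slots */
+ .key_len = 16, /* fixed key size, in bytes */
+ .hash_func = NULL, /* fall back to DEFAULT_HASH_FUNC */
+ .hash_func_init_val = 0,
+ .socket_id = SOCKET_ID_ANY, /* any NUMA node */
+ };
+ uint8_t key[16] = {0};
+ void *data = NULL;
+ struct rte_hash *ht = rte_hash_create(&params);
+
+ if (ht == NULL)
+ return;
+ rte_hash_add_key_data(ht, key, (void *)(uintptr_t)42);
+ if (rte_hash_lookup_data(ht, key, &data) >= 0)
+ RTE_LOG(INFO, HASH, "found value %p\n", data);
+ rte_hash_del_key(ht, key);
+ rte_hash_free(ht);
+}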
+
+void
+rte_hash_free(struct rte_hash *h)
+{
+ struct rte_tailq_entry *te;
+ struct rte_hash_list *hash_list;
+
+ if (h == NULL)
+ return;
+
+ hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* find out tailq entry */
+ TAILQ_FOREACH(te, hash_list, next) {
+ if (te->data == (void *) h)
+ break;
+ }
+
+ if (te == NULL) {
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return;
+ }
+
+ TAILQ_REMOVE(hash_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ if (h->hw_trans_mem_support)
+ rte_free(h->local_free_slots);
+
+ rte_ring_free(h->free_slots);
+ rte_free(h->key_store);
+ rte_free(h->buckets);
+ rte_free(h);
+ rte_free(te);
+}
+
+hash_sig_t
+rte_hash_hash(const struct rte_hash *h, const void *key)
+{
+ /* calc hash result by key */
+ return h->hash_func(key, h->key_len, h->hash_func_init_val);
+}
+
+/* Calc the secondary hash value from the primary hash value of a given key */
+static inline hash_sig_t
+rte_hash_secondary_hash(const hash_sig_t primary_hash)
+{
+ static const unsigned all_bits_shift = 12;
+ static const unsigned alt_bits_xor = 0x5bd1e995;
+
+ uint32_t tag = primary_hash >> all_bits_shift;
+
+ return (primary_hash ^ ((tag + 1) * alt_bits_xor));
+}
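+
+/*
+ * Illustrative helper (not upstream code): how a signature maps to its two
+ * candidate buckets. num_buckets is a power of two, so masking with
+ * bucket_bitmask is equivalent to a modulo.
+ */
+static inline void
+example_candidate_buckets(const struct rte_hash *h, hash_sig_t sig,
+ uint32_t *prim_idx, uint32_t *sec_idx)
+{
+ *prim_idx = sig & h->bucket_bitmask;
+ *sec_idx = rte_hash_secondary_hash(sig) & h->bucket_bitmask;
+}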
+
+void
+rte_hash_reset(struct rte_hash *h)
+{
+ void *ptr;
+ unsigned i;
+
+ if (h == NULL)
+ return;
+
+ memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
+ memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
+
+ /* clear the free ring */
+ while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
+ rte_pause();
+
+ /* Repopulate the free slots ring. Entry zero is reserved for key misses */
+ for (i = 1; i < h->entries + 1; i++)
+ rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));
+
+ if (h->hw_trans_mem_support) {
+ /* Reset local caches per lcore */
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ h->local_free_slots[i].len = 0;
+ }
+}
+
+/* Search for an entry that can be pushed to its alternative location */
+static inline int
+make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt)
+{
+ unsigned i, j;
+ int ret;
+ uint32_t next_bucket_idx;
+ struct rte_hash_bucket *next_bkt[RTE_HASH_BUCKET_ENTRIES];
+
+ /*
+ * Push existing item (search for bucket with space in
+ * alternative locations) to its alternative location
+ */
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ /* Search for space in alternative locations */
+ next_bucket_idx = bkt->signatures[i].alt & h->bucket_bitmask;
+ next_bkt[i] = &h->buckets[next_bucket_idx];
+ for (j = 0; j < RTE_HASH_BUCKET_ENTRIES; j++) {
+ if (next_bkt[i]->signatures[j].sig == NULL_SIGNATURE)
+ break;
+ }
+
+ if (j != RTE_HASH_BUCKET_ENTRIES)
+ break;
+ }
+
+ /* Alternative location has spare room (end of recursive function) */
+ if (i != RTE_HASH_BUCKET_ENTRIES) {
+ next_bkt[i]->signatures[j].alt = bkt->signatures[i].current;
+ next_bkt[i]->signatures[j].current = bkt->signatures[i].alt;
+ next_bkt[i]->key_idx[j] = bkt->key_idx[i];
+ return i;
+ }
+
+ /* Pick entry that has not been pushed yet */
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++)
+ if (bkt->flag[i] == 0)
+ break;
+
+ /* All entries have been pushed, so entry cannot be added */
+ if (i == RTE_HASH_BUCKET_ENTRIES)
+ return -ENOSPC;
+
+ /* Set flag to indicate that this entry is going to be pushed */
+ bkt->flag[i] = 1;
+ /* Need room in alternative bucket to insert the pushed entry */
+ ret = make_space_bucket(h, next_bkt[i]);
+ /*
+ * After recursive function.
+ * Clear flags and insert the pushed entry
+ * in its alternative location if successful,
+ * or return error
+ */
+ bkt->flag[i] = 0;
+ if (ret >= 0) {
+ next_bkt[i]->signatures[ret].alt = bkt->signatures[i].current;
+ next_bkt[i]->signatures[ret].current = bkt->signatures[i].alt;
+ next_bkt[i]->key_idx[ret] = bkt->key_idx[i];
+ return i;
+ } else
+ return ret;
+
+}
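+
+/*
+ * Note on the displacement above (summary, not upstream text): this is
+ * standard cuckoo hashing. Every key has exactly two candidate buckets;
+ * when both are full, a victim entry is recursively pushed to its own
+ * alternative bucket. The per-entry flag[] marks entries already on the
+ * displacement path, so a cycle makes the recursion stop with -ENOSPC
+ * instead of looping forever.
+ */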
+
+/*
+ * Function called to enqueue an index back in the cache/ring,
+ * as the slot has not been used and can be reused in the
+ * next addition attempt.
+ */
+static inline void
+enqueue_slot_back(const struct rte_hash *h,
+ struct lcore_cache *cached_free_slots,
+ void *slot_id)
+{
+ if (h->hw_trans_mem_support) {
+ cached_free_slots->objs[cached_free_slots->len] = slot_id;
+ cached_free_slots->len++;
+ } else
+ rte_ring_sp_enqueue(h->free_slots, slot_id);
+}
+
+static inline int32_t
+__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void *data)
+{
+ hash_sig_t alt_hash;
+ uint32_t prim_bucket_idx, sec_bucket_idx;
+ unsigned i;
+ struct rte_hash_bucket *prim_bkt, *sec_bkt;
+ struct rte_hash_key *new_k, *k, *keys = h->key_store;
+ void *slot_id = NULL;
+ uint32_t new_idx;
+ int ret;
+ unsigned n_slots;
+ unsigned lcore_id;
+ struct lcore_cache *cached_free_slots = NULL;
+
+ prim_bucket_idx = sig & h->bucket_bitmask;
+ prim_bkt = &h->buckets[prim_bucket_idx];
+ rte_prefetch0(prim_bkt);
+
+ alt_hash = rte_hash_secondary_hash(sig);
+ sec_bucket_idx = alt_hash & h->bucket_bitmask;
+ sec_bkt = &h->buckets[sec_bucket_idx];
+ rte_prefetch0(sec_bkt);
+
+ /* Get a new slot for storing the new key */
+ if (h->hw_trans_mem_support) {
+ lcore_id = rte_lcore_id();
+ cached_free_slots = &h->local_free_slots[lcore_id];
+ /* Try to get a free slot from the local cache */
+ if (cached_free_slots->len == 0) {
+ /* Need to get another burst of free slots from global ring */
+ n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
+ cached_free_slots->objs, LCORE_CACHE_SIZE);
+ if (n_slots == 0)
+ return -ENOSPC;
+
+ cached_free_slots->len += n_slots;
+ }
+
+ /* Get a free slot from the local cache */
+ cached_free_slots->len--;
+ slot_id = cached_free_slots->objs[cached_free_slots->len];
+ } else {
+ if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0)
+ return -ENOSPC;
+ }
+
+ new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
+ rte_prefetch0(new_k);
+ new_idx = (uint32_t)((uintptr_t) slot_id);
+
+ /* Check if key is already inserted in primary location */
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ if (prim_bkt->signatures[i].current == sig &&
+ prim_bkt->signatures[i].alt == alt_hash) {
+ k = (struct rte_hash_key *) ((char *)keys +
+ prim_bkt->key_idx[i] * h->key_entry_size);
+ if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
+ /* Enqueue index of free slot back in the ring. */
+ enqueue_slot_back(h, cached_free_slots, slot_id);
+ /* Update data */
+ k->pdata = data;
+ /*
+ * Return index where key is stored,
+ * subtracting the first dummy index
+ */
+ return (prim_bkt->key_idx[i] - 1);
+ }
+ }
+ }
+
+ /* Check if key is already inserted in secondary location */
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ if (sec_bkt->signatures[i].alt == sig &&
+ sec_bkt->signatures[i].current == alt_hash) {
+ k = (struct rte_hash_key *) ((char *)keys +
+ sec_bkt->key_idx[i] * h->key_entry_size);
+ if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
+ /* Enqueue index of free slot back in the ring. */
+ enqueue_slot_back(h, cached_free_slots, slot_id);
+ /* Update data */
+ k->pdata = data;
+ /*
+ * Return index where key is stored,
+ * subtracting the first dummy index
+ */
+ return (sec_bkt->key_idx[i] - 1);
+ }
+ }
+ }
+
+ /* Copy key */
+ rte_memcpy(new_k->key, key, h->key_len);
+ new_k->pdata = data;
+
+ /* Insert new entry if there is room in the primary bucket */
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ /* Check if slot is available */
+ if (likely(prim_bkt->signatures[i].sig == NULL_SIGNATURE)) {
+ prim_bkt->signatures[i].current = sig;
+ prim_bkt->signatures[i].alt = alt_hash;
+ prim_bkt->key_idx[i] = new_idx;
+ return new_idx - 1;
+ }
+ }
+
+ /* Primary bucket is full, so we need to make space for new entry */
+ ret = make_space_bucket(h, prim_bkt);
+ /*
+ * After recursive function.
+ * Insert the new entry in the position of the pushed entry
+ * if successful or return error and
+ * store the new slot back in the ring
+ */
+ if (ret >= 0) {
+ prim_bkt->signatures[ret].current = sig;
+ prim_bkt->signatures[ret].alt = alt_hash;
+ prim_bkt->key_idx[ret] = new_idx;
+ return (new_idx - 1);
+ }
+
+ /* Error in addition, store new slot back in the ring and return error */
+ enqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx));
+
+ return ret;
+}
+
+int32_t
+rte_hash_add_key_with_hash(const struct rte_hash *h,
+ const void *key, hash_sig_t sig)
+{
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+ return __rte_hash_add_key_with_hash(h, key, sig, 0);
+}
+
+int32_t
+rte_hash_add_key(const struct rte_hash *h, const void *key)
+{
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+ return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
+}
+
+int
+rte_hash_add_key_with_hash_data(const struct rte_hash *h,
+ const void *key, hash_sig_t sig, void *data)
+{
+ int ret;
+
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+ ret = __rte_hash_add_key_with_hash(h, key, sig, data);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+int
+rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
+{
+ int ret;
+
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+
+ ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+static inline int32_t
+__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void **data)
+{
+ uint32_t bucket_idx;
+ hash_sig_t alt_hash;
+ unsigned i;
+ struct rte_hash_bucket *bkt;
+ struct rte_hash_key *k, *keys = h->key_store;
+
+ bucket_idx = sig & h->bucket_bitmask;
+ bkt = &h->buckets[bucket_idx];
+
+ /* Check if key is in primary location */
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ if (bkt->signatures[i].current == sig &&
+ bkt->signatures[i].sig != NULL_SIGNATURE) {
+ k = (struct rte_hash_key *) ((char *)keys +
+ bkt->key_idx[i] * h->key_entry_size);
+ if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
+ if (data != NULL)
+ *data = k->pdata;
+ /*
+ * Return index where key is stored,
+ * subtracting the first dummy index
+ */
+ return (bkt->key_idx[i] - 1);
+ }
+ }
+ }
+
+ /* Calculate secondary hash */
+ alt_hash = rte_hash_secondary_hash(sig);
+ bucket_idx = alt_hash & h->bucket_bitmask;
+ bkt = &h->buckets[bucket_idx];
+
+ /* Check if key is in secondary location */
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ if (bkt->signatures[i].current == alt_hash &&
+ bkt->signatures[i].alt == sig) {
+ k = (struct rte_hash_key *) ((char *)keys +
+ bkt->key_idx[i] * h->key_entry_size);
+ if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
+ if (data != NULL)
+ *data = k->pdata;
+ /*
+ * Return index where key is stored,
+ * subtracting the first dummy index
+ */
+ return (bkt->key_idx[i] - 1);
+ }
+ }
+ }
+
+ return -ENOENT;
+}
+
+int32_t
+rte_hash_lookup_with_hash(const struct rte_hash *h,
+ const void *key, hash_sig_t sig)
+{
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+ return __rte_hash_lookup_with_hash(h, key, sig, NULL);
+}
+
+int32_t
+rte_hash_lookup(const struct rte_hash *h, const void *key)
+{
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+ return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
+}
+
+int
+rte_hash_lookup_with_hash_data(const struct rte_hash *h,
+ const void *key, hash_sig_t sig, void **data)
+{
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+ return __rte_hash_lookup_with_hash(h, key, sig, data);
+}
+
+int
+rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
+{
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+ return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
+}
+
+static inline void
+remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
+{
+ unsigned lcore_id, n_slots;
+ struct lcore_cache *cached_free_slots;
+
+ bkt->signatures[i].sig = NULL_SIGNATURE;
+ if (h->hw_trans_mem_support) {
+ lcore_id = rte_lcore_id();
+ cached_free_slots = &h->local_free_slots[lcore_id];
+ /* Cache full, need to flush a burst back to the global ring. */
+ if (cached_free_slots->len == LCORE_CACHE_SIZE) {
+ /* Need to enqueue the free slots in global ring. */
+ n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
+ cached_free_slots->objs,
+ LCORE_CACHE_SIZE);
+ cached_free_slots->len -= n_slots;
+ }
+ /* Put index of new free slot in cache. */
+ cached_free_slots->objs[cached_free_slots->len] =
+ (void *)((uintptr_t)bkt->key_idx[i]);
+ cached_free_slots->len++;
+ } else {
+ rte_ring_sp_enqueue(h->free_slots,
+ (void *)((uintptr_t)bkt->key_idx[i]));
+ }
+}
+
+static inline int32_t
+__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
+ hash_sig_t sig)
+{
+ uint32_t bucket_idx;
+ hash_sig_t alt_hash;
+ unsigned i;
+ struct rte_hash_bucket *bkt;
+ struct rte_hash_key *k, *keys = h->key_store;
+
+ bucket_idx = sig & h->bucket_bitmask;
+ bkt = &h->buckets[bucket_idx];
+
+ /* Check if key is in primary location */
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ if (bkt->signatures[i].current == sig &&
+ bkt->signatures[i].sig != NULL_SIGNATURE) {
+ k = (struct rte_hash_key *) ((char *)keys +
+ bkt->key_idx[i] * h->key_entry_size);
+ if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
+ remove_entry(h, bkt, i);
+
+ /*
+ * Return index where key is stored,
+ * subtracting the first dummy index
+ */
+ return (bkt->key_idx[i] - 1);
+ }
+ }
+ }
+
+ /* Calculate secondary hash */
+ alt_hash = rte_hash_secondary_hash(sig);
+ bucket_idx = alt_hash & h->bucket_bitmask;
+ bkt = &h->buckets[bucket_idx];
+
+ /* Check if key is in secondary location */
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ if (bkt->signatures[i].current == alt_hash &&
+ bkt->signatures[i].sig != NULL_SIGNATURE) {
+ k = (struct rte_hash_key *) ((char *)keys +
+ bkt->key_idx[i] * h->key_entry_size);
+ if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
+ remove_entry(h, bkt, i);
+
+ /*
+ * Return index where key is stored,
+ * subtracting the first dummy index
+ */
+ return (bkt->key_idx[i] - 1);
+ }
+ }
+ }
+
+ return -ENOENT;
+}
+
+int32_t
+rte_hash_del_key_with_hash(const struct rte_hash *h,
+ const void *key, hash_sig_t sig)
+{
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+ return __rte_hash_del_key_with_hash(h, key, sig);
+}
+
+int32_t
+rte_hash_del_key(const struct rte_hash *h, const void *key)
+{
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+ return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
+}
+
+/* Lookup bulk stage 0: Prefetch input key */
+static inline void
+lookup_stage0(unsigned *idx, uint64_t *lookup_mask,
+ const void * const *keys)
+{
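+ /*
+ * __builtin_ctzl() is undefined for an all-zero mask, so compute it
+ * first and patch *idx back to 0 when the mask turns out empty.
+ */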
+ *idx = __builtin_ctzl(*lookup_mask);
+ if (*lookup_mask == 0)
+ *idx = 0;
+
+ rte_prefetch0(keys[*idx]);
+ *lookup_mask &= ~(1llu << *idx);
+}
+
+/*
+ * Lookup bulk stage 1: Calculate primary/secondary hashes
+ * and prefetch primary/secondary buckets
+ */
+static inline void
+lookup_stage1(unsigned idx, hash_sig_t *prim_hash, hash_sig_t *sec_hash,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt,
+ hash_sig_t *hash_vals, const void * const *keys,
+ const struct rte_hash *h)
+{
+ *prim_hash = rte_hash_hash(h, keys[idx]);
+ hash_vals[idx] = *prim_hash;
+ *sec_hash = rte_hash_secondary_hash(*prim_hash);
+
+ *primary_bkt = &h->buckets[*prim_hash & h->bucket_bitmask];
+ *secondary_bkt = &h->buckets[*sec_hash & h->bucket_bitmask];
+
+ rte_prefetch0(*primary_bkt);
+ rte_prefetch0(*secondary_bkt);
+}
+
+/*
+ * Lookup bulk stage 2: Search for match hashes in primary/secondary locations
+ * and prefetch first key slot
+ */
+static inline void
+lookup_stage2(unsigned idx, hash_sig_t prim_hash, hash_sig_t sec_hash,
+ const struct rte_hash_bucket *prim_bkt,
+ const struct rte_hash_bucket *sec_bkt,
+ const struct rte_hash_key **key_slot, int32_t *positions,
+ uint64_t *extra_hits_mask, const void *keys,
+ const struct rte_hash *h)
+{
+ unsigned prim_hash_matches, sec_hash_matches, key_idx, i;
+ unsigned total_hash_matches;
+
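+ /*
+ * Seed both masks with a bit at position RTE_HASH_BUCKET_ENTRIES: if
+ * no signature matches, __builtin_ctzl() then selects
+ * key_idx[RTE_HASH_BUCKET_ENTRIES], the dummy slot that always holds
+ * index 0 (a guaranteed miss). With the two sentinel bits plus one
+ * genuine match, popcount(total_hash_matches) is 3; anything larger
+ * flags possible extra hits for the scalar fallback.
+ */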
+ prim_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
+ sec_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ prim_hash_matches |= ((prim_hash == prim_bkt->signatures[i].current) << i);
+ sec_hash_matches |= ((sec_hash == sec_bkt->signatures[i].current) << i);
+ }
+
+ key_idx = prim_bkt->key_idx[__builtin_ctzl(prim_hash_matches)];
+ if (key_idx == 0)
+ key_idx = sec_bkt->key_idx[__builtin_ctzl(sec_hash_matches)];
+
+ total_hash_matches = (prim_hash_matches |
+ (sec_hash_matches << (RTE_HASH_BUCKET_ENTRIES + 1)));
+ *key_slot = (const struct rte_hash_key *) ((const char *)keys +
+ key_idx * h->key_entry_size);
+
+ rte_prefetch0(*key_slot);
+ /*
+ * Return index where key is stored,
+ * subtracting the first dummy index
+ */
+ positions[idx] = (key_idx - 1);
+
+ *extra_hits_mask |= (uint64_t)(__builtin_popcount(total_hash_matches) > 3) << idx;
+}
+
+/* Lookup bulk stage 3: Check if key matches, update hit mask and return data */
+static inline void
+lookup_stage3(unsigned idx, const struct rte_hash_key *key_slot, const void * const *keys,
+ const int32_t *positions, void *data[], uint64_t *hits,
+ const struct rte_hash *h)
+{
+ unsigned hit;
+ unsigned key_idx;
+
+ hit = !h->rte_hash_cmp_eq(key_slot->key, keys[idx], h->key_len);
+ if (data != NULL)
+ data[idx] = key_slot->pdata;
+
+ key_idx = positions[idx] + 1;
+ /*
+ * If key index is 0, force hit to be 0, in case key to be looked up
+ * is all zero (as in the dummy slot), which would result in a wrong hit
+ */
+ *hits |= (uint64_t)(hit && !!key_idx) << idx;
+}
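+
+/*
+ * Pipeline summary (descriptive note, not upstream text): the bulk lookup
+ * below runs stages 0-3 as a software pipeline, two keys per stage per
+ * iteration. While stage 3 compares one pair of keys, the earlier stages
+ * are already prefetching the keys, buckets and key slots of the following
+ * pairs, hiding memory latency; the code before and after the while loop
+ * fills and drains the pipeline.
+ */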
+
+static inline void
+__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
+ uint32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint64_t hits = 0;
+ uint64_t extra_hits_mask = 0;
+ uint64_t lookup_mask, miss_mask;
+ unsigned idx;
+ const void *key_store = h->key_store;
+ int ret;
+ hash_sig_t hash_vals[RTE_HASH_LOOKUP_BULK_MAX];
+
+ unsigned idx00, idx01, idx10, idx11, idx20, idx21, idx30, idx31;
+ const struct rte_hash_bucket *primary_bkt10, *primary_bkt11;
+ const struct rte_hash_bucket *secondary_bkt10, *secondary_bkt11;
+ const struct rte_hash_bucket *primary_bkt20, *primary_bkt21;
+ const struct rte_hash_bucket *secondary_bkt20, *secondary_bkt21;
+ const struct rte_hash_key *k_slot20, *k_slot21, *k_slot30, *k_slot31;
+ hash_sig_t primary_hash10, primary_hash11;
+ hash_sig_t secondary_hash10, secondary_hash11;
+ hash_sig_t primary_hash20, primary_hash21;
+ hash_sig_t secondary_hash20, secondary_hash21;
+
+ lookup_mask = (uint64_t) -1 >> (64 - num_keys);
+ miss_mask = lookup_mask;
+
+ lookup_stage0(&idx00, &lookup_mask, keys);
+ lookup_stage0(&idx01, &lookup_mask, keys);
+
+ idx10 = idx00, idx11 = idx01;
+
+ lookup_stage0(&idx00, &lookup_mask, keys);
+ lookup_stage0(&idx01, &lookup_mask, keys);
+ lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
+ &primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
+ lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
+ &primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
+
+ primary_bkt20 = primary_bkt10;
+ primary_bkt21 = primary_bkt11;
+ secondary_bkt20 = secondary_bkt10;
+ secondary_bkt21 = secondary_bkt11;
+ primary_hash20 = primary_hash10;
+ primary_hash21 = primary_hash11;
+ secondary_hash20 = secondary_hash10;
+ secondary_hash21 = secondary_hash11;
+ idx20 = idx10, idx21 = idx11;
+ idx10 = idx00, idx11 = idx01;
+
+ lookup_stage0(&idx00, &lookup_mask, keys);
+ lookup_stage0(&idx01, &lookup_mask, keys);
+ lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
+ &primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
+ lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
+ &primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
+ lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
+ secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
+ key_store, h);
+ lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
+ secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
+ key_store, h);
+
+ while (lookup_mask) {
+ k_slot30 = k_slot20, k_slot31 = k_slot21;
+ idx30 = idx20, idx31 = idx21;
+ primary_bkt20 = primary_bkt10;
+ primary_bkt21 = primary_bkt11;
+ secondary_bkt20 = secondary_bkt10;
+ secondary_bkt21 = secondary_bkt11;
+ primary_hash20 = primary_hash10;
+ primary_hash21 = primary_hash11;
+ secondary_hash20 = secondary_hash10;
+ secondary_hash21 = secondary_hash11;
+ idx20 = idx10, idx21 = idx11;
+ idx10 = idx00, idx11 = idx01;
+
+ lookup_stage0(&idx00, &lookup_mask, keys);
+ lookup_stage0(&idx01, &lookup_mask, keys);
+ lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
+ &primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
+ lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
+ &primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
+ lookup_stage2(idx20, primary_hash20, secondary_hash20,
+ primary_bkt20, secondary_bkt20, &k_slot20, positions,
+ &extra_hits_mask, key_store, h);
+ lookup_stage2(idx21, primary_hash21, secondary_hash21,
+ primary_bkt21, secondary_bkt21, &k_slot21, positions,
+ &extra_hits_mask, key_store, h);
+ lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
+ lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);
+ }
+
+ k_slot30 = k_slot20, k_slot31 = k_slot21;
+ idx30 = idx20, idx31 = idx21;
+ primary_bkt20 = primary_bkt10;
+ primary_bkt21 = primary_bkt11;
+ secondary_bkt20 = secondary_bkt10;
+ secondary_bkt21 = secondary_bkt11;
+ primary_hash20 = primary_hash10;
+ primary_hash21 = primary_hash11;
+ secondary_hash20 = secondary_hash10;
+ secondary_hash21 = secondary_hash11;
+ idx20 = idx10, idx21 = idx11;
+ idx10 = idx00, idx11 = idx01;
+
+ lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
+ &primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
+ lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
+ &primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
+ lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
+ secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
+ key_store, h);
+ lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
+ secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
+ key_store, h);
+ lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
+ lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);
+
+ k_slot30 = k_slot20, k_slot31 = k_slot21;
+ idx30 = idx20, idx31 = idx21;
+ primary_bkt20 = primary_bkt10;
+ primary_bkt21 = primary_bkt11;
+ secondary_bkt20 = secondary_bkt10;
+ secondary_bkt21 = secondary_bkt11;
+ primary_hash20 = primary_hash10;
+ primary_hash21 = primary_hash11;
+ secondary_hash20 = secondary_hash10;
+ secondary_hash21 = secondary_hash11;
+ idx20 = idx10, idx21 = idx11;
+
+ lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
+ secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
+ key_store, h);
+ lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
+ secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
+ key_store, h);
+ lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
+ lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);
+
+ k_slot30 = k_slot20, k_slot31 = k_slot21;
+ idx30 = idx20, idx31 = idx21;
+
+ lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
+ lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);
+
+ /* ignore any items we have already found */
+ extra_hits_mask &= ~hits;
+
+ if (unlikely(extra_hits_mask)) {
+ /* run a single search for each remaining item */
+ do {
+ idx = __builtin_ctzl(extra_hits_mask);
+ if (data != NULL) {
+ ret = rte_hash_lookup_with_hash_data(h,
+ keys[idx], hash_vals[idx], &data[idx]);
+ if (ret >= 0)
+ hits |= 1ULL << idx;
+ } else {
+ positions[idx] = rte_hash_lookup_with_hash(h,
+ keys[idx], hash_vals[idx]);
+ if (positions[idx] >= 0)
+ hits |= 1llu << idx;
+ }
+ extra_hits_mask &= ~(1llu << idx);
+ } while (extra_hits_mask);
+ }
+
+ miss_mask &= ~hits;
+ if (unlikely(miss_mask)) {
+ do {
+ idx = __builtin_ctzl(miss_mask);
+ positions[idx] = -ENOENT;
+ miss_mask &= ~(1llu << idx);
+ } while (miss_mask);
+ }
+
+ if (hit_mask != NULL)
+ *hit_mask = hits;
+}
+
+int
+rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
+ uint32_t num_keys, int32_t *positions)
+{
+ RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
+ (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
+ (positions == NULL)), -EINVAL);
+
+ __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
+ return 0;
+}
+
+int
+rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
+ uint32_t num_keys, uint64_t *hit_mask, void *data[])
+{
+ RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
+ (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
+ (hit_mask == NULL)), -EINVAL);
+
+ int32_t positions[num_keys];
+
+ __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);
+
+ /* Return number of hits */
+ return __builtin_popcountl(*hit_mask);
+}
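+
+/*
+ * Illustrative bulk-lookup sketch (not upstream code): bit i of hit_mask
+ * is set iff keys[i] was found, in which case data[i] holds the stored
+ * value. num_keys must not exceed RTE_HASH_LOOKUP_BULK_MAX.
+ */
+static inline unsigned
+example_count_bulk_hits(const struct rte_hash *h, const void **keys,
+ uint32_t num_keys)
+{
+ uint64_t hit_mask = 0;
+ void *data[RTE_HASH_LOOKUP_BULK_MAX];
+ unsigned i, found = 0;
+
+ if (rte_hash_lookup_bulk_data(h, keys, num_keys, &hit_mask, data) < 0)
+ return 0;
+ for (i = 0; i < num_keys; i++) {
+ if (hit_mask & (1ULL << i))
+ found++; /* value available in data[i] */
+ }
+ return found;
+}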
+
+int32_t
+rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
+{
+ uint32_t bucket_idx, idx, position;
+ struct rte_hash_key *next_key;
+
+ RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
+
+ const uint32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
+ /* Out of bounds */
+ if (*next >= total_entries)
+ return -ENOENT;
+
+ /* Calculate bucket and index of current iterator */
+ bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
+ idx = *next % RTE_HASH_BUCKET_ENTRIES;
+
+ /* If current position is empty, go to the next one */
+ while (h->buckets[bucket_idx].signatures[idx].sig == NULL_SIGNATURE) {
+ (*next)++;
+ /* End of table */
+ if (*next == total_entries)
+ return -ENOENT;
+ bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
+ idx = *next % RTE_HASH_BUCKET_ENTRIES;
+ }
+
+ /* Get position of entry in key table */
+ position = h->buckets[bucket_idx].key_idx[idx];
+ next_key = (struct rte_hash_key *) ((char *)h->key_store +
+ position * h->key_entry_size);
+ /* Return key and data */
+ *key = next_key->key;
+ *data = next_key->pdata;
+
+ /* Increment iterator */
+ (*next)++;
+
+ return (position - 1);
+}
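+
+/*
+ * Illustrative iteration sketch (not upstream code): walk every stored
+ * key/value pair. *next must start at 0 and is advanced internally.
+ */
+static inline void
+example_iterate_all(const struct rte_hash *h)
+{
+ const void *key;
+ void *data;
+ uint32_t next = 0;
+
+ while (rte_hash_iterate(h, &key, &data, &next) >= 0) {
+ /* key and data point at the entry's slot in key_store */
+ }
+}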
diff --git a/src/dpdk_lib18/librte_hash/rte_fbk_hash.h b/src/dpdk22/lib/librte_hash/rte_fbk_hash.h
index 3d229bf9..a430961d 100755..100644
--- a/src/dpdk_lib18/librte_hash/rte_fbk_hash.h
+++ b/src/dpdk22/lib/librte_hash/rte_fbk_hash.h
@@ -55,7 +55,7 @@ extern "C" {
#include <string.h>
#ifndef RTE_FBK_HASH_FUNC_DEFAULT
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
#include <rte_hash_crc.h>
/** Default four-byte key hash function if none is specified. */
#define RTE_FBK_HASH_FUNC_DEFAULT rte_hash_crc_4byte
@@ -372,7 +372,6 @@ struct rte_fbk_hash_table *rte_fbk_hash_find_existing(const char *name);
* Possible rte_errno error values include:
* - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
* - E_RTE_SECONDARY - function was called from a secondary process instance
- * - E_RTE_NO_TAILQ - no tailq list could be got for the fbk hash table list
* - EINVAL - invalid parameter value passed to function
* - ENOSPC - the maximum number of memzones has already been allocated
* - EEXIST - a memzone with the same name already exists
diff --git a/src/dpdk_lib18/librte_hash/rte_hash.h b/src/dpdk22/lib/librte_hash/rte_hash.h
index 2ecaf1ad..85fc4162 100755..100644
--- a/src/dpdk_lib18/librte_hash/rte_hash.h
+++ b/src/dpdk22/lib/librte_hash/rte_hash.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -41,27 +41,24 @@
*/
#include <stdint.h>
-#include <sys/queue.h>
+#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Maximum size of hash table that can be created. */
-#define RTE_HASH_ENTRIES_MAX (1 << 26)
+#define RTE_HASH_ENTRIES_MAX (1 << 30)
-/** Maximum bucket size that can be created. */
-#define RTE_HASH_BUCKET_ENTRIES_MAX 16
-
-/** Maximum length of key that can be used. */
-#define RTE_HASH_KEY_LENGTH_MAX 64
+/** Maximum number of characters in hash name.*/
+#define RTE_HASH_NAMESIZE 32
-/** Max number of keys that can be searched for using rte_hash_lookup_multi. */
-#define RTE_HASH_LOOKUP_BULK_MAX 16
+/** Maximum number of keys that can be searched for using rte_hash_lookup_bulk. */
+#define RTE_HASH_LOOKUP_BULK_MAX 64
#define RTE_HASH_LOOKUP_MULTI_MAX RTE_HASH_LOOKUP_BULK_MAX
-/** Max number of characters in hash name.*/
-#define RTE_HASH_NAMESIZE 32
+/** Enable Hardware transactional memory support. */
+#define RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT 0x01
/** Signature of key that is stored internally. */
typedef uint32_t hash_sig_t;
@@ -70,41 +67,25 @@ typedef uint32_t hash_sig_t;
typedef uint32_t (*rte_hash_function)(const void *key, uint32_t key_len,
uint32_t init_val);
+/** Type of function used to compare the hash key. */
+typedef int (*rte_hash_cmp_eq_t)(const void *key1, const void *key2, size_t key_len);
+
/**
- * Parameters used when creating the hash table. The total table entries and
- * bucket entries must be a power of 2.
+ * Parameters used when creating the hash table.
*/
struct rte_hash_parameters {
const char *name; /**< Name of the hash. */
uint32_t entries; /**< Total hash table entries. */
- uint32_t bucket_entries; /**< Bucket entries. */
+ uint32_t reserved; /**< Unused field. Should be set to 0 */
uint32_t key_len; /**< Length of hash key. */
- rte_hash_function hash_func; /**< Function used to calculate hash. */
+ rte_hash_function hash_func; /**< Primary Hash function used to calculate hash. */
uint32_t hash_func_init_val; /**< Init value used by hash_func. */
int socket_id; /**< NUMA Socket ID for memory. */
+ uint8_t extra_flag; /**< Indicate if additional parameters are present. */
};
-/** A hash table structure. */
-struct rte_hash {
- char name[RTE_HASH_NAMESIZE]; /**< Name of the hash. */
- uint32_t entries; /**< Total table entries. */
- uint32_t bucket_entries; /**< Bucket entries. */
- uint32_t key_len; /**< Length of hash key. */
- rte_hash_function hash_func; /**< Function used to calculate hash. */
- uint32_t hash_func_init_val; /**< Init value used by hash_func. */
- uint32_t num_buckets; /**< Number of buckets in table. */
- uint32_t bucket_bitmask; /**< Bitmask for getting bucket index
- from hash signature. */
- hash_sig_t sig_msb; /**< MSB is always set in valid signatures. */
- uint8_t *sig_tbl; /**< Flat array of hash signature buckets. */
- uint32_t sig_tbl_bucket_size; /**< Signature buckets may be padded for
- alignment reasons, and this is the
- bucket size used by sig_tbl. */
- uint8_t *key_tbl; /**< Flat array of key value buckets. */
- uint32_t key_tbl_key_size; /**< Keys may be padded for alignment
- reasons, and this is the key size
- used by key_tbl. */
-};
+/** @internal A hash table structure. */
+struct rte_hash;
/**
* Create a new hash table.
@@ -117,7 +98,6 @@ struct rte_hash {
* Possible rte_errno errors include:
* - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
* - E_RTE_SECONDARY - function was called from a secondary process instance
- * - E_RTE_NO_TAILQ - no tailq list could be got for the hash table list
* - ENOENT - missing entry
* - EINVAL - invalid parameter passed to function
* - ENOSPC - the maximum number of memzones has already been allocated
@@ -127,6 +107,18 @@ struct rte_hash {
struct rte_hash *
rte_hash_create(const struct rte_hash_parameters *params);
+/**
+ * Set a new hash compare function other than the default one.
+ *
+ * @note Function pointer does not work with multi-process, so do not use it
+ * in multi-process mode.
+ *
+ * @param h
+ * Hash table whose compare function is to be changed
+ * @param func
+ * New compare function
+ */
+void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func);
/**
* Find an existing hash table object and return a pointer to it.
@@ -150,6 +142,56 @@ void
rte_hash_free(struct rte_hash *h);
/**
+ * Reset the whole hash table, zeroing all entries
+ * @param h
+ * Hash table to reset
+ */
+void
+rte_hash_reset(struct rte_hash *h);
+
+/**
+ * Add a key-value pair to an existing hash table.
+ * This operation is not multi-thread safe
+ * and should only be called from one thread.
+ *
+ * @param h
+ * Hash table to add the key to.
+ * @param key
+ * Key to add to the hash table.
+ * @param data
+ * Data to add to the hash table.
+ * @return
+ * - 0 if added successfully
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOSPC if there is no space in the hash for this key.
+ */
+int
+rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data);
+
+/**
+ * Add a key-value pair with a pre-computed hash value
+ * to an existing hash table.
+ * This operation is not multi-thread safe
+ * and should only be called from one thread.
+ *
+ * @param h
+ * Hash table to add the key to.
+ * @param key
+ * Key to add to the hash table.
+ * @param sig
+ * Precomputed hash value for 'key'
+ * @param data
+ * Data to add to the hash table.
+ * @return
+ * - 0 if added successfully
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOSPC if there is no space in the hash for this key.
+ */
+int32_t
+rte_hash_add_key_with_hash_data(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void *data);
+
+/**
* Add a key to an existing hash table. This operation is not multi-thread safe
* and should only be called from one thread.
*
@@ -167,7 +209,8 @@ int32_t
rte_hash_add_key(const struct rte_hash *h, const void *key);
/**
- * Add a key to an existing hash table. This operation is not multi-thread safe
+ * Add a key to an existing hash table.
+ * This operation is not multi-thread safe
* and should only be called from one thread.
*
* @param h
@@ -175,7 +218,7 @@ rte_hash_add_key(const struct rte_hash *h, const void *key);
* @param key
* Key to add to the hash table.
* @param sig
- * Hash value to add to the hash table.
+ * Precomputed hash value for 'key'.
* @return
* - -EINVAL if the parameters are invalid.
* - -ENOSPC if there is no space in the hash for this key.
@@ -183,12 +226,12 @@ rte_hash_add_key(const struct rte_hash *h, const void *key);
* array of user data. This value is unique for this key.
*/
int32_t
-rte_hash_add_key_with_hash(const struct rte_hash *h,
- const void *key, hash_sig_t sig);
+rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, hash_sig_t sig);
/**
- * Remove a key from an existing hash table. This operation is not multi-thread
- * safe and should only be called from one thread.
+ * Remove a key from an existing hash table.
+ * This operation is not multi-thread safe
+ * and should only be called from one thread.
*
* @param h
* Hash table to remove the key from.
@@ -205,15 +248,16 @@ int32_t
rte_hash_del_key(const struct rte_hash *h, const void *key);
/**
- * Remove a key from an existing hash table. This operation is not multi-thread
- * safe and should only be called from one thread.
+ * Remove a key from an existing hash table.
+ * This operation is not multi-thread safe
+ * and should only be called from one thread.
*
* @param h
* Hash table to remove the key from.
* @param key
* Key to remove from the hash table.
* @param sig
- * Hash value to remove from the hash table.
+ * Precomputed hash value for 'key'.
* @return
* - -EINVAL if the parameters are invalid.
* - -ENOENT if the key is not found.
@@ -222,12 +266,52 @@ rte_hash_del_key(const struct rte_hash *h, const void *key);
* value that was returned when the key was added.
*/
int32_t
-rte_hash_del_key_with_hash(const struct rte_hash *h,
- const void *key, hash_sig_t sig);
+rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key, hash_sig_t sig);
+
+
+/**
+ * Find a key-value pair in the hash table.
+ * This operation is multi-thread safe.
+ *
+ * @param h
+ * Hash table to look in.
+ * @param key
+ * Key to find.
+ * @param data
+ * Output with pointer to data returned from the hash table.
+ * @return
+ * - A non-negative value on successful lookup.
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOENT if the key is not found.
+ */
+int
+rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data);
+/**
+ * Find a key-value pair with a pre-computed hash value
+ * in an existing hash table.
+ * This operation is multi-thread safe.
+ *
+ * @param h
+ * Hash table to look in.
+ * @param key
+ * Key to find.
+ * @param sig
+ * Precomputed hash value for 'key'
+ * @param data
+ * Output with pointer to data returned from the hash table.
+ * @return
+ * - A non-negative value on successful lookup.
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOENT if the key is not found.
+ */
+int
+rte_hash_lookup_with_hash_data(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void **data);
/**
- * Find a key in the hash table. This operation is multi-thread safe.
+ * Find a key in the hash table.
+ * This operation is multi-thread safe.
*
* @param h
* Hash table to look in.
@@ -244,14 +328,15 @@ int32_t
rte_hash_lookup(const struct rte_hash *h, const void *key);
/**
- * Find a key in the hash table. This operation is multi-thread safe.
+ * Find a key in the hash table.
+ * This operation is multi-thread safe.
*
* @param h
* Hash table to look in.
* @param key
* Key to find.
* @param sig
- * Hash value to find.
+ * Precomputed hash value for 'key'.
* @return
* - -EINVAL if the parameters are invalid.
* - -ENOENT if the key is not found.
@@ -263,9 +348,9 @@ int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
const void *key, hash_sig_t sig);
-
/**
- * Calc a hash value by key. This operation is not multi-process safe.
+ * Calculate a hash value for a key.
+ * This operation is not multi-thread safe.
*
* @param h
* Hash table to look in.
@@ -274,16 +359,35 @@ rte_hash_lookup_with_hash(const struct rte_hash *h,
* @return
* - hash value
*/
-static inline hash_sig_t
-rte_hash_hash(const struct rte_hash *h, const void *key)
-{
- /* calc hash result by key */
- return h->hash_func(key, h->key_len, h->hash_func_init_val);
-}
+hash_sig_t
+rte_hash_hash(const struct rte_hash *h, const void *key);
#define rte_hash_lookup_multi rte_hash_lookup_bulk
+#define rte_hash_lookup_multi_data rte_hash_lookup_bulk_data
/**
- * Find multiple keys in the hash table. This operation is multi-thread safe.
+ * Find multiple keys in the hash table.
+ * This operation is multi-thread safe.
+ *
+ * @param h
+ * Hash table to look in.
+ * @param keys
+ * A pointer to a list of keys to look for.
+ * @param num_keys
+ * How many keys are in the keys list (no more than RTE_HASH_LOOKUP_BULK_MAX).
+ * @param hit_mask
+ * Output containing a bitmask with all successful lookups.
+ * @param data
+ * Output containing array of data returned from all the successful lookups.
+ * @return
+ * -EINVAL if there's an error, otherwise number of successful lookups.
+ */
+int
+rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
+ uint32_t num_keys, uint64_t *hit_mask, void *data[]);
+
+/**
+ * Find multiple keys in the hash table.
+ * This operation is multi-thread safe.
*
* @param h
* Hash table to look in.
@@ -303,6 +407,28 @@ rte_hash_hash(const struct rte_hash *h, const void *key)
int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
uint32_t num_keys, int32_t *positions);
+
+/**
+ * Iterate through the hash table, returning key-value pairs.
+ *
+ * @param h
+ * Hash table to iterate
+ * @param key
+ * Output containing the key where current iterator
+ * was pointing at
+ * @param data
+ * Output containing the data associated with key.
+ * Returns NULL if data was not stored.
+ * @param next
+ * Pointer to iterator. Should be 0 to start iterating the hash table.
+ * Iterator is incremented after each call of this function.
+ * @return
+ * Position where key was stored, if successful.
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOENT if end of the hash table.
+ */
+int32_t
+rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next);
#ifdef __cplusplus
}
#endif
diff --git a/src/dpdk22/lib/librte_hash/rte_hash_crc.h b/src/dpdk22/lib/librte_hash/rte_hash_crc.h
new file mode 100644
index 00000000..78a34b76
--- /dev/null
+++ b/src/dpdk22/lib/librte_hash/rte_hash_crc.h
@@ -0,0 +1,568 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_HASH_CRC_H_
+#define _RTE_HASH_CRC_H_
+
+/**
+ * @file
+ *
+ * RTE CRC Hash
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <rte_cpuflags.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+
+/* Lookup tables for software implementation of CRC32C */
+static const uint32_t crc32c_tables[8][256] = {{
+ 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
+ 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
+ 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
+ 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
+ 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
+ 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
+ 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
+ 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
+ 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
+ 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
+ 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
+ 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
+ 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
+ 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
+ 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
+ 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
+ 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
+ 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
+ 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
+ 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
+ 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
+ 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
+ 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
+ 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
+ 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
+ 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
+ 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
+ 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
+ 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
+ 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
+ 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
+ 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351
+},
+{
+ 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945,
+ 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD,
+ 0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4,
+ 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C,
+ 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47,
+ 0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF,
+ 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6,
+ 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E,
+ 0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41,
+ 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9,
+ 0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C, 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0,
+ 0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4, 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78,
+ 0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43,
+ 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB,
+ 0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2,
+ 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A,
+ 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC,
+ 0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004,
+ 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D,
+ 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185,
+ 0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE,
+ 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306,
+ 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F,
+ 0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287,
+ 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8,
+ 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600,
+ 0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439,
+ 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781,
+ 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA,
+ 0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502,
+ 0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B,
+ 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483
+},
+{
+ 0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469,
+ 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC,
+ 0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3,
+ 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726,
+ 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D,
+ 0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8,
+ 0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED, 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7,
+ 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32,
+ 0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0,
+ 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75,
+ 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A,
+ 0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF,
+ 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4,
+ 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161,
+ 0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E,
+ 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB,
+ 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A,
+ 0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF,
+ 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0,
+ 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065,
+ 0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E,
+ 0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB,
+ 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4,
+ 0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71,
+ 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3,
+ 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36,
+ 0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79,
+ 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC,
+ 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7,
+ 0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622,
+ 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D,
+ 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8
+},
+{
+ 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA,
+ 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C,
+ 0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7,
+ 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11,
+ 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41,
+ 0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7,
+ 0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C,
+ 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A,
+ 0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D,
+ 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, 0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB,
+ 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610,
+ 0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6,
+ 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6,
+ 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040,
+ 0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B,
+ 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D,
+ 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5,
+ 0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213,
+ 0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8,
+ 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E,
+ 0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E,
+ 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698,
+ 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443,
+ 0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5,
+ 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12,
+ 0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07, 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4,
+ 0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F,
+ 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9,
+ 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99,
+ 0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F,
+ 0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4,
+ 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842
+},
+{
+ 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44,
+ 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5,
+ 0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97,
+ 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406,
+ 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13,
+ 0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082,
+ 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0,
+ 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151,
+ 0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA,
+ 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B,
+ 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539,
+ 0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8,
+ 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD,
+ 0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C,
+ 0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E,
+ 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF,
+ 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18,
+ 0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089,
+ 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB,
+ 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A,
+ 0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F,
+ 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE,
+ 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C,
+ 0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D,
+ 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6,
+ 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27,
+ 0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065,
+ 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4,
+ 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1,
+ 0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70,
+ 0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532,
+ 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3
+},
+{
+ 0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD,
+ 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2,
+ 0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93,
+ 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C,
+ 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20,
+ 0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F,
+ 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E,
+ 0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201,
+ 0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746,
+ 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59,
+ 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778,
+ 0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67,
+ 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB,
+ 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4,
+ 0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5,
+ 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA,
+ 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C, 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B,
+ 0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364,
+ 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45,
+ 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A,
+ 0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81, 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6,
+ 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9,
+ 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8,
+ 0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7,
+ 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090,
+ 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F,
+ 0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE,
+ 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1,
+ 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D,
+ 0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02,
+ 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623,
+ 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C
+},
+{
+ 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089,
+ 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA,
+ 0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F,
+ 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C,
+ 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334,
+ 0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67,
+ 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992,
+ 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1,
+ 0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3,
+ 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0,
+ 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55,
+ 0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006,
+ 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E,
+ 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D,
+ 0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8,
+ 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB,
+ 0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC, 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D,
+ 0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E,
+ 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB,
+ 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, 0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988,
+ 0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0,
+ 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093,
+ 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766,
+ 0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35,
+ 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907,
+ 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454,
+ 0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1,
+ 0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2,
+ 0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA,
+ 0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9,
+ 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C,
+ 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F
+},
+{
+ 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504,
+ 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE,
+ 0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0,
+ 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A,
+ 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D,
+ 0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447,
+ 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929,
+ 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3,
+ 0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36,
+ 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC,
+ 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782,
+ 0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358,
+ 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF,
+ 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75,
+ 0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B,
+ 0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1,
+ 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360,
+ 0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA,
+ 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4,
+ 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E,
+ 0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9,
+ 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223,
+ 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D,
+ 0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA, 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97,
+ 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852,
+ 0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5, 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88,
+ 0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6,
+ 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C,
+ 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB,
+ 0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911,
+ 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F,
+ 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5
+}};
+
+#define CRC32_UPD(crc, n) \
+ (crc32c_tables[(n)][(crc) & 0xFF] ^ \
+ crc32c_tables[(n)-1][((crc) >> 8) & 0xFF])
+
+static inline uint32_t
+crc32c_1word(uint32_t data, uint32_t init_val)
+{
+ uint32_t crc, term1, term2;
+ crc = init_val;
+ crc ^= data;
+
+ term1 = CRC32_UPD(crc, 3);
+ term2 = crc >> 16;
+ crc = term1 ^ CRC32_UPD(term2, 1);
+
+ return crc;
+}
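
The two CRC32_UPD invocations above implement "slicing-by-4": the 32-bit word is folded through four 256-entry tables in two steps instead of four byte-at-a-time lookups. As a minimal sketch (not part of this patch), this is the bitwise CRC32C reference, using the reflected Castagnoli polynomial 0x82F63B78, that the tables must agree with on a little-endian host:

#include <stdint.h>
#include <stddef.h>

/* Bit-at-a-time CRC32C update; on a little-endian machine,
 * crc32c_1word(w, init) must equal crc32c_ref((const uint8_t *)&w, 4, init). */
static uint32_t
crc32c_ref(const uint8_t *buf, size_t len, uint32_t crc)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78 : 0);
	}
	return crc;
}
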
+
+static inline uint32_t
+crc32c_2words(uint64_t data, uint32_t init_val)
+{
+ union {
+ uint64_t u64;
+ uint32_t u32[2];
+ } d;
+ d.u64 = data;
+
+ uint32_t crc, term1, term2;
+
+ crc = init_val;
+ crc ^= d.u32[0];
+
+ term1 = CRC32_UPD(crc, 7);
+ term2 = crc >> 16;
+ crc = term1 ^ CRC32_UPD(term2, 5);
+ term1 = CRC32_UPD(d.u32[1], 3);
+ term2 = d.u32[1] >> 16;
+ crc ^= term1 ^ CRC32_UPD(term2, 1);
+
+ return crc;
+}
+
+#if defined(RTE_ARCH_I686) || defined(RTE_ARCH_X86_64)
+static inline uint32_t
+crc32c_sse42_u32(uint32_t data, uint32_t init_val)
+{
+ __asm__ volatile(
+ "crc32l %[data], %[init_val];"
+ : [init_val] "+r" (init_val)
+ : [data] "rm" (data));
+ return init_val;
+}
+
+static inline uint32_t
+crc32c_sse42_u64_mimic(uint64_t data, uint64_t init_val)
+{
+ union {
+ uint32_t u32[2];
+ uint64_t u64;
+ } d;
+
+ d.u64 = data;
+ init_val = crc32c_sse42_u32(d.u32[0], init_val);
+ init_val = crc32c_sse42_u32(d.u32[1], init_val);
+ return init_val;
+}
+#endif
+
+#ifdef RTE_ARCH_X86_64
+static inline uint32_t
+crc32c_sse42_u64(uint64_t data, uint64_t init_val)
+{
+ __asm__ volatile(
+ "crc32q %[data], %[init_val];"
+ : [init_val] "+r" (init_val)
+ : [data] "rm" (data));
+ return init_val;
+}
+#endif
+
+#define CRC32_SW (1U << 0)
+#define CRC32_SSE42 (1U << 1)
+#define CRC32_x64 (1U << 2)
+#define CRC32_SSE42_x64 (CRC32_x64|CRC32_SSE42)
+#define CRC32_ARM64 (1U << 3)
+
+static uint8_t crc32_alg = CRC32_SW;
+
+#if defined(RTE_ARCH_ARM64)
+#include "rte_crc_arm64.h"
+#else
+
+/**
+ * Allow or disallow use of SSE4.2 intrinsics for CRC32 hash
+ * calculation.
+ *
+ * @param alg
+ * An OR of the following flags:
+ * - (CRC32_SW) Don't use SSE4.2 intrinsics
+ * - (CRC32_SSE42) Use SSE4.2 intrinsics if available
+ * - (CRC32_SSE42_x64) Use 64-bit SSE4.2 intrinsic if available (default)
+ *
+ */
+static inline void
+rte_hash_crc_set_alg(uint8_t alg)
+{
+ switch (alg) {
+#if defined(RTE_ARCH_I686) || defined(RTE_ARCH_X86_64)
+ case CRC32_SSE42_x64:
+ if (! rte_cpu_get_flag_enabled(RTE_CPUFLAG_EM64T))
+ alg = CRC32_SSE42;
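+ /* Fallthrough */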
+ case CRC32_SSE42:
+ if (! rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_2))
+ alg = CRC32_SW;
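+ /* Fallthrough */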
+#endif
+ case CRC32_SW:
+ crc32_alg = alg;
+ default:
+ break;
+ }
+}
+
+/* Setting the best available algorithm */
+static inline void __attribute__((constructor))
+rte_hash_crc_init_alg(void)
+{
+ rte_hash_crc_set_alg(CRC32_SSE42_x64);
+}
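
The constructor above picks the fastest algorithm at load time; an application can still downgrade explicitly. A usage sketch (not part of this patch): forcing the portable software path, for example to get identical hash values on hosts with and without SSE4.2:

/* Call once at startup, after the constructor has run. */
static void
force_portable_crc(void)
{
	rte_hash_crc_set_alg(CRC32_SW);
	/* From here on, rte_hash_crc_4byte()/rte_hash_crc_8byte() take the
	 * table-driven software path on every CPU. */
}
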
+
+/**
+ * Use a single crc32 instruction to perform a hash on a 4-byte value.
+ * Falls back to the software crc32 implementation if SSE4.2 is
+ * not supported.
+ *
+ * @param data
+ * Data to perform hash on.
+ * @param init_val
+ * Value to initialise hash generator.
+ * @return
+ * 32bit calculated hash value.
+ */
+static inline uint32_t
+rte_hash_crc_4byte(uint32_t data, uint32_t init_val)
+{
+#if defined RTE_ARCH_I686 || defined RTE_ARCH_X86_64
+ if (likely(crc32_alg & CRC32_SSE42))
+ return crc32c_sse42_u32(data, init_val);
+#endif
+
+ return crc32c_1word(data, init_val);
+}
+
+/**
+ * Use a single crc32 instruction to perform a hash on an 8-byte value.
+ * Falls back to the software crc32 implementation if SSE4.2 is
+ * not supported.
+ *
+ * @param data
+ * Data to perform hash on.
+ * @param init_val
+ * Value to initialise hash generator.
+ * @return
+ * 32bit calculated hash value.
+ */
+static inline uint32_t
+rte_hash_crc_8byte(uint64_t data, uint32_t init_val)
+{
+#ifdef RTE_ARCH_X86_64
+ if (likely(crc32_alg == CRC32_SSE42_x64))
+ return crc32c_sse42_u64(data, init_val);
+#endif
+
+#if defined RTE_ARCH_I686 || defined RTE_ARCH_X86_64
+ if (likely(crc32_alg & CRC32_SSE42))
+ return crc32c_sse42_u64_mimic(data, init_val);
+#endif
+
+ return crc32c_2words(data, init_val);
+}
+
+#endif
+
+/**
+ * Calculate CRC32 hash on user-supplied byte array.
+ *
+ * @param data
+ * Data to perform hash on.
+ * @param data_len
+ * How many bytes to use to calculate hash value.
+ * @param init_val
+ * Value to initialise hash generator.
+ * @return
+ * 32bit calculated hash value.
+ */
+static inline uint32_t
+rte_hash_crc(const void *data, uint32_t data_len, uint32_t init_val)
+{
+ unsigned i;
+ uint64_t temp = 0;
+ uintptr_t pd = (uintptr_t) data;
+
+ for (i = 0; i < data_len / 8; i++) {
+ init_val = rte_hash_crc_8byte(*(const uint64_t *)pd, init_val);
+ pd += 8;
+ }
+
+ switch (7 - (data_len & 0x07)) {
+ case 0:
+ temp |= (uint64_t) *((const uint8_t *)pd + 6) << 48;
+ /* Fallthrough */
+ case 1:
+ temp |= (uint64_t) *((const uint8_t *)pd + 5) << 40;
+ /* Fallthrough */
+ case 2:
+ temp |= (uint64_t) *((const uint8_t *)pd + 4) << 32;
+ temp |= *(const uint32_t *)pd;
+ init_val = rte_hash_crc_8byte(temp, init_val);
+ break;
+ case 3:
+ init_val = rte_hash_crc_4byte(*(const uint32_t *)pd, init_val);
+ break;
+ case 4:
+ temp |= *((const uint8_t *)pd + 2) << 16;
+ /* Fallthrough */
+ case 5:
+ temp |= *((const uint8_t *)pd + 1) << 8;
+ /* Fallthrough */
+ case 6:
+ temp |= *(const uint8_t *)pd;
+ init_val = rte_hash_crc_4byte(temp, init_val);
+ /* Fallthrough */
+ default:
+ break;
+ }
+
+ return init_val;
+}
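
A usage sketch for rte_hash_crc() (the struct and names are illustrative, not part of this patch): hashing an IPv4 5-tuple for flow classification:

struct flow_key {
	uint32_t src_ip;
	uint32_t dst_ip;
	uint16_t src_port;
	uint16_t dst_port;
	uint8_t proto;
};

static uint32_t
flow_key_hash(const struct flow_key *key)
{
	/* The whole struct is hashed, including padding, so zero-initialize
	 * keys before filling them. Any seed works; 0 keeps results
	 * comparable across runs. */
	return rte_hash_crc(key, sizeof(*key), 0);
}
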
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_HASH_CRC_H_ */
diff --git a/src/dpdk22/lib/librte_hash/rte_jhash.h b/src/dpdk22/lib/librte_hash/rte_jhash.h
new file mode 100644
index 00000000..457f225c
--- /dev/null
+++ b/src/dpdk22/lib/librte_hash/rte_jhash.h
@@ -0,0 +1,410 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_JHASH_H
+#define _RTE_JHASH_H
+
+/**
+ * @file
+ *
+ * jhash functions.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <string.h>
+#include <limits.h>
+
+#include <rte_log.h>
+#include <rte_byteorder.h>
+
+/* jhash.h: Jenkins hash support.
+ *
+ * Copyright (C) 2006 Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
+ * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
+ * These are functions for producing 32-bit hashes for hash table lookup.
+ * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
+ * are externally useful functions. Routines to test the hash are included
+ * if SELF_TEST is defined. You can use this free for any purpose. It's in
+ * the public domain. It has no warranty.
+ *
+ * $FreeBSD$
+ */
+
+#define rot(x, k) (((x) << (k)) | ((x) >> (32-(k))))
+
+/** @internal Internal function. NOTE: Arguments are modified. */
+#define __rte_jhash_mix(a, b, c) do { \
+ a -= c; a ^= rot(c, 4); c += b; \
+ b -= a; b ^= rot(a, 6); a += c; \
+ c -= b; c ^= rot(b, 8); b += a; \
+ a -= c; a ^= rot(c, 16); c += b; \
+ b -= a; b ^= rot(a, 19); a += c; \
+ c -= b; c ^= rot(b, 4); b += a; \
+} while (0)
+
+#define __rte_jhash_final(a, b, c) do { \
+ c ^= b; c -= rot(b, 14); \
+ a ^= c; a -= rot(c, 11); \
+ b ^= a; b -= rot(a, 25); \
+ c ^= b; c -= rot(b, 16); \
+ a ^= c; a -= rot(c, 4); \
+ b ^= a; b -= rot(a, 14); \
+ c ^= b; c -= rot(b, 24); \
+} while (0)
+
+/** The golden ratio: an arbitrary value. */
+#define RTE_JHASH_GOLDEN_RATIO 0xdeadbeef
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define BIT_SHIFT(x, y, k) (((x) >> (k)) | ((uint64_t)(y) << (32-(k))))
+#else
+#define BIT_SHIFT(x, y, k) (((uint64_t)(x) << (k)) | ((y) >> (32-(k))))
+#endif
+
+#define LOWER8b_MASK rte_le_to_cpu_32(0xff)
+#define LOWER16b_MASK rte_le_to_cpu_32(0xffff)
+#define LOWER24b_MASK rte_le_to_cpu_32(0xffffff)
+
+static inline void
+__rte_jhash_2hashes(const void *key, uint32_t length, uint32_t *pc,
+ uint32_t *pb, unsigned check_align)
+{
+ uint32_t a, b, c;
+
+ /* Set up the internal state */
+ a = b = c = RTE_JHASH_GOLDEN_RATIO + ((uint32_t)length) + *pc;
+ c += *pb;
+
+ /*
+ * Check key alignment. On x86 architectures the first case is always
+ * optimal. If check_align is not set, the first case will be used.
+ */
+#if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_I686) || defined(RTE_ARCH_X86_X32)
+ const uint32_t *k = key;
+ const uint32_t s = 0;
+#else
+ const uint32_t *k = (uint32_t *)((uintptr_t)key & (uintptr_t)~3);
+ const uint32_t s = ((uintptr_t)key & 3) * CHAR_BIT;
+#endif
+ if (!check_align || s == 0) {
+ while (length > 12) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+
+ __rte_jhash_mix(a, b, c);
+
+ k += 3;
+ length -= 12;
+ }
+
+ switch (length) {
+ case 12:
+ c += k[2]; b += k[1]; a += k[0]; break;
+ case 11:
+ c += k[2] & LOWER24b_MASK; b += k[1]; a += k[0]; break;
+ case 10:
+ c += k[2] & LOWER16b_MASK; b += k[1]; a += k[0]; break;
+ case 9:
+ c += k[2] & LOWER8b_MASK; b += k[1]; a += k[0]; break;
+ case 8:
+ b += k[1]; a += k[0]; break;
+ case 7:
+ b += k[1] & LOWER24b_MASK; a += k[0]; break;
+ case 6:
+ b += k[1] & LOWER16b_MASK; a += k[0]; break;
+ case 5:
+ b += k[1] & LOWER8b_MASK; a += k[0]; break;
+ case 4:
+ a += k[0]; break;
+ case 3:
+ a += k[0] & LOWER24b_MASK; break;
+ case 2:
+ a += k[0] & LOWER16b_MASK; break;
+ case 1:
+ a += k[0] & LOWER8b_MASK; break;
+ /* zero length strings require no mixing */
+ case 0:
+ *pc = c;
+ *pb = b;
+ return;
+ };
+ } else {
+ /* all but the last block: affect some 32 bits of (a, b, c) */
+ while (length > 12) {
+ a += BIT_SHIFT(k[0], k[1], s);
+ b += BIT_SHIFT(k[1], k[2], s);
+ c += BIT_SHIFT(k[2], k[3], s);
+ __rte_jhash_mix(a, b, c);
+
+ k += 3;
+ length -= 12;
+ }
+
+ /* last block: affect all 32 bits of (c) */
+ switch (length) {
+ case 12:
+ a += BIT_SHIFT(k[0], k[1], s);
+ b += BIT_SHIFT(k[1], k[2], s);
+ c += BIT_SHIFT(k[2], k[3], s);
+ break;
+ case 11:
+ a += BIT_SHIFT(k[0], k[1], s);
+ b += BIT_SHIFT(k[1], k[2], s);
+ c += BIT_SHIFT(k[2], k[3], s) & LOWER24b_MASK;
+ break;
+ case 10:
+ a += BIT_SHIFT(k[0], k[1], s);
+ b += BIT_SHIFT(k[1], k[2], s);
+ c += BIT_SHIFT(k[2], k[3], s) & LOWER16b_MASK;
+ break;
+ case 9:
+ a += BIT_SHIFT(k[0], k[1], s);
+ b += BIT_SHIFT(k[1], k[2], s);
+ c += BIT_SHIFT(k[2], k[3], s) & LOWER8b_MASK;
+ break;
+ case 8:
+ a += BIT_SHIFT(k[0], k[1], s);
+ b += BIT_SHIFT(k[1], k[2], s);
+ break;
+ case 7:
+ a += BIT_SHIFT(k[0], k[1], s);
+ b += BIT_SHIFT(k[1], k[2], s) & LOWER24b_MASK;
+ break;
+ case 6:
+ a += BIT_SHIFT(k[0], k[1], s);
+ b += BIT_SHIFT(k[1], k[2], s) & LOWER16b_MASK;
+ break;
+ case 5:
+ a += BIT_SHIFT(k[0], k[1], s);
+ b += BIT_SHIFT(k[1], k[2], s) & LOWER8b_MASK;
+ break;
+ case 4:
+ a += BIT_SHIFT(k[0], k[1], s);
+ break;
+ case 3:
+ a += BIT_SHIFT(k[0], k[1], s) & LOWER24b_MASK;
+ break;
+ case 2:
+ a += BIT_SHIFT(k[0], k[1], s) & LOWER16b_MASK;
+ break;
+ case 1:
+ a += BIT_SHIFT(k[0], k[1], s) & LOWER8b_MASK;
+ break;
+ /* zero length strings require no mixing */
+ case 0:
+ *pc = c;
+ *pb = b;
+ return;
+ }
+ }
+
+ __rte_jhash_final(a, b, c);
+
+ *pc = c;
+ *pb = b;
+}
+
+/**
+ * Same as rte_jhash, but takes two seeds and returns two uint32_ts.
+ * pc and pb must be non-null, and *pc and *pb must both be initialized
+ * with seeds. If you pass in (*pb)=0, the output (*pc) will be
+ * the same as the return value from rte_jhash.
+ *
+ * @param key
+ * Key to calculate hash of.
+ * @param length
+ * Length of key in bytes.
+ * @param pc
+ * IN: seed OUT: primary hash value.
+ * @param pb
+ * IN: second seed OUT: secondary hash value.
+ */
+static inline void
+rte_jhash_2hashes(const void *key, uint32_t length, uint32_t *pc, uint32_t *pb)
+{
+ __rte_jhash_2hashes(key, length, pc, pb, 1);
+}
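
Deriving two independent hash values in one pass is the main use of the two-hash form, for example in a two-choice hash table or a Bloom filter. A sketch (names illustrative, not part of this patch):

static void
two_bucket_indices(const void *key, uint32_t len, uint32_t nbuckets,
		   uint32_t *primary, uint32_t *secondary)
{
	uint32_t pc = 0, pb = 0;	/* the two seeds */

	rte_jhash_2hashes(key, len, &pc, &pb);
	*primary = pc % nbuckets;
	*secondary = pb % nbuckets;
}
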
+
+/**
+ * Same as rte_jhash_32b, but takes two seeds and returns two uint32_ts.
+ * pc and pb must be non-null, and *pc and *pb must both be initialized
+ * with seeds. If you pass in (*pb)=0, the output (*pc) will be
+ * the same as the return value from rte_jhash_32b.
+ *
+ * @param k
+ * Key to calculate hash of.
+ * @param length
+ * Length of key in units of 4 bytes.
+ * @param pc
+ * IN: seed OUT: primary hash value.
+ * @param pb
+ * IN: second seed OUT: secondary hash value.
+ */
+static inline void
+rte_jhash_32b_2hashes(const uint32_t *k, uint32_t length, uint32_t *pc, uint32_t *pb)
+{
+ __rte_jhash_2hashes((const void *) k, (length << 2), pc, pb, 0);
+}
+
+/**
+ * The most generic version, hashes an arbitrary sequence
+ * of bytes. No alignment or length assumptions are made about
+ * the input key.
+ *
+ * @param key
+ * Key to calculate hash of.
+ * @param length
+ * Length of key in bytes.
+ * @param initval
+ * Initialising value of hash.
+ * @return
+ * Calculated hash value.
+ */
+static inline uint32_t
+rte_jhash(const void *key, uint32_t length, uint32_t initval)
+{
+ uint32_t initval2 = 0;
+
+ rte_jhash_2hashes(key, length, &initval, &initval2);
+
+ return initval;
+}
+
+/**
+ * A special optimized version that handles one or more uint32_ts.
+ * The length parameter here is the number of uint32_ts in the key.
+ *
+ * @param k
+ * Key to calculate hash of.
+ * @param length
+ * Length of key in units of 4 bytes.
+ * @param initval
+ * Initialising value of hash.
+ * @return
+ * Calculated hash value.
+ */
+static inline uint32_t
+rte_jhash_32b(const uint32_t *k, uint32_t length, uint32_t initval)
+{
+ uint32_t initval2 = 0;
+
+ rte_jhash_32b_2hashes(k, length, &initval, &initval2);
+
+ return initval;
+}
+
+static inline uint32_t
+__rte_jhash_3words(uint32_t a, uint32_t b, uint32_t c, uint32_t initval)
+{
+ a += RTE_JHASH_GOLDEN_RATIO + initval;
+ b += RTE_JHASH_GOLDEN_RATIO + initval;
+ c += RTE_JHASH_GOLDEN_RATIO + initval;
+
+ __rte_jhash_final(a, b, c);
+
+ return c;
+}
+
+/**
+ * A special ultra-optimized version that knows it is hashing exactly
+ * 3 words.
+ *
+ * @param a
+ * First word to calculate hash of.
+ * @param b
+ * Second word to calculate hash of.
+ * @param c
+ * Third word to calculate hash of.
+ * @param initval
+ * Initialising value of hash.
+ * @return
+ * Calculated hash value.
+ */
+static inline uint32_t
+rte_jhash_3words(uint32_t a, uint32_t b, uint32_t c, uint32_t initval)
+{
+ return __rte_jhash_3words(a + 12, b + 12, c + 12, initval);
+}
+
+/**
+ * A special ultra-optimized version that knows it is hashing exactly
+ * 2 words.
+ *
+ * @param a
+ * First word to calculate hash of.
+ * @param b
+ * Second word to calculate hash of.
+ * @param initval
+ * Initialising value of hash.
+ * @return
+ * Calculated hash value.
+ */
+static inline uint32_t
+rte_jhash_2words(uint32_t a, uint32_t b, uint32_t initval)
+{
+ return __rte_jhash_3words(a + 8, b + 8, 8, initval);
+}
+
+/**
+ * A special ultra-optimized version that knows it is hashing exactly
+ * 1 word.
+ *
+ * @param a
+ * Word to calculate hash of.
+ * @param initval
+ * Initialising value of hash.
+ * @return
+ * Calculated hash value.
+ */
+static inline uint32_t
+rte_jhash_1word(uint32_t a, uint32_t initval)
+{
+ return __rte_jhash_3words(a + 4, 4, 4, initval);
+}
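
The +4, +8 and +12 offsets in these fixed-size wrappers reproduce the length mixing of the generic path, which seeds a, b and c with RTE_JHASH_GOLDEN_RATIO + length + seed, so the specialized and generic entry points agree. An illustrative check (not part of this patch):

static int
jhash_1word_matches_generic(uint32_t word, uint32_t seed)
{
	/* Both sides fold the 4-byte length into the initial state, so
	 * this returns 1 for every word/seed pair. */
	return rte_jhash_1word(word, seed) ==
		rte_jhash(&word, sizeof(word), seed);
}
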
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_JHASH_H */
diff --git a/src/dpdk22/lib/librte_hash/rte_thash.h b/src/dpdk22/lib/librte_hash/rte_thash.h
new file mode 100644
index 00000000..d98e98e7
--- /dev/null
+++ b/src/dpdk22/lib/librte_hash/rte_thash.h
@@ -0,0 +1,250 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Vladimir Medvedkin <medvedkinv@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_THASH_H
+#define _RTE_THASH_H
+
+/**
+ * @file
+ *
+ * Toeplitz hash functions.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Software implementation of the Toeplitz hash function used by RSS.
+ * Can be used either for packet distribution on a single-queue NIC
+ * or for simulating RSS computation on a specific NIC (for example,
+ * after GRE header decapsulation).
+ */
+
+#include <stdint.h>
+#include <rte_byteorder.h>
+#include <rte_ip.h>
+
+#ifdef __SSE3__
+#include <rte_vect.h>
+#endif
+
+#ifdef __SSE3__
+/* Byte swap mask used for converting IPv6 address
+ * 4-byte chunks to CPU byte order
+ */
+static const __m128i rte_thash_ipv6_bswap_mask = {
+ 0x0405060700010203ULL, 0x0C0D0E0F08090A0BULL};
+#endif
+
+/**
+ * length in dwords of input tuple to
+ * calculate hash of ipv4 header only
+ */
+#define RTE_THASH_V4_L3_LEN ((sizeof(struct rte_ipv4_tuple) - \
+ sizeof(((struct rte_ipv4_tuple *)0)->sctp_tag)) / 4)
+
+/**
+ * length in dwords of input tuple to
+ * calculate hash of ipv4 header +
+ * transport header
+ */
+#define RTE_THASH_V4_L4_LEN ((sizeof(struct rte_ipv4_tuple)) / 4)
+
+/**
+ * length in dwords of input tuple to
+ * calculate hash of ipv6 header only
+ */
+#define RTE_THASH_V6_L3_LEN ((sizeof(struct rte_ipv6_tuple) - \
+ sizeof(((struct rte_ipv6_tuple *)0)->sctp_tag)) / 4)
+
+/**
+ * length in dwords of input tuple to
+ * calculate hash of ipv6 header +
+ * transport header
+ */
+#define RTE_THASH_V6_L4_LEN ((sizeof(struct rte_ipv6_tuple)) / 4)
+
+/**
+ * IPv4 tuple
+ * Addresses and ports/sctp_tag have to be in CPU byte order.
+ */
+struct rte_ipv4_tuple {
+ uint32_t src_addr;
+ uint32_t dst_addr;
+ union {
+ struct {
+ uint16_t dport;
+ uint16_t sport;
+ };
+ uint32_t sctp_tag;
+ };
+};
+
+/**
+ * IPv6 tuple
+ * Addresses have to be filled by rte_thash_load_v6_addrs();
+ * ports/sctp_tag have to be in CPU byte order.
+ */
+struct rte_ipv6_tuple {
+ uint8_t src_addr[16];
+ uint8_t dst_addr[16];
+ union {
+ struct {
+ uint16_t dport;
+ uint16_t sport;
+ };
+ uint32_t sctp_tag;
+ };
+};
+
+union rte_thash_tuple {
+ struct rte_ipv4_tuple v4;
+ struct rte_ipv6_tuple v6;
+#ifdef __SSE3__
+} __attribute__((aligned(XMM_SIZE)));
+#else
+};
+#endif
+
+/**
+ * Prepare a special converted key for use with rte_softrss_be().
+ * @param orig
+ * pointer to original RSS key
+ * @param targ
+ * pointer to target RSS key
+ * @param len
+ * RSS key length
+ */
+static inline void
+rte_convert_rss_key(const uint32_t *orig, uint32_t *targ, int len)
+{
+ int i;
+
+ for (i = 0; i < (len >> 2); i++)
+ targ[i] = rte_be_to_cpu_32(orig[i]);
+}
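
A usage sketch (buffer names and the 40-byte size are illustrative; 40 bytes is a typical NIC RSS key length): converting the key programmed into the NIC once at startup, so the byte swap stays off the per-packet path:

#define EXAMPLE_RSS_KEY_LEN 40	/* illustrative; must be a multiple of 4 */

static uint8_t nic_rss_key[EXAMPLE_RSS_KEY_LEN];	/* as written to the NIC */
static uint8_t host_rss_key[EXAMPLE_RSS_KEY_LEN];	/* for rte_softrss_be() */

static void
convert_rss_key_once(void)
{
	rte_convert_rss_key((const uint32_t *)nic_rss_key,
			    (uint32_t *)host_rss_key, EXAMPLE_RSS_KEY_LEN);
}
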
+
+/**
+ * Prepare and load IPv6 addresses (src and dst)
+ * into target tuple
+ * @param orig
+ * Pointer to ipv6 header of the original packet
+ * @param targ
+ * Pointer to rte_ipv6_tuple structure
+ */
+static inline void
+rte_thash_load_v6_addrs(const struct ipv6_hdr *orig, union rte_thash_tuple *targ)
+{
+#ifdef __SSE3__
+ __m128i ipv6 = _mm_loadu_si128((const __m128i *)orig->src_addr);
+ *(__m128i *)targ->v6.src_addr =
+ _mm_shuffle_epi8(ipv6, rte_thash_ipv6_bswap_mask);
+ ipv6 = _mm_loadu_si128((const __m128i *)orig->dst_addr);
+ *(__m128i *)targ->v6.dst_addr =
+ _mm_shuffle_epi8(ipv6, rte_thash_ipv6_bswap_mask);
+#else
+ int i;
+ for (i = 0; i < 4; i++) {
+ *((uint32_t *)targ->v6.src_addr + i) =
+ rte_be_to_cpu_32(*((const uint32_t *)orig->src_addr + i));
+ *((uint32_t *)targ->v6.dst_addr + i) =
+ rte_be_to_cpu_32(*((const uint32_t *)orig->dst_addr + i));
+ }
+#endif
+}
+
+/**
+ * Generic implementation. Can be used with the original rss_key.
+ * @param input_tuple
+ * Pointer to input tuple
+ * @param input_len
+ * Length of input_tuple in 4-byte chunks
+ * @param rss_key
+ * Pointer to RSS hash key.
+ * @return
+ * Calculated hash value.
+ */
+static inline uint32_t
+rte_softrss(uint32_t *input_tuple, uint32_t input_len,
+ const uint8_t *rss_key)
+{
+ uint32_t i, j, ret = 0;
+
+ for (j = 0; j < input_len; j++) {
+ for (i = 0; i < 32; i++) {
+ if (input_tuple[j] & (1 << (31 - i))) {
+ ret ^= rte_cpu_to_be_32(((const uint32_t *)rss_key)[j]) << i |
+ (uint32_t)((uint64_t)(rte_cpu_to_be_32(((const uint32_t *)rss_key)[j + 1])) >>
+ (32 - i));
+ }
+ }
+ }
+ return ret;
+}
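
A sketch of simulating the NIC's RSS hash for a TCP/IPv4 flow in software (addresses, ports and the key pointer are illustrative). Note that rte_softrss() reads one dword past the last input word of the key (rss_key[j + 1]), so the key must be at least (input_len + 1) * 4 bytes long:

static uint32_t
softrss_tcp_v4_example(uint32_t src_ip, uint32_t dst_ip,
		       uint16_t src_port, uint16_t dst_port,
		       const uint8_t *rss_key)
{
	union rte_thash_tuple tuple;

	tuple.v4.src_addr = src_ip;	/* CPU byte order, as documented */
	tuple.v4.dst_addr = dst_ip;
	tuple.v4.sport = src_port;
	tuple.v4.dport = dst_port;

	return rte_softrss((uint32_t *)&tuple, RTE_THASH_V4_L4_LEN, rss_key);
}
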
+
+/**
+ * Optimized implementation.
+ * If you want the calculated hash value to match the NIC RSS value,
+ * you have to use a key converted with the rte_convert_rss_key() function.
+ * @param input_tuple
+ * Pointer to input tuple
+ * @param input_len
+ * Length of input_tuple in 4-byte chunks
+ * @param rss_key
+ * Pointer to RSS hash key.
+ * @return
+ * Calculated hash value.
+ */
+static inline uint32_t
+rte_softrss_be(uint32_t *input_tuple, uint32_t input_len,
+ const uint8_t *rss_key)
+{
+ uint32_t i, j, ret = 0;
+
+ for (j = 0; j < input_len; j++) {
+ for (i = 0; i < 32; i++) {
+ if (input_tuple[j] & (1 << (31 - i))) {
+ ret ^= ((const uint32_t *)rss_key)[j] << i |
+ (uint32_t)((uint64_t)(((const uint32_t *)rss_key)[j + 1]) >> (32 - i));
+ }
+ }
+ }
+ return ret;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_THASH_H */
diff --git a/src/dpdk_lib18/librte_kvargs/rte_kvargs.c b/src/dpdk22/lib/librte_kvargs/rte_kvargs.c
index 8bc1e461..c2dd0513 100755..100644
--- a/src/dpdk_lib18/librte_kvargs/rte_kvargs.c
+++ b/src/dpdk22/lib/librte_kvargs/rte_kvargs.c
@@ -174,8 +174,12 @@ rte_kvargs_process(const struct rte_kvargs *kvlist,
void
rte_kvargs_free(struct rte_kvargs *kvlist)
{
+ if (!kvlist)
+ return;
+
if (kvlist->str != NULL)
free(kvlist->str);
+
free(kvlist);
}
diff --git a/src/dpdk_lib18/librte_kvargs/rte_kvargs.h b/src/dpdk22/lib/librte_kvargs/rte_kvargs.h
index ef4efabf..ae9ae79f 100755..100644
--- a/src/dpdk_lib18/librte_kvargs/rte_kvargs.h
+++ b/src/dpdk22/lib/librte_kvargs/rte_kvargs.h
@@ -115,7 +115,8 @@ void rte_kvargs_free(struct rte_kvargs *kvlist);
*
* For each key/value association that matches the given key, calls the
 * handler function for the given arg_name, passing the value in the
- * dictionary for that key and a given extra argument.
+ * dictionary for that key and a given extra argument. If *kvlist* is NULL,
+ * the function does nothing.
*
* @param kvlist
* The rte_kvargs structure
diff --git a/src/dpdk_lib18/librte_mbuf/rte_mbuf.c b/src/dpdk22/lib/librte_mbuf/rte_mbuf.c
index 1b14e027..c18b4381 100755..100644
--- a/src/dpdk_lib18/librte_mbuf/rte_mbuf.c
+++ b/src/dpdk22/lib/librte_mbuf/rte_mbuf.c
@@ -48,7 +48,6 @@
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
@@ -59,6 +58,7 @@
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_hexdump.h>
+#include <rte_errno.h>
/*
* ctrlmbuf constructor, given as a callback function to
@@ -82,17 +82,30 @@ rte_ctrlmbuf_init(struct rte_mempool *mp,
void
rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
{
- struct rte_pktmbuf_pool_private *mbp_priv;
+ struct rte_pktmbuf_pool_private *user_mbp_priv, *mbp_priv;
+ struct rte_pktmbuf_pool_private default_mbp_priv;
uint16_t roomsz;
- mbp_priv = rte_mempool_get_priv(mp);
- roomsz = (uint16_t)(uintptr_t)opaque_arg;
+ RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
+
+ /* if no structure is provided, assume no mbuf private area */
+ user_mbp_priv = opaque_arg;
+ if (user_mbp_priv == NULL) {
+ default_mbp_priv.mbuf_priv_size = 0;
+ if (mp->elt_size > sizeof(struct rte_mbuf))
+ roomsz = mp->elt_size - sizeof(struct rte_mbuf);
+ else
+ roomsz = 0;
+ default_mbp_priv.mbuf_data_room_size = roomsz;
+ user_mbp_priv = &default_mbp_priv;
+ }
- /* Use default data room size. */
- if (0 == roomsz)
- roomsz = 2048 + RTE_PKTMBUF_HEADROOM;
+ RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
+ user_mbp_priv->mbuf_data_room_size +
+ user_mbp_priv->mbuf_priv_size);
- mbp_priv->mbuf_data_room_size = roomsz;
+ mbp_priv = rte_mempool_get_priv(mp);
+ memcpy(mbp_priv, user_mbp_priv, sizeof(*mbp_priv));
}
/*
@@ -107,16 +120,22 @@ rte_pktmbuf_init(struct rte_mempool *mp,
__attribute__((unused)) unsigned i)
{
struct rte_mbuf *m = _m;
- uint32_t buf_len = mp->elt_size - sizeof(struct rte_mbuf);
+ uint32_t mbuf_size, buf_len, priv_size;
- RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
+ priv_size = rte_pktmbuf_priv_size(mp);
+ mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+ buf_len = rte_pktmbuf_data_room_size(mp);
+
+ RTE_MBUF_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
+ RTE_MBUF_ASSERT(mp->elt_size >= mbuf_size);
+ RTE_MBUF_ASSERT(buf_len <= UINT16_MAX);
memset(m, 0, mp->elt_size);
- /* start of buffer is just after mbuf structure */
- m->buf_addr = (char *)m + sizeof(struct rte_mbuf);
- m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
- sizeof(struct rte_mbuf);
+ /* start of buffer is after mbuf structure and priv data */
+ m->priv_size = priv_size;
+ m->buf_addr = (char *)m + mbuf_size;
+ m->buf_physaddr = rte_mempool_virt2phy(mp, m) + mbuf_size;
m->buf_len = (uint16_t)buf_len;
/* keep some headroom between start of buffer and data */
@@ -128,6 +147,32 @@ rte_pktmbuf_init(struct rte_mempool *mp,
m->port = 0xff;
}
+/* helper to create a mbuf pool */
+struct rte_mempool *
+rte_pktmbuf_pool_create(const char *name, unsigned n,
+ unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
+ int socket_id)
+{
+ struct rte_pktmbuf_pool_private mbp_priv;
+ unsigned elt_size;
+
+ if (RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) != priv_size) {
+ RTE_LOG(ERR, MBUF, "mbuf priv_size=%u is not aligned\n",
+ priv_size);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ elt_size = sizeof(struct rte_mbuf) + (unsigned)priv_size +
+ (unsigned)data_room_size;
+ mbp_priv.mbuf_data_room_size = data_room_size;
+ mbp_priv.mbuf_priv_size = priv_size;
+
+ return rte_mempool_create(name, n, elt_size,
+ cache_size, sizeof(struct rte_pktmbuf_pool_private),
+ rte_pktmbuf_pool_init, &mbp_priv, rte_pktmbuf_init, NULL,
+ socket_id, 0);
+}
+
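
A usage sketch for the new helper (pool name and sizes illustrative): creating a pool of 8191 mbufs with the conventional 2 KB data room and no per-mbuf private area:

static struct rte_mempool *
create_example_pool(void)
{
	/* 8191 mbufs, 256-entry per-lcore cache, priv_size 0, default
	 * data room plus headroom, on the caller's socket. Returns NULL
	 * with rte_errno set on failure, e.g. EINVAL for a misaligned
	 * priv_size. */
	return rte_pktmbuf_pool_create("example_pool", 8191, 256, 0,
				       2048 + RTE_PKTMBUF_HEADROOM,
				       rte_socket_id());
}
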
/* do some sanity checks on a mbuf: panic if it fails */
void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
@@ -146,11 +191,9 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
if (m->buf_addr == NULL)
rte_panic("bad virt addr\n");
-#ifdef RTE_MBUF_REFCNT
uint16_t cnt = rte_mbuf_refcnt_read(m);
if ((cnt == 0) || (cnt == UINT16_MAX))
rte_panic("bad ref cnt\n");
-#endif
/* nothing to check for sub-segments */
if (is_header == 0)
@@ -215,14 +258,8 @@ const char *rte_get_rx_ol_flag_name(uint64_t mask)
/* case PKT_RX_HBUF_OVERFLOW: return "PKT_RX_HBUF_OVERFLOW"; */
/* case PKT_RX_RECIP_ERR: return "PKT_RX_RECIP_ERR"; */
/* case PKT_RX_MAC_ERR: return "PKT_RX_MAC_ERR"; */
- case PKT_RX_IPV4_HDR: return "PKT_RX_IPV4_HDR";
- case PKT_RX_IPV4_HDR_EXT: return "PKT_RX_IPV4_HDR_EXT";
- case PKT_RX_IPV6_HDR: return "PKT_RX_IPV6_HDR";
- case PKT_RX_IPV6_HDR_EXT: return "PKT_RX_IPV6_HDR_EXT";
case PKT_RX_IEEE1588_PTP: return "PKT_RX_IEEE1588_PTP";
case PKT_RX_IEEE1588_TMST: return "PKT_RX_IEEE1588_TMST";
- case PKT_RX_TUNNEL_IPV4_HDR: return "PKT_RX_TUNNEL_IPV4_HDR";
- case PKT_RX_TUNNEL_IPV6_HDR: return "PKT_RX_TUNNEL_IPV6_HDR";
default: return NULL;
}
}
@@ -240,7 +277,6 @@ const char *rte_get_tx_ol_flag_name(uint64_t mask)
case PKT_TX_SCTP_CKSUM: return "PKT_TX_SCTP_CKSUM";
case PKT_TX_UDP_CKSUM: return "PKT_TX_UDP_CKSUM";
case PKT_TX_IEEE1588_TMST: return "PKT_TX_IEEE1588_TMST";
- case PKT_TX_UDP_TUNNEL_PKT: return "PKT_TX_UDP_TUNNEL_PKT";
case PKT_TX_TCP_SEG: return "PKT_TX_TCP_SEG";
case PKT_TX_IPV4: return "PKT_TX_IPV4";
case PKT_TX_IPV6: return "PKT_TX_IPV6";
diff --git a/src/dpdk_lib18/librte_mbuf/rte_mbuf.h b/src/dpdk22/lib/librte_mbuf/rte_mbuf.h
index 16059c66..f234ac9a 100755..100644
--- a/src/dpdk_lib18/librte_mbuf/rte_mbuf.h
+++ b/src/dpdk22/lib/librte_mbuf/rte_mbuf.h
@@ -54,6 +54,7 @@
*/
#include <stdint.h>
+#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_memory.h>
#include <rte_atomic.h>
@@ -64,8 +65,9 @@
extern "C" {
#endif
-/* deprecated feature, renamed in RTE_MBUF_REFCNT */
+/* deprecated options */
#pragma GCC poison RTE_MBUF_SCATTER_GATHER
+#pragma GCC poison RTE_MBUF_REFCNT
/*
* Packet Offload Features Flags. It also carry packet type information.
@@ -73,9 +75,10 @@ extern "C" {
*
* - RX flags start at bit position zero, and get added to the left of previous
* flags.
- * - The most-significant 8 bits are reserved for generic mbuf flags
- * - TX flags therefore start at bit position 55 (i.e. 63-8), and new flags get
- * added to the right of the previously defined flags
+ * - The most-significant 3 bits are reserved for generic mbuf flags
+ * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get
+ * added to the right of the previously defined flags i.e. they should count
+ * downwards, not upwards.
*
* Keep these flags synchronized with rte_get_rx_ol_flag_name() and
* rte_get_tx_ol_flag_name().
@@ -90,21 +93,21 @@ extern "C" {
#define PKT_RX_HBUF_OVERFLOW (0ULL << 0) /**< Header buffer overflow. */
#define PKT_RX_RECIP_ERR (0ULL << 0) /**< Hardware processing error. */
#define PKT_RX_MAC_ERR (0ULL << 0) /**< MAC error. */
-#define PKT_RX_IPV4_HDR (1ULL << 5) /**< RX packet with IPv4 header. */
-#define PKT_RX_IPV4_HDR_EXT (1ULL << 6) /**< RX packet with extended IPv4 header. */
-#define PKT_RX_IPV6_HDR (1ULL << 7) /**< RX packet with IPv6 header. */
-#define PKT_RX_IPV6_HDR_EXT (1ULL << 8) /**< RX packet with extended IPv6 header. */
#define PKT_RX_IEEE1588_PTP (1ULL << 9) /**< RX IEEE1588 L2 Ethernet PT Packet. */
#define PKT_RX_IEEE1588_TMST (1ULL << 10) /**< RX IEEE1588 L2/L4 timestamped packet.*/
-#define PKT_RX_TUNNEL_IPV4_HDR (1ULL << 11) /**< RX tunnel packet with IPv4 header.*/
-#define PKT_RX_TUNNEL_IPV6_HDR (1ULL << 12) /**< RX tunnel packet with IPv6 header. */
#define PKT_RX_FDIR_ID (1ULL << 13) /**< FD id reported if FDIR match. */
#define PKT_RX_FDIR_FLX (1ULL << 14) /**< Flexible bytes reported if FDIR match. */
+#define PKT_RX_QINQ_PKT (1ULL << 15) /**< RX packet with double VLAN stripped. */
/* add new RX flags here */
/* add new TX flags here */
/**
+ * Second VLAN insertion (QinQ) flag.
+ */
+#define PKT_TX_QINQ_PKT (1ULL << 49) /**< TX packet with double VLAN inserted. */
+
+/**
* TCP segmentation offload. To enable this offload feature for a
* packet to be transmitted on hardware supporting TSO:
* - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies
@@ -117,11 +120,8 @@ extern "C" {
* and set it in the TCP header. Refer to rte_ipv4_phdr_cksum() and
* rte_ipv6_phdr_cksum() that can be used as helpers.
*/
-#define PKT_TX_TCP_SEG (1ULL << 49)
+#define PKT_TX_TCP_SEG (1ULL << 50)
-/** TX packet is an UDP tunneled packet. It must be specified when using
- * outer checksum offload (PKT_TX_OUTER_IP_CKSUM) */
-#define PKT_TX_UDP_TUNNEL_PKT (1ULL << 50) /**< TX packet is an UDP tunneled packet */
#define PKT_TX_IEEE1588_TMST (1ULL << 51) /**< TX IEEE1588 packet to timestamp. */
/**
@@ -141,30 +141,554 @@ extern "C" {
#define PKT_TX_UDP_CKSUM (3ULL << 52) /**< UDP cksum of TX pkt. computed by NIC. */
#define PKT_TX_L4_MASK (3ULL << 52) /**< Mask for L4 cksum offload request. */
-#define PKT_TX_IP_CKSUM (1ULL << 54) /**< IP cksum of TX pkt. computed by NIC. */
-#define PKT_TX_IPV4_CSUM PKT_TX_IP_CKSUM /**< Alias of PKT_TX_IP_CKSUM. */
+/**
+ * Offload the IP checksum in the hardware. The flag PKT_TX_IPV4 should
+ * also be set by the application, although a PMD will only check
+ * PKT_TX_IP_CKSUM.
+ * - set the IP checksum field in the packet to 0
+ * - fill the mbuf offload information: l2_len, l3_len
+ */
+#define PKT_TX_IP_CKSUM (1ULL << 54)
-/** Packet is IPv4 without requiring IP checksum offload. */
+/**
+ * Packet is IPv4. This flag must be set when using any offload feature
+ * (TSO, L3 or L4 checksum) to tell the NIC that the packet is an IPv4
+ * packet. If the packet is a tunneled packet, this flag is related to
+ * the inner headers.
+ */
#define PKT_TX_IPV4 (1ULL << 55)
-/** Tell the NIC it's an IPv6 packet.*/
+/**
+ * Packet is IPv6. This flag must be set when using an offload feature
+ * (TSO or L4 checksum) to tell the NIC that the packet is an IPv6
+ * packet. If the packet is a tunneled packet, this flag is related to
+ * the inner headers.
+ */
#define PKT_TX_IPV6 (1ULL << 56)
#define PKT_TX_VLAN_PKT (1ULL << 57) /**< TX packet is a 802.1q VLAN packet. */
-/** Outer IP checksum of TX packet, computed by NIC for tunneling packet.
- * The tunnel type must also be specified, ex: PKT_TX_UDP_TUNNEL_PKT. */
+/**
+ * Offload the IP checksum of an external header in the hardware. The
+ * flag PKT_TX_OUTER_IPV4 should also be set by the application, although
+ * a PMD will only check PKT_TX_IP_CKSUM. The IP checksum field in the
+ * packet must be set to 0.
+ * - set the outer IP checksum field in the packet to 0
+ * - fill the mbuf offload information: outer_l2_len, outer_l3_len
+ */
#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
-/** Packet is outer IPv4 without requiring IP checksum offload for tunneling packet. */
+/**
+ * Packet outer header is IPv4. This flag must be set when using any
+ * outer offload feature (L3 or L4 checksum) to tell the NIC that the
+ * outer header of the tunneled packet is an IPv4 packet.
+ */
#define PKT_TX_OUTER_IPV4 (1ULL << 59)
-/** Tell the NIC it's an outer IPv6 packet for tunneling packet */
+/**
+ * Packet outer header is IPv6. This flag must be set when using any
+ * outer offload feature (L4 checksum) to tell the NIC that the outer
+ * header of the tunneled packet is an IPv6 packet.
+ */
#define PKT_TX_OUTER_IPV6 (1ULL << 60)
+#define __RESERVED (1ULL << 61) /**< reserved for future mbuf use */
+
+#define IND_ATTACHED_MBUF (1ULL << 62) /**< Indirect attached mbuf */
+
/* Use final bit of flags to indicate a control mbuf */
#define CTRL_MBUF_FLAG (1ULL << 63) /**< Mbuf contains control data */
+/*
+ * 32 bits are divided into several fields to mark packet types. Note that
+ * each field is an enumerated index within its bit range, not a set of
+ * independent flag bits.
+ * - Bit 3:0 is for L2 types.
+ * - Bit 7:4 is for L3 or outer L3 (for tunneling case) types.
+ * - Bit 11:8 is for L4 or outer L4 (for tunneling case) types.
+ * - Bit 15:12 is for tunnel types.
+ * - Bit 19:16 is for inner L2 types.
+ * - Bit 23:20 is for inner L3 types.
+ * - Bit 27:24 is for inner L4 types.
+ * - Bit 31:28 is reserved.
+ *
+ * To be compatible with Vector PMD, RTE_PTYPE_L3_IPV4, RTE_PTYPE_L3_IPV4_EXT,
+ * RTE_PTYPE_L3_IPV6, RTE_PTYPE_L3_IPV6_EXT, RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP
+ * and RTE_PTYPE_L4_SCTP should be kept as below in a contiguous 7 bits.
+ *
+ * Note that L3 types values are selected for checking IPV4/IPV6 header from
+ * performance point of view. Reading annotations of RTE_ETH_IS_IPV4_HDR and
+ * RTE_ETH_IS_IPV6_HDR is needed for any future changes of L3 type values.
+ *
+ * Note that the packet types of the same packet recognized by different
+ * hardware may be different, as different hardware may have different
+ * capability of packet type recognition.
+ *
+ * examples:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=0x29
+ * | 'version'=6, 'next header'=0x3A
+ * | 'ICMPv6 header'>
+ * will be recognized on i40e hardware as packet type combination of,
+ * RTE_PTYPE_L2_ETHER |
+ * RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ * RTE_PTYPE_TUNNEL_IP |
+ * RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ * RTE_PTYPE_INNER_L4_ICMP.
+ *
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=0x2F
+ * | 'GRE header'
+ * | 'version'=6, 'next header'=0x11
+ * | 'UDP header'>
+ * will be recognized on i40e hardware as packet type combination of,
+ * RTE_PTYPE_L2_ETHER |
+ * RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ * RTE_PTYPE_TUNNEL_GRENAT |
+ * RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ * RTE_PTYPE_INNER_L4_UDP.
+ */
+#define RTE_PTYPE_UNKNOWN 0x00000000
+/**
+ * Ethernet packet type.
+ * It is used for outer packet for tunneling cases.
+ *
+ * Packet format:
+ * <'ether type'=[0x0800|0x86DD]>
+ */
+#define RTE_PTYPE_L2_ETHER 0x00000001
+/**
+ * Ethernet packet type for time sync.
+ *
+ * Packet format:
+ * <'ether type'=0x88F7>
+ */
+#define RTE_PTYPE_L2_ETHER_TIMESYNC 0x00000002
+/**
+ * ARP (Address Resolution Protocol) packet type.
+ *
+ * Packet format:
+ * <'ether type'=0x0806>
+ */
+#define RTE_PTYPE_L2_ETHER_ARP 0x00000003
+/**
+ * LLDP (Link Layer Discovery Protocol) packet type.
+ *
+ * Packet format:
+ * <'ether type'=0x88CC>
+ */
+#define RTE_PTYPE_L2_ETHER_LLDP 0x00000004
+/**
+ * Mask of layer 2 packet types.
+ * It is used for outer packet for tunneling cases.
+ */
+#define RTE_PTYPE_L2_MASK 0x0000000f
+/**
+ * IP (Internet Protocol) version 4 packet type.
+ * It is used for outer packet for tunneling cases, and does not contain any
+ * header option.
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'ihl'=5>
+ */
+#define RTE_PTYPE_L3_IPV4 0x00000010
+/**
+ * IP (Internet Protocol) version 4 packet type.
+ * It is used for outer packet for tunneling cases, and contains header
+ * options.
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'ihl'=[6-15], 'options'>
+ */
+#define RTE_PTYPE_L3_IPV4_EXT 0x00000030
+/**
+ * IP (Internet Protocol) version 6 packet type.
+ * It is used for outer packet for tunneling cases, and does not contain any
+ * extension header.
+ *
+ * Packet format:
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=0x3B>
+ */
+#define RTE_PTYPE_L3_IPV6 0x00000040
+/**
+ * IP (Internet Protocol) version 4 packet type.
+ * It is used for outer packet for tunneling cases, and may or may not contain
+ * header options.
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'ihl'=[5-15], <'options'>>
+ */
+#define RTE_PTYPE_L3_IPV4_EXT_UNKNOWN 0x00000090
+/**
+ * IP (Internet Protocol) version 6 packet type.
+ * It is used for outer packet for tunneling cases, and contains extension
+ * headers.
+ *
+ * Packet format:
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=[0x0|0x2B|0x2C|0x32|0x33|0x3C|0x87],
+ * 'extension headers'>
+ */
+#define RTE_PTYPE_L3_IPV6_EXT 0x000000c0
+/**
+ * IP (Internet Protocol) version 6 packet type.
+ * It is used for outer packet for tunneling cases, and may or may not contain
+ * extension headers.
+ *
+ * Packet format:
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=[0x3B|0x0|0x2B|0x2C|0x32|0x33|0x3C|0x87],
+ * <'extension headers'>>
+ */
+#define RTE_PTYPE_L3_IPV6_EXT_UNKNOWN 0x000000e0
+/**
+ * Mask of layer 3 packet types.
+ * It is used for outer packet for tunneling cases.
+ */
+#define RTE_PTYPE_L3_MASK 0x000000f0
+/**
+ * TCP (Transmission Control Protocol) packet type.
+ * It is used for outer packet for tunneling cases.
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=6, 'MF'=0>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=6>
+ */
+#define RTE_PTYPE_L4_TCP 0x00000100
+/**
+ * UDP (User Datagram Protocol) packet type.
+ * It is used for outer packet for tunneling cases.
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17, 'MF'=0>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17>
+ */
+#define RTE_PTYPE_L4_UDP 0x00000200
+/**
+ * Fragmented IP (Internet Protocol) packet type.
+ * It is used for outer packet for tunneling cases.
+ *
+ * It refers to those packets of any IP types, which can be recognized as
+ * fragmented. A fragmented packet cannot be recognized as any other L4 types
+ * (RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP, RTE_PTYPE_L4_SCTP, RTE_PTYPE_L4_ICMP,
+ * RTE_PTYPE_L4_NONFRAG).
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'MF'=1>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=44>
+ */
+#define RTE_PTYPE_L4_FRAG 0x00000300
+/**
+ * SCTP (Stream Control Transmission Protocol) packet type.
+ * It is used for outer packet for tunneling cases.
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=132, 'MF'=0>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=132>
+ */
+#define RTE_PTYPE_L4_SCTP 0x00000400
+/**
+ * ICMP (Internet Control Message Protocol) packet type.
+ * It is used for outer packet for tunneling cases.
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=1, 'MF'=0>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=1>
+ */
+#define RTE_PTYPE_L4_ICMP 0x00000500
+/**
+ * Non-fragmented IP (Internet Protocol) packet type.
+ * It is used for outer packet for tunneling cases.
+ *
+ * It refers to those packets of any IP types, which cannot be recognized as
+ * any of above L4 types (RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP,
+ * RTE_PTYPE_L4_FRAG, RTE_PTYPE_L4_SCTP, RTE_PTYPE_L4_ICMP).
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'!=[6|17|132|1], 'MF'=0>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'!=[6|17|44|132|1]>
+ */
+#define RTE_PTYPE_L4_NONFRAG 0x00000600
+/**
+ * Mask of layer 4 packet types.
+ * It is used for outer packet for tunneling cases.
+ */
+#define RTE_PTYPE_L4_MASK 0x00000f00
+/**
+ * IP (Internet Protocol) in IP (Internet Protocol) tunneling packet type.
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=[4|41]>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=[4|41]>
+ */
+#define RTE_PTYPE_TUNNEL_IP 0x00001000
+/**
+ * GRE (Generic Routing Encapsulation) tunneling packet type.
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=47>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=47>
+ */
+#define RTE_PTYPE_TUNNEL_GRE 0x00002000
+/**
+ * VXLAN (Virtual eXtensible Local Area Network) tunneling packet type.
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=4789>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=4789>
+ */
+#define RTE_PTYPE_TUNNEL_VXLAN 0x00003000
+/**
+ * NVGRE (Network Virtualization using Generic Routing Encapsulation) tunneling
+ * packet type.
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=47
+ * | 'protocol type'=0x6558>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=47
+ * | 'protocol type'=0x6558>
+ */
+#define RTE_PTYPE_TUNNEL_NVGRE 0x00004000
+/**
+ * GENEVE (Generic Network Virtualization Encapsulation) tunneling packet type.
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=6081>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=6081>
+ */
+#define RTE_PTYPE_TUNNEL_GENEVE 0x00005000
+/**
+ * Tunneling packet type of Teredo, VXLAN (Virtual eXtensible Local Area
+ * Network) or GRE (Generic Routing Encapsulation); packets are reported
+ * with this type when the hardware is not capable of recognizing the
+ * tunnel type independently.
+ */
+#define RTE_PTYPE_TUNNEL_GRENAT 0x00006000
+/**
+ * Mask of tunneling packet types.
+ */
+#define RTE_PTYPE_TUNNEL_MASK 0x0000f000
+/**
+ * Ethernet packet type.
+ * It is used for inner packet type only.
+ *
+ * Packet format (inner only):
+ * <'ether type'=[0x0800|0x86DD]>
+ */
+#define RTE_PTYPE_INNER_L2_ETHER 0x00010000
+/**
+ * Ethernet packet type with VLAN (Virtual Local Area Network) tag.
+ *
+ * Packet format (inner only):
+ * <'ether type'=[0x0800|0x86DD], vlan=[1-4095]>
+ */
+#define RTE_PTYPE_INNER_L2_ETHER_VLAN 0x00020000
+/**
+ * Mask of inner layer 2 packet types.
+ */
+#define RTE_PTYPE_INNER_L2_MASK 0x000f0000
+/**
+ * IP (Internet Protocol) version 4 packet type.
+ * It is used for inner packet only, and does not contain any header option.
+ *
+ * Packet format (inner only):
+ * <'ether type'=0x0800
+ * | 'version'=4, 'ihl'=5>
+ */
+#define RTE_PTYPE_INNER_L3_IPV4 0x00100000
+/**
+ * IP (Internet Protocol) version 4 packet type.
+ * It is used for inner packet only, and contains header options.
+ *
+ * Packet format (inner only):
+ * <'ether type'=0x0800
+ * | 'version'=4, 'ihl'=[6-15], 'options'>
+ */
+#define RTE_PTYPE_INNER_L3_IPV4_EXT 0x00200000
+/**
+ * IP (Internet Protocol) version 6 packet type.
+ * It is used for inner packet only, and does not contain any extension header.
+ *
+ * Packet format (inner only):
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=0x3B>
+ */
+#define RTE_PTYPE_INNER_L3_IPV6 0x00300000
+/**
+ * IP (Internet Protocol) version 4 packet type.
+ * It is used for inner packet only, and may or may not contain header options.
+ *
+ * Packet format (inner only):
+ * <'ether type'=0x0800
+ * | 'version'=4, 'ihl'=[5-15], <'options'>>
+ */
+#define RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN 0x00400000
+/**
+ * IP (Internet Protocol) version 6 packet type.
+ * It is used for inner packet only, and contains extension headers.
+ *
+ * Packet format (inner only):
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=[0x0|0x2B|0x2C|0x32|0x33|0x3C|0x87],
+ * 'extension headers'>
+ */
+#define RTE_PTYPE_INNER_L3_IPV6_EXT 0x00500000
+/**
+ * IP (Internet Protocol) version 6 packet type.
+ * It is used for inner packet only, and may or may not contain extension
+ * headers.
+ *
+ * Packet format (inner only):
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=[0x3B|0x0|0x2B|0x2C|0x32|0x33|0x3C|0x87],
+ * <'extension headers'>>
+ */
+#define RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN 0x00600000
+/**
+ * Mask of inner layer 3 packet types.
+ */
+#define RTE_PTYPE_INNER_L3_MASK 0x00f00000
+/**
+ * TCP (Transmission Control Protocol) packet type.
+ * It is used for inner packet only.
+ *
+ * Packet format (inner only):
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=6, 'MF'=0>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=6>
+ */
+#define RTE_PTYPE_INNER_L4_TCP 0x01000000
+/**
+ * UDP (User Datagram Protocol) packet type.
+ * It is used for inner packet only.
+ *
+ * Packet format (inner only):
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17, 'MF'=0>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17>
+ */
+#define RTE_PTYPE_INNER_L4_UDP 0x02000000
+/**
+ * Fragmented IP (Internet Protocol) packet type.
+ * It is used for inner packet only, and may or may not have a layer 4 packet.
+ *
+ * Packet format (inner only):
+ * <'ether type'=0x0800
+ * | 'version'=4, 'MF'=1>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=44>
+ */
+#define RTE_PTYPE_INNER_L4_FRAG 0x03000000
+/**
+ * SCTP (Stream Control Transmission Protocol) packet type.
+ * It is used for inner packet only.
+ *
+ * Packet format (inner only):
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=132, 'MF'=0>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=132>
+ */
+#define RTE_PTYPE_INNER_L4_SCTP 0x04000000
+/**
+ * ICMP (Internet Control Message Protocol) packet type.
+ * It is used for inner packet only.
+ *
+ * Packet format (inner only):
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=1, 'MF'=0>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=1>
+ */
+#define RTE_PTYPE_INNER_L4_ICMP 0x05000000
+/**
+ * Non-fragmented IP (Internet Protocol) packet type.
+ * It is used for inner packet only, and may or may not have other unknown layer
+ * 4 packet types.
+ *
+ * Packet format (inner only):
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'!=[6|17|132|1], 'MF'=0>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'!=[6|17|44|132|1]>
+ */
+#define RTE_PTYPE_INNER_L4_NONFRAG 0x06000000
+/**
+ * Mask of inner layer 4 packet types.
+ */
+#define RTE_PTYPE_INNER_L4_MASK 0x0f000000
+
+/**
+ * Check if the (outer) L3 header is IPv4. To avoid comparing IPv4 types one by
+ * one, bit 4 is selected to be used for IPv4 only. Then checking bit 4 can
+ * determine if it is an IPv4 packet.
+ */
+#define RTE_ETH_IS_IPV4_HDR(ptype) ((ptype) & RTE_PTYPE_L3_IPV4)
+
+/**
+ * Check if the (outer) L3 header is IPv6. To avoid comparing IPv6 types one by
+ * one, bit 6 is selected to be used for IPv6 only. Then checking bit 6 can
+ * determine if it is an IPv6 packet.
+ */
+#define RTE_ETH_IS_IPV6_HDR(ptype) ((ptype) & RTE_PTYPE_L3_IPV6)
+
+/* Check if it is a tunneling packet */
+#define RTE_ETH_IS_TUNNEL_PKT(ptype) ((ptype) & (RTE_PTYPE_TUNNEL_MASK | \
+ RTE_PTYPE_INNER_L2_MASK | \
+ RTE_PTYPE_INNER_L3_MASK | \
+ RTE_PTYPE_INNER_L4_MASK))
+
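A minimal consumption sketch for the new composite packet_type, assuming the DPDK 2.2 headers above are on the include path; it uses only the constants and helper macros defined in this hunk:

    #include <stdio.h>
    #include <rte_mbuf.h>

    static void classify(uint32_t ptype)
    {
        if (RTE_ETH_IS_IPV4_HDR(ptype))
            printf("outer L3 is IPv4\n");
        else if (RTE_ETH_IS_IPV6_HDR(ptype))
            printf("outer L3 is IPv6\n");

        /* tunnel types are exact values within the masked field */
        if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_GRENAT)
            printf("GRE/VXLAN/Teredo tunnel\n");

        if ((ptype & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP)
            printf("inner L4 is UDP\n");
    }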
+/** Alignment constraint of mbuf private area. */
+#define RTE_MBUF_PRIV_ALIGN 8
+
/**
* Get the name of a RX offload flag
*
@@ -187,6 +711,16 @@ const char *rte_get_rx_ol_flag_name(uint64_t mask);
*/
const char *rte_get_tx_ol_flag_name(uint64_t mask);
+/**
+ * Some NICs need at least a 2KB buffer to RX a standard Ethernet frame
+ * without splitting it into multiple segments.
+ * So, for mbufs that are intended to be used for RX/TX, the recommended
+ * minimal buffer length is 2KB + RTE_PKTMBUF_HEADROOM.
+ */
+#define RTE_MBUF_DEFAULT_DATAROOM 2048
+#define RTE_MBUF_DEFAULT_BUF_SIZE \
+ (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
+
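With the common default of RTE_PKTMBUF_HEADROOM = 128 (a configurable value, assumed here), the resulting default buffer size works out as:

    uint16_t buf_size = RTE_MBUF_DEFAULT_BUF_SIZE; /* 2048 + 128 = 2176 bytes */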
/* define a set of marker types that can be used to refer to set points in the
* mbuf */
typedef void *MARKER[0]; /**< generic marker for a point in a structure */
@@ -194,6 +728,9 @@ typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
* with a single assignment */
+/** Opaque rte_mbuf_offload structure declarations */
+struct rte_mbuf_offload;
+
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
@@ -218,11 +755,8 @@ struct rte_mbuf {
* config option.
*/
union {
-#ifdef RTE_MBUF_REFCNT
rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
uint16_t refcnt; /**< Non-atomically accessed refcnt */
-#endif
- uint16_t refcnt_reserved; /**< Do not use this field */
};
uint8_t nb_segs; /**< Number of segments. */
uint8_t port; /**< Input port. */
@@ -232,17 +766,27 @@ struct rte_mbuf {
/* remaining bytes are set on RX when pulling packet from descriptor */
MARKER rx_descriptor_fields1;
- /**
- * The packet type, which is used to indicate ordinary packet and also
- * tunneled packet format, i.e. each number is represented a type of
- * packet.
+ /*
+ * The packet type, which is the combination of outer/inner L2, L3, L4
+ * and tunnel types.
*/
- uint16_t packet_type;
+ union {
+ uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
+ struct {
+ uint32_t l2_type:4; /**< (Outer) L2 type. */
+ uint32_t l3_type:4; /**< (Outer) L3 type. */
+ uint32_t l4_type:4; /**< (Outer) L4 type. */
+ uint32_t tun_type:4; /**< Tunnel type. */
+ uint32_t inner_l2_type:4; /**< Inner L2 type. */
+ uint32_t inner_l3_type:4; /**< Inner L3 type. */
+ uint32_t inner_l4_type:4; /**< Inner L4 type. */
+ };
+ };
- uint16_t data_len; /**< Amount of data in segment buffer. */
uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
+ uint16_t data_len; /**< Amount of data in segment buffer. */
uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order) */
- uint16_t reserved;
+
union {
uint32_t rss; /**< RSS hash result if RSS enabled */
struct {
@@ -258,10 +802,17 @@ struct rte_mbuf {
/**< First 4 flexible bytes or FD ID, dependent on
PKT_RX_FDIR_* flag in ol_flags. */
} fdir; /**< Filter identifier if FDIR enabled */
- uint32_t sched; /**< Hierarchical scheduler */
- uint32_t usr; /**< User defined tags. See @rte_distributor_process */
+ struct {
+ uint32_t lo;
+ uint32_t hi;
+ } sched; /**< Hierarchical scheduler */
+ uint32_t usr; /**< User defined tags. See rte_distributor_process() */
} hash; /**< hash information */
+ uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
+
+ uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */
+
/* second cache line - fields only used in slow path or on TX */
MARKER cacheline1 __rte_cache_aligned;
@@ -289,29 +840,59 @@ struct rte_mbuf {
/* uint64_t unused:8; */
};
};
+
+ /** Size of the application private data. In case of an indirect
+ * mbuf, it stores the direct mbuf private data size. */
+ uint16_t priv_size;
+
+ /** Timesync flags for use with IEEE1588. */
+ uint16_t timesync;
+
+ /* Chain of off-load operations to perform on mbuf */
+ struct rte_mbuf_offload *offload_ops;
} __rte_cache_aligned;
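A sketch of the two equivalent views the new packet_type union provides, assuming 'm' points to a received mbuf and a little-endian target (the usual DPDK case), where each bit-field holds one nibble of the masked value:

    uint32_t pt = m->packet_type;
    int v4_word  = (pt & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4;
    int v4_field = m->l3_type == (RTE_PTYPE_L3_IPV4 >> 4);
    /* v4_word == v4_field: one 32-bit word, two views */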
+static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
+
/**
- * Given the buf_addr returns the pointer to corresponding mbuf.
+ * Return the mbuf owning the data buffer address of an indirect mbuf.
+ *
+ * @param mi
+ * The pointer to the indirect mbuf.
+ * @return
+ * The address of the direct mbuf corresponding to buffer_addr.
*/
-#define RTE_MBUF_FROM_BADDR(ba) (((struct rte_mbuf *)(ba)) - 1)
+static inline struct rte_mbuf *
+rte_mbuf_from_indirect(struct rte_mbuf *mi)
+{
+ return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
+}
/**
- * Given the pointer to mbuf returns an address where it's buf_addr
- * should point to.
+ * Return the buffer address embedded in the given mbuf.
+ *
+ * @param md
+ * The pointer to the mbuf.
+ * @return
+ * The address of the data buffer owned by the mbuf.
*/
-#define RTE_MBUF_TO_BADDR(mb) (((struct rte_mbuf *)(mb)) + 1)
+static inline char *
+rte_mbuf_to_baddr(struct rte_mbuf *md)
+{
+ char *buffer_addr;
+ buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
+ return buffer_addr;
+}
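The direct-mbuf layout both helpers rely on, sketched (priv_size may be zero):

    +-----------------+-----------+---------------------------+
    | struct rte_mbuf | priv data | data buffer (buf_len)     |
    +-----------------+-----------+---------------------------+
    ^ m                           ^ m->buf_addr

Hence buf_addr = (char *)m + sizeof(*m) + priv_size, and rte_mbuf_from_indirect() applies the inverse subtraction to recover the owning mbuf from an indirect clone's buf_addr.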
/**
* Returns TRUE if given mbuf is indirect, or FALSE otherwise.
*/
-#define RTE_MBUF_INDIRECT(mb) (RTE_MBUF_FROM_BADDR((mb)->buf_addr) != (mb))
+#define RTE_MBUF_INDIRECT(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
/**
* Returns TRUE if given mbuf is direct, or FALSE otherwise.
*/
-#define RTE_MBUF_DIRECT(mb) (RTE_MBUF_FROM_BADDR((mb)->buf_addr) == (mb))
-
+#define RTE_MBUF_DIRECT(mb) (!RTE_MBUF_INDIRECT(mb))
/**
* Private data in case of pktmbuf pool.
@@ -320,7 +901,8 @@ struct rte_mbuf {
* appended after the mempool structure (in private data).
*/
struct rte_pktmbuf_pool_private {
- uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf.*/
+ uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
+ uint16_t mbuf_priv_size; /**< Size of private area in each mbuf. */
};
#ifdef RTE_LIBRTE_MBUF_DEBUG
@@ -353,25 +935,9 @@ if (!(exp)) { \
#endif /* RTE_LIBRTE_MBUF_DEBUG */
-#ifdef RTE_MBUF_REFCNT
#ifdef RTE_MBUF_REFCNT_ATOMIC
/**
- * Adds given value to an mbuf's refcnt and returns its new value.
- * @param m
- * Mbuf to update
- * @param value
- * Value to add/subtract
- * @return
- * Updated value
- */
-static inline uint16_t
-rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
-{
- return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
-}
-
-/**
* Reads the value of an mbuf's refcnt.
* @param m
* Mbuf to read
@@ -397,6 +963,33 @@ rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
rte_atomic16_set(&m->refcnt_atomic, new_value);
}
+/**
+ * Adds given value to an mbuf's refcnt and returns its new value.
+ * @param m
+ * Mbuf to update
+ * @param value
+ * Value to add/subtract
+ * @return
+ * Updated value
+ */
+static inline uint16_t
+rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
+{
+ /*
+ * The atomic_add is an expensive operation, so we don't want to
+ * call it in the case where we know we are the unique holder of
+ * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
+ * operation has to be used because concurrent accesses on the
+ * reference counter can occur.
+ */
+ if (likely(rte_mbuf_refcnt_read(m) == 1)) {
+ rte_mbuf_refcnt_set(m, 1 + value);
+ return 1 + value;
+ }
+
+ return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
+}
+
#else /* ! RTE_MBUF_REFCNT_ATOMIC */
/**
@@ -435,15 +1028,6 @@ rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
rte_prefetch0(m); \
} while (0)
-#else /* ! RTE_MBUF_REFCNT */
-
-/** Mbuf prefetch */
-#define RTE_MBUF_PREFETCH_TO_FREE(m) do { } while(0)
-
-#define rte_mbuf_refcnt_set(m,v) do { } while(0)
-
-#endif /* RTE_MBUF_REFCNT */
-
/**
* Sanity checks on an mbuf.
@@ -478,11 +1062,9 @@ static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
if (rte_mempool_get(mp, &mb) < 0)
return NULL;
m = (struct rte_mbuf *)mb;
-#ifdef RTE_MBUF_REFCNT
RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
rte_mbuf_refcnt_set(m, 1);
-#endif /* RTE_MBUF_REFCNT */
- return (m);
+ return m;
}
/**
@@ -496,9 +1078,7 @@ static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
static inline void __attribute__((always_inline))
__rte_mbuf_raw_free(struct rte_mbuf *m)
{
-#ifdef RTE_MBUF_REFCNT
RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
-#endif /* RTE_MBUF_REFCNT */
rte_mempool_put(m->pool, m);
}
@@ -580,7 +1160,7 @@ void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
static inline int
rte_is_ctrlmbuf(struct rte_mbuf *m)
{
- return (!!(m->ol_flags & CTRL_MBUF_FLAG));
+ return !!(m->ol_flags & CTRL_MBUF_FLAG);
}
/* Operations on pkt mbuf */
@@ -627,6 +1207,87 @@ void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
/**
+ * Create a mbuf pool.
+ *
+ * This function creates and initializes a packet mbuf pool. It is
+ * a wrapper to rte_mempool_create() with the proper packet constructor
+ * and mempool constructor.
+ *
+ * @param name
+ * The name of the mbuf pool.
+ * @param n
+ * The number of elements in the mbuf pool. The optimum size (in terms
+ * of memory usage) for a mempool is when n is a power of two minus one:
+ * n = (2^q - 1).
+ * @param cache_size
+ * Size of the per-core object cache. See rte_mempool_create() for
+ * details.
+ * @param priv_size
+ * Size of the application private area between the rte_mbuf structure
+ * and the data buffer. This value must be aligned to RTE_MBUF_PRIV_ALIGN.
+ * @param data_room_size
+ * Size of data buffer in each mbuf, including RTE_PKTMBUF_HEADROOM.
+ * @param socket_id
+ * The socket identifier where the memory should be allocated. The
+ * value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
+ * reserved zone.
+ * @return
+ * The pointer to the newly allocated mempool, on success. NULL on error
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - EINVAL - cache size provided is too large, or priv_size is not aligned.
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_mempool *
+rte_pktmbuf_pool_create(const char *name, unsigned n,
+ unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
+ int socket_id);
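A typical call, sketched with illustrative values (the pool name and counts are assumptions, not taken from this patch):

    struct rte_mempool *mp;

    mp = rte_pktmbuf_pool_create("pkt_pool",    /* hypothetical name */
            8191,                       /* n = 2^13 - 1, the documented optimum */
            256,                        /* per-lcore cache size */
            0,                          /* priv_size, RTE_MBUF_PRIV_ALIGN aligned */
            RTE_MBUF_DEFAULT_BUF_SIZE,  /* 2KB data room + headroom */
            rte_socket_id());
    if (mp == NULL)
        rte_exit(EXIT_FAILURE, "pool creation failed: %s\n",
                 rte_strerror(rte_errno));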
+
+/**
+ * Get the data room size of mbufs stored in a pktmbuf_pool
+ *
+ * The data room size is the amount of data that can be stored in a
+ * mbuf including the headroom (RTE_PKTMBUF_HEADROOM).
+ *
+ * @param mp
+ * The packet mbuf pool.
+ * @return
+ * The data room size of mbufs stored in this mempool.
+ */
+static inline uint16_t
+rte_pktmbuf_data_room_size(struct rte_mempool *mp)
+{
+ struct rte_pktmbuf_pool_private *mbp_priv;
+
+ mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
+ return mbp_priv->mbuf_data_room_size;
+}
+
+/**
+ * Get the application private size of mbufs stored in a pktmbuf_pool
+ *
+ * The private size of mbuf is a zone located between the rte_mbuf
+ * structure and the data buffer where an application can store data
+ * associated with a packet.
+ *
+ * @param mp
+ * The packet mbuf pool.
+ * @return
+ * The private size of mbufs stored in this mempool.
+ */
+static inline uint16_t
+rte_pktmbuf_priv_size(struct rte_mempool *mp)
+{
+ struct rte_pktmbuf_pool_private *mbp_priv;
+
+ mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
+ return mbp_priv->mbuf_priv_size;
+}
+
+/**
* Reset the fields of a packet mbuf to their default values.
*
* The given mbuf must have only one segment.
@@ -640,6 +1301,7 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
m->pkt_len = 0;
m->tx_offload = 0;
m->vlan_tci = 0;
+ m->vlan_tci_outer = 0;
m->nb_segs = 1;
m->port = 0xff;
@@ -670,58 +1332,65 @@ static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
struct rte_mbuf *m;
if ((m = __rte_mbuf_raw_alloc(mp)) != NULL)
rte_pktmbuf_reset(m);
- return (m);
+ return m;
}
-#ifdef RTE_MBUF_REFCNT
-
/**
* Attach packet mbuf to another packet mbuf.
+ *
* After attachment we refer the mbuf we attached as 'indirect',
* while mbuf we attached to as 'direct'.
* Right now, not supported:
- * - attachment to indirect mbuf (e.g. - md has to be direct).
* - attachment for already indirect mbuf (e.g. - mi has to be direct).
 * - mbuf we are trying to attach (mi) is used by someone else,
 * e.g. its reference counter is greater than 1.
*
* @param mi
* The indirect packet mbuf.
- * @param md
- * The direct packet mbuf.
+ * @param m
+ * The packet mbuf we're attaching to.
*/
-
-static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md)
+static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
- RTE_MBUF_ASSERT(RTE_MBUF_DIRECT(md) &&
- RTE_MBUF_DIRECT(mi) &&
+ struct rte_mbuf *md;
+
+ RTE_MBUF_ASSERT(RTE_MBUF_DIRECT(mi) &&
rte_mbuf_refcnt_read(mi) == 1);
+ /* if m is not direct, get the mbuf that embeds the data */
+ if (RTE_MBUF_DIRECT(m))
+ md = m;
+ else
+ md = rte_mbuf_from_indirect(m);
+
rte_mbuf_refcnt_update(md, 1);
- mi->buf_physaddr = md->buf_physaddr;
- mi->buf_addr = md->buf_addr;
- mi->buf_len = md->buf_len;
-
- mi->next = md->next;
- mi->data_off = md->data_off;
- mi->data_len = md->data_len;
- mi->port = md->port;
- mi->vlan_tci = md->vlan_tci;
- mi->tx_offload = md->tx_offload;
- mi->hash = md->hash;
+ mi->priv_size = m->priv_size;
+ mi->buf_physaddr = m->buf_physaddr;
+ mi->buf_addr = m->buf_addr;
+ mi->buf_len = m->buf_len;
+
+ mi->next = m->next;
+ mi->data_off = m->data_off;
+ mi->data_len = m->data_len;
+ mi->port = m->port;
+ mi->vlan_tci = m->vlan_tci;
+ mi->vlan_tci_outer = m->vlan_tci_outer;
+ mi->tx_offload = m->tx_offload;
+ mi->hash = m->hash;
mi->next = NULL;
mi->pkt_len = mi->data_len;
mi->nb_segs = 1;
- mi->ol_flags = md->ol_flags;
- mi->packet_type = md->packet_type;
+ mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
+ mi->packet_type = m->packet_type;
__rte_mbuf_sanity_check(mi, 1);
- __rte_mbuf_sanity_check(md, 0);
+ __rte_mbuf_sanity_check(m, 0);
}
/**
- * Detach an indirect packet mbuf -
+ * Detach an indirect packet mbuf.
+ *
* - restore original mbuf address and length values.
* - reset pktmbuf data and data_len to their default values.
* All other fields of the given packet mbuf will be left intact.
@@ -729,53 +1398,44 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md)
* @param m
* The indirect attached packet mbuf.
*/
-
static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
- const struct rte_mempool *mp = m->pool;
- void *buf = RTE_MBUF_TO_BADDR(m);
- uint32_t buf_len = mp->elt_size - sizeof(*m);
- m->buf_physaddr = rte_mempool_virt2phy(mp, m) + sizeof (*m);
-
- m->buf_addr = buf;
- m->buf_len = (uint16_t)buf_len;
+ struct rte_mempool *mp = m->pool;
+ uint32_t mbuf_size, buf_len, priv_size;
- m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
- RTE_PKTMBUF_HEADROOM : m->buf_len;
+ priv_size = rte_pktmbuf_priv_size(mp);
+ mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+ buf_len = rte_pktmbuf_data_room_size(mp);
+ m->priv_size = priv_size;
+ m->buf_addr = (char *)m + mbuf_size;
+ m->buf_physaddr = rte_mempool_virt2phy(mp, m) + mbuf_size;
+ m->buf_len = (uint16_t)buf_len;
+ m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
m->data_len = 0;
+ m->ol_flags = 0;
}
-#endif /* RTE_MBUF_REFCNT */
-
-
static inline struct rte_mbuf* __attribute__((always_inline))
__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
__rte_mbuf_sanity_check(m, 0);
-#ifdef RTE_MBUF_REFCNT
- if (likely (rte_mbuf_refcnt_read(m) == 1) ||
- likely (rte_mbuf_refcnt_update(m, -1) == 0)) {
- struct rte_mbuf *md = RTE_MBUF_FROM_BADDR(m->buf_addr);
-
- rte_mbuf_refcnt_set(m, 0);
+ if (likely(rte_mbuf_refcnt_update(m, -1) == 0)) {
/* if this is an indirect mbuf, then
* - detach mbuf
* - free attached mbuf segment
*/
- if (unlikely (md != m)) {
+ if (RTE_MBUF_INDIRECT(m)) {
+ struct rte_mbuf *md = rte_mbuf_from_indirect(m);
rte_pktmbuf_detach(m);
if (rte_mbuf_refcnt_update(md, -1) == 0)
__rte_mbuf_raw_free(md);
}
-#endif
- return(m);
-#ifdef RTE_MBUF_REFCNT
+ return m;
}
- return (NULL);
-#endif
+ return NULL;
}
/**
@@ -818,8 +1478,6 @@ static inline void rte_pktmbuf_free(struct rte_mbuf *m)
}
}
-#ifdef RTE_MBUF_REFCNT
-
/**
* Creates a "clone" of the given packet mbuf.
*
@@ -845,7 +1503,7 @@ static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
uint8_t nseg;
if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
- return (NULL);
+ return NULL;
mi = mc;
prev = &mi->next;
@@ -867,11 +1525,11 @@ static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
/* Allocation of new indirect segment failed */
if (unlikely (mi == NULL)) {
rte_pktmbuf_free(mc);
- return (NULL);
+ return NULL;
}
__rte_mbuf_sanity_check(mc, 1);
- return (mc);
+ return mc;
}
/**
@@ -894,8 +1552,6 @@ static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
} while ((m = m->next) != NULL);
}
-#endif /* RTE_MBUF_REFCNT */
-
/**
* Get the headroom in a packet mbuf.
*
@@ -944,18 +1600,56 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
}
/**
+ * A macro that points to an offset into the data in the mbuf.
+ *
+ * The returned pointer is cast to type t. Before using this
+ * function, the user must ensure that the first segment is large
+ * enough to accommodate its data.
+ *
+ * @param m
+ * The packet mbuf.
+ * @param o
+ * The offset into the mbuf data.
+ * @param t
+ * The type to cast the result into.
+ */
+#define rte_pktmbuf_mtod_offset(m, t, o) \
+ ((t)((char *)(m)->buf_addr + (m)->data_off + (o)))
+
+/**
* A macro that points to the start of the data in the mbuf.
*
* The returned pointer is cast to type t. Before using this
- * function, the user must ensure that m_headlen(m) is large enough to
- * read its data.
+ * function, the user must ensure that the first segment is large
+ * enough to accommodate its data.
*
* @param m
* The packet mbuf.
* @param t
* The type to cast the result into.
*/
-#define rte_pktmbuf_mtod(m, t) ((t)((char *)(m)->buf_addr + (m)->data_off))
+#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
+
+/**
+ * A macro that returns the physical address that points to an offset of the
+ * start of the data in the mbuf
+ *
+ * @param m
+ * The packet mbuf.
+ * @param o
+ * The offset into the data to calculate address from.
+ */
+#define rte_pktmbuf_mtophys_offset(m, o) \
+ (phys_addr_t)((m)->buf_physaddr + (m)->data_off + (o))
+
+/**
+ * A macro that returns the physical address that points to the start of the
+ * data in the mbuf
+ *
+ * @param m
+ * The packet mbuf.
+ */
+#define rte_pktmbuf_mtophys(m) rte_pktmbuf_mtophys_offset(m, 0)
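A short sketch of the new offset variants, assuming an untagged Ethernet frame and the header structs from rte_ether.h / rte_ip.h:

    struct ipv4_hdr *ip;
    phys_addr_t ip_pa;

    ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
                                 sizeof(struct ether_hdr));
    ip_pa = rte_pktmbuf_mtophys_offset(m, sizeof(struct ether_hdr));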
/**
* A macro that returns the length of the packet.
@@ -1111,6 +1805,44 @@ static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
}
/**
+ * Chain an mbuf to another, thereby creating a segmented packet.
+ *
+ * Note: The implementation will do a linear walk over the segments to find
+ * the tail entry. For cases when there are many segments, it's better to
+ * chain the entries manually.
+ *
+ * @param head
+ * The head of the mbuf chain (the first packet)
+ * @param tail
+ * The mbuf to put last in the chain
+ *
+ * @return
+ * - 0, on success.
+ * - -EOVERFLOW, if the chain is full (256 entries)
+ */
+static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
+{
+ struct rte_mbuf *cur_tail;
+
+ /* Check for number-of-segments-overflow */
+ if (head->nb_segs + tail->nb_segs >= 1 << (sizeof(head->nb_segs) * 8))
+ return -EOVERFLOW;
+
+ /* Chain 'tail' onto the old tail */
+ cur_tail = rte_pktmbuf_lastseg(head);
+ cur_tail->next = tail;
+
+ /* accumulate number of segments and total length. */
+ head->nb_segs = (uint8_t)(head->nb_segs + tail->nb_segs);
+ head->pkt_len += tail->pkt_len;
+
+ /* pkt_len is only set in the head */
+ tail->pkt_len = tail->data_len;
+
+ return 0;
+}
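Usage sketch; on failure the caller still owns 'tail' and is responsible for it:

    if (rte_pktmbuf_chain(head, tail) != 0) {
        /* -EOVERFLOW: nb_segs is a uint8_t, so at most 255 segments fit */
        rte_pktmbuf_free(tail);
    }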
+
+/**
* Dump an mbuf structure to the console.
*
* Dump all fields for the given packet mbuf and all its associated
diff --git a/src/dpdk_lib18/librte_mempool/rte_mempool.c b/src/dpdk22/lib/librte_mempool/rte_mempool.c
index 4cf6c25b..aff5f6da 100755..100644
--- a/src/dpdk_lib18/librte_mempool/rte_mempool.c
+++ b/src/dpdk22/lib/librte_mempool/rte_mempool.c
@@ -48,7 +48,6 @@
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
@@ -63,7 +62,14 @@
TAILQ_HEAD(rte_mempool_list, rte_tailq_entry);
+static struct rte_tailq_elem rte_mempool_tailq = {
+ .name = "RTE_MEMPOOL",
+};
+EAL_REGISTER_TAILQ(rte_mempool_tailq)
+
#define CACHE_FLUSHTHRESH_MULTIPLIER 1.5
+#define CALC_CACHE_FLUSHTHRESH(c) \
+ ((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))
/*
* return the greatest common divisor between a and b (fast algorithm)
@@ -107,34 +113,36 @@ static unsigned optimize_object_size(unsigned obj_size)
/* get number of channels */
nchan = rte_memory_get_nchannel();
if (nchan == 0)
- nchan = 1;
+ nchan = 4;
nrank = rte_memory_get_nrank();
if (nrank == 0)
nrank = 1;
/* process new object size */
- new_obj_size = (obj_size + RTE_CACHE_LINE_MASK) / RTE_CACHE_LINE_SIZE;
+ new_obj_size = (obj_size + RTE_MEMPOOL_ALIGN_MASK) / RTE_MEMPOOL_ALIGN;
while (get_gcd(new_obj_size, nrank * nchan) != 1)
new_obj_size++;
- return new_obj_size * RTE_CACHE_LINE_SIZE;
+ return new_obj_size * RTE_MEMPOOL_ALIGN;
}
static void
mempool_add_elem(struct rte_mempool *mp, void *obj, uint32_t obj_idx,
rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg)
{
- struct rte_mempool **mpp;
+ struct rte_mempool_objhdr *hdr;
+ struct rte_mempool_objtlr *tlr __rte_unused;
obj = (char *)obj + mp->header_size;
/* set mempool ptr in header */
- mpp = __mempool_from_obj(obj);
- *mpp = mp;
+ hdr = RTE_PTR_SUB(obj, sizeof(*hdr));
+ hdr->mp = mp;
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
- __mempool_write_header_cookie(obj, 1);
- __mempool_write_trailer_cookie(obj);
+ hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
+ tlr = __mempool_get_trailer(obj);
+ tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE;
#endif
/* call the initializer */
if (obj_init)
@@ -150,7 +158,7 @@ rte_mempool_obj_iter(void *vaddr, uint32_t elt_num, size_t elt_sz, size_t align,
rte_mempool_obj_iter_t obj_iter, void *obj_iter_arg)
{
uint32_t i, j, k;
- uint32_t pgn;
+ uint32_t pgn, pgf;
uintptr_t end, start, va;
uintptr_t pg_sz;
@@ -165,10 +173,14 @@ rte_mempool_obj_iter(void *vaddr, uint32_t elt_num, size_t elt_sz, size_t align,
start = RTE_ALIGN_CEIL(va, align);
end = start + elt_sz;
- pgn = (end >> pg_shift) - (start >> pg_shift);
+ /* index of the first page for the next element. */
+ pgf = (end >> pg_shift) - (start >> pg_shift);
+
+ /* index of the last page for the current element. */
+ pgn = ((end - 1) >> pg_shift) - (start >> pg_shift);
pgn += j;
- /* do we have enough space left for the next element. */
+ /* do we have enough space left for the element. */
if (pgn >= pg_num)
break;
@@ -188,7 +200,7 @@ rte_mempool_obj_iter(void *vaddr, uint32_t elt_num, size_t elt_sz, size_t align,
obj_iter(obj_iter_arg, (void *)start,
(void *)end, i);
va = end;
- j = pgn;
+ j += pgf;
i++;
} else {
va = RTE_ALIGN_CEIL((va + 1), pg_sz);
@@ -196,7 +208,7 @@ rte_mempool_obj_iter(void *vaddr, uint32_t elt_num, size_t elt_sz, size_t align,
}
}
- return (i);
+ return i;
}
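A worked instance of the boundary case the pgn/pgf change above fixes, with assumed numbers: pg_shift = 12 (4KB pages), start = 4000, end = 4096. The old pgn = (4096 >> 12) - (4000 >> 12) = 1 treated an element ending exactly on the page boundary as spilling onto a second page; the new pgn = ((4096 - 1) >> 12) - (4000 >> 12) = 0 keeps it on its own page, while pgf = 1 still advances the page index for the element that follows.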
/*
@@ -255,7 +267,7 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
#endif
if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
- RTE_CACHE_LINE_SIZE);
+ RTE_MEMPOOL_ALIGN);
/* trailer contains the cookie in debug mode */
sz->trailer_size = 0;
@@ -269,9 +281,9 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
sz->total_size = sz->header_size + sz->elt_size +
sz->trailer_size;
- sz->trailer_size += ((RTE_CACHE_LINE_SIZE -
- (sz->total_size & RTE_CACHE_LINE_MASK)) &
- RTE_CACHE_LINE_MASK);
+ sz->trailer_size += ((RTE_MEMPOOL_ALIGN -
+ (sz->total_size & RTE_MEMPOOL_ALIGN_MASK)) &
+ RTE_MEMPOOL_ALIGN_MASK);
}
/*
@@ -305,7 +317,7 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
/* this is the size of an object, including header and trailer */
sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
- return (sz->total_size);
+ return sz->total_size;
}
@@ -326,7 +338,7 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz, uint32_t pg_shift)
sz = RTE_ALIGN_CEIL(elt_sz, pg_sz) * elt_num;
}
- return (sz);
+ return sz;
}
/*
@@ -335,9 +347,9 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz, uint32_t pg_shift)
*/
static void
mempool_lelem_iter(void *arg, __rte_unused void *start, void *end,
- __rte_unused uint32_t idx)
+ __rte_unused uint32_t idx)
{
- *(uintptr_t *)arg = (uintptr_t)end;
+ *(uintptr_t *)arg = (uintptr_t)end;
}
ssize_t
@@ -355,14 +367,34 @@ rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
if ((n = rte_mempool_obj_iter(vaddr, elt_num, elt_sz, 1,
paddr, pg_num, pg_shift, mempool_lelem_iter,
&uv)) != elt_num) {
- return (-n);
+ return -(ssize_t)n;
}
uv = RTE_ALIGN_CEIL(uv, pg_sz);
usz = uv - va;
- return (usz);
+ return usz;
}
+#ifndef RTE_LIBRTE_XEN_DOM0
+/* stub if DOM0 support not configured */
+struct rte_mempool *
+rte_dom0_mempool_create(const char *name __rte_unused,
+ unsigned n __rte_unused,
+ unsigned elt_size __rte_unused,
+ unsigned cache_size __rte_unused,
+ unsigned private_data_size __rte_unused,
+ rte_mempool_ctor_t *mp_init __rte_unused,
+ void *mp_init_arg __rte_unused,
+ rte_mempool_obj_ctor_t *obj_init __rte_unused,
+ void *obj_init_arg __rte_unused,
+ int socket_id __rte_unused,
+ unsigned flags __rte_unused)
+{
+ rte_errno = EINVAL;
+ return NULL;
+}
+#endif
+
/* create the mempool */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
@@ -371,20 +403,20 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags)
{
-#ifdef RTE_LIBRTE_XEN_DOM0
- return (rte_dom0_mempool_create(name, n, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags));
-#else
- return (rte_mempool_xmem_create(name, n, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags,
- NULL, NULL, MEMPOOL_PG_NUM_DEFAULT, MEMPOOL_PG_SHIFT_MAX));
-#endif
+ if (rte_xen_dom0_supported())
+ return rte_dom0_mempool_create(name, n, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags);
+ else
+ return rte_mempool_xmem_create(name, n, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags,
+ NULL, NULL, MEMPOOL_PG_NUM_DEFAULT,
+ MEMPOOL_PG_SHIFT_MAX);
}
/*
@@ -404,6 +436,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
{
char mz_name[RTE_MEMZONE_NAMESIZE];
char rg_name[RTE_RING_NAMESIZE];
+ struct rte_mempool_list *mempool_list;
struct rte_mempool *mp = NULL;
struct rte_tailq_entry *te;
struct rte_ring *r;
@@ -432,15 +465,11 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
RTE_CACHE_LINE_MASK) != 0);
#endif
- /* check that we have an initialised tail queue */
- if (RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL,
- rte_mempool_list) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
+ mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
/* asked cache too big */
- if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+ if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
+ CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
rte_errno = EINVAL;
return NULL;
}
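A numeric sketch of the added constraint: for a pool of n = 64 objects, cache_size = 48 is now rejected because CALC_CACHE_FLUSHTHRESH(48) = 72 > 64 (rte_errno = EINVAL), while cache_size = 32 still passes (48 <= 64).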
@@ -489,7 +518,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
* cache-aligned
*/
private_data_size = (private_data_size +
- RTE_CACHE_LINE_MASK) & (~RTE_CACHE_LINE_MASK);
+ RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);
if (! rte_eal_has_hugepages()) {
/*
@@ -512,10 +541,11 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
/*
* If user provided an external memory buffer, then use it to
- * store mempool objects. Otherwise reserve memzone big enough to
- * hold mempool header and metadata plus mempool objects.
+ * store mempool objects. Otherwise reserve a memzone that is large
+ * enough to hold mempool header and metadata plus mempool objects.
*/
mempool_size = MEMPOOL_HEADER_SIZE(mp, pg_num) + private_data_size;
+ mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
if (vaddr == NULL)
mempool_size += (size_t)objsz.total_size * n;
@@ -534,7 +564,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
/*
* no more memory: in this case we loose previously reserved
- * space for the as we cannot free it
+ * space for the ring as we cannot free it
*/
if (mz == NULL) {
rte_free(te);
@@ -565,13 +595,13 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
mp->header_size = objsz.header_size;
mp->trailer_size = objsz.trailer_size;
mp->cache_size = cache_size;
- mp->cache_flushthresh = (uint32_t)
- (cache_size * CACHE_FLUSHTHRESH_MULTIPLIER);
+ mp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size);
mp->private_data_size = private_data_size;
/* calculate address of the first element for continuous mempool. */
obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num) +
private_data_size;
+ obj = RTE_PTR_ALIGN_CEIL(obj, RTE_MEMPOOL_ALIGN);
/* populate address translation fields. */
mp->pg_num = pg_num;
@@ -600,7 +630,9 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
te->data = (void *) mp;
- RTE_EAL_TAILQ_INSERT_TAIL(RTE_TAILQ_MEMPOOL, rte_mempool_list, te);
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ TAILQ_INSERT_TAIL(mempool_list, te, next);
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
exit:
rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);
@@ -831,11 +863,7 @@ rte_mempool_list_dump(FILE *f)
struct rte_tailq_entry *te;
struct rte_mempool_list *mempool_list;
- if ((mempool_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return;
- }
+ mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK);
@@ -855,11 +883,7 @@ rte_mempool_lookup(const char *name)
struct rte_tailq_entry *te;
struct rte_mempool_list *mempool_list;
- if ((mempool_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
+ mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK);
@@ -885,11 +909,7 @@ void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *),
struct rte_tailq_entry *te = NULL;
struct rte_mempool_list *mempool_list;
- if ((mempool_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return;
- }
+ mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK);
diff --git a/src/dpdk_lib18/librte_mempool/rte_mempool.h b/src/dpdk22/lib/librte_mempool/rte_mempool.h
index 33146518..6e2390a1 100755..100644
--- a/src/dpdk_lib18/librte_mempool/rte_mempool.h
+++ b/src/dpdk22/lib/librte_mempool/rte_mempool.h
@@ -109,6 +109,9 @@ struct rte_mempool_cache {
} __rte_cache_aligned;
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+/**
+ * A structure that stores the size of mempool elements.
+ */
struct rte_mempool_objsz {
uint32_t elt_size; /**< Size of an element. */
uint32_t header_size; /**< Size of header (before elt). */
@@ -139,6 +142,39 @@ struct rte_mempool_objsz {
/** Mempool over one chunk of physically continuous memory */
#define MEMPOOL_PG_NUM_DEFAULT 1
+#ifndef RTE_MEMPOOL_ALIGN
+#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
+#endif
+
+#define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)
+
+/**
+ * Mempool object header structure
+ *
+ * Each object stored in a mempool is prefixed by this header structure,
+ * which allows the mempool pointer to be retrieved from the object. When
+ * debug is enabled, a cookie is also added in this structure to help
+ * detect corruptions and double-frees.
+ */
+struct rte_mempool_objhdr {
+ struct rte_mempool *mp; /**< The mempool owning the object. */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ uint64_t cookie; /**< Debug cookie. */
+#endif
+};
+
+/**
+ * Mempool object trailer structure
+ *
+ * In debug mode, each object stored in a mempool is suffixed by this
+ * trailer structure containing a cookie to help detect memory corruptions.
+ */
+struct rte_mempool_objtlr {
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ uint64_t cookie; /**< Debug cookie. */
+#endif
+};
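The per-object layout that results, sketched (the cookies exist only in debug builds, so the trailer is empty otherwise):

    +--------------------+--------------------+--------------------+
    | rte_mempool_objhdr | element (elt_size) | rte_mempool_objtlr |
    +--------------------+--------------------+--------------------+
                         ^ pointer handed out by rte_mempool_get()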
+
/**
* The RTE mempool structure.
*/
@@ -179,7 +215,7 @@ struct rte_mempool {
uintptr_t elt_va_end;
/**< Virtual address of the <size + 1> mempool object. */
phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT];
- /**< Array of physical pages addresses for the mempool objects buffer. */
+ /**< Array of physical page addresses for the mempool objects buffer. */
} __rte_cache_aligned;
@@ -190,6 +226,7 @@ struct rte_mempool {
/**
* @internal When debug is enabled, store some statistics.
+ *
* @param mp
* Pointer to the memory pool.
* @param name
@@ -198,51 +235,40 @@ struct rte_mempool {
* Number to add to the object-oriented statistics.
*/
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-#define __MEMPOOL_STAT_ADD(mp, name, n) do { \
- unsigned __lcore_id = rte_lcore_id(); \
- mp->stats[__lcore_id].name##_objs += n; \
- mp->stats[__lcore_id].name##_bulk += 1; \
+#define __MEMPOOL_STAT_ADD(mp, name, n) do { \
+ unsigned __lcore_id = rte_lcore_id(); \
+ if (__lcore_id < RTE_MAX_LCORE) { \
+ mp->stats[__lcore_id].name##_objs += n; \
+ mp->stats[__lcore_id].name##_bulk += 1; \
+ } \
} while(0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
#endif
/**
- * Calculates size of the mempool header.
+ * Calculate the size of the mempool header.
+ *
* @param mp
* Pointer to the memory pool.
* @param pgn
- * Number of page used to store mempool objects.
+ * Number of pages used to store mempool objects.
*/
#define MEMPOOL_HEADER_SIZE(mp, pgn) (sizeof(*(mp)) + \
RTE_ALIGN_CEIL(((pgn) - RTE_DIM((mp)->elt_pa)) * \
sizeof ((mp)->elt_pa[0]), RTE_CACHE_LINE_SIZE))
/**
- * Returns TRUE if whole mempool is allocated in one contiguous block of memory.
+ * Return true if the whole mempool is in contiguous memory.
*/
#define MEMPOOL_IS_CONTIG(mp) \
((mp)->pg_num == MEMPOOL_PG_NUM_DEFAULT && \
(mp)->phys_addr == (mp)->elt_pa[0])
-/**
- * @internal Get a pointer to a mempool pointer in the object header.
- * @param obj
- * Pointer to object.
- * @return
- * The pointer to the mempool from which the object was allocated.
- */
-static inline struct rte_mempool **__mempool_from_obj(void *obj)
+/* return the header of a mempool object (internal) */
+static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
{
- struct rte_mempool **mpp;
- unsigned off;
-
- off = sizeof(struct rte_mempool *);
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
- off += sizeof(uint64_t);
-#endif
- mpp = (struct rte_mempool **)((char *)obj - off);
- return mpp;
+ return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj, sizeof(struct rte_mempool_objhdr));
}
/**
@@ -254,49 +280,19 @@ static inline struct rte_mempool **__mempool_from_obj(void *obj)
* @return
* A pointer to the mempool structure.
*/
-static inline const struct rte_mempool *rte_mempool_from_obj(void *obj)
+static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
- struct rte_mempool * const *mpp;
- mpp = __mempool_from_obj(obj);
- return *mpp;
+ struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);
+ return hdr->mp;
}
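Usage sketch: any object handed out by a pool can be traced back to its owner:

    void *obj;

    if (rte_mempool_get(mp, &obj) == 0)
        rte_mempool_put(rte_mempool_from_obj(obj), obj); /* same pool as mp */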
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-/* get header cookie value */
-static inline uint64_t __mempool_read_header_cookie(const void *obj)
+/* return the trailer of a mempool object (internal) */
+static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
{
- return *(const uint64_t *)((const char *)obj - sizeof(uint64_t));
+ struct rte_mempool *mp = rte_mempool_from_obj(obj);
+ return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}
-/* get trailer cookie value */
-static inline uint64_t __mempool_read_trailer_cookie(void *obj)
-{
- struct rte_mempool **mpp = __mempool_from_obj(obj);
- return *(uint64_t *)((char *)obj + (*mpp)->elt_size);
-}
-
-/* write header cookie value */
-static inline void __mempool_write_header_cookie(void *obj, int free)
-{
- uint64_t *cookie_p;
- cookie_p = (uint64_t *)((char *)obj - sizeof(uint64_t));
- if (free == 0)
- *cookie_p = RTE_MEMPOOL_HEADER_COOKIE1;
- else
- *cookie_p = RTE_MEMPOOL_HEADER_COOKIE2;
-
-}
-
-/* write trailer cookie value */
-static inline void __mempool_write_trailer_cookie(void *obj)
-{
- uint64_t *cookie_p;
- struct rte_mempool **mpp = __mempool_from_obj(obj);
- cookie_p = (uint64_t *)((char *)obj + (*mpp)->elt_size);
- *cookie_p = RTE_MEMPOOL_TRAILER_COOKIE;
-}
-#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
-
/**
* @internal Check and update cookies or panic.
*
@@ -319,6 +315,8 @@ static inline void __mempool_check_cookies(const struct rte_mempool *mp,
void * const *obj_table_const,
unsigned n, int free)
{
+ struct rte_mempool_objhdr *hdr;
+ struct rte_mempool_objtlr *tlr;
uint64_t cookie;
void *tmp;
void *obj;
@@ -336,44 +334,46 @@ static inline void __mempool_check_cookies(const struct rte_mempool *mp,
rte_panic("MEMPOOL: object is owned by another "
"mempool\n");
- cookie = __mempool_read_header_cookie(obj);
+ hdr = __mempool_get_header(obj);
+ cookie = hdr->cookie;
if (free == 0) {
if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
rte_log_set_history(0);
RTE_LOG(CRIT, MEMPOOL,
- "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
- obj, mp, cookie);
+ "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+ obj, (const void *) mp, cookie);
rte_panic("MEMPOOL: bad header cookie (put)\n");
}
- __mempool_write_header_cookie(obj, 1);
+ hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
}
else if (free == 1) {
if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
rte_log_set_history(0);
RTE_LOG(CRIT, MEMPOOL,
- "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
- obj, mp, cookie);
+ "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+ obj, (const void *) mp, cookie);
rte_panic("MEMPOOL: bad header cookie (get)\n");
}
- __mempool_write_header_cookie(obj, 0);
+ hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE1;
}
else if (free == 2) {
if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 &&
cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
rte_log_set_history(0);
RTE_LOG(CRIT, MEMPOOL,
- "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
- obj, mp, cookie);
+ "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+ obj, (const void *) mp, cookie);
rte_panic("MEMPOOL: bad header cookie (audit)\n");
}
}
- cookie = __mempool_read_trailer_cookie(obj);
+ tlr = __mempool_get_trailer(obj);
+ cookie = tlr->cookie;
if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
rte_log_set_history(0);
RTE_LOG(CRIT, MEMPOOL,
- "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
- obj, mp, cookie);
+ "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+ obj, (const void *) mp, cookie);
rte_panic("MEMPOOL: bad trailer cookie\n");
}
}
@@ -386,29 +386,35 @@ static inline void __mempool_check_cookies(const struct rte_mempool *mp,
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
/**
- * An mempool's object iterator callback function.
+ * A mempool object iterator callback function.
*/
typedef void (*rte_mempool_obj_iter_t)(void * /*obj_iter_arg*/,
void * /*obj_start*/,
void * /*obj_end*/,
uint32_t /*obj_index */);
-/*
- * Iterates across objects of the given size and alignment in the
+/**
+ * Call a function for each mempool object in a memory chunk
+ *
+ * Iterate across objects of the given size and alignment in the
* provided chunk of memory. The given memory buffer can consist of
- * disjoint physical pages.
- * For each object calls the provided callback (if any).
- * Used to populate mempool, walk through all elements of the mempool,
- * estimate how many elements of the given size could be created in the given
- * memory buffer.
+ * disjoint physical pages.
+ *
+ * For each object, call the provided callback (if any). This function
+ * is used to populate a mempool, or walk through all the elements of a
+ * mempool, or estimate how many elements of the given size could be
+ * created in the given memory buffer.
+ *
* @param vaddr
* Virtual address of the memory buffer.
* @param elt_num
* Maximum number of objects to iterate through.
* @param elt_sz
* Size of each object.
+ * @param align
+ * Alignment of each object.
* @param paddr
- * Array of phyiscall addresses of the pages that comprises given memory
+ * Array of physical addresses of the pages that comprise the given memory
* buffer.
* @param pg_num
* Number of elements in the paddr array.
@@ -417,12 +423,11 @@ typedef void (*rte_mempool_obj_iter_t)(void * /*obj_iter_arg*/,
* @param obj_iter
* Object iterator callback function (could be NULL).
* @param obj_iter_arg
- * User defined Prameter for the object iterator callback function.
+ * User defined parameter for the object iterator callback function.
*
* @return
* Number of objects iterated through.
*/
-
uint32_t rte_mempool_obj_iter(void *vaddr,
uint32_t elt_num, size_t elt_sz, size_t align,
const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
@@ -447,7 +452,7 @@ typedef void (rte_mempool_obj_ctor_t)(struct rte_mempool *, void *,
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
/**
- * Creates a new mempool named *name* in memory.
+ * Create a new mempool named *name* in memory.
*
* This function uses ``memzone_reserve()`` to allocate memory. The
* pool contains n elements of elt_size. Its size is set to n.
@@ -466,7 +471,7 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
* If cache_size is non-zero, the rte_mempool library will try to
* limit the accesses to the common lockless pool, by maintaining a
* per-lcore object cache. This argument must be lower or equal to
- * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
+ * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
* cache_size to have "n modulo cache_size == 0": if this is
* not the case, some elements will always stay in the pool and will
* never be used. The access to the per-lcore table is of course
@@ -522,7 +527,6 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
* with rte_errno set appropriately. Possible rte_errno values include:
* - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
* - E_RTE_SECONDARY - function was called from a secondary process instance
- * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
* - EINVAL - cache size provided is too large
* - ENOSPC - the maximum number of memzones has already been allocated
* - EEXIST - a memzone with the same name already exists
@@ -536,14 +540,14 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
int socket_id, unsigned flags);
/**
- * Creates a new mempool named *name* in memory.
+ * Create a new mempool named *name* in memory.
*
* This function uses ``memzone_reserve()`` to allocate memory. The
* pool contains n elements of elt_size. Its size is set to n.
* Depending on the input parameters, mempool elements can be either allocated
* together with the mempool header, or an externally provided memory buffer
 * could be used to store mempool objects. In the latter case, that external
- * memory buffer can consist of set of disjoint phyiscal pages.
+ * memory buffer can consist of a set of disjoint physical pages.
*
* @param name
* The name of the mempool.
@@ -612,7 +616,7 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
* Virtual address of the externally allocated memory buffer.
* Will be used to store mempool objects.
* @param paddr
- * Array of phyiscall addresses of the pages that comprises given memory
+ * Array of physical addresses of the pages that comprise the given memory
* buffer.
* @param pg_num
* Number of elements in the paddr array.
@@ -623,7 +627,6 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
* with rte_errno set appropriately. Possible rte_errno values include:
* - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
* - E_RTE_SECONDARY - function was called from a secondary process instance
- * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
* - EINVAL - cache size provided is too large
* - ENOSPC - the maximum number of memzones has already been allocated
* - EEXIST - a memzone with the same name already exists
@@ -637,14 +640,13 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
int socket_id, unsigned flags, void *vaddr,
const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
-#ifdef RTE_LIBRTE_XEN_DOM0
/**
- * Creates a new mempool named *name* in memory on Xen Dom0.
+ * Create a new mempool named *name* in memory on Xen Dom0.
*
* This function uses ``rte_mempool_xmem_create()`` to allocate memory. The
* pool contains n elements of elt_size. Its size is set to n.
* All elements of the mempool are allocated together with the mempool header,
- * and memory buffer can consist of set of disjoint phyiscal pages.
+ * and the memory buffer can consist of a set of disjoint physical pages.
*
* @param name
* The name of the mempool.
@@ -714,7 +716,6 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
* with rte_errno set appropriately. Possible rte_errno values include:
* - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
* - E_RTE_SECONDARY - function was called from a secondary process instance
- * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
* - EINVAL - cache size provided is too large
* - ENOSPC - the maximum number of memzones has already been allocated
* - EEXIST - a memzone with the same name already exists
@@ -726,7 +727,7 @@ rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags);
-#endif
+
/**
* Dump the status of the mempool to the console.
@@ -767,8 +768,9 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
__MEMPOOL_STAT_ADD(mp, put, n);
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
- /* cache is not enabled or single producer */
- if (unlikely(cache_size == 0 || is_mp == 0))
+ /* cache is not enabled or single producer or non-EAL thread */
+ if (unlikely(cache_size == 0 || is_mp == 0 ||
+ lcore_id >= RTE_MAX_LCORE))
goto ring_enqueue;
/* Go straight to ring if put would overflow mem allocated for cache */
@@ -952,7 +954,8 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
uint32_t cache_size = mp->cache_size;
/* cache is not enabled or single consumer */
- if (unlikely(cache_size == 0 || is_mc == 0 || n >= cache_size))
+ if (unlikely(cache_size == 0 || is_mc == 0 ||
+ n >= cache_size || lcore_id >= RTE_MAX_LCORE))
goto ring_dequeue;
cache = &mp->local_cache[lcore_id];
@@ -1316,14 +1319,19 @@ void rte_mempool_list_dump(FILE *f);
struct rte_mempool *rte_mempool_lookup(const char *name);
/**
+ * Get the header, trailer and total size of a mempool element.
+ *
* Given a desired size of the mempool element and mempool flags,
- * caluclates header, trailer, body and total sizes of the mempool object.
+ * calculates header, trailer, body and total sizes of the mempool object.
+ *
* @param elt_size
* The size of each element.
* @param flags
* The flags used for the mempool creation.
* Consult rte_mempool_create() for more information about possible values.
+ * @param sz
+ * The calculated detailed size of the mempool object. May be NULL.
* @return
* Total size of the mempool object.
*/
@@ -1331,11 +1339,16 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
struct rte_mempool_objsz *sz);
/**
- * Calculate maximum amount of memory required to store given number of objects.
- * Assumes that the memory buffer will be aligned at page boundary.
- * Note, that if object size is bigger then page size, then it assumes that
- * we have a subsets of physically continuous pages big enough to store
- * at least one object.
+ * Get the size of memory required to store mempool elements.
+ *
+ * Calculate the maximum amount of memory required to store given number
+ * of objects. Assume that the memory buffer will be aligned at page
+ * boundary.
+ *
+ * Note that if the object size is bigger than the page size, it is
+ * assumed that pages are grouped in subsets of physically contiguous
+ * pages big enough to store at least one object.
+ *
* @param elt_num
* Number of elements.
* @param elt_sz
@@ -1349,8 +1362,11 @@ size_t rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz,
uint32_t pg_shift);
/**
+ * Get the size of memory required to store mempool elements.
+ *
* Calculate how much memory would be actually required with the given
* memory footprint to store required number of objects.
+ *
* @param vaddr
* Virtual address of the externally allocated memory buffer.
* Will be used to store mempool objects.
@@ -1359,17 +1375,17 @@ size_t rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz,
* @param elt_sz
* The size of each element.
* @param paddr
- * Array of phyiscall addresses of the pages that comprises given memory
+ * Array of physical addresses of the pages that comprise the given memory
* buffer.
* @param pg_num
* Number of elements in the paddr array.
* @param pg_shift
* LOG2 of the physical pages size.
* @return
- * Number of bytes needed to store given number of objects,
- * aligned to the given page size.
- * If provided memory buffer is not big enough:
- * (-1) * actual number of elemnts that can be stored in that buffer.
+ * On success, the number of bytes needed to store given number of
+ * objects, aligned to the given page size. If the provided memory
+ * buffer is too small, return a negative value whose absolute value
+ * is the actual number of elements that can be stored in that buffer.
*/
ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
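A hypothetical caller showing how the corrected return convention is consumed; `buf`, `paddr`, `pg_num` and `pg_shift` stand in for an externally allocated buffer and its page map:

```c
#include <sys/types.h>
#include <rte_mempool.h>

/* Returns 0 if the buffer can hold all elt_num objects, otherwise the
 * number of elements that do NOT fit (sketch under the assumptions in
 * the lead-in). */
static uint32_t elements_short(void *buf, uint32_t elt_num, size_t elt_sz,
		const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
{
	ssize_t usage = rte_mempool_xmem_usage(buf, elt_num, elt_sz,
					       paddr, pg_num, pg_shift);
	if (usage >= 0)
		return 0;                    /* usage bytes will be consumed */
	return elt_num - (uint32_t)(-usage); /* -usage elements actually fit */
}
```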
diff --git a/src/dpdk_lib18/librte_net/rte_arp.h b/src/dpdk22/lib/librte_net/rte_arp.h
index c7b0e514..18364187 100755..100644
--- a/src/dpdk_lib18/librte_net/rte_arp.h
+++ b/src/dpdk22/lib/librte_net/rte_arp.h
@@ -39,6 +39,7 @@
*/
#include <stdint.h>
+#include <rte_ether.h>
#ifdef __cplusplus
extern "C" {
@@ -48,10 +49,10 @@ extern "C" {
* ARP header IPv4 payload.
*/
struct arp_ipv4 {
- uint8_t arp_sha[6]; /* sender hardware address */
- uint8_t arp_sip[4]; /* sender IP address */
- uint8_t arp_tha[6]; /* target hardware address */
- uint8_t arp_tip[4]; /* target IP address */
+ struct ether_addr arp_sha; /**< sender hardware address */
+ uint32_t arp_sip; /**< sender IP address */
+ struct ether_addr arp_tha; /**< target hardware address */
+ uint32_t arp_tip; /**< target IP address */
} __attribute__((__packed__));
/**
@@ -72,9 +73,7 @@ struct arp_hdr {
#define ARP_OP_INVREQUEST 8 /* request to identify peer */
#define ARP_OP_INVREPLY 9 /* response identifying peer */
- union {
- struct arp_ipv4 arp_ip;
- } arp_data;
+ struct arp_ipv4 arp_data;
} __attribute__((__packed__));
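With `arp_sha`/`arp_tha` now typed as `struct ether_addr` and the IPs as raw 32-bit words, callers fill the header along these lines (field values are illustrative, and the IPs are assumed to already be in network byte order):

```c
#include <string.h>
#include <rte_ether.h>
#include <rte_byteorder.h>
#include <rte_arp.h>

/* Populate an ARP request using the new typed fields. */
static void fill_arp_request(struct arp_hdr *arp,
		const struct ether_addr *src_mac,
		uint32_t src_ip_be, uint32_t dst_ip_be)
{
	arp->arp_hrd = rte_cpu_to_be_16(ARP_HRD_ETHER);
	arp->arp_pro = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	arp->arp_hln = ETHER_ADDR_LEN;
	arp->arp_pln = sizeof(uint32_t);
	arp->arp_op  = rte_cpu_to_be_16(ARP_OP_REQUEST);

	ether_addr_copy(src_mac, &arp->arp_data.arp_sha); /* was arp_data.arp_ip.arp_sha */
	arp->arp_data.arp_sip = src_ip_be;
	memset(&arp->arp_data.arp_tha, 0, ETHER_ADDR_LEN); /* target MAC unknown */
	arp->arp_data.arp_tip = dst_ip_be;
}
```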
#ifdef __cplusplus
diff --git a/src/dpdk_lib18/librte_net/rte_icmp.h b/src/dpdk22/lib/librte_net/rte_icmp.h
index 8b287f6d..8b287f6d 100755..100644
--- a/src/dpdk_lib18/librte_net/rte_icmp.h
+++ b/src/dpdk22/lib/librte_net/rte_icmp.h
diff --git a/src/dpdk_lib18/librte_net/rte_ip.h b/src/dpdk22/lib/librte_net/rte_ip.h
index f0ec543b..5b7554ab 100755..100644
--- a/src/dpdk_lib18/librte_net/rte_ip.h
+++ b/src/dpdk22/lib/librte_net/rte_ip.h
@@ -80,7 +80,6 @@
#include <stdint.h>
#include <netinet/in.h>
-#include <rte_memcpy.h>
#include <rte_byteorder.h>
#include <rte_mbuf.h>
@@ -110,6 +109,17 @@ struct ipv4_hdr {
(((c) & 0xff) << 8) | \
((d) & 0xff))
+/** Maximum IPv4 packet length (including the header) */
+#define IPV4_MAX_PKT_LEN 65535
+
+/** Internet header length mask for version_ihl field */
+#define IPV4_HDR_IHL_MASK (0x0f)
+/**
+ * Internet header length field multiplier (IHL field specifies overall header
+ * length in number of 4-byte words)
+ */
+#define IPV4_IHL_MULTIPLIER (4)
+
/* Fragment Offset * Flags. */
#define IPV4_HDR_DF_SHIFT 14
#define IPV4_HDR_MF_SHIFT 13
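A short helper showing the intended use of the two new macros; for a header without options, version_ihl carries IHL 5, giving 5 * 4 = 20 bytes:

```c
#include <stdint.h>
#include <rte_ip.h>

/* IPv4 header length in bytes, decoded from the version_ihl field. */
static uint32_t ipv4_hdr_len(const struct ipv4_hdr *hdr)
{
	return (hdr->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;
}
```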
@@ -159,7 +169,8 @@ __rte_raw_cksum(const void *buf, size_t len, uint32_t sum)
{
/* workaround gcc strict-aliasing warning */
uintptr_t ptr = (uintptr_t)buf;
- const uint16_t *u16 = (const uint16_t *)ptr;
+ typedef uint16_t __attribute__((__may_alias__)) u16_p;
+ const u16_p *u16 = (const u16_p *)ptr;
while (len >= (sizeof(*u16) * 4)) {
sum += u16[0];
@@ -233,7 +244,7 @@ rte_ipv4_cksum(const struct ipv4_hdr *ipv4_hdr)
{
uint16_t cksum;
cksum = rte_raw_cksum(ipv4_hdr, sizeof(struct ipv4_hdr));
- return ((cksum == 0xffff) ? cksum : ~cksum);
+ return (cksum == 0xffff) ? cksum : ~cksum;
}
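The `__may_alias__` typedef keeps the raw checksum legal under strict aliasing without changing its result; callers use it as before, zeroing the checksum field first. A minimal sketch:

```c
#include <rte_ip.h>

/* Compute and store the IPv4 header checksum; the field must be zero
 * while the header is being summed. */
static void set_ipv4_cksum(struct ipv4_hdr *hdr)
{
	hdr->hdr_checksum = 0;
	hdr->hdr_checksum = rte_ipv4_cksum(hdr);
}
```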
/**
diff --git a/src/dpdk_lib18/librte_net/rte_sctp.h b/src/dpdk22/lib/librte_net/rte_sctp.h
index 688e126f..688e126f 100755..100644
--- a/src/dpdk_lib18/librte_net/rte_sctp.h
+++ b/src/dpdk22/lib/librte_net/rte_sctp.h
diff --git a/src/dpdk_lib18/librte_net/rte_tcp.h b/src/dpdk22/lib/librte_net/rte_tcp.h
index 28b61e6d..28b61e6d 100755..100644
--- a/src/dpdk_lib18/librte_net/rte_tcp.h
+++ b/src/dpdk22/lib/librte_net/rte_tcp.h
diff --git a/src/dpdk_lib18/librte_net/rte_udp.h b/src/dpdk22/lib/librte_net/rte_udp.h
index bc5be4af..bc5be4af 100755..100644
--- a/src/dpdk_lib18/librte_net/rte_udp.h
+++ b/src/dpdk22/lib/librte_net/rte_udp.h
diff --git a/src/dpdk_lib18/librte_pipeline/rte_pipeline.c b/src/dpdk22/lib/librte_pipeline/rte_pipeline.c
index ac7e8873..d625fd25 100755..100644
--- a/src/dpdk_lib18/librte_pipeline/rte_pipeline.c
+++ b/src/dpdk22/lib/librte_pipeline/rte_pipeline.c
@@ -48,6 +48,17 @@
#define RTE_TABLE_INVALID UINT32_MAX
+#ifdef RTE_PIPELINE_STATS_COLLECT
+#define RTE_PIPELINE_STATS_ADD(counter, val) \
+ ({ (counter) += (val); })
+
+#define RTE_PIPELINE_STATS_ADD_M(counter, mask) \
+ ({ (counter) += __builtin_popcountll(mask); })
+#else
+#define RTE_PIPELINE_STATS_ADD(counter, val)
+#define RTE_PIPELINE_STATS_ADD_M(counter, mask)
+#endif
+
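RTE_PIPELINE_STATS_ADD_M turns a before/after burst-mask XOR into a counter increment via popcount, and both macros compile away unless RTE_PIPELINE_STATS_COLLECT is defined at build time. A self-contained illustration of the mask arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* An action handler receives a 64-bit burst mask and may clear
	 * bits for packets it drops; XOR-ing before/after isolates the
	 * dropped packets, and popcount yields the increment that
	 * RTE_PIPELINE_STATS_ADD_M performs. Values are illustrative. */
	uint64_t before = 0xffULL; /* 8 packets in the burst */
	uint64_t after  = 0xf5ULL; /* handler cleared bits 1 and 3 */

	printf("dropped: %d\n", __builtin_popcountll(before ^ after)); /* 2 */
	return 0;
}
```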
struct rte_port_in {
/* Input parameters */
struct rte_port_in_ops ops;
@@ -63,6 +74,8 @@ struct rte_port_in {
/* List of enabled ports */
struct rte_port_in *next;
+
+ uint64_t n_pkts_dropped_by_ah;
};
struct rte_port_out {
@@ -74,6 +87,8 @@ struct rte_port_out {
/* Handle to low-level port */
void *h_port;
+
+ uint64_t n_pkts_dropped_by_ah;
};
struct rte_table {
@@ -90,6 +105,12 @@ struct rte_table {
/* Handle to the low-level table object */
void *h_table;
+
+ /* Stats for this table. */
+ uint64_t n_pkts_dropped_by_lkp_hit_ah;
+ uint64_t n_pkts_dropped_by_lkp_miss_ah;
+ uint64_t n_pkts_dropped_lkp_hit;
+ uint64_t n_pkts_dropped_lkp_miss;
};
#define RTE_PIPELINE_MAX_NAME_SZ 124
@@ -175,14 +196,6 @@ rte_pipeline_check_params(struct rte_pipeline_params *params)
return -EINVAL;
}
- /* offset_port_id */
- if (params->offset_port_id & 0x3) {
- RTE_LOG(ERR, PIPELINE,
- "%s: Incorrect value for parameter offset_port_id\n",
- __func__);
- return -EINVAL;
- }
-
return 0;
}
@@ -574,6 +587,112 @@ rte_pipeline_table_entry_delete(struct rte_pipeline *p,
return (table->ops.f_delete)(table->h_table, key, key_found, entry);
}
+int rte_pipeline_table_entry_add_bulk(struct rte_pipeline *p,
+ uint32_t table_id,
+ void **keys,
+ struct rte_pipeline_table_entry **entries,
+ uint32_t n_keys,
+ int *key_found,
+ struct rte_pipeline_table_entry **entries_ptr)
+{
+ struct rte_table *table;
+ uint32_t i;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (keys == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: keys parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (entries == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: entries parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: table_id %d out of range\n", __func__, table_id);
+ return -EINVAL;
+ }
+
+ table = &p->tables[table_id];
+
+ if (table->ops.f_add_bulk == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: f_add_bulk function pointer NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < n_keys; i++) {
+ if ((entries[i]->action == RTE_PIPELINE_ACTION_TABLE) &&
+ table->table_next_id_valid &&
+ (entries[i]->table_id != table->table_next_id)) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Tree-like topologies not allowed\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+ /* Add entry */
+ for (i = 0; i < n_keys; i++) {
+ if ((entries[i]->action == RTE_PIPELINE_ACTION_TABLE) &&
+ (table->table_next_id_valid == 0)) {
+ table->table_next_id = entries[i]->table_id;
+ table->table_next_id_valid = 1;
+ }
+ }
+
+ return (table->ops.f_add_bulk)(table->h_table, keys, (void **) entries,
+ n_keys, key_found, (void **) entries_ptr);
+}
+
+int rte_pipeline_table_entry_delete_bulk(struct rte_pipeline *p,
+ uint32_t table_id,
+ void **keys,
+ uint32_t n_keys,
+ int *key_found,
+ struct rte_pipeline_table_entry **entries)
+{
+ struct rte_table *table;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (keys == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: key parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: table_id %d out of range\n", __func__, table_id);
+ return -EINVAL;
+ }
+
+ table = &p->tables[table_id];
+
+ if (table->ops.f_delete_bulk == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: f_delete function pointer NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ return (table->ops.f_delete_bulk)(table->h_table, keys, n_keys, key_found,
+ (void **) entries);
+}
+
/*
* Port
*
@@ -999,6 +1118,7 @@ rte_pipeline_compute_masks(struct rte_pipeline *p, uint64_t pkts_mask)
{
p->action_mask1[RTE_PIPELINE_ACTION_DROP] = 0;
p->action_mask1[RTE_PIPELINE_ACTION_PORT] = 0;
+ p->action_mask1[RTE_PIPELINE_ACTION_PORT_META] = 0;
p->action_mask1[RTE_PIPELINE_ACTION_TABLE] = 0;
if ((pkts_mask & (pkts_mask + 1)) == 0) {
@@ -1039,6 +1159,8 @@ rte_pipeline_action_handler_port_bulk(struct rte_pipeline *p,
port_out->f_action_bulk(p->pkts, &pkts_mask, port_out->arg_ah);
p->action_mask0[RTE_PIPELINE_ACTION_DROP] |= pkts_mask ^ mask;
+ RTE_PIPELINE_STATS_ADD_M(port_out->n_pkts_dropped_by_ah,
+ pkts_mask ^ mask);
}
/* Output port TX */
@@ -1070,6 +1192,9 @@ rte_pipeline_action_handler_port(struct rte_pipeline *p, uint64_t pkts_mask)
p->action_mask0[RTE_PIPELINE_ACTION_DROP] |=
(pkt_mask ^ 1LLU) << i;
+ RTE_PIPELINE_STATS_ADD(port_out->n_pkts_dropped_by_ah,
+ pkt_mask ^ 1LLU);
+
/* Output port TX */
if (pkt_mask != 0)
port_out->ops.f_tx(port_out->h_port,
@@ -1103,6 +1228,9 @@ rte_pipeline_action_handler_port(struct rte_pipeline *p, uint64_t pkts_mask)
p->action_mask0[RTE_PIPELINE_ACTION_DROP] |=
(pkt_mask ^ 1LLU) << i;
+ RTE_PIPELINE_STATS_ADD(port_out->n_pkts_dropped_by_ah,
+ pkt_mask ^ 1LLU);
+
/* Output port TX */
if (pkt_mask != 0)
port_out->ops.f_tx(port_out->h_port,
@@ -1139,6 +1267,9 @@ rte_pipeline_action_handler_port_meta(struct rte_pipeline *p,
p->action_mask0[RTE_PIPELINE_ACTION_DROP] |=
(pkt_mask ^ 1LLU) << i;
+ RTE_PIPELINE_STATS_ADD(port_out->n_pkts_dropped_by_ah,
+ pkt_mask ^ 1ULL);
+
/* Output port TX */
if (pkt_mask != 0)
port_out->ops.f_tx(port_out->h_port,
@@ -1173,6 +1304,9 @@ rte_pipeline_action_handler_port_meta(struct rte_pipeline *p,
p->action_mask0[RTE_PIPELINE_ACTION_DROP] |=
(pkt_mask ^ 1LLU) << i;
+ RTE_PIPELINE_STATS_ADD(port_out->n_pkts_dropped_by_ah,
+ pkt_mask ^ 1ULL);
+
/* Output port TX */
if (pkt_mask != 0)
port_out->ops.f_tx(port_out->h_port,
@@ -1224,16 +1358,17 @@ rte_pipeline_run(struct rte_pipeline *p)
pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
p->action_mask0[RTE_PIPELINE_ACTION_DROP] = 0;
p->action_mask0[RTE_PIPELINE_ACTION_PORT] = 0;
+ p->action_mask0[RTE_PIPELINE_ACTION_PORT_META] = 0;
p->action_mask0[RTE_PIPELINE_ACTION_TABLE] = 0;
/* Input port user actions */
if (port_in->f_action != NULL) {
uint64_t mask = pkts_mask;
- port_in->f_action(p->pkts, n_pkts, &pkts_mask,
- port_in->arg_ah);
- p->action_mask0[RTE_PIPELINE_ACTION_DROP] |=
- pkts_mask ^ mask;
+ port_in->f_action(p->pkts, n_pkts, &pkts_mask, port_in->arg_ah);
+ mask ^= pkts_mask;
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP] |= mask;
+ RTE_PIPELINE_STATS_ADD_M(port_in->n_pkts_dropped_by_ah, mask);
}
/* Table */
@@ -1259,9 +1394,10 @@ rte_pipeline_run(struct rte_pipeline *p)
table->f_action_miss(p->pkts,
&lookup_miss_mask,
default_entry, table->arg_ah);
- p->action_mask0[
- RTE_PIPELINE_ACTION_DROP] |=
- lookup_miss_mask ^ mask;
+ mask ^= lookup_miss_mask;
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP] |= mask;
+ RTE_PIPELINE_STATS_ADD_M(
+ table->n_pkts_dropped_by_lkp_miss_ah, mask);
}
/* Table reserved actions */
@@ -1275,6 +1411,10 @@ rte_pipeline_run(struct rte_pipeline *p)
uint32_t pos = default_entry->action;
p->action_mask0[pos] = lookup_miss_mask;
+ if (pos == RTE_PIPELINE_ACTION_DROP) {
+ RTE_PIPELINE_STATS_ADD_M(table->n_pkts_dropped_lkp_miss,
+ lookup_miss_mask);
+ }
}
}
@@ -1287,9 +1427,10 @@ rte_pipeline_run(struct rte_pipeline *p)
table->f_action_hit(p->pkts,
&lookup_hit_mask,
p->entries, table->arg_ah);
- p->action_mask0[
- RTE_PIPELINE_ACTION_DROP] |=
- lookup_hit_mask ^ mask;
+ mask ^= lookup_hit_mask;
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP] |= mask;
+ RTE_PIPELINE_STATS_ADD_M(
+ table->n_pkts_dropped_by_lkp_hit_ah, mask);
}
/* Table reserved actions */
@@ -1300,9 +1441,15 @@ rte_pipeline_run(struct rte_pipeline *p)
p->action_mask0[RTE_PIPELINE_ACTION_PORT] |=
p->action_mask1[
RTE_PIPELINE_ACTION_PORT];
+ p->action_mask0[RTE_PIPELINE_ACTION_PORT_META] |=
+ p->action_mask1[
+ RTE_PIPELINE_ACTION_PORT_META];
p->action_mask0[RTE_PIPELINE_ACTION_TABLE] |=
p->action_mask1[
RTE_PIPELINE_ACTION_TABLE];
+
+ RTE_PIPELINE_STATS_ADD_M(table->n_pkts_dropped_lkp_hit,
+ p->action_mask1[RTE_PIPELINE_ACTION_DROP]);
}
/* Prepare for next iteration */
@@ -1365,8 +1512,126 @@ rte_pipeline_port_out_packet_insert(struct rte_pipeline *p,
if (pkt_mask != 0) /* Output port TX */
port_out->ops.f_tx(port_out->h_port, pkt);
- else
+ else {
rte_pktmbuf_free(pkt);
+ RTE_PIPELINE_STATS_ADD(port_out->n_pkts_dropped_by_ah, 1);
+ }
+ }
+
+ return 0;
+}
+
+int rte_pipeline_port_in_stats_read(struct rte_pipeline *p, uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats, int clear)
+{
+ struct rte_port_in *port;
+ int retval;
+
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (port_id >= p->num_ports_in) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: port IN ID %u is out of range\n",
+ __func__, port_id);
+ return -EINVAL;
+ }
+
+ port = &p->ports_in[port_id];
+
+ if (port->ops.f_stats != NULL) {
+ retval = port->ops.f_stats(port->h_port, &stats->stats, clear);
+ if (retval)
+ return retval;
+ } else if (stats != NULL)
+ memset(&stats->stats, 0, sizeof(stats->stats));
+
+ if (stats != NULL)
+ stats->n_pkts_dropped_by_ah = port->n_pkts_dropped_by_ah;
+
+ if (clear != 0)
+ port->n_pkts_dropped_by_ah = 0;
+
+ return 0;
+}
+
+int rte_pipeline_port_out_stats_read(struct rte_pipeline *p, uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats, int clear)
+{
+ struct rte_port_out *port;
+ int retval;
+
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (port_id >= p->num_ports_out) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: port OUT ID %u is out of range\n", __func__, port_id);
+ return -EINVAL;
+ }
+
+ port = &p->ports_out[port_id];
+ if (port->ops.f_stats != NULL) {
+ retval = port->ops.f_stats(port->h_port, &stats->stats, clear);
+ if (retval != 0)
+ return retval;
+ } else if (stats != NULL)
+ memset(&stats->stats, 0, sizeof(stats->stats));
+
+ if (stats != NULL)
+ stats->n_pkts_dropped_by_ah = port->n_pkts_dropped_by_ah;
+
+ if (clear != 0)
+ port->n_pkts_dropped_by_ah = 0;
+
+ return 0;
+}
+
+int rte_pipeline_table_stats_read(struct rte_pipeline *p, uint32_t table_id,
+ struct rte_pipeline_table_stats *stats, int clear)
+{
+ struct rte_table *table;
+ int retval;
+
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: table %u is out of range\n", __func__, table_id);
+ return -EINVAL;
+ }
+
+ table = &p->tables[table_id];
+ if (table->ops.f_stats != NULL) {
+ retval = table->ops.f_stats(table->h_table, &stats->stats, clear);
+ if (retval != 0)
+ return retval;
+ } else if (stats != NULL)
+ memset(&stats->stats, 0, sizeof(stats->stats));
+
+ if (stats != NULL) {
+ stats->n_pkts_dropped_by_lkp_hit_ah =
+ table->n_pkts_dropped_by_lkp_hit_ah;
+ stats->n_pkts_dropped_by_lkp_miss_ah =
+ table->n_pkts_dropped_by_lkp_miss_ah;
+ stats->n_pkts_dropped_lkp_hit = table->n_pkts_dropped_lkp_hit;
+ stats->n_pkts_dropped_lkp_miss = table->n_pkts_dropped_lkp_miss;
+ }
+
+ if (clear != 0) {
+ table->n_pkts_dropped_by_lkp_hit_ah = 0;
+ table->n_pkts_dropped_by_lkp_miss_ah = 0;
+ table->n_pkts_dropped_lkp_hit = 0;
+ table->n_pkts_dropped_lkp_miss = 0;
}
return 0;
diff --git a/src/dpdk_lib18/librte_pipeline/rte_pipeline.h b/src/dpdk22/lib/librte_pipeline/rte_pipeline.h
index fb1014a0..54593245 100755..100644
--- a/src/dpdk_lib18/librte_pipeline/rte_pipeline.h
+++ b/src/dpdk22/lib/librte_pipeline/rte_pipeline.h
@@ -85,10 +85,11 @@ extern "C" {
#include <stdint.h>
-#include <rte_mbuf.h>
#include <rte_port.h>
#include <rte_table.h>
+struct rte_mbuf;
+
/*
* Pipeline
*
@@ -111,6 +112,45 @@ struct rte_pipeline_params {
uint32_t offset_port_id;
};
+/** Pipeline port in stats. */
+struct rte_pipeline_port_in_stats {
+ /** Port in stats. */
+ struct rte_port_in_stats stats;
+
+ /** Number of packets dropped by action handler. */
+ uint64_t n_pkts_dropped_by_ah;
+
+};
+
+/** Pipeline port out stats. */
+struct rte_pipeline_port_out_stats {
+ /** Port out stats. */
+ struct rte_port_out_stats stats;
+
+ /** Number of packets dropped by action handler. */
+ uint64_t n_pkts_dropped_by_ah;
+};
+
+/** Pipeline table stats. */
+struct rte_pipeline_table_stats {
+ /** Table stats. */
+ struct rte_table_stats stats;
+
+ /** Number of packets dropped by lookup hit action handler. */
+ uint64_t n_pkts_dropped_by_lkp_hit_ah;
+
+ /** Number of packets dropped by lookup miss action handler. */
+ uint64_t n_pkts_dropped_by_lkp_miss_ah;
+
+ /** Number of packets dropped by the pipeline on behalf of this table,
+ * based on the action specified in the matching table entry (lookup hit). */
+ uint64_t n_pkts_dropped_lkp_hit;
+
+ /** Number of packets dropped by the pipeline on behalf of this table,
+ * based on the action specified in the default table entry (lookup miss). */
+ uint64_t n_pkts_dropped_lkp_miss;
+};
+
/**
* Pipeline create
*
@@ -425,6 +465,90 @@ int rte_pipeline_table_entry_delete(struct rte_pipeline *p,
int *key_found,
struct rte_pipeline_table_entry *entry);
+/**
+ * Pipeline table entry add bulk
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param table_id
+ * Table ID (returned by previous invocation of pipeline table create)
+ * @param keys
+ * Array containing table entry keys
+ * @param entries
+ * Array containing the new contents for every table entry identified by key
+ * @param n_keys
+ * Number of keys to add
+ * @param key_found
+ * On successful invocation, key_found for every item in the array is set to
+ * TRUE (value different than 0) if key was already present in the table
+ * before the add operation and to FALSE (value 0) if not
+ * @param entries_ptr
+ * On successful invocation, array *entries_ptr stores pointer to every table
+ * entry associated with key. This can be used for further read-write accesses
+ * to this table entry and is valid until the key is deleted from the table or
+ * re-added (usually for associating different actions and/or action meta-data
+ * to the current key)
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_entry_add_bulk(struct rte_pipeline *p,
+ uint32_t table_id,
+ void **keys,
+ struct rte_pipeline_table_entry **entries,
+ uint32_t n_keys,
+ int *key_found,
+ struct rte_pipeline_table_entry **entries_ptr);
+
+/**
+ * Pipeline table entry delete bulk
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param table_id
+ * Table ID (returned by previous invocation of pipeline table create)
+ * @param keys
+ * Array containing table entry keys
+ * @param n_keys
+ * Number of keys to delete
+ * @param key_found
+ * On successful invocation, key_found for every item in the array is set to
+ * TRUE (value different than 0) if key was found in the table before the
+ * delete operation and to FALSE (value 0) if not
+ * @param entries
+ * If entries pointer is NULL, this pointer is ignored for every entry found.
+ * Else, after successful invocation, if a specific key is found in the table
+ * and the entry points to a valid buffer, the table entry contents (as they
+ * were before the delete was performed) are copied to this buffer.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_entry_delete_bulk(struct rte_pipeline *p,
+ uint32_t table_id,
+ void **keys,
+ uint32_t n_keys,
+ int *key_found,
+ struct rte_pipeline_table_entry **entries);
+
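A hypothetical use of the bulk add declared above; the keys and entry contents are placeholders whose layout depends on the underlying table type, and p/table_id are assumed to come from earlier create calls:

```c
#include <rte_pipeline.h>

static int add_two(struct rte_pipeline *p, uint32_t table_id,
		void *key0, void *key1,
		struct rte_pipeline_table_entry *e0,
		struct rte_pipeline_table_entry *e1)
{
	void *keys[2] = { key0, key1 };
	struct rte_pipeline_table_entry *entries[2] = { e0, e1 };
	int key_found[2];
	struct rte_pipeline_table_entry *entries_ptr[2];

	int status = rte_pipeline_table_entry_add_bulk(p, table_id, keys,
			entries, 2, key_found, entries_ptr);
	if (status == 0) {
		/* entries_ptr[i] now points at the live entry for keys[i];
		 * key_found[i] != 0 means the key existed before the add */
	}
	return status;
}
```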
+/**
+ * Read pipeline table stats.
+ *
+ * This function reads table statistics identified by *table_id* of given
+ * pipeline *p*.
+ *
+ * @param p
+ * Handle to pipeline instance.
+ * @param table_id
+ * Table ID for which stats will be returned.
+ * @param stats
+ * Statistics buffer.
+ * @param clear
+ * If not 0, clear the stats after reading.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_stats_read(struct rte_pipeline *p, uint32_t table_id,
+ struct rte_pipeline_table_stats *stats, int clear);
+
/*
* Port IN
*
@@ -539,6 +663,26 @@ int rte_pipeline_port_in_enable(struct rte_pipeline *p,
int rte_pipeline_port_in_disable(struct rte_pipeline *p,
uint32_t port_id);
+/**
+ * Read pipeline port in stats.
+ *
+ * This function reads port in statistics identified by *port_id* of given
+ * pipeline *p*.
+ *
+ * @param p
+ * Handle to pipeline instance.
+ * @param port_id
+ * Port ID for which stats will be returned.
+ * @param stats
+ * Statistics buffer.
+ * @param clear
+ * If not 0, clear the stats after reading.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_in_stats_read(struct rte_pipeline *p, uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats, int clear);
+
/*
* Port OUT
*
@@ -657,6 +801,25 @@ int rte_pipeline_port_out_packet_insert(struct rte_pipeline *p,
uint32_t port_id,
struct rte_mbuf *pkt);
+/**
+ * Read pipeline port out stats.
+ *
+ * This function reads port out statistics identified by *port_id* of given
+ * pipeline *p*.
+ *
+ * @param p
+ * Handle to pipeline instance.
+ * @param port_id
+ * Port ID for which stats will be returned.
+ * @param stats
+ * Statistics buffer.
+ * @param clear
+ * If not 0, clear the stats after reading.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_out_stats_read(struct rte_pipeline *p, uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats, int clear);
#ifdef __cplusplus
}
#endif
diff --git a/src/dpdk_lib18/librte_port/rte_port.h b/src/dpdk22/lib/librte_port/rte_port.h
index d84e5a12..00b97a91 100755..100644
--- a/src/dpdk_lib18/librte_port/rte_port.h
+++ b/src/dpdk22/lib/librte_port/rte_port.h
@@ -54,23 +54,23 @@ extern "C" {
* Macros to allow accessing metadata stored in the mbuf headroom
* just beyond the end of the mbuf data structure returned by a port
*/
-#define RTE_MBUF_METADATA_UINT8(mbuf, offset) \
- (((uint8_t *)&(mbuf)[1])[offset])
-#define RTE_MBUF_METADATA_UINT16(mbuf, offset) \
- (((uint16_t *)&(mbuf)[1])[offset/sizeof(uint16_t)])
-#define RTE_MBUF_METADATA_UINT32(mbuf, offset) \
- (((uint32_t *)&(mbuf)[1])[offset/sizeof(uint32_t)])
-#define RTE_MBUF_METADATA_UINT64(mbuf, offset) \
- (((uint64_t *)&(mbuf)[1])[offset/sizeof(uint64_t)])
-
#define RTE_MBUF_METADATA_UINT8_PTR(mbuf, offset) \
- (&RTE_MBUF_METADATA_UINT8(mbuf, offset))
+ (&((uint8_t *)(mbuf))[offset])
#define RTE_MBUF_METADATA_UINT16_PTR(mbuf, offset) \
- (&RTE_MBUF_METADATA_UINT16(mbuf, offset))
+ ((uint16_t *) RTE_MBUF_METADATA_UINT8_PTR(mbuf, offset))
#define RTE_MBUF_METADATA_UINT32_PTR(mbuf, offset) \
- (&RTE_MBUF_METADATA_UINT32(mbuf, offset))
+ ((uint32_t *) RTE_MBUF_METADATA_UINT8_PTR(mbuf, offset))
#define RTE_MBUF_METADATA_UINT64_PTR(mbuf, offset) \
- (&RTE_MBUF_METADATA_UINT64(mbuf, offset))
+ ((uint64_t *) RTE_MBUF_METADATA_UINT8_PTR(mbuf, offset))
+
+#define RTE_MBUF_METADATA_UINT8(mbuf, offset) \
+ (*RTE_MBUF_METADATA_UINT8_PTR(mbuf, offset))
+#define RTE_MBUF_METADATA_UINT16(mbuf, offset) \
+ (*RTE_MBUF_METADATA_UINT16_PTR(mbuf, offset))
+#define RTE_MBUF_METADATA_UINT32(mbuf, offset) \
+ (*RTE_MBUF_METADATA_UINT32_PTR(mbuf, offset))
+#define RTE_MBUF_METADATA_UINT64(mbuf, offset) \
+ (*RTE_MBUF_METADATA_UINT64_PTR(mbuf, offset))
/**@}*/
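Since the pointer macros are now the primitives and the offset is counted from the start of the mbuf itself, headroom metadata is addressed as sizeof(struct rte_mbuf) plus its offset inside the headroom. A sketch (the offset is illustrative and must be aligned for the access width):

```c
#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_port.h>

/* Store a 32-bit tag in the first headroom bytes past the mbuf struct. */
static void tag_flow_id(struct rte_mbuf *m, uint32_t flow_id)
{
	uint32_t offset = sizeof(struct rte_mbuf); /* start of the headroom */
	RTE_MBUF_METADATA_UINT32(m, offset) = flow_id;
}
```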
/*
@@ -81,6 +81,12 @@ extern "C" {
Cannot be changed. */
#define RTE_PORT_IN_BURST_SIZE_MAX 64
+/** Input port statistics */
+struct rte_port_in_stats {
+ uint64_t n_pkts_in;
+ uint64_t n_pkts_drop;
+};
+
/**
* Input port create
*
@@ -120,17 +126,42 @@ typedef int (*rte_port_in_op_rx)(
struct rte_mbuf **pkts,
uint32_t n_pkts);
+/**
+ * Input port stats get
+ *
+ * @param port
+ * Handle to output port instance
+ * @param stats
+ * Handle to port_in stats struct to copy data
+ * @param clear
+ * Flag indicating that stats should be cleared after read
+ *
+ * @return
+ * Error code or 0 on success.
+ */
+typedef int (*rte_port_in_op_stats_read)(
+ void *port,
+ struct rte_port_in_stats *stats,
+ int clear);
+
/** Input port interface defining the input port operation */
struct rte_port_in_ops {
- rte_port_in_op_create f_create; /**< Create */
- rte_port_in_op_free f_free; /**< Free */
- rte_port_in_op_rx f_rx; /**< Packet RX (packet burst) */
+ rte_port_in_op_create f_create; /**< Create */
+ rte_port_in_op_free f_free; /**< Free */
+ rte_port_in_op_rx f_rx; /**< Packet RX (packet burst) */
+ rte_port_in_op_stats_read f_stats; /**< Stats */
};
/*
* Port OUT
*
*/
+/** Output port statistics */
+struct rte_port_out_stats {
+ uint64_t n_pkts_in;
+ uint64_t n_pkts_drop;
+};
+
/**
* Output port create
*
@@ -197,13 +228,32 @@ typedef int (*rte_port_out_op_tx_bulk)(
*/
typedef int (*rte_port_out_op_flush)(void *port);
+/**
+ * Output port stats read
+ *
+ * @param port
+ * Handle to output port instance
+ * @param stats
+ * Handle to port_out stats struct to copy data
+ * @param clear
+ * Flag indicating that stats should be cleared after read
+ *
+ * @return
+ * Error code or 0 on success.
+ */
+typedef int (*rte_port_out_op_stats_read)(
+ void *port,
+ struct rte_port_out_stats *stats,
+ int clear);
+
/** Output port interface defining the output port operation */
struct rte_port_out_ops {
- rte_port_out_op_create f_create; /**< Create */
- rte_port_out_op_free f_free; /**< Free */
- rte_port_out_op_tx f_tx; /**< Packet TX (single packet) */
- rte_port_out_op_tx_bulk f_tx_bulk; /**< Packet TX (packet burst) */
- rte_port_out_op_flush f_flush; /**< Flush */
+ rte_port_out_op_create f_create; /**< Create */
+ rte_port_out_op_free f_free; /**< Free */
+ rte_port_out_op_tx f_tx; /**< Packet TX (single packet) */
+ rte_port_out_op_tx_bulk f_tx_bulk; /**< Packet TX (packet burst) */
+ rte_port_out_op_flush f_flush; /**< Flush */
+ rte_port_out_op_stats_read f_stats; /**< Stats */
};
#ifdef __cplusplus
diff --git a/src/dpdk_lib18/librte_port/rte_port_ethdev.h b/src/dpdk22/lib/librte_port/rte_port_ethdev.h
index af67a12b..201a79e4 100755..100644
--- a/src/dpdk_lib18/librte_port/rte_port_ethdev.h
+++ b/src/dpdk22/lib/librte_port/rte_port_ethdev.h
@@ -79,6 +79,25 @@ struct rte_port_ethdev_writer_params {
/** ethdev_writer port operations */
extern struct rte_port_out_ops rte_port_ethdev_writer_ops;
+/** ethdev_writer_nodrop port parameters */
+struct rte_port_ethdev_writer_nodrop_params {
+ /** NIC TX port ID */
+ uint8_t port_id;
+
+ /** NIC TX queue ID */
+ uint16_t queue_id;
+
+ /** Recommended burst size to NIC TX queue. The actual burst size can be
+ bigger or smaller than this value. */
+ uint32_t tx_burst_sz;
+
+ /** Maximum number of retries, 0 for no limit */
+ uint32_t n_retries;
+};
+
+/** ethdev_writer_nodrop port operations */
+extern struct rte_port_out_ops rte_port_ethdev_writer_nodrop_ops;
+
#ifdef __cplusplus
}
#endif
diff --git a/src/dpdk_lib18/librte_port/rte_port_frag.h b/src/dpdk22/lib/librte_port/rte_port_frag.h
index dfd70c01..0085ff7c 100755..100644
--- a/src/dpdk_lib18/librte_port/rte_port_frag.h
+++ b/src/dpdk22/lib/librte_port/rte_port_frag.h
@@ -63,7 +63,7 @@ extern "C" {
#include "rte_port.h"
/** ring_reader_ipv4_frag port parameters */
-struct rte_port_ring_reader_ipv4_frag_params {
+struct rte_port_ring_reader_frag_params {
/** Underlying single consumer ring that has to be pre-initialized. */
struct rte_ring *ring;
@@ -84,9 +84,16 @@ struct rte_port_ring_reader_ipv4_frag_params {
struct rte_mempool *pool_indirect;
};
+#define rte_port_ring_reader_ipv4_frag_params rte_port_ring_reader_frag_params
+
+#define rte_port_ring_reader_ipv6_frag_params rte_port_ring_reader_frag_params
+
/** ring_reader_ipv4_frag port operations */
extern struct rte_port_in_ops rte_port_ring_reader_ipv4_frag_ops;
+/** ring_reader_ipv6_frag port operations */
+extern struct rte_port_in_ops rte_port_ring_reader_ipv6_frag_ops;
+
#ifdef __cplusplus
}
#endif
diff --git a/src/dpdk_lib18/librte_port/rte_port_ras.h b/src/dpdk22/lib/librte_port/rte_port_ras.h
index c6ed688c..5a16f831 100755..100644
--- a/src/dpdk_lib18/librte_port/rte_port_ras.h
+++ b/src/dpdk22/lib/librte_port/rte_port_ras.h
@@ -64,7 +64,7 @@ extern "C" {
#include "rte_port.h"
/** ring_writer_ipv4_ras port parameters */
-struct rte_port_ring_writer_ipv4_ras_params {
+struct rte_port_ring_writer_ras_params {
/** Underlying single consumer ring that has to be pre-initialized. */
struct rte_ring *ring;
@@ -73,9 +73,16 @@ struct rte_port_ring_writer_ipv4_ras_params {
uint32_t tx_burst_sz;
};
+#define rte_port_ring_writer_ipv4_ras_params rte_port_ring_writer_ras_params
+
+#define rte_port_ring_writer_ipv6_ras_params rte_port_ring_writer_ras_params
+
/** ring_writer_ipv4_ras port operations */
extern struct rte_port_out_ops rte_port_ring_writer_ipv4_ras_ops;
+/** ring_writer_ipv6_ras port operations */
+extern struct rte_port_out_ops rte_port_ring_writer_ipv6_ras_ops;
+
#ifdef __cplusplus
}
#endif
diff --git a/src/dpdk_lib18/librte_port/rte_port_ring.h b/src/dpdk22/lib/librte_port/rte_port_ring.h
index 009dcf85..de377d28 100755..100644
--- a/src/dpdk_lib18/librte_port/rte_port_ring.h
+++ b/src/dpdk22/lib/librte_port/rte_port_ring.h
@@ -42,8 +42,14 @@ extern "C" {
* @file
* RTE Port Ring
*
- * ring_reader: input port built on top of pre-initialized single consumer ring
- * ring_writer: output port built on top of pre-initialized single producer ring
+ * ring_reader:
+ * input port built on top of pre-initialized single consumer ring
+ * ring_writer:
+ * output port built on top of pre-initialized single producer ring
+ * ring_multi_reader:
+ * input port built on top of pre-initialized multi-consumer ring
+ * ring_multi_writer:
+ * output port built on top of pre-initialized multi-producer ring
*
***/
@@ -55,7 +61,7 @@ extern "C" {
/** ring_reader port parameters */
struct rte_port_ring_reader_params {
- /** Underlying single consumer ring that has to be pre-initialized */
+ /** Underlying consumer ring that has to be pre-initialized */
struct rte_ring *ring;
};
@@ -64,7 +70,7 @@ extern struct rte_port_in_ops rte_port_ring_reader_ops;
/** ring_writer port parameters */
struct rte_port_ring_writer_params {
- /** Underlying single producer ring that has to be pre-initialized */
+ /** Underlying producer ring that has to be pre-initialized */
struct rte_ring *ring;
/** Recommended burst size to ring. The actual burst size can be
@@ -75,6 +81,41 @@ struct rte_port_ring_writer_params {
/** ring_writer port operations */
extern struct rte_port_out_ops rte_port_ring_writer_ops;
+/** ring_writer_nodrop port parameters */
+struct rte_port_ring_writer_nodrop_params {
+ /** Underlying producer ring that has to be pre-initialized */
+ struct rte_ring *ring;
+
+ /** Recommended burst size to ring. The actual burst size can be
+ bigger or smaller than this value. */
+ uint32_t tx_burst_sz;
+
+ /** Maximum number of retries, 0 for no limit */
+ uint32_t n_retries;
+};
+
+/** ring_writer_nodrop port operations */
+extern struct rte_port_out_ops rte_port_ring_writer_nodrop_ops;
+
+/** ring_multi_reader port parameters */
+#define rte_port_ring_multi_reader_params rte_port_ring_reader_params
+
+/** ring_multi_reader port operations */
+extern struct rte_port_in_ops rte_port_ring_multi_reader_ops;
+
+/** ring_multi_writer port parameters */
+#define rte_port_ring_multi_writer_params rte_port_ring_writer_params
+
+/** ring_multi_writer port operations */
+extern struct rte_port_out_ops rte_port_ring_multi_writer_ops;
+
+/** ring_multi_writer_nodrop port parameters */
+#define rte_port_ring_multi_writer_nodrop_params \
+ rte_port_ring_writer_nodrop_params
+
+/** ring_multi_writer_nodrop port operations */
+extern struct rte_port_out_ops rte_port_ring_multi_writer_nodrop_ops;
+
#ifdef __cplusplus
}
#endif
diff --git a/src/dpdk_lib18/librte_port/rte_port_sched.h b/src/dpdk22/lib/librte_port/rte_port_sched.h
index 555415ab..555415ab 100755..100644
--- a/src/dpdk_lib18/librte_port/rte_port_sched.h
+++ b/src/dpdk22/lib/librte_port/rte_port_sched.h
diff --git a/src/dpdk_lib18/librte_port/rte_port_source_sink.h b/src/dpdk22/lib/librte_port/rte_port_source_sink.h
index 0f9be799..0f9be799 100755..100644
--- a/src/dpdk_lib18/librte_port/rte_port_source_sink.h
+++ b/src/dpdk22/lib/librte_port/rte_port_source_sink.h
diff --git a/src/dpdk_lib18/librte_ring/rte_ring.c b/src/dpdk22/lib/librte_ring/rte_ring.c
index f5899c4c..d80faf3b 100755..100644
--- a/src/dpdk_lib18/librte_ring/rte_ring.c
+++ b/src/dpdk22/lib/librte_ring/rte_ring.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -77,7 +77,6 @@
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_atomic.h>
@@ -92,6 +91,11 @@
TAILQ_HEAD(rte_ring_list, rte_tailq_entry);
+static struct rte_tailq_elem rte_ring_tailq = {
+ .name = RTE_TAILQ_RING_NAME,
+};
+EAL_REGISTER_TAILQ(rte_ring_tailq)
+
/* true if x is a power of 2 */
#define POWEROF2(x) ((((x)-1) & (x)) == 0)
@@ -162,12 +166,7 @@ rte_ring_create(const char *name, unsigned count, int socket_id,
int mz_flags = 0;
struct rte_ring_list* ring_list = NULL;
- /* check that we have an initialised tail queue */
- if ((ring_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING, rte_ring_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
+ ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);
ring_size = rte_ring_get_memsize(count);
if (ring_size < 0) {
@@ -197,6 +196,7 @@ rte_ring_create(const char *name, unsigned count, int socket_id,
rte_ring_init(r, name, count, flags);
te->data = (void *) r;
+ r->memzone = mz;
TAILQ_INSERT_TAIL(ring_list, te, next);
} else {
@@ -209,6 +209,51 @@ rte_ring_create(const char *name, unsigned count, int socket_id,
return r;
}
+/* free the ring */
+void
+rte_ring_free(struct rte_ring *r)
+{
+ struct rte_ring_list *ring_list = NULL;
+ struct rte_tailq_entry *te;
+
+ if (r == NULL)
+ return;
+
+ /*
+ * Ring was not created with rte_ring_create,
+ * therefore, there is no memzone to free.
+ */
+ if (r->memzone == NULL) {
+ RTE_LOG(ERR, RING, "Cannot free ring (not created with rte_ring_create()");
+ return;
+ }
+
+ if (rte_memzone_free(r->memzone) != 0) {
+ RTE_LOG(ERR, RING, "Cannot free memory\n");
+ return;
+ }
+
+ ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* find out tailq entry */
+ TAILQ_FOREACH(te, ring_list, next) {
+ if (te->data == (void *) r)
+ break;
+ }
+
+ if (te == NULL) {
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return;
+ }
+
+ TAILQ_REMOVE(ring_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ rte_free(te);
+}
+
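A sketch of the new create/free round trip (assumes EAL is initialized); rings placed in caller-owned memory via rte_ring_init() have memzone == NULL and are rejected, as the check above shows:

```c
#include <rte_ring.h>
#include <rte_lcore.h>

static void ring_roundtrip(void)
{
	struct rte_ring *r = rte_ring_create("demo_ring", 1024,
			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);

	if (r == NULL)
		return;
	/* ... enqueue/dequeue bursts ... */
	rte_ring_free(r); /* removes the tailq entry and frees the memzone */
}
```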
/*
* change the high water mark. If *count* is 0, water marking is
* disabled
@@ -288,12 +333,7 @@ rte_ring_list_dump(FILE *f)
const struct rte_tailq_entry *te;
struct rte_ring_list *ring_list;
- /* check that we have an initialised tail queue */
- if ((ring_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING, rte_ring_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return;
- }
+ ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);
rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
@@ -312,12 +352,7 @@ rte_ring_lookup(const char *name)
struct rte_ring *r = NULL;
struct rte_ring_list *ring_list;
- /* check that we have an initialized tail queue */
- if ((ring_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING, rte_ring_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
+ ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);
rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
diff --git a/src/dpdk_lib18/librte_ring/rte_ring.h b/src/dpdk22/lib/librte_ring/rte_ring.h
index 7cd5f2d4..de036cef 100755..100644
--- a/src/dpdk_lib18/librte_ring/rte_ring.h
+++ b/src/dpdk22/lib/librte_ring/rte_ring.h
@@ -101,6 +101,8 @@ extern "C" {
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
+#define RTE_TAILQ_RING_NAME "RTE_RING"
+
enum rte_ring_queue_behavior {
RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
@@ -127,6 +129,13 @@ struct rte_ring_debug_stats {
#define RTE_RING_NAMESIZE 32 /**< The maximum length of a ring name. */
#define RTE_RING_MZ_PREFIX "RG_"
+#ifndef RTE_RING_PAUSE_REP_COUNT
+#define RTE_RING_PAUSE_REP_COUNT 0 /**< Yield after this many rte_pause()
+ * calls; no yield if RTE_RING_PAUSE_REP_COUNT is 0. */
+#endif
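A build-time opt-in sketch; in practice the value would come from the DPDK configuration rather than a per-file define:

```c
/* With a non-zero value, a thread stuck behind a pre-empted producer
 * or consumer calls sched_yield() periodically instead of spinning on
 * rte_pause() forever. */
#define RTE_RING_PAUSE_REP_COUNT 64 /* yield after every 64 pauses */
#include <rte_ring.h>
```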
+
+struct rte_memzone; /* forward declaration, so as not to require memzone.h */
+
/**
* An RTE ring structure.
*
@@ -140,6 +149,8 @@ struct rte_ring_debug_stats {
struct rte_ring {
char name[RTE_RING_NAMESIZE]; /**< Name of the ring. */
int flags; /**< Flags supplied at creation. */
+ const struct rte_memzone *memzone;
+ /**< Memzone, if any, containing the rte_ring */
/** Ring producer status. */
struct prod {
@@ -188,10 +199,12 @@ struct rte_ring {
* The number to add to the object-oriented statistics.
*/
#ifdef RTE_LIBRTE_RING_DEBUG
-#define __RING_STAT_ADD(r, name, n) do { \
- unsigned __lcore_id = rte_lcore_id(); \
- r->stats[__lcore_id].name##_objs += n; \
- r->stats[__lcore_id].name##_bulk += 1; \
+#define __RING_STAT_ADD(r, name, n) do { \
+ unsigned __lcore_id = rte_lcore_id(); \
+ if (__lcore_id < RTE_MAX_LCORE) { \
+ r->stats[__lcore_id].name##_objs += n; \
+ r->stats[__lcore_id].name##_bulk += 1; \
+ } \
} while(0)
#else
#define __RING_STAT_ADD(r, name, n) do {} while(0)
@@ -284,7 +297,6 @@ int rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
* rte_errno set appropriately. Possible errno values include:
* - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
* - E_RTE_SECONDARY - function was called from a secondary process instance
- * - E_RTE_NO_TAILQ - no tailq list could be got for the ring list
* - EINVAL - count provided is not a power of 2
* - ENOSPC - the maximum number of memzones has already been allocated
* - EEXIST - a memzone with the same name already exists
@@ -292,6 +304,13 @@ int rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
*/
struct rte_ring *rte_ring_create(const char *name, unsigned count,
int socket_id, unsigned flags);
+/**
+ * De-allocate all memory used by the ring.
+ *
+ * @param r
+ * Ring to free
+ */
+void rte_ring_free(struct rte_ring *r);
/**
* Change the high water mark.
@@ -408,7 +427,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t cons_tail, free_entries;
const unsigned max = n;
int success;
- unsigned i;
+ unsigned i, rep = 0;
uint32_t mask = r->prod.mask;
int ret;
@@ -449,7 +468,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
/* write entries in ring */
ENQUEUE_PTRS();
- rte_compiler_barrier();
+ rte_smp_wmb();
/* if we exceed the watermark */
if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
@@ -466,9 +485,18 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* If there are other enqueues in progress that preceded us,
* we need to wait for them to complete
*/
- while (unlikely(r->prod.tail != prod_head))
+ while (unlikely(r->prod.tail != prod_head)) {
rte_pause();
+ /* Set RTE_RING_PAUSE_REP_COUNT to avoid spinning too long while
+ * waiting for other threads to finish. It gives a pre-empted thread
+ * a chance to proceed and finish its ring enqueue operation. */
+ if (RTE_RING_PAUSE_REP_COUNT &&
+ ++rep == RTE_RING_PAUSE_REP_COUNT) {
+ rep = 0;
+ sched_yield();
+ }
+ }
r->prod.tail = prod_next;
return ret;
}
@@ -535,7 +563,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
/* write entries in ring */
ENQUEUE_PTRS();
- rte_compiler_barrier();
+ rte_smp_wmb();
/* if we exceed the watermark */
if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
@@ -587,7 +615,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
uint32_t cons_next, entries;
const unsigned max = n;
int success;
- unsigned i;
+ unsigned i, rep = 0;
uint32_t mask = r->prod.mask;
/* move cons.head atomically */
@@ -626,15 +654,24 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
/* copy in table */
DEQUEUE_PTRS();
- rte_compiler_barrier();
+ rte_smp_rmb();
/*
* If there are other dequeues in progress that preceded us,
* we need to wait for them to complete
*/
- while (unlikely(r->cons.tail != cons_head))
+ while (unlikely(r->cons.tail != cons_head)) {
rte_pause();
+ /* Set RTE_RING_PAUSE_REP_COUNT to avoid spinning too long while
+ * waiting for other threads to finish. It gives a pre-empted thread
+ * a chance to proceed and finish its ring dequeue operation. */
+ if (RTE_RING_PAUSE_REP_COUNT &&
+ ++rep == RTE_RING_PAUSE_REP_COUNT) {
+ rep = 0;
+ sched_yield();
+ }
+ }
__RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;
@@ -701,7 +738,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
/* copy in table */
DEQUEUE_PTRS();
- rte_compiler_barrier();
+ rte_smp_rmb();
__RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;
diff --git a/src/dpdk_lib18/librte_table/rte_lru.h b/src/dpdk22/lib/librte_table/rte_lru.h
index e87e062d..e87e062d 100755..100644
--- a/src/dpdk_lib18/librte_table/rte_lru.h
+++ b/src/dpdk22/lib/librte_table/rte_lru.h
diff --git a/src/dpdk_lib18/librte_table/rte_table.h b/src/dpdk22/lib/librte_table/rte_table.h
index d57bc336..720514ea 100755..100644
--- a/src/dpdk_lib18/librte_table/rte_table.h
+++ b/src/dpdk22/lib/librte_table/rte_table.h
@@ -55,9 +55,16 @@ extern "C" {
***/
#include <stdint.h>
-#include <rte_mbuf.h>
#include <rte_port.h>
+struct rte_mbuf;
+
+/** Lookup table statistics */
+struct rte_table_stats {
+ uint64_t n_pkts_in;
+ uint64_t n_pkts_lookup_miss;
+};
+
/**
* Lookup table create
*
@@ -147,6 +154,77 @@ typedef int (*rte_table_op_entry_delete)(
void *entry);
/**
+ * Lookup table entry add bulk
+ *
+ * @param table
+ * Handle to lookup table instance
+ * @param keys
+ * Array containing lookup keys
+ * @param entries
+ * Array containing data to be associated with each key. Every item in the
+ * array has to point to a valid memory buffer where the first entry_size
+ * bytes (table create parameter) are populated with the data.
+ * @param n_keys
+ * Number of keys to add
+ * @param key_found
+ * After successful invocation, key_found for every item in the array is set
+ * to a value different than 0 if the current key is already present in the
+ * table and to 0 if not. This pointer has to be set to a valid memory
+ * location before the table entry add function is called.
+ * @param entries_ptr
+ * After successful invocation, array *entries_ptr stores the handle to the
+ * table entry containing the data associated with every key. This handle can
+ * be used to perform further read-write accesses to this entry. This handle
+ * is valid until the key is deleted from the table or the same key is
+ * re-added to the table, typically to associate it with different data. This
+ * pointer has to be set to a valid memory location before the function is
+ * called.
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_table_op_entry_add_bulk)(
+ void *table,
+ void **keys,
+ void **entries,
+ uint32_t n_keys,
+ int *key_found,
+ void **entries_ptr);
+
+/**
+ * Lookup table entry delete bulk
+ *
+ * @param table
+ * Handle to lookup table instance
+ * @param keys
+ * Array containing lookup keys
+ * @param n_keys
+ * Number of keys to delete
+ * @param key_found
+ * After successful invocation, key_found for every item in the array is set
+ * to a value different than 0 if the current key was present in the table
+ * before the delete operation was performed and to 0 if not. This pointer
+ * has to be set to a valid memory location before the table entry delete
+ * function is called.
+ * @param entries
+ * If entries pointer is NULL, this pointer is ignored for every entry found.
+ * Else, after successful invocation, if specific key is found in the table
+ * (key_found is different than 0 for this item after function call is
+ * completed) and item of entry array points to a valid buffer (entry is set
+ * to a value different than NULL before the function is called), then the
+ * first entry_size bytes (table create parameter) in *entry store a copy of
+ * table entry that contained the data associated with the current key before
+ * the key was deleted.
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_table_op_entry_delete_bulk)(
+ void *table,
+ void **keys,
+ uint32_t n_keys,
+ int *key_found,
+ void **entries);
+
+/**
* Lookup table lookup
*
* @param table
@@ -186,13 +264,34 @@ typedef int (*rte_table_op_lookup)(
uint64_t *lookup_hit_mask,
void **entries);
+/**
+ * Lookup table stats read
+ *
+ * @param table
+ * Handle to lookup table instance
+ * @param stats
+ * Handle to table stats struct to copy data
+ * @param clear
+ * Flag indicating that stats should be cleared after read
+ *
+ * @return
+ * Error code or 0 on success.
+ */
+typedef int (*rte_table_op_stats_read)(
+ void *table,
+ struct rte_table_stats *stats,
+ int clear);
+
/** Lookup table interface defining the lookup table operation */
struct rte_table_ops {
- rte_table_op_create f_create; /**< Create */
- rte_table_op_free f_free; /**< Free */
- rte_table_op_entry_add f_add; /**< Entry add */
- rte_table_op_entry_delete f_delete; /**< Entry delete */
- rte_table_op_lookup f_lookup; /**< Lookup */
+ rte_table_op_create f_create; /**< Create */
+ rte_table_op_free f_free; /**< Free */
+ rte_table_op_entry_add f_add; /**< Entry add */
+ rte_table_op_entry_delete f_delete; /**< Entry delete */
+ rte_table_op_entry_add_bulk f_add_bulk; /**< Add entry bulk */
+ rte_table_op_entry_delete_bulk f_delete_bulk; /**< Delete entry bulk */
+ rte_table_op_lookup f_lookup; /**< Lookup */
+ rte_table_op_stats_read f_stats; /**< Stats */
};
#ifdef __cplusplus
diff --git a/src/dpdk_lib18/librte_table/rte_table_acl.h b/src/dpdk22/lib/librte_table/rte_table_acl.h
index a9cc0328..a9cc0328 100755..100644
--- a/src/dpdk_lib18/librte_table/rte_table_acl.h
+++ b/src/dpdk22/lib/librte_table/rte_table_acl.h
diff --git a/src/dpdk_lib18/librte_table/rte_table_array.h b/src/dpdk22/lib/librte_table/rte_table_array.h
index 9521119e..9521119e 100755..100644
--- a/src/dpdk_lib18/librte_table/rte_table_array.h
+++ b/src/dpdk22/lib/librte_table/rte_table_array.h
diff --git a/src/dpdk_lib18/librte_table/rte_table_hash.h b/src/dpdk22/lib/librte_table/rte_table_hash.h
index 9181942a..9d17516a 100755..100644
--- a/src/dpdk_lib18/librte_table/rte_table_hash.h
+++ b/src/dpdk22/lib/librte_table/rte_table_hash.h
@@ -196,6 +196,9 @@ struct rte_table_hash_key8_lru_params {
/** Byte offset within packet meta-data where the key is located */
uint32_t key_offset;
+
+ /** Bit-mask to be AND-ed to the key on lookup */
+ uint8_t *key_mask;
};
/** LRU hash table operations for pre-computed key signature */
@@ -226,6 +229,9 @@ struct rte_table_hash_key8_ext_params {
/** Byte offset within packet meta-data where the key is located */
uint32_t key_offset;
+
+ /** Bit-mask to be AND-ed to the key on lookup */
+ uint8_t *key_mask;
};
/** Extendible bucket hash table operations for pre-computed key signature */
@@ -257,11 +263,18 @@ struct rte_table_hash_key16_lru_params {
/** Byte offset within packet meta-data where the key is located */
uint32_t key_offset;
+
+ /** Bit-mask to be AND-ed to the key on lookup */
+ uint8_t *key_mask;
};
/** LRU hash table operations for pre-computed key signature */
extern struct rte_table_ops rte_table_hash_key16_lru_ops;
+/** LRU hash table operations for key signature computed on lookup
+ ("do-sig") */
+extern struct rte_table_ops rte_table_hash_key16_lru_dosig_ops;
+
/** Extendible bucket hash table parameters */
struct rte_table_hash_key16_ext_params {
/** Maximum number of entries (and keys) in the table */
@@ -284,11 +297,18 @@ struct rte_table_hash_key16_ext_params {
/** Byte offset within packet meta-data where the key is located */
uint32_t key_offset;
+
+ /** Bit-mask to be AND-ed to the key on lookup */
+ uint8_t *key_mask;
};
/** Extendible bucket operations for pre-computed key signature */
extern struct rte_table_ops rte_table_hash_key16_ext_ops;
+/** Extendible bucket hash table operations for key signature computed on
+ lookup ("do-sig") */
+extern struct rte_table_ops rte_table_hash_key16_ext_dosig_ops;
+
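A sketch of the new key_mask parameter on a 16-byte-key LRU table: the mask is AND-ed onto the key on lookup, so zero bytes act as don't-cares. The hash callback and the offsets are assumptions for illustration:

```c
#include <stdint.h>
#include <rte_table_hash.h>

/* Assumed app-provided hash callback (rte_table_hash_op_hash shape). */
static uint64_t my_hash_fn(void *key, uint32_t key_size, uint64_t seed);

static uint8_t mask16[16] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* match bytes 0-7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ignore bytes 8-15 */
};

static struct rte_table_hash_key16_lru_params params = {
	.n_entries = 1 << 16,
	.f_hash = my_hash_fn,
	.seed = 0,
	.signature_offset = 0, /* illustrative meta-data offsets */
	.key_offset = 32,
	.key_mask = mask16,    /* NULL keeps the old match-all behavior */
};
```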
/**
* 32-byte key hash tables
*
diff --git a/src/dpdk_lib18/librte_table/rte_table_lpm.h b/src/dpdk22/lib/librte_table/rte_table_lpm.h
index c08c9580..06e84102 100755..100644
--- a/src/dpdk_lib18/librte_table/rte_table_lpm.h
+++ b/src/dpdk22/lib/librte_table/rte_table_lpm.h
@@ -77,6 +77,9 @@ extern "C" {
/** LPM table parameters */
struct rte_table_lpm_params {
+ /** Table name */
+ const char *name;
+
/** Maximum number of LPM rules (i.e. IP routes) */
uint32_t n_rules;
diff --git a/src/dpdk_lib18/librte_table/rte_table_lpm_ipv6.h b/src/dpdk22/lib/librte_table/rte_table_lpm_ipv6.h
index 91fb0d8e..43aea399 100755..100644
--- a/src/dpdk_lib18/librte_table/rte_table_lpm_ipv6.h
+++ b/src/dpdk22/lib/librte_table/rte_table_lpm_ipv6.h
@@ -79,6 +79,9 @@ extern "C" {
/** LPM table parameters */
struct rte_table_lpm_ipv6_params {
+ /** Table name */
+ const char *name;
+
/** Maximum number of LPM rules (i.e. IP routes) */
uint32_t n_rules;
diff --git a/src/dpdk_lib18/librte_table/rte_table_stub.h b/src/dpdk22/lib/librte_table/rte_table_stub.h
index e75340b0..e75340b0 100755..100644
--- a/src/dpdk_lib18/librte_table/rte_table_stub.h
+++ b/src/dpdk22/lib/librte_table/rte_table_stub.h
diff --git a/src/dpdk_lib18/librte_acl/Makefile b/src/dpdk_lib18/librte_acl/Makefile
deleted file mode 100755
index 65e566df..00000000
--- a/src/dpdk_lib18/librte_acl/Makefile
+++ /dev/null
@@ -1,63 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_acl.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_ACL) += tb_mem.c
-
-SRCS-$(CONFIG_RTE_LIBRTE_ACL) += rte_acl.c
-SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_bld.c
-SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_gen.c
-SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_scalar.c
-SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_sse.c
-
-CFLAGS_acl_run_sse.o += -msse4.1
-
-# install this header file
-SYMLINK-$(CONFIG_RTE_LIBRTE_ACL)-include := rte_acl_osdep.h
-SYMLINK-$(CONFIG_RTE_LIBRTE_ACL)-include += rte_acl.h
-
-ifeq ($(CONFIG_RTE_LIBRTE_ACL_STANDALONE),y)
-# standalone build
-SYMLINK-$(CONFIG_RTE_LIBRTE_ACL)-include += rte_acl_osdep_alone.h
-else
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ACL) += lib/librte_eal lib/librte_malloc
-endif
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_acl/acl_bld.c b/src/dpdk_lib18/librte_acl/acl_bld.c
deleted file mode 100755
index d6e0c451..00000000
--- a/src/dpdk_lib18/librte_acl/acl_bld.c
+++ /dev/null
@@ -1,2008 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <rte_acl.h>
-#include "tb_mem.h"
-#include "acl.h"
-
-#define ACL_POOL_ALIGN 8
-#define ACL_POOL_ALLOC_MIN 0x800000
-
-/* number of pointers per alloc */
-#define ACL_PTR_ALLOC 32
-
-/* variable for dividing rule sets */
-#define NODE_MAX 2500
-#define NODE_PERCENTAGE (0.40)
-#define RULE_PERCENTAGE (0.40)
-
-/* TALLY are statistics per field */
-enum {
- TALLY_0 = 0, /* number of rules that are 0% or more wild. */
- TALLY_25, /* number of rules that are 25% or more wild. */
- TALLY_50,
- TALLY_75,
- TALLY_100,
- TALLY_DEACTIVATED, /* deactivated fields (100% wild in all rules). */
- TALLY_DEPTH,
- /* number of rules that are 100% wild for this field and higher. */
- TALLY_NUM
-};
-
-static const uint32_t wild_limits[TALLY_DEACTIVATED] = {0, 25, 50, 75, 100};
-
-enum {
- ACL_INTERSECT_NONE = 0,
-	ACL_INTERSECT_A = 1,	/* set A is a superset of the A/B intersection */
-	ACL_INTERSECT_B = 2,	/* set B is a superset of the A/B intersection */
- ACL_INTERSECT = 4, /* sets A and B intersect */
-};
-
-enum {
- ACL_PRIORITY_EQUAL = 0,
- ACL_PRIORITY_NODE_A = 1,
- ACL_PRIORITY_NODE_B = 2,
- ACL_PRIORITY_MIXED = 3
-};
-
-
-struct acl_mem_block {
- uint32_t block_size;
- void *mem_ptr;
-};
-
-#define MEM_BLOCK_NUM 16
-
-/* Single ACL rule, build representation. */
-struct rte_acl_build_rule {
- struct rte_acl_build_rule *next;
- struct rte_acl_config *config;
- /**< configuration for each field in the rule. */
- const struct rte_acl_rule *f;
- uint32_t *wildness;
-};
-
-/* Context for build phase */
-struct acl_build_context {
- const struct rte_acl_ctx *acx;
- struct rte_acl_build_rule *build_rules;
- struct rte_acl_config cfg;
- uint32_t node;
- uint32_t num_nodes;
- uint32_t category_mask;
- uint32_t num_rules;
- uint32_t node_id;
- uint32_t src_mask;
- uint32_t num_build_rules;
- uint32_t num_tries;
- struct tb_mem_pool pool;
- struct rte_acl_trie tries[RTE_ACL_MAX_TRIES];
- struct rte_acl_bld_trie bld_tries[RTE_ACL_MAX_TRIES];
- uint32_t data_indexes[RTE_ACL_MAX_TRIES][RTE_ACL_MAX_FIELDS];
-
- /* memory free lists for nodes and blocks used for node ptrs */
- struct acl_mem_block blocks[MEM_BLOCK_NUM];
- struct rte_acl_node *node_free_list;
-};
-
-static int acl_merge_trie(struct acl_build_context *context,
- struct rte_acl_node *node_a, struct rte_acl_node *node_b,
- uint32_t level, uint32_t subtree_id, struct rte_acl_node **node_c);
-
-static int acl_merge(struct acl_build_context *context,
- struct rte_acl_node *node_a, struct rte_acl_node *node_b,
- int move, int a_subset, int level);
-
-static void
-acl_deref_ptr(struct acl_build_context *context,
- struct rte_acl_node *node, int index);
-
-static void *
-acl_build_alloc(struct acl_build_context *context, size_t n, size_t s)
-{
- uint32_t m;
- void *p;
- size_t alloc_size = n * s;
-
- /*
- * look for memory in free lists
- */
- for (m = 0; m < RTE_DIM(context->blocks); m++) {
- if (context->blocks[m].block_size ==
- alloc_size && context->blocks[m].mem_ptr != NULL) {
- p = context->blocks[m].mem_ptr;
- context->blocks[m].mem_ptr = *((void **)p);
- memset(p, 0, alloc_size);
- return p;
- }
- }
-
- /*
- * return allocation from memory pool
- */
- p = tb_alloc(&context->pool, alloc_size);
- return p;
-}
-
-/*
- * Return a memory block to the context's free lists for reuse.
- */
-static void
-acl_build_free(struct acl_build_context *context, size_t s, void *p)
-{
- uint32_t n;
-
- for (n = 0; n < RTE_DIM(context->blocks); n++) {
- if (context->blocks[n].block_size == s) {
- *((void **)p) = context->blocks[n].mem_ptr;
- context->blocks[n].mem_ptr = p;
- return;
- }
- }
- for (n = 0; n < RTE_DIM(context->blocks); n++) {
- if (context->blocks[n].block_size == 0) {
- context->blocks[n].block_size = s;
- *((void **)p) = NULL;
- context->blocks[n].mem_ptr = p;
- return;
- }
- }
-}
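The two helpers above implement a size-bucketed free list that threads the next pointer through the freed block itself. A minimal standalone sketch of the same idea, with illustrative names only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* One bucket per distinct block size; a freed block's first word
 * links to the previous free block, as in acl_build_free(). */
struct bucket { size_t size; void *head; };

static void put_block(struct bucket *b, size_t size, void *p)
{
	*(void **)p = b->head;		/* chain through the block itself */
	b->size = size;
	b->head = p;
}

static void *get_block(struct bucket *b, size_t size)
{
	void *p = (b->size == size) ? b->head : NULL;

	if (p != NULL) {
		b->head = *(void **)p;	/* unlink */
		memset(p, 0, size);	/* hand back zeroed memory */
	}
	return p;
}

int main(void)
{
	struct bucket b = {0, NULL};
	void *p = malloc(64);

	put_block(&b, 64, p);
	printf("reused: %s\n", get_block(&b, 64) == p ? "yes" : "no");
	free(p);
	return 0;
}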
-
-/*
- * Allocate and initialize a new node.
- */
-static struct rte_acl_node *
-acl_alloc_node(struct acl_build_context *context, int level)
-{
- struct rte_acl_node *node;
-
- if (context->node_free_list != NULL) {
- node = context->node_free_list;
- context->node_free_list = node->next;
- memset(node, 0, sizeof(struct rte_acl_node));
- } else {
- node = acl_build_alloc(context, sizeof(struct rte_acl_node), 1);
- }
-
- if (node != NULL) {
- node->num_ptrs = 0;
- node->level = level;
- node->node_type = RTE_ACL_NODE_UNDEFINED;
- node->node_index = RTE_ACL_NODE_UNDEFINED;
- context->num_nodes++;
- node->id = context->node_id++;
- }
- return node;
-}
-
-/*
- * Dereference all nodes to which this node points
- */
-static void
-acl_free_node(struct acl_build_context *context,
- struct rte_acl_node *node)
-{
- uint32_t n;
-
- if (node->prev != NULL)
- node->prev->next = NULL;
- for (n = 0; n < node->num_ptrs; n++)
- acl_deref_ptr(context, node, n);
-
- /* free mrt if this is a match node */
- if (node->mrt != NULL) {
- acl_build_free(context, sizeof(struct rte_acl_match_results),
- node->mrt);
- node->mrt = NULL;
- }
-
- /* free transitions to other nodes */
- if (node->ptrs != NULL) {
- acl_build_free(context,
- node->max_ptrs * sizeof(struct rte_acl_ptr_set),
- node->ptrs);
- node->ptrs = NULL;
- }
-
- /* put it on the free list */
- context->num_nodes--;
- node->next = context->node_free_list;
- context->node_free_list = node;
-}
-
-
-/*
- * Include src bitset in dst bitset
- */
-static void
-acl_include(struct rte_acl_bitset *dst, struct rte_acl_bitset *src, bits_t mask)
-{
- uint32_t n;
-
- for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
- dst->bits[n] = (dst->bits[n] & mask) | src->bits[n];
-}
-
-/*
- * Set dst to bits of src1 that are not in src2
- */
-static int
-acl_exclude(struct rte_acl_bitset *dst,
- struct rte_acl_bitset *src1,
- struct rte_acl_bitset *src2)
-{
- uint32_t n;
- bits_t all_bits = 0;
-
- for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
- dst->bits[n] = src1->bits[n] & ~src2->bits[n];
- all_bits |= dst->bits[n];
- }
- return all_bits != 0;
-}
-
-/*
- * Add a pointer (ptr) to a node.
- */
-static int
-acl_add_ptr(struct acl_build_context *context,
- struct rte_acl_node *node,
- struct rte_acl_node *ptr,
- struct rte_acl_bitset *bits)
-{
- uint32_t n, num_ptrs;
- struct rte_acl_ptr_set *ptrs = NULL;
-
- /*
- * If there's already a pointer to the same node, just add to the bitset
- */
- for (n = 0; n < node->num_ptrs; n++) {
- if (node->ptrs[n].ptr != NULL) {
- if (node->ptrs[n].ptr == ptr) {
- acl_include(&node->ptrs[n].values, bits, -1);
- acl_include(&node->values, bits, -1);
- return 0;
- }
- }
- }
-
- /* if there's no room for another pointer, make room */
- if (node->num_ptrs >= node->max_ptrs) {
- /* add room for more pointers */
- num_ptrs = node->max_ptrs + ACL_PTR_ALLOC;
- ptrs = acl_build_alloc(context, num_ptrs, sizeof(*ptrs));
- if (ptrs == NULL)
- return -ENOMEM;
-
-		/* copy current pointers to new memory allocation */
- if (node->ptrs != NULL) {
- memcpy(ptrs, node->ptrs,
- node->num_ptrs * sizeof(*ptrs));
- acl_build_free(context, node->max_ptrs * sizeof(*ptrs),
- node->ptrs);
- }
- node->ptrs = ptrs;
- node->max_ptrs = num_ptrs;
- }
-
- /* Find available ptr and add a new pointer to this node */
- for (n = node->min_add; n < node->max_ptrs; n++) {
- if (node->ptrs[n].ptr == NULL) {
- node->ptrs[n].ptr = ptr;
- acl_include(&node->ptrs[n].values, bits, 0);
- acl_include(&node->values, bits, -1);
- if (ptr != NULL)
- ptr->ref_count++;
- if (node->num_ptrs <= n)
- node->num_ptrs = n + 1;
- return 0;
- }
- }
-
- return 0;
-}
-
-/*
- * Add a pointer for a range of values
- */
-static int
-acl_add_ptr_range(struct acl_build_context *context,
- struct rte_acl_node *root,
- struct rte_acl_node *node,
- uint8_t low,
- uint8_t high)
-{
- uint32_t n;
- struct rte_acl_bitset bitset;
-
- /* clear the bitset values */
- for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
- bitset.bits[n] = 0;
-
- /* for each bit in range, add bit to set */
- for (n = 0; n < UINT8_MAX + 1; n++)
- if (n >= low && n <= high)
- bitset.bits[n / (sizeof(bits_t) * 8)] |=
- 1 << (n % (sizeof(bits_t) * 8));
-
- return acl_add_ptr(context, root, node, &bitset);
-}
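As a worked example of the bit indexing above: with a 32-bit bits_t (the width is an assumption here; the real type lives in acl.h), byte value n lands in word n/32, bit n%32, so the range [10, 12] sets three bits in word 0:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t bits_t;	/* assumed width for this sketch */

int main(void)
{
	bits_t bits[256 / (sizeof(bits_t) * 8)] = {0};
	unsigned int n, low = 10, high = 12;

	/* same indexing math as acl_add_ptr_range() */
	for (n = low; n <= high; n++)
		bits[n / (sizeof(bits_t) * 8)] |=
		    (bits_t)1 << (n % (sizeof(bits_t) * 8));

	printf("word 0 = 0x%08x\n", bits[0]);	/* prints 0x00001c00 */
	return 0;
}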
-
-/*
- * Generate a bitset from a byte value and mask.
- */
-static int
-acl_gen_mask(struct rte_acl_bitset *bitset, uint32_t value, uint32_t mask)
-{
- int range = 0;
- uint32_t n;
-
- /* clear the bitset values */
- for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
- bitset->bits[n] = 0;
-
- /* for each bit in value/mask, add bit to set */
- for (n = 0; n < UINT8_MAX + 1; n++) {
- if ((n & mask) == value) {
- range++;
- bitset->bits[n / (sizeof(bits_t) * 8)] |=
- 1 << (n % (sizeof(bits_t) * 8));
- }
- }
- return range;
-}
-
-/*
- * Determine how A and B intersect.
- * Determine if A and/or B are supersets of the intersection.
- */
-static int
-acl_intersect_type(struct rte_acl_bitset *a_bits,
- struct rte_acl_bitset *b_bits,
- struct rte_acl_bitset *intersect)
-{
- uint32_t n;
- bits_t intersect_bits = 0;
- bits_t a_superset = 0;
- bits_t b_superset = 0;
-
- /*
- * calculate and store intersection and check if A and/or B have
- * bits outside the intersection (superset)
- */
- for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
- intersect->bits[n] = a_bits->bits[n] & b_bits->bits[n];
- a_superset |= a_bits->bits[n] ^ intersect->bits[n];
- b_superset |= b_bits->bits[n] ^ intersect->bits[n];
- intersect_bits |= intersect->bits[n];
- }
-
- n = (intersect_bits == 0 ? ACL_INTERSECT_NONE : ACL_INTERSECT) |
- (b_superset == 0 ? 0 : ACL_INTERSECT_B) |
- (a_superset == 0 ? 0 : ACL_INTERSECT_A);
-
- return n;
-}
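To make the returned flags concrete, here is a toy run with one-word sets, using the same XOR-against-intersection superset test and the flag values defined earlier in this file:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t a = 0x0f, b = 0x03;	/* b is contained in a */
	uint32_t inter = a & b;		/* 0x03 */
	int type = (inter ? 4 : 0)	/* ACL_INTERSECT */
	    | ((b ^ inter) ? 2 : 0)	/* ACL_INTERSECT_B */
	    | ((a ^ inter) ? 1 : 0);	/* ACL_INTERSECT_A */

	/* a has bits outside the intersection, b does not, so
	 * expect ACL_INTERSECT | ACL_INTERSECT_A == 5 */
	printf("type = %d\n", type);
	return 0;
}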
-
-/*
- * Check if all bits in the bitset are on
- */
-static int
-acl_full(struct rte_acl_node *node)
-{
- uint32_t n;
- bits_t all_bits = -1;
-
- for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
- all_bits &= node->values.bits[n];
- return all_bits == -1;
-}
-
-/*
- * Check if all bits in the bitset are off
- */
-static int
-acl_empty(struct rte_acl_node *node)
-{
- uint32_t n;
-
- if (node->ref_count == 0) {
- for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
- if (0 != node->values.bits[n])
- return 0;
- }
- return 1;
- } else {
- return 0;
- }
-}
-
-/*
- * Compute intersection of A and B
- * return 1 if there is an intersection else 0.
- */
-static int
-acl_intersect(struct rte_acl_bitset *a_bits,
- struct rte_acl_bitset *b_bits,
- struct rte_acl_bitset *intersect)
-{
- uint32_t n;
- bits_t all_bits = 0;
-
- for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
- intersect->bits[n] = a_bits->bits[n] & b_bits->bits[n];
- all_bits |= intersect->bits[n];
- }
- return all_bits != 0;
-}
-
-/*
- * Duplicate a node
- */
-static struct rte_acl_node *
-acl_dup_node(struct acl_build_context *context, struct rte_acl_node *node)
-{
- uint32_t n;
- struct rte_acl_node *next;
-
- next = acl_alloc_node(context, node->level);
- if (next == NULL)
- return NULL;
-
- /* allocate the pointers */
- if (node->num_ptrs > 0) {
- next->ptrs = acl_build_alloc(context,
- node->max_ptrs,
- sizeof(struct rte_acl_ptr_set));
- if (next->ptrs == NULL)
- return NULL;
- next->max_ptrs = node->max_ptrs;
- }
-
- /* copy over the pointers */
- for (n = 0; n < node->num_ptrs; n++) {
- if (node->ptrs[n].ptr != NULL) {
- next->ptrs[n].ptr = node->ptrs[n].ptr;
- next->ptrs[n].ptr->ref_count++;
- acl_include(&next->ptrs[n].values,
- &node->ptrs[n].values, -1);
- }
- }
-
- next->num_ptrs = node->num_ptrs;
-
- /* copy over node's match results */
- if (node->match_flag == 0)
- next->match_flag = 0;
- else {
- next->match_flag = -1;
- next->mrt = acl_build_alloc(context, 1, sizeof(*next->mrt));
- memcpy(next->mrt, node->mrt, sizeof(*next->mrt));
- }
-
- /* copy over node's bitset */
- acl_include(&next->values, &node->values, -1);
-
- node->next = next;
- next->prev = node;
-
- return next;
-}
-
-/*
- * Dereference a pointer from a node
- */
-static void
-acl_deref_ptr(struct acl_build_context *context,
- struct rte_acl_node *node, int index)
-{
- struct rte_acl_node *ref_node;
-
- /* De-reference the node at the specified pointer */
- if (node != NULL && node->ptrs[index].ptr != NULL) {
- ref_node = node->ptrs[index].ptr;
- ref_node->ref_count--;
- if (ref_node->ref_count == 0)
- acl_free_node(context, ref_node);
- }
-}
-
-/*
- * Exclude bitset from a node pointer
- * returns 0 if the pointer was deref'd,
- * 1 otherwise.
- */
-static int
-acl_exclude_ptr(struct acl_build_context *context,
- struct rte_acl_node *node,
- int index,
- struct rte_acl_bitset *b_bits)
-{
- int retval = 1;
-
- /*
- * remove bitset from node pointer and deref
- * if the bitset becomes empty.
- */
- if (!acl_exclude(&node->ptrs[index].values,
- &node->ptrs[index].values,
- b_bits)) {
- acl_deref_ptr(context, node, index);
- node->ptrs[index].ptr = NULL;
- retval = 0;
- }
-
- /* exclude bits from the composite bits for the node */
- acl_exclude(&node->values, &node->values, b_bits);
- return retval;
-}
-
-/*
- * Remove a bitset from src ptr and move remaining ptr to dst
- */
-static int
-acl_move_ptr(struct acl_build_context *context,
- struct rte_acl_node *dst,
- struct rte_acl_node *src,
- int index,
- struct rte_acl_bitset *b_bits)
-{
- int rc;
-
- if (b_bits != NULL)
- if (!acl_exclude_ptr(context, src, index, b_bits))
- return 0;
-
- /* add src pointer to dst node */
- rc = acl_add_ptr(context, dst, src->ptrs[index].ptr,
- &src->ptrs[index].values);
- if (rc < 0)
- return rc;
-
- /* remove ptr from src */
- acl_exclude_ptr(context, src, index, &src->ptrs[index].values);
- return 1;
-}
-
-/*
- * Exclude the given rte_acl_bitset from src and copy the remaining pointer to dst.
- */
-static int
-acl_copy_ptr(struct acl_build_context *context,
- struct rte_acl_node *dst,
- struct rte_acl_node *src,
- int index,
- struct rte_acl_bitset *b_bits)
-{
- int rc;
- struct rte_acl_bitset bits;
-
- if (b_bits != NULL)
- if (!acl_exclude(&bits, &src->ptrs[index].values, b_bits))
- return 0;
-
- rc = acl_add_ptr(context, dst, src->ptrs[index].ptr, &bits);
- if (rc < 0)
- return rc;
- return 1;
-}
-
-/*
- * Fill in gaps in ptrs list with the ptr at the end of the list
- */
-static void
-acl_compact_node_ptrs(struct rte_acl_node *node_a)
-{
- uint32_t n;
- int min_add = node_a->min_add;
-
- while (node_a->num_ptrs > 0 &&
- node_a->ptrs[node_a->num_ptrs - 1].ptr == NULL)
- node_a->num_ptrs--;
-
- for (n = min_add; n + 1 < node_a->num_ptrs; n++) {
-
- /* if this entry is empty */
- if (node_a->ptrs[n].ptr == NULL) {
-
- /* move the last pointer to this entry */
- acl_include(&node_a->ptrs[n].values,
- &node_a->ptrs[node_a->num_ptrs - 1].values,
- 0);
- node_a->ptrs[n].ptr =
- node_a->ptrs[node_a->num_ptrs - 1].ptr;
-
- /*
- * mark the end as empty and adjust the number
-			 * of used pointer entries
- */
- node_a->ptrs[node_a->num_ptrs - 1].ptr = NULL;
- while (node_a->num_ptrs > 0 &&
- node_a->ptrs[node_a->num_ptrs - 1].ptr == NULL)
- node_a->num_ptrs--;
- }
- }
-}
-
-/*
- * acl_merge helper routine.
- */
-static int
-acl_merge_intersect(struct acl_build_context *context,
- struct rte_acl_node *node_a, uint32_t idx_a,
- struct rte_acl_node *node_b, uint32_t idx_b,
- int next_move, int level,
- struct rte_acl_bitset *intersect_ptr)
-{
- struct rte_acl_node *node_c;
-
- /* Duplicate A for intersection */
- node_c = acl_dup_node(context, node_a->ptrs[idx_a].ptr);
- if (node_c == NULL)
- return -1;
-
- /* Remove intersection from A */
- acl_exclude_ptr(context, node_a, idx_a, intersect_ptr);
-
- /*
-		 * Add a link from A to C for all transitions
- * in the intersection
- */
- if (acl_add_ptr(context, node_a, node_c, intersect_ptr) < 0)
- return -1;
-
- /* merge B->node into C */
- return acl_merge(context, node_c, node_b->ptrs[idx_b].ptr, next_move,
- 0, level + 1);
-}
-
-
-/*
- * Merge the children of nodes A and B together.
- *
- * if match node
- * For each category
- * node A result = highest priority result
- * if any pointers in A intersect with any in B
- * For each intersection
- * C = copy of node that A points to
- * remove intersection from A pointer
- * add a pointer to A that points to C for the intersection
- * Merge C and node that B points to
- * Compact the pointers in A and B
- * if move flag
- * If B has only one reference
- * Move B pointers to A
- * else
- * Copy B pointers to A
- */
-static int
-acl_merge(struct acl_build_context *context,
- struct rte_acl_node *node_a, struct rte_acl_node *node_b,
- int move, int a_subset, int level)
-{
- uint32_t n, m, ptrs_a, ptrs_b;
- uint32_t min_add_a, min_add_b;
- int intersect_type;
- int node_intersect_type;
- int b_full, next_move, rc;
- struct rte_acl_bitset intersect_values;
- struct rte_acl_bitset intersect_ptr;
-
- min_add_a = 0;
- min_add_b = 0;
- intersect_type = 0;
- node_intersect_type = 0;
-
- if (level == 0)
- a_subset = 1;
-
- /*
- * Resolve match priorities
- */
- if (node_a->match_flag != 0 || node_b->match_flag != 0) {
-
- if (node_a->match_flag == 0 || node_b->match_flag == 0)
- RTE_LOG(ERR, ACL, "Not both matches\n");
-
- if (node_b->match_flag < node_a->match_flag)
- RTE_LOG(ERR, ACL, "Not same match\n");
-
- for (n = 0; n < context->cfg.num_categories; n++) {
- if (node_a->mrt->priority[n] <
- node_b->mrt->priority[n]) {
- node_a->mrt->priority[n] =
- node_b->mrt->priority[n];
- node_a->mrt->results[n] =
- node_b->mrt->results[n];
- }
- }
- }
-
- /*
- * If the two node transitions intersect then merge the transitions.
- * Check intersection for entire node (all pointers)
- */
- node_intersect_type = acl_intersect_type(&node_a->values,
- &node_b->values,
- &intersect_values);
-
- if (node_intersect_type & ACL_INTERSECT) {
-
- b_full = acl_full(node_b);
-
- min_add_b = node_b->min_add;
- node_b->min_add = node_b->num_ptrs;
- ptrs_b = node_b->num_ptrs;
-
- min_add_a = node_a->min_add;
- node_a->min_add = node_a->num_ptrs;
- ptrs_a = node_a->num_ptrs;
-
- for (n = 0; n < ptrs_a; n++) {
- for (m = 0; m < ptrs_b; m++) {
-
- if (node_a->ptrs[n].ptr == NULL ||
- node_b->ptrs[m].ptr == NULL ||
- node_a->ptrs[n].ptr ==
- node_b->ptrs[m].ptr)
- continue;
-
- intersect_type = acl_intersect_type(
- &node_a->ptrs[n].values,
- &node_b->ptrs[m].values,
- &intersect_ptr);
-
- /* If this node is not a 'match' node */
- if ((intersect_type & ACL_INTERSECT) &&
- (context->cfg.num_categories != 1 ||
- !(node_a->ptrs[n].ptr->match_flag))) {
-
- /*
-					 * the next merge is a 'move' if
-					 * this one is and B is a subset
-					 * of the intersection.
- */
- next_move = move &&
- (intersect_type &
- ACL_INTERSECT_B) == 0;
-
- if (a_subset && b_full) {
- rc = acl_merge(context,
- node_a->ptrs[n].ptr,
- node_b->ptrs[m].ptr,
- next_move,
- 1, level + 1);
- if (rc != 0)
- return rc;
- } else {
- rc = acl_merge_intersect(
- context, node_a, n,
- node_b, m, next_move,
- level, &intersect_ptr);
- if (rc != 0)
- return rc;
- }
- }
- }
- }
- }
-
- /* Compact pointers */
- node_a->min_add = min_add_a;
- acl_compact_node_ptrs(node_a);
- node_b->min_add = min_add_b;
- acl_compact_node_ptrs(node_b);
-
- /*
- * Either COPY or MOVE pointers from B to A
- */
- acl_intersect(&node_a->values, &node_b->values, &intersect_values);
-
- if (move && node_b->ref_count == 1) {
- for (m = 0; m < node_b->num_ptrs; m++) {
- if (node_b->ptrs[m].ptr != NULL &&
- acl_move_ptr(context, node_a, node_b, m,
- &intersect_values) < 0)
- return -1;
- }
- } else {
- for (m = 0; m < node_b->num_ptrs; m++) {
- if (node_b->ptrs[m].ptr != NULL &&
- acl_copy_ptr(context, node_a, node_b, m,
- &intersect_values) < 0)
- return -1;
- }
- }
-
- /*
-	 * Free the node if it is empty (no longer used)
- */
- if (acl_empty(node_b))
- acl_free_node(context, node_b);
- return 0;
-}
-
-static int
-acl_resolve_leaf(struct acl_build_context *context,
- struct rte_acl_node *node_a,
- struct rte_acl_node *node_b,
- struct rte_acl_node **node_c)
-{
- uint32_t n;
- int combined_priority = ACL_PRIORITY_EQUAL;
-
- for (n = 0; n < context->cfg.num_categories; n++) {
- if (node_a->mrt->priority[n] != node_b->mrt->priority[n]) {
- combined_priority |= (node_a->mrt->priority[n] >
- node_b->mrt->priority[n]) ?
- ACL_PRIORITY_NODE_A : ACL_PRIORITY_NODE_B;
- }
- }
-
- /*
- * if node a is higher or equal priority for all categories,
- * then return node_a.
- */
- if (combined_priority == ACL_PRIORITY_NODE_A ||
- combined_priority == ACL_PRIORITY_EQUAL) {
- *node_c = node_a;
- return 0;
- }
-
- /*
- * if node b is higher or equal priority for all categories,
- * then return node_b.
- */
- if (combined_priority == ACL_PRIORITY_NODE_B) {
- *node_c = node_b;
- return 0;
- }
-
- /*
- * mixed priorities - create a new node with the highest priority
- * for each category.
- */
-
- /* force new duplication. */
- node_a->next = NULL;
-
- *node_c = acl_dup_node(context, node_a);
- for (n = 0; n < context->cfg.num_categories; n++) {
- if ((*node_c)->mrt->priority[n] < node_b->mrt->priority[n]) {
- (*node_c)->mrt->priority[n] = node_b->mrt->priority[n];
- (*node_c)->mrt->results[n] = node_b->mrt->results[n];
- }
- }
- return 0;
-}
-
-/*
-* Within the existing trie structure, determine which nodes are
-* part of the subtree of the trie to be merged.
-*
-* For these purposes, a subtree is defined as the set of nodes that
-* are 1) not a superset of the intersection with the same level of
-* the merging tree, and 2) do not have any references from a node
-* outside of the subtree.
-*/
-static void
-mark_subtree(struct rte_acl_node *node,
- struct rte_acl_bitset *level_bits,
- uint32_t level,
- uint32_t id)
-{
- uint32_t n;
-
- /* mark this node as part of the subtree */
- node->subtree_id = id | RTE_ACL_SUBTREE_NODE;
-
- for (n = 0; n < node->num_ptrs; n++) {
-
- if (node->ptrs[n].ptr != NULL) {
-
- struct rte_acl_bitset intersect_bits;
- int intersect;
-
- /*
- * Item 1) :
- * check if this child pointer is not a superset of the
- * same level of the merging tree.
- */
- intersect = acl_intersect_type(&node->ptrs[n].values,
- &level_bits[level],
- &intersect_bits);
-
- if ((intersect & ACL_INTERSECT_A) == 0) {
-
- struct rte_acl_node *child = node->ptrs[n].ptr;
-
- /*
- * reset subtree reference if this is
- * the first visit by this subtree.
- */
- if (child->subtree_id != id) {
- child->subtree_id = id;
- child->subtree_ref_count = 0;
- }
-
- /*
- * Item 2) :
- * increment the subtree reference count and if
- * all references are from this subtree then
- * recurse to that child
- */
- child->subtree_ref_count++;
- if (child->subtree_ref_count ==
- child->ref_count)
- mark_subtree(child, level_bits,
- level + 1, id);
- }
- }
- }
-}
-
-/*
- * Build the set of bits that define the set of transitions
- * for each level of a trie.
- */
-static void
-build_subset_mask(struct rte_acl_node *node,
- struct rte_acl_bitset *level_bits,
- int level)
-{
- uint32_t n;
-
- /* Add this node's transitions to the set for this level */
- for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
- level_bits[level].bits[n] &= node->values.bits[n];
-
- /* For each child, add the transitions for the next level */
- for (n = 0; n < node->num_ptrs; n++)
- if (node->ptrs[n].ptr != NULL)
- build_subset_mask(node->ptrs[n].ptr, level_bits,
- level + 1);
-}
-
-
-/*
- * Merge nodes A and B together,
- * returns a node that is the path for the intersection
- *
- * If match node (leaf on trie)
- * For each category
- * return node = highest priority result
- *
- * Create C as a duplicate of A to point to child intersections
- * If any pointers in C intersect with any in B
- * For each intersection
- * merge children
- * remove intersection from C pointer
- * add a pointer from C to child intersection node
- * Compact the pointers in A and B
- * Copy any B pointers that are outside of the intersection to C
- * If C has no references to the B trie
- * free C and return A
- * Else If C has no references to the A trie
- * free C and return B
- * Else
- * return C
- */
-static int
-acl_merge_trie(struct acl_build_context *context,
- struct rte_acl_node *node_a, struct rte_acl_node *node_b,
- uint32_t level, uint32_t subtree_id, struct rte_acl_node **return_c)
-{
- uint32_t n, m, ptrs_c, ptrs_b;
- uint32_t min_add_c, min_add_b;
- int node_intersect_type;
- struct rte_acl_bitset node_intersect;
- struct rte_acl_node *node_c;
- struct rte_acl_node *node_a_next;
- int node_b_refs;
- int node_a_refs;
-
- node_c = node_a;
- node_a_next = node_a->next;
- min_add_c = 0;
- min_add_b = 0;
- node_a_refs = node_a->num_ptrs;
- node_b_refs = 0;
- node_intersect_type = 0;
-
- /* Resolve leaf nodes (matches) */
- if (node_a->match_flag != 0) {
- acl_resolve_leaf(context, node_a, node_b, return_c);
- return 0;
- }
-
- /*
- * Create node C as a copy of node A if node A is not part of
- * a subtree of the merging tree (node B side). Otherwise,
- * just use node A.
- */
- if (level > 0 &&
- node_a->subtree_id !=
- (subtree_id | RTE_ACL_SUBTREE_NODE)) {
- node_c = acl_dup_node(context, node_a);
- node_c->subtree_id = subtree_id | RTE_ACL_SUBTREE_NODE;
- }
-
- /*
- * If the two node transitions intersect then merge the transitions.
- * Check intersection for entire node (all pointers)
- */
- node_intersect_type = acl_intersect_type(&node_c->values,
- &node_b->values,
- &node_intersect);
-
- if (node_intersect_type & ACL_INTERSECT) {
-
- min_add_b = node_b->min_add;
- node_b->min_add = node_b->num_ptrs;
- ptrs_b = node_b->num_ptrs;
-
- min_add_c = node_c->min_add;
- node_c->min_add = node_c->num_ptrs;
- ptrs_c = node_c->num_ptrs;
-
- for (n = 0; n < ptrs_c; n++) {
- if (node_c->ptrs[n].ptr == NULL) {
- node_a_refs--;
- continue;
- }
- node_c->ptrs[n].ptr->next = NULL;
- for (m = 0; m < ptrs_b; m++) {
-
- struct rte_acl_bitset child_intersect;
- int child_intersect_type;
- struct rte_acl_node *child_node_c = NULL;
-
- if (node_b->ptrs[m].ptr == NULL ||
- node_c->ptrs[n].ptr ==
- node_b->ptrs[m].ptr)
- continue;
-
- child_intersect_type = acl_intersect_type(
- &node_c->ptrs[n].values,
- &node_b->ptrs[m].values,
- &child_intersect);
-
- if ((child_intersect_type & ACL_INTERSECT) !=
- 0) {
- if (acl_merge_trie(context,
- node_c->ptrs[n].ptr,
- node_b->ptrs[m].ptr,
- level + 1, subtree_id,
- &child_node_c))
- return 1;
-
- if (child_node_c != NULL &&
- child_node_c !=
- node_c->ptrs[n].ptr) {
-
- node_b_refs++;
-
- /*
-					 * Add a link from C to
- * child_C for all transitions
- * in the intersection.
- */
- acl_add_ptr(context, node_c,
- child_node_c,
- &child_intersect);
-
- /*
- * inc refs if pointer is not
- * to node b.
- */
- node_a_refs += (child_node_c !=
- node_b->ptrs[m].ptr);
-
- /*
- * Remove intersection from C
- * pointer.
- */
- if (!acl_exclude(
- &node_c->ptrs[n].values,
- &node_c->ptrs[n].values,
- &child_intersect)) {
- acl_deref_ptr(context,
- node_c, n);
- node_c->ptrs[n].ptr =
- NULL;
- node_a_refs--;
- }
- }
- }
- }
- }
-
- /* Compact pointers */
- node_c->min_add = min_add_c;
- acl_compact_node_ptrs(node_c);
- node_b->min_add = min_add_b;
- acl_compact_node_ptrs(node_b);
- }
-
- /*
- * Copy pointers outside of the intersection from B to C
- */
- if ((node_intersect_type & ACL_INTERSECT_B) != 0) {
- node_b_refs++;
- for (m = 0; m < node_b->num_ptrs; m++)
- if (node_b->ptrs[m].ptr != NULL)
- acl_copy_ptr(context, node_c,
- node_b, m, &node_intersect);
- }
-
- /*
-	 * Free node C if the top of the trie is contained in A or B,
-	 * i.e. if node C is a duplicate of node A and
-	 * was not an existing duplicate.
- */
- if (node_c != node_a && node_c != node_a_next) {
-
- /*
- * if the intersection has no references to the
- * B side, then it is contained in A
- */
- if (node_b_refs == 0) {
- acl_free_node(context, node_c);
- node_c = node_a;
- } else {
- /*
- * if the intersection has no references to the
- * A side, then it is contained in B.
- */
- if (node_a_refs == 0) {
- acl_free_node(context, node_c);
- node_c = node_b;
- }
- }
- }
-
- if (return_c != NULL)
- *return_c = node_c;
-
- if (level == 0)
- acl_free_node(context, node_b);
-
- return 0;
-}
-
-/*
- * Reset current runtime fields before next build:
- * - free allocated RT memory.
- * - reset all RT related fields to zero.
- */
-static void
-acl_build_reset(struct rte_acl_ctx *ctx)
-{
- rte_free(ctx->mem);
- memset(&ctx->num_categories, 0,
- sizeof(*ctx) - offsetof(struct rte_acl_ctx, num_categories));
-}
-
-static void
-acl_gen_range(struct acl_build_context *context,
- const uint8_t *hi, const uint8_t *lo, int size, int level,
- struct rte_acl_node *root, struct rte_acl_node *end)
-{
- struct rte_acl_node *node, *prev;
- uint32_t n;
-
- prev = root;
- for (n = size - 1; n > 0; n--) {
- node = acl_alloc_node(context, level++);
- acl_add_ptr_range(context, prev, node, lo[n], hi[n]);
- prev = node;
- }
- acl_add_ptr_range(context, prev, end, lo[0], hi[0]);
-}
-
-static struct rte_acl_node *
-acl_gen_range_trie(struct acl_build_context *context,
- const void *min, const void *max,
- int size, int level, struct rte_acl_node **pend)
-{
- int32_t n;
- struct rte_acl_node *root;
- const uint8_t *lo = (const uint8_t *)min;
- const uint8_t *hi = (const uint8_t *)max;
-
- *pend = acl_alloc_node(context, level+size);
- root = acl_alloc_node(context, level++);
-
- if (lo[size - 1] == hi[size - 1]) {
- acl_gen_range(context, hi, lo, size, level, root, *pend);
- } else {
- uint8_t limit_lo[64];
- uint8_t limit_hi[64];
- uint8_t hi_ff = UINT8_MAX;
- uint8_t lo_00 = 0;
-
- memset(limit_lo, 0, RTE_DIM(limit_lo));
- memset(limit_hi, UINT8_MAX, RTE_DIM(limit_hi));
-
- for (n = size - 2; n >= 0; n--) {
- hi_ff = (uint8_t)(hi_ff & hi[n]);
- lo_00 = (uint8_t)(lo_00 | lo[n]);
- }
-
- if (hi_ff != UINT8_MAX) {
- limit_lo[size - 1] = hi[size - 1];
- acl_gen_range(context, hi, limit_lo, size, level,
- root, *pend);
- }
-
- if (lo_00 != 0) {
- limit_hi[size - 1] = lo[size - 1];
- acl_gen_range(context, limit_hi, lo, size, level,
- root, *pend);
- }
-
- if (hi[size - 1] - lo[size - 1] > 1 ||
- lo_00 == 0 ||
- hi_ff == UINT8_MAX) {
- limit_lo[size-1] = (uint8_t)(lo[size-1] + (lo_00 != 0));
- limit_hi[size-1] = (uint8_t)(hi[size-1] -
- (hi_ff != UINT8_MAX));
- acl_gen_range(context, limit_hi, limit_lo, size,
- level, root, *pend);
- }
- }
- return root;
-}
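A worked example of the split performed above, assuming the least significant byte sits at index 0 as the code indexes it: for a 2-byte range 0x1234..0x5678, the function emits a low edge, a high edge, and a full middle span. A small sketch reproducing just the split decisions:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t lo[2] = {0x34, 0x12}, hi[2] = {0x78, 0x56};
	uint8_t hi_ff = hi[0];	/* AND of the lower bytes of hi */
	uint8_t lo_00 = lo[0];	/* OR of the lower bytes of lo */

	if (hi_ff != 0xff)	/* high edge */
		printf("edge:   %02x00..%02x%02x\n", hi[1], hi[1], hi[0]);
	if (lo_00 != 0)		/* low edge */
		printf("edge:   %02x%02x..%02xff\n", lo[1], lo[0], lo[1]);
	/* full middle span between the two edges */
	printf("middle: %02x00..%02xff\n",
	    lo[1] + (lo_00 != 0), hi[1] - (hi_ff != 0xff));
	return 0;
}

This prints 0x5600..0x5678, 0x1234..0x12ff and 0x1300..0x55ff, which together cover the original range exactly once.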
-
-static struct rte_acl_node *
-acl_gen_mask_trie(struct acl_build_context *context,
- const void *value, const void *mask,
- int size, int level, struct rte_acl_node **pend)
-{
- int32_t n;
- struct rte_acl_node *root;
- struct rte_acl_node *node, *prev;
- struct rte_acl_bitset bits;
- const uint8_t *val = (const uint8_t *)value;
- const uint8_t *msk = (const uint8_t *)mask;
-
- root = acl_alloc_node(context, level++);
- prev = root;
-
- for (n = size - 1; n >= 0; n--) {
- node = acl_alloc_node(context, level++);
- acl_gen_mask(&bits, val[n] & msk[n], msk[n]);
- acl_add_ptr(context, prev, node, &bits);
- prev = node;
- }
-
- *pend = prev;
- return root;
-}
-
-static struct rte_acl_node *
-build_trie(struct acl_build_context *context, struct rte_acl_build_rule *head,
- struct rte_acl_build_rule **last, uint32_t *count)
-{
- uint32_t n, m;
- int field_index, node_count;
- struct rte_acl_node *trie;
- struct rte_acl_build_rule *prev, *rule;
- struct rte_acl_node *end, *merge, *root, *end_prev;
- const struct rte_acl_field *fld;
- struct rte_acl_bitset level_bits[RTE_ACL_MAX_LEVELS];
-
- prev = head;
- rule = head;
-
- trie = acl_alloc_node(context, 0);
- if (trie == NULL)
- return NULL;
-
- while (rule != NULL) {
-
- root = acl_alloc_node(context, 0);
- if (root == NULL)
- return NULL;
-
- root->ref_count = 1;
- end = root;
-
- for (n = 0; n < rule->config->num_fields; n++) {
-
- field_index = rule->config->defs[n].field_index;
- fld = rule->f->field + field_index;
- end_prev = end;
-
- /* build a mini-trie for this field */
- switch (rule->config->defs[n].type) {
-
- case RTE_ACL_FIELD_TYPE_BITMASK:
- merge = acl_gen_mask_trie(context,
- &fld->value,
- &fld->mask_range,
- rule->config->defs[n].size,
- end->level + 1,
- &end);
- break;
-
- case RTE_ACL_FIELD_TYPE_MASK:
- {
- /*
-				 * build a mask with the top mask_range
-				 * bits of the field set.
- */
- uint64_t mask;
-
- if (fld->mask_range.u32 == 0) {
- mask = 0;
-
- /*
-				 * shift -1 left by the field width in
-				 * bits less the mask length.
- */
- } else {
-					mask = (uint64_t)-1 <<
- (rule->config->defs[n].size *
- CHAR_BIT - fld->mask_range.u32);
- }
-
- /* gen a mini-trie for this field */
- merge = acl_gen_mask_trie(context,
- &fld->value,
- (char *)&mask,
- rule->config->defs[n].size,
- end->level + 1,
- &end);
- }
- break;
-
- case RTE_ACL_FIELD_TYPE_RANGE:
- merge = acl_gen_range_trie(context,
- &rule->f->field[field_index].value,
- &rule->f->field[field_index].mask_range,
- rule->config->defs[n].size,
- end->level + 1,
- &end);
- break;
-
- default:
- RTE_LOG(ERR, ACL,
- "Error in rule[%u] type - %hhu\n",
- rule->f->data.userdata,
- rule->config->defs[n].type);
- return NULL;
- }
-
- /* merge this field on to the end of the rule */
- if (acl_merge_trie(context, end_prev, merge, 0,
- 0, NULL) != 0) {
- return NULL;
- }
- }
-
- end->match_flag = ++context->num_build_rules;
-
- /*
- * Setup the results for this rule.
- * The result and priority of each category.
- */
- if (end->mrt == NULL &&
- (end->mrt = acl_build_alloc(context, 1,
- sizeof(*end->mrt))) == NULL)
- return NULL;
-
- for (m = 0; m < context->cfg.num_categories; m++) {
- if (rule->f->data.category_mask & (1 << m)) {
- end->mrt->results[m] = rule->f->data.userdata;
- end->mrt->priority[m] = rule->f->data.priority;
- } else {
- end->mrt->results[m] = 0;
- end->mrt->priority[m] = 0;
- }
- }
-
- node_count = context->num_nodes;
-
- memset(&level_bits[0], UINT8_MAX, sizeof(level_bits));
- build_subset_mask(root, &level_bits[0], 0);
- mark_subtree(trie, &level_bits[0], 0, end->match_flag);
- (*count)++;
-
- /* merge this rule into the trie */
- if (acl_merge_trie(context, trie, root, 0, end->match_flag,
- NULL))
- return NULL;
-
- node_count = context->num_nodes - node_count;
- if (node_count > NODE_MAX) {
- *last = prev;
- return trie;
- }
-
- prev = rule;
- rule = rule->next;
- }
-
- *last = NULL;
- return trie;
-}
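A quick check of the RTE_ACL_FIELD_TYPE_MASK arithmetic used inside build_trie() above: a 4-byte field with prefix length 24 should yield a byte-wise mask of 0xffffff00.

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

int main(void)
{
	uint32_t size = 4, masklen = 24;	/* e.g. an IPv4 /24 */
	uint64_t mask = (uint64_t)-1 << (size * CHAR_BIT - masklen);

	/* only the low size * CHAR_BIT bits feed the mini-trie */
	printf("mask = 0x%08x\n", (uint32_t)mask);	/* 0xffffff00 */
	return 0;
}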
-
-static int
-acl_calc_wildness(struct rte_acl_build_rule *head,
- const struct rte_acl_config *config)
-{
- uint32_t n;
- struct rte_acl_build_rule *rule;
-
- for (rule = head; rule != NULL; rule = rule->next) {
-
- for (n = 0; n < config->num_fields; n++) {
-
- double wild = 0;
- double size = CHAR_BIT * config->defs[n].size;
- int field_index = config->defs[n].field_index;
- const struct rte_acl_field *fld = rule->f->field +
- field_index;
-
- switch (rule->config->defs[n].type) {
- case RTE_ACL_FIELD_TYPE_BITMASK:
- wild = (size - __builtin_popcount(
- fld->mask_range.u8)) /
- size;
- break;
-
- case RTE_ACL_FIELD_TYPE_MASK:
- wild = (size - fld->mask_range.u32) / size;
- break;
-
- case RTE_ACL_FIELD_TYPE_RANGE:
- switch (rule->config->defs[n].size) {
- case sizeof(uint8_t):
- wild = ((double)fld->mask_range.u8 -
- fld->value.u8) / UINT8_MAX;
- break;
- case sizeof(uint16_t):
- wild = ((double)fld->mask_range.u16 -
- fld->value.u16) / UINT16_MAX;
- break;
- case sizeof(uint32_t):
- wild = ((double)fld->mask_range.u32 -
- fld->value.u32) / UINT32_MAX;
- break;
- case sizeof(uint64_t):
- wild = ((double)fld->mask_range.u64 -
- fld->value.u64) / UINT64_MAX;
- break;
- default:
- RTE_LOG(ERR, ACL,
- "%s(rule: %u) invalid %u-th "
- "field, type: %hhu, "
- "unknown size: %hhu\n",
- __func__,
- rule->f->data.userdata,
- n,
- rule->config->defs[n].type,
- rule->config->defs[n].size);
- return -EINVAL;
- }
- break;
-
- default:
- RTE_LOG(ERR, ACL,
- "%s(rule: %u) invalid %u-th "
- "field, unknown type: %hhu\n",
- __func__,
- rule->f->data.userdata,
- n,
- rule->config->defs[n].type);
- return -EINVAL;
-
- }
-
- rule->wildness[field_index] = (uint32_t)(wild * 100);
- }
- }
-
- return 0;
-}
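To illustrate the wildness metric just computed: a 4-byte MASK field with prefix length 24 is (32 - 24) / 32 = 25% wild, while a u8 RANGE field spanning 0..255 is 100% wild. A minimal check:

#include <stdio.h>

int main(void)
{
	double size = 32.0;				/* 4-byte field, in bits */
	double mask_wild = (size - 24) / size;		/* /24 prefix -> 0.25 */
	double range_wild = (255.0 - 0.0) / 255.0;	/* full u8 range -> 1.0 */

	printf("mask: %u%%, range: %u%%\n",
	    (unsigned int)(mask_wild * 100),
	    (unsigned int)(range_wild * 100));
	return 0;
}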
-
-static int
-acl_rule_stats(struct rte_acl_build_rule *head, struct rte_acl_config *config,
- uint32_t *wild_limit)
-{
- int min;
- struct rte_acl_build_rule *rule;
- uint32_t n, m, fields_deactivated = 0;
- uint32_t start = 0, deactivate = 0;
- int tally[RTE_ACL_MAX_LEVELS][TALLY_NUM];
-
- memset(tally, 0, sizeof(tally));
-
- for (rule = head; rule != NULL; rule = rule->next) {
-
- for (n = 0; n < config->num_fields; n++) {
- uint32_t field_index = config->defs[n].field_index;
-
- tally[n][TALLY_0]++;
- for (m = 1; m < RTE_DIM(wild_limits); m++) {
- if (rule->wildness[field_index] >=
- wild_limits[m])
- tally[n][m]++;
- }
- }
-
- for (n = config->num_fields - 1; n > 0; n--) {
- uint32_t field_index = config->defs[n].field_index;
-
- if (rule->wildness[field_index] == 100)
- tally[n][TALLY_DEPTH]++;
- else
- break;
- }
- }
-
- /*
-	 * Look for any field that is always wild and drop it from the config.
-	 * Only deactivate if all fields for a given input index are deactivated.
- */
- for (n = 1; n < config->num_fields; n++) {
- if (config->defs[n].input_index !=
- config->defs[n - 1].input_index) {
- for (m = start; m < n; m++)
- tally[m][TALLY_DEACTIVATED] = deactivate;
- fields_deactivated += deactivate;
- start = n;
- deactivate = 1;
- }
-
- /* if the field is not always completely wild */
- if (tally[n][TALLY_100] != tally[n][TALLY_0])
- deactivate = 0;
- }
-
- for (m = start; m < n; m++)
- tally[m][TALLY_DEACTIVATED] = deactivate;
-
- fields_deactivated += deactivate;
-
- /* remove deactivated fields */
- if (fields_deactivated) {
- uint32_t k, l = 0;
-
- for (k = 0; k < config->num_fields; k++) {
- if (tally[k][TALLY_DEACTIVATED] == 0) {
- memcpy(&tally[l][0], &tally[k][0],
- TALLY_NUM * sizeof(tally[0][0]));
- memcpy(&config->defs[l++],
- &config->defs[k],
- sizeof(struct rte_acl_field_def));
- }
- }
- config->num_fields = l;
- }
-
- min = RTE_ACL_SINGLE_TRIE_SIZE;
- if (config->num_fields == 2)
- min *= 4;
- else if (config->num_fields == 3)
- min *= 3;
- else if (config->num_fields == 4)
- min *= 2;
-
- if (tally[0][TALLY_0] < min)
- return 0;
- for (n = 0; n < config->num_fields; n++)
- wild_limit[n] = 0;
-
- /*
- * If trailing fields are 100% wild, group those together.
- * This allows the search length of the trie to be shortened.
- */
- for (n = 1; n < config->num_fields; n++) {
-
- double rule_percentage = (double)tally[n][TALLY_DEPTH] /
- tally[n][0];
-
- if (rule_percentage > RULE_PERCENTAGE) {
- /* if it crosses an input boundary then round up */
- while (config->defs[n - 1].input_index ==
- config->defs[n].input_index)
- n++;
-
- /* set the limit for selecting rules */
- while (n < config->num_fields)
- wild_limit[n++] = 100;
-
- if (wild_limit[n - 1] == 100)
- return 1;
- }
- }
-
-	/* look for the most wild field that covers 40% or more of the rules */
- for (n = 1; n < config->num_fields; n++) {
- for (m = TALLY_100; m > 0; m--) {
-
- double rule_percentage = (double)tally[n][m] /
- tally[n][0];
-
- if (tally[n][TALLY_DEACTIVATED] == 0 &&
- tally[n][TALLY_0] >
- RTE_ACL_SINGLE_TRIE_SIZE &&
- rule_percentage > NODE_PERCENTAGE &&
- rule_percentage < 0.80) {
- wild_limit[n] = wild_limits[m];
- return 1;
- }
- }
- }
- return 0;
-}
-
-static int
-order(struct rte_acl_build_rule **insert, struct rte_acl_build_rule *rule)
-{
- uint32_t n;
- struct rte_acl_build_rule *left = *insert;
-
- if (left == NULL)
- return 0;
-
- for (n = 1; n < left->config->num_fields; n++) {
- int field_index = left->config->defs[n].field_index;
-
- if (left->wildness[field_index] != rule->wildness[field_index])
- return (left->wildness[field_index] >=
- rule->wildness[field_index]);
- }
- return 0;
-}
-
-static struct rte_acl_build_rule *
-ordered_insert_rule(struct rte_acl_build_rule *head,
- struct rte_acl_build_rule *rule)
-{
- struct rte_acl_build_rule **insert;
-
- if (rule == NULL)
- return head;
-
- rule->next = head;
- if (head == NULL)
- return rule;
-
- insert = &head;
- while (order(insert, rule))
- insert = &(*insert)->next;
-
- rule->next = *insert;
- *insert = rule;
- return head;
-}
-
-static struct rte_acl_build_rule *
-sort_rules(struct rte_acl_build_rule *head)
-{
- struct rte_acl_build_rule *rule, *reordered_head = NULL;
- struct rte_acl_build_rule *last_rule = NULL;
-
- for (rule = head; rule != NULL; rule = rule->next) {
- reordered_head = ordered_insert_rule(reordered_head, last_rule);
- last_rule = rule;
- }
-
- if (last_rule != reordered_head)
- reordered_head = ordered_insert_rule(reordered_head, last_rule);
-
- return reordered_head;
-}
-
-static uint32_t
-acl_build_index(const struct rte_acl_config *config, uint32_t *data_index)
-{
- uint32_t n, m;
- int32_t last_header;
-
- m = 0;
- last_header = -1;
-
- for (n = 0; n < config->num_fields; n++) {
- if (last_header != config->defs[n].input_index) {
- last_header = config->defs[n].input_index;
- data_index[m++] = config->defs[n].offset;
- }
- }
-
- return m;
-}
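The loop above emits one offset per distinct input_index, so fields packed into the same input word share a single data index. A toy illustration (the field layout below is made up):

#include <stdio.h>
#include <stdint.h>

struct def { uint32_t input_index, offset; };

int main(void)
{
	/* proto at offset 9; two ports share the input word at 12 */
	struct def defs[] = { {0, 9}, {1, 12}, {1, 14} };
	uint32_t idx[3], m = 0, n;
	int32_t last = -1;

	for (n = 0; n < 3; n++) {
		if (last != (int32_t)defs[n].input_index) {
			last = defs[n].input_index;
			idx[m++] = defs[n].offset;
		}
	}
	printf("%u data indexes: %u, %u\n", m, idx[0], idx[1]);	/* 2: 9, 12 */
	return 0;
}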
-
-static int
-acl_build_tries(struct acl_build_context *context,
- struct rte_acl_build_rule *head)
-{
- int32_t rc;
- uint32_t n, m, num_tries;
- struct rte_acl_config *config;
- struct rte_acl_build_rule *last, *rule;
- uint32_t wild_limit[RTE_ACL_MAX_LEVELS];
- struct rte_acl_build_rule *rule_sets[RTE_ACL_MAX_TRIES];
-
- config = head->config;
- rule = head;
- rule_sets[0] = head;
- num_tries = 1;
-
- /* initialize tries */
- for (n = 0; n < RTE_DIM(context->tries); n++) {
- context->tries[n].type = RTE_ACL_UNUSED_TRIE;
- context->bld_tries[n].trie = NULL;
- context->tries[n].count = 0;
- context->tries[n].smallest = INT32_MAX;
- }
-
- context->tries[0].type = RTE_ACL_FULL_TRIE;
-
- /* calc wildness of each field of each rule */
- rc = acl_calc_wildness(head, config);
- if (rc != 0)
- return rc;
-
- n = acl_rule_stats(head, config, &wild_limit[0]);
-
-	/* put all rules that fit the wildness criteria into a separate trie */
- while (n > 0 && num_tries < RTE_ACL_MAX_TRIES) {
-
- struct rte_acl_config *new_config;
- struct rte_acl_build_rule **prev = &rule_sets[num_tries - 1];
- struct rte_acl_build_rule *next = head->next;
-
- new_config = acl_build_alloc(context, 1, sizeof(*new_config));
- if (new_config == NULL) {
- RTE_LOG(ERR, ACL,
- "Failed to get space for new config\n");
- return -ENOMEM;
- }
-
- memcpy(new_config, config, sizeof(*new_config));
- config = new_config;
- rule_sets[num_tries] = NULL;
-
- for (rule = head; rule != NULL; rule = next) {
-
- int move = 1;
-
- next = rule->next;
- for (m = 0; m < config->num_fields; m++) {
- int x = config->defs[m].field_index;
- if (rule->wildness[x] < wild_limit[m]) {
- move = 0;
- break;
- }
- }
-
- if (move) {
- rule->config = new_config;
- rule->next = rule_sets[num_tries];
- rule_sets[num_tries] = rule;
- *prev = next;
- } else
- prev = &rule->next;
- }
-
- head = rule_sets[num_tries];
- n = acl_rule_stats(rule_sets[num_tries], config,
- &wild_limit[0]);
- num_tries++;
- }
-
- if (n > 0)
- RTE_LOG(DEBUG, ACL,
- "Number of tries(%d) exceeded.\n", RTE_ACL_MAX_TRIES);
-
- for (n = 0; n < num_tries; n++) {
-
- rule_sets[n] = sort_rules(rule_sets[n]);
- context->tries[n].type = RTE_ACL_FULL_TRIE;
- context->tries[n].count = 0;
- context->tries[n].num_data_indexes =
- acl_build_index(rule_sets[n]->config,
- context->data_indexes[n]);
- context->tries[n].data_index = context->data_indexes[n];
-
- context->bld_tries[n].trie =
- build_trie(context, rule_sets[n],
- &last, &context->tries[n].count);
- if (context->bld_tries[n].trie == NULL) {
- RTE_LOG(ERR, ACL, "Build of %u-th trie failed\n", n);
- return -ENOMEM;
- }
-
- if (last != NULL) {
- rule_sets[num_tries++] = last->next;
- last->next = NULL;
- acl_free_node(context, context->bld_tries[n].trie);
- context->tries[n].count = 0;
-
- context->bld_tries[n].trie =
- build_trie(context, rule_sets[n],
- &last, &context->tries[n].count);
- if (context->bld_tries[n].trie == NULL) {
- RTE_LOG(ERR, ACL,
- "Build of %u-th trie failed\n", n);
- return -ENOMEM;
- }
- }
- }
-
- context->num_tries = num_tries;
- return 0;
-}
-
-static void
-acl_build_log(const struct acl_build_context *ctx)
-{
- uint32_t n;
-
- RTE_LOG(DEBUG, ACL, "Build phase for ACL \"%s\":\n"
- "memory consumed: %zu\n",
- ctx->acx->name,
- ctx->pool.alloc);
-
- for (n = 0; n < RTE_DIM(ctx->tries); n++) {
- if (ctx->tries[n].count != 0)
- RTE_LOG(DEBUG, ACL,
- "trie %u: number of rules: %u\n",
- n, ctx->tries[n].count);
- }
-}
-
-static int
-acl_build_rules(struct acl_build_context *bcx)
-{
- struct rte_acl_build_rule *br, *head;
- const struct rte_acl_rule *rule;
- uint32_t *wp;
- uint32_t fn, i, n, num;
- size_t ofs, sz;
-
- fn = bcx->cfg.num_fields;
- n = bcx->acx->num_rules;
- ofs = n * sizeof(*br);
- sz = ofs + n * fn * sizeof(*wp);
-
- br = tb_alloc(&bcx->pool, sz);
- if (br == NULL) {
- RTE_LOG(ERR, ACL, "ACL context %s: failed to create a copy "
- "of %u build rules (%zu bytes)\n",
- bcx->acx->name, n, sz);
- return -ENOMEM;
- }
-
- wp = (uint32_t *)((uintptr_t)br + ofs);
- num = 0;
- head = NULL;
-
- for (i = 0; i != n; i++) {
- rule = (const struct rte_acl_rule *)
- ((uintptr_t)bcx->acx->rules + bcx->acx->rule_sz * i);
- if ((rule->data.category_mask & bcx->category_mask) != 0) {
- br[num].next = head;
- br[num].config = &bcx->cfg;
- br[num].f = rule;
- br[num].wildness = wp;
- wp += fn;
- head = br + num;
- num++;
- }
- }
-
- bcx->num_rules = num;
- bcx->build_rules = head;
-
- return 0;
-}
-
-/*
- * Copy data_indexes for each trie into RT location.
- */
-static void
-acl_set_data_indexes(struct rte_acl_ctx *ctx)
-{
- uint32_t i, n, ofs;
-
- ofs = 0;
- for (i = 0; i != ctx->num_tries; i++) {
- n = ctx->trie[i].num_data_indexes;
- memcpy(ctx->data_indexes + ofs, ctx->trie[i].data_index,
- n * sizeof(ctx->data_indexes[0]));
- ctx->trie[i].data_index = ctx->data_indexes + ofs;
- ofs += n;
- }
-}
-
-
-int
-rte_acl_build(struct rte_acl_ctx *ctx, const struct rte_acl_config *cfg)
-{
- int rc;
- struct acl_build_context bcx;
-
- if (ctx == NULL || cfg == NULL || cfg->num_categories == 0 ||
- cfg->num_categories > RTE_ACL_MAX_CATEGORIES)
- return -EINVAL;
-
- acl_build_reset(ctx);
-
- memset(&bcx, 0, sizeof(bcx));
- bcx.acx = ctx;
- bcx.pool.alignment = ACL_POOL_ALIGN;
- bcx.pool.min_alloc = ACL_POOL_ALLOC_MIN;
- bcx.cfg = *cfg;
- bcx.category_mask = LEN2MASK(bcx.cfg.num_categories);
-
-
- /* Create a build rules copy. */
- rc = acl_build_rules(&bcx);
- if (rc != 0)
- return rc;
-
- /* No rules to build for that context+config */
- if (bcx.build_rules == NULL) {
- rc = -EINVAL;
-
- /* build internal trie representation. */
- } else if ((rc = acl_build_tries(&bcx, bcx.build_rules)) == 0) {
-
- /* allocate and fill run-time structures. */
- rc = rte_acl_gen(ctx, bcx.tries, bcx.bld_tries,
- bcx.num_tries, bcx.cfg.num_categories,
- RTE_ACL_IPV4VLAN_NUM * RTE_DIM(bcx.tries),
- bcx.num_build_rules);
- if (rc == 0) {
-
- /* set data indexes. */
- acl_set_data_indexes(ctx);
-
- /* copy in build config. */
- ctx->config = *cfg;
- }
- }
-
- acl_build_log(&bcx);
-
- /* cleanup after build. */
- tb_free_pool(&bcx.pool);
- return rc;
-}
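For reference, a typical build-time call sequence against this API looks roughly as follows. This is only a sketch: the context name is hypothetical, field definitions and rules are application supplied, and error handling is trimmed.

#include <string.h>
#include <rte_acl.h>

static int
build_ctx(const struct rte_acl_field_def *defs, uint32_t num_fields,
	  const struct rte_acl_rule *rules, uint32_t num_rules)
{
	struct rte_acl_param prm = {
		.name = "acl-demo",		/* hypothetical name */
		.socket_id = SOCKET_ID_ANY,
		.rule_size = RTE_ACL_RULE_SZ(num_fields),
		.max_rule_num = num_rules,
	};
	struct rte_acl_config cfg = { .num_categories = 1 };
	struct rte_acl_ctx *ctx = rte_acl_create(&prm);

	if (ctx == NULL)
		return -1;

	cfg.num_fields = num_fields;
	memcpy(cfg.defs, defs, num_fields * sizeof(defs[0]));

	if (rte_acl_add_rules(ctx, rules, num_rules) != 0)
		return -1;
	return rte_acl_build(ctx, &cfg);	/* runs the code above */
}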
diff --git a/src/dpdk_lib18/librte_acl/acl_gen.c b/src/dpdk_lib18/librte_acl/acl_gen.c
deleted file mode 100755
index b1f766bb..00000000
--- a/src/dpdk_lib18/librte_acl/acl_gen.c
+++ /dev/null
@@ -1,475 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <rte_acl.h>
-#include "acl_vect.h"
-#include "acl.h"
-
-#define QRANGE_MIN ((uint8_t)INT8_MIN)
-
-#define RTE_ACL_VERIFY(exp) do { \
- if (!(exp)) \
- rte_panic("line %d\tassert \"" #exp "\" failed\n", __LINE__); \
-} while (0)
-
-struct acl_node_counters {
- int match;
- int match_used;
- int single;
- int quad;
- int quad_vectors;
- int dfa;
- int smallest_match;
-};
-
-struct rte_acl_indices {
- int dfa_index;
- int quad_index;
- int single_index;
- int match_index;
-};
-
-static void
-acl_gen_log_stats(const struct rte_acl_ctx *ctx,
- const struct acl_node_counters *counts)
-{
- RTE_LOG(DEBUG, ACL, "Gen phase for ACL \"%s\":\n"
- "runtime memory footprint on socket %d:\n"
- "single nodes/bytes used: %d/%zu\n"
- "quad nodes/bytes used: %d/%zu\n"
- "DFA nodes/bytes used: %d/%zu\n"
- "match nodes/bytes used: %d/%zu\n"
- "total: %zu bytes\n",
- ctx->name, ctx->socket_id,
- counts->single, counts->single * sizeof(uint64_t),
- counts->quad, counts->quad_vectors * sizeof(uint64_t),
- counts->dfa, counts->dfa * RTE_ACL_DFA_SIZE * sizeof(uint64_t),
- counts->match,
- counts->match * sizeof(struct rte_acl_match_results),
- ctx->mem_sz);
-}
-
-/*
-* Counts the number of groups of sequential bits that are
-* either 0 or 1, as specified by the zero_one parameter. This is used to
-* calculate the number of ranges in a node to see if it fits in a quad range
-* node.
-*/
-static int
-acl_count_sequential_groups(struct rte_acl_bitset *bits, int zero_one)
-{
- int n, ranges, last_bit;
-
- ranges = 0;
- last_bit = zero_one ^ 1;
-
- for (n = QRANGE_MIN; n < UINT8_MAX + 1; n++) {
- if (bits->bits[n / (sizeof(bits_t) * 8)] &
- (1 << (n % (sizeof(bits_t) * 8)))) {
- if (zero_one == 1 && last_bit != 1)
- ranges++;
- last_bit = 1;
- } else {
- if (zero_one == 0 && last_bit != 0)
- ranges++;
- last_bit = 0;
- }
- }
- for (n = 0; n < QRANGE_MIN; n++) {
- if (bits->bits[n / (sizeof(bits_t) * 8)] &
- (1 << (n % (sizeof(bits_t) * 8)))) {
- if (zero_one == 1 && last_bit != 1)
- ranges++;
- last_bit = 1;
- } else {
- if (zero_one == 0 && last_bit != 0)
- ranges++;
- last_bit = 0;
- }
- }
-
- return ranges;
-}
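The group counting above is edge detection over the bit array; the only subtlety is the signed visit order (0x80..0xff, then 0x00..0x7f). A toy count of 1-groups over a single 32-bit word, in plain ascending order for simplicity:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* bits 10..12 and bit 20 set: two groups of ones */
	uint32_t bits = (0x7u << 10) | (1u << 20);
	int n, groups = 0, last = 0;

	for (n = 0; n < 32; n++) {
		int bit = (bits >> n) & 1;

		groups += (bit && !last);	/* group starts on 0->1 edge */
		last = bit;
	}
	printf("groups of ones: %d\n", groups);	/* prints 2 */
	return 0;
}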
-
-/*
- * Count number of ranges spanned by the node's pointers
- */
-static int
-acl_count_fanout(struct rte_acl_node *node)
-{
- uint32_t n;
- int ranges;
-
- if (node->fanout != 0)
- return node->fanout;
-
- ranges = acl_count_sequential_groups(&node->values, 0);
-
- for (n = 0; n < node->num_ptrs; n++) {
- if (node->ptrs[n].ptr != NULL)
- ranges += acl_count_sequential_groups(
- &node->ptrs[n].values, 1);
- }
-
- node->fanout = ranges;
- return node->fanout;
-}
-
-/*
- * Determine the type of nodes and count each type
- */
-static int
-acl_count_trie_types(struct acl_node_counters *counts,
- struct rte_acl_node *node, int match, int force_dfa)
-{
- uint32_t n;
- int num_ptrs;
-
- /* skip if this node has been counted */
- if (node->node_type != (uint32_t)RTE_ACL_NODE_UNDEFINED)
- return match;
-
- if (node->match_flag != 0 || node->num_ptrs == 0) {
- counts->match++;
- if (node->match_flag == -1)
- node->match_flag = match++;
- node->node_type = RTE_ACL_NODE_MATCH;
- if (counts->smallest_match > node->match_flag)
- counts->smallest_match = node->match_flag;
- return match;
- }
-
- num_ptrs = acl_count_fanout(node);
-
- /* Force type to dfa */
- if (force_dfa)
- num_ptrs = RTE_ACL_DFA_SIZE;
-
- /* determine node type based on number of ranges */
- if (num_ptrs == 1) {
- counts->single++;
- node->node_type = RTE_ACL_NODE_SINGLE;
- } else if (num_ptrs <= RTE_ACL_QUAD_MAX) {
- counts->quad++;
- counts->quad_vectors += node->fanout;
- node->node_type = RTE_ACL_NODE_QRANGE;
- } else {
- counts->dfa++;
- node->node_type = RTE_ACL_NODE_DFA;
- }
-
- /*
- * recursively count the types of all children
- */
- for (n = 0; n < node->num_ptrs; n++) {
- if (node->ptrs[n].ptr != NULL)
- match = acl_count_trie_types(counts, node->ptrs[n].ptr,
- match, 0);
- }
-
- return match;
-}
-
-static void
-acl_add_ptrs(struct rte_acl_node *node, uint64_t *node_array, uint64_t no_match,
- int resolved)
-{
- uint32_t n, x;
- int m, ranges, last_bit;
- struct rte_acl_node *child;
- struct rte_acl_bitset *bits;
- uint64_t *node_a, index, dfa[RTE_ACL_DFA_SIZE];
-
- ranges = 0;
- last_bit = 0;
-
- for (n = 0; n < RTE_DIM(dfa); n++)
- dfa[n] = no_match;
-
- for (x = 0; x < node->num_ptrs; x++) {
-
- child = node->ptrs[x].ptr;
- if (child == NULL)
- continue;
-
- bits = &node->ptrs[x].values;
- for (n = 0; n < RTE_DIM(dfa); n++) {
-
- if (bits->bits[n / (sizeof(bits_t) * CHAR_BIT)] &
- (1 << (n % (sizeof(bits_t) * CHAR_BIT)))) {
-
- dfa[n] = resolved ? child->node_index : x;
- ranges += (last_bit == 0);
- last_bit = 1;
- } else {
- last_bit = 0;
- }
- }
- }
-
- /*
-	 * Rather than going from 0x00 to 0xff, the range count and
-	 * the layout run from 0x80-0xff then 0x00-0x7f, due to the
-	 * signed compare used by SSE (cmpgt).
- */
- if (node->node_type == RTE_ACL_NODE_QRANGE) {
-
- m = 0;
- node_a = node_array;
- index = dfa[QRANGE_MIN];
- *node_a++ = index;
-
- for (x = QRANGE_MIN + 1; x < UINT8_MAX + 1; x++) {
- if (dfa[x] != index) {
- index = dfa[x];
- *node_a++ = index;
- node->transitions[m++] = (uint8_t)(x - 1);
- }
- }
-
- for (x = 0; x < INT8_MAX + 1; x++) {
- if (dfa[x] != index) {
- index = dfa[x];
- *node_a++ = index;
- node->transitions[m++] = (uint8_t)(x - 1);
- }
- }
-
- /* fill unused locations with max value - nothing is greater */
- for (; m < RTE_ACL_QUAD_SIZE; m++)
- node->transitions[m] = INT8_MAX;
-
- RTE_ACL_VERIFY(m <= RTE_ACL_QUAD_SIZE);
-
- } else if (node->node_type == RTE_ACL_NODE_DFA && resolved) {
- for (n = 0; n < RTE_DIM(dfa); n++)
- node_array[n] = dfa[n];
- }
-}
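At run time a QRANGE node built by the code above is consumed by comparing the input byte, as a signed value, against up to four boundaries; the number of boundaries exceeded selects the packed next-node entry. A scalar model of that lookup (the real code uses SSE cmpgt; names here are illustrative):

#include <stdio.h>
#include <stdint.h>

static uint64_t
qrange_lookup(const int8_t tr[4], const uint64_t next[5], uint8_t input)
{
	int i, ge = 0;

	for (i = 0; i < 4; i++)		/* emulates the signed compare */
		ge += ((int8_t)input > tr[i]);
	return next[ge];
}

int main(void)
{
	/* boundaries 0xff and 0x3f in signed order; INT8_MAX filler */
	int8_t tr[4] = {-1, 63, 127, 127};
	uint64_t next[5] = {100, 200, 300, 300, 300};

	printf("%u\n", (unsigned int)qrange_lookup(tr, next, 0x90)); /* 100 */
	printf("%u\n", (unsigned int)qrange_lookup(tr, next, 0x20)); /* 200 */
	return 0;
}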
-
-/*
- * Routine that allocates space for this node and recursively calls
- * to allocate space for each child. Once all the children are allocated,
- * then resolve all transitions for this node.
- */
-static void
-acl_gen_node(struct rte_acl_node *node, uint64_t *node_array,
- uint64_t no_match, struct rte_acl_indices *index, int num_categories)
-{
- uint32_t n, *qtrp;
- uint64_t *array_ptr;
- struct rte_acl_match_results *match;
-
- if (node->node_index != RTE_ACL_NODE_UNDEFINED)
- return;
-
- array_ptr = NULL;
-
- switch (node->node_type) {
- case RTE_ACL_NODE_DFA:
- node->node_index = index->dfa_index | node->node_type;
- array_ptr = &node_array[index->dfa_index];
- index->dfa_index += RTE_ACL_DFA_SIZE;
- for (n = 0; n < RTE_ACL_DFA_SIZE; n++)
- array_ptr[n] = no_match;
- break;
- case RTE_ACL_NODE_SINGLE:
- node->node_index = RTE_ACL_QUAD_SINGLE | index->single_index |
- node->node_type;
- array_ptr = &node_array[index->single_index];
- index->single_index += 1;
- array_ptr[0] = no_match;
- break;
- case RTE_ACL_NODE_QRANGE:
- array_ptr = &node_array[index->quad_index];
- acl_add_ptrs(node, array_ptr, no_match, 0);
- qtrp = (uint32_t *)node->transitions;
- node->node_index = qtrp[0];
- node->node_index <<= sizeof(index->quad_index) * CHAR_BIT;
- node->node_index |= index->quad_index | node->node_type;
- index->quad_index += node->fanout;
- break;
- case RTE_ACL_NODE_MATCH:
- match = ((struct rte_acl_match_results *)
- (node_array + index->match_index));
- memcpy(match + node->match_flag, node->mrt, sizeof(*node->mrt));
- node->node_index = node->match_flag | node->node_type;
- break;
- case RTE_ACL_NODE_UNDEFINED:
- RTE_ACL_VERIFY(node->node_type !=
- (uint32_t)RTE_ACL_NODE_UNDEFINED);
- break;
- }
-
- /* recursively allocate space for all children */
- for (n = 0; n < node->num_ptrs; n++) {
- if (node->ptrs[n].ptr != NULL)
- acl_gen_node(node->ptrs[n].ptr,
- node_array,
- no_match,
- index,
- num_categories);
- }
-
- /* All children are resolved, resolve this node's pointers */
- switch (node->node_type) {
- case RTE_ACL_NODE_DFA:
- acl_add_ptrs(node, array_ptr, no_match, 1);
- break;
- case RTE_ACL_NODE_SINGLE:
- for (n = 0; n < node->num_ptrs; n++) {
- if (node->ptrs[n].ptr != NULL)
- array_ptr[0] = node->ptrs[n].ptr->node_index;
- }
- break;
- case RTE_ACL_NODE_QRANGE:
- acl_add_ptrs(node, array_ptr, no_match, 1);
- break;
- case RTE_ACL_NODE_MATCH:
- break;
- case RTE_ACL_NODE_UNDEFINED:
- RTE_ACL_VERIFY(node->node_type !=
- (uint32_t)RTE_ACL_NODE_UNDEFINED);
- break;
- }
-}
-
-static int
-acl_calc_counts_indices(struct acl_node_counters *counts,
- struct rte_acl_indices *indices, struct rte_acl_trie *trie,
- struct rte_acl_bld_trie *node_bld_trie, uint32_t num_tries,
- int match_num)
-{
- uint32_t n;
-
- memset(indices, 0, sizeof(*indices));
- memset(counts, 0, sizeof(*counts));
-
- /* Get stats on nodes */
- for (n = 0; n < num_tries; n++) {
- counts->smallest_match = INT32_MAX;
- match_num = acl_count_trie_types(counts, node_bld_trie[n].trie,
- match_num, 1);
- trie[n].smallest = counts->smallest_match;
- }
-
- indices->dfa_index = RTE_ACL_DFA_SIZE + 1;
- indices->quad_index = indices->dfa_index +
- counts->dfa * RTE_ACL_DFA_SIZE;
- indices->single_index = indices->quad_index + counts->quad_vectors;
- indices->match_index = indices->single_index + counts->single + 1;
- indices->match_index = RTE_ALIGN(indices->match_index,
- (XMM_SIZE / sizeof(uint64_t)));
-
- return match_num;
-}
-
-/*
- * Generate the runtime structure using build structure
- */
-int
-rte_acl_gen(struct rte_acl_ctx *ctx, struct rte_acl_trie *trie,
- struct rte_acl_bld_trie *node_bld_trie, uint32_t num_tries,
- uint32_t num_categories, uint32_t data_index_sz, int match_num)
-{
- void *mem;
- size_t total_size;
- uint64_t *node_array, no_match;
- uint32_t n, match_index;
- struct rte_acl_match_results *match;
- struct acl_node_counters counts;
- struct rte_acl_indices indices;
-
- /* Fill counts and indices arrays from the nodes. */
- match_num = acl_calc_counts_indices(&counts, &indices, trie,
- node_bld_trie, num_tries, match_num);
-
- /* Allocate runtime memory (align to cache boundary) */
- total_size = RTE_ALIGN(data_index_sz, RTE_CACHE_LINE_SIZE) +
- indices.match_index * sizeof(uint64_t) +
- (match_num + 2) * sizeof(struct rte_acl_match_results) +
- XMM_SIZE;
-
- mem = rte_zmalloc_socket(ctx->name, total_size, RTE_CACHE_LINE_SIZE,
- ctx->socket_id);
- if (mem == NULL) {
- RTE_LOG(ERR, ACL,
- "allocation of %zu bytes on socket %d for %s failed\n",
- total_size, ctx->socket_id, ctx->name);
- return -ENOMEM;
- }
-
- /* Fill the runtime structure */
- match_index = indices.match_index;
- node_array = (uint64_t *)((uintptr_t)mem +
- RTE_ALIGN(data_index_sz, RTE_CACHE_LINE_SIZE));
-
- /*
-	 * Set up the NOMATCH node (a SINGLE at the
- * highest index, that points to itself)
- */
-
- node_array[RTE_ACL_DFA_SIZE] = RTE_ACL_DFA_SIZE | RTE_ACL_NODE_SINGLE;
- no_match = RTE_ACL_NODE_MATCH;
-
- for (n = 0; n < RTE_ACL_DFA_SIZE; n++)
- node_array[n] = no_match;
-
- match = ((struct rte_acl_match_results *)(node_array + match_index));
- memset(match, 0, sizeof(*match));
-
- for (n = 0; n < num_tries; n++) {
-
- acl_gen_node(node_bld_trie[n].trie, node_array, no_match,
- &indices, num_categories);
-
- if (node_bld_trie[n].trie->node_index == no_match)
- trie[n].root_index = 0;
- else
- trie[n].root_index = node_bld_trie[n].trie->node_index;
- }
-
- ctx->mem = mem;
- ctx->mem_sz = total_size;
- ctx->data_indexes = mem;
- ctx->num_tries = num_tries;
- ctx->num_categories = num_categories;
- ctx->match_index = match_index;
- ctx->no_match = no_match;
- ctx->idle = node_array[RTE_ACL_DFA_SIZE];
- ctx->trans_table = node_array;
- memcpy(ctx->trie, trie, sizeof(ctx->trie));
-
- acl_gen_log_stats(ctx, &counts);
- return 0;
-}
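
For orientation, the index arithmetic in acl_calc_counts_indices() above lays
the runtime table out as consecutive sections of 64-bit transitions: DFA
nodes, then QRANGE vectors, then SINGLE nodes, then the match results. A
minimal sketch of that math with hypothetical node counts (the XMM alignment
of match_index is omitted; RTE_ACL_DFA_SIZE is UINT8_MAX + 1 per acl.h):

    #include <stdint.h>
    #include <stdio.h>

    #define RTE_ACL_DFA_SIZE (UINT8_MAX + 1) /* 256 transitions per DFA node */

    int
    main(void)
    {
        /* hypothetical counts, as produced by acl_count_trie_types() */
        uint32_t dfa = 3, quad_vectors = 8, single = 5;

        /* slots 0..255 form the shared no-match DFA; slot 256 holds the
         * self-pointing idle SINGLE node set up in rte_acl_gen() */
        uint32_t dfa_index = RTE_ACL_DFA_SIZE + 1;
        uint32_t quad_index = dfa_index + dfa * RTE_ACL_DFA_SIZE;
        uint32_t single_index = quad_index + quad_vectors;
        uint32_t match_index = single_index + single + 1;

        printf("dfa=%u quad=%u single=%u match=%u\n",
               dfa_index, quad_index, single_index, match_index);
        /* prints: dfa=257 quad=1025 single=1033 match=1039 */
        return 0;
    }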
diff --git a/src/dpdk_lib18/librte_acl/acl_run_scalar.c b/src/dpdk_lib18/librte_acl/acl_run_scalar.c
deleted file mode 100755
index 43c8fc3e..00000000
--- a/src/dpdk_lib18/librte_acl/acl_run_scalar.c
+++ /dev/null
@@ -1,193 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "acl_run.h"
-
-/*
- * Resolve priority for multiple results (scalar version).
- * This consists of comparing the priority of the current traversal with the
- * running set of results for the packet.
- * For each result, keep a running array of the result (rule number) and
- * its priority for each category.
- */
-static inline void
-resolve_priority_scalar(uint64_t transition, int n,
- const struct rte_acl_ctx *ctx, struct parms *parms,
- const struct rte_acl_match_results *p, uint32_t categories)
-{
- uint32_t i;
- int32_t *saved_priority;
- uint32_t *saved_results;
- const int32_t *priority;
- const uint32_t *results;
-
- saved_results = parms[n].cmplt->results;
- saved_priority = parms[n].cmplt->priority;
-
- /* results and priorities for completed trie */
- results = p[transition].results;
- priority = p[transition].priority;
-
- /* if this is not the first completed trie */
- if (parms[n].cmplt->count != ctx->num_tries) {
- for (i = 0; i < categories; i += RTE_ACL_RESULTS_MULTIPLIER) {
-
- if (saved_priority[i] <= priority[i]) {
- saved_priority[i] = priority[i];
- saved_results[i] = results[i];
- }
- if (saved_priority[i + 1] <= priority[i + 1]) {
- saved_priority[i + 1] = priority[i + 1];
- saved_results[i + 1] = results[i + 1];
- }
- if (saved_priority[i + 2] <= priority[i + 2]) {
- saved_priority[i + 2] = priority[i + 2];
- saved_results[i + 2] = results[i + 2];
- }
- if (saved_priority[i + 3] <= priority[i + 3]) {
- saved_priority[i + 3] = priority[i + 3];
- saved_results[i + 3] = results[i + 3];
- }
- }
- } else {
- for (i = 0; i < categories; i += RTE_ACL_RESULTS_MULTIPLIER) {
- saved_priority[i] = priority[i];
- saved_priority[i + 1] = priority[i + 1];
- saved_priority[i + 2] = priority[i + 2];
- saved_priority[i + 3] = priority[i + 3];
-
- saved_results[i] = results[i];
- saved_results[i + 1] = results[i + 1];
- saved_results[i + 2] = results[i + 2];
- saved_results[i + 3] = results[i + 3];
- }
- }
-}
-
-/*
- * When processing the transition, rather than using an if/else
- * construct, the offset is calculated for both DFA and QRANGE and
- * then conditionally added to the address based on node type.
- * This is done to avoid branch mis-predictions. Since the
- * offset is a rather simple calculation, it is more efficient
- * to compute it unconditionally and use a conditional move rather
- * than a conditional branch to decide which calculation to apply.
- */
-static inline uint32_t
-scan_forward(uint32_t input, uint32_t max)
-{
- return (input == 0) ? max : rte_bsf32(input);
-}
-
-static inline uint64_t
-scalar_transition(const uint64_t *trans_table, uint64_t transition,
- uint8_t input)
-{
- uint32_t addr, index, ranges, x, a, b, c;
-
- /* break transition into component parts */
- ranges = transition >> (sizeof(index) * CHAR_BIT);
-
- /* calc address for a QRANGE node */
- c = input * SCALAR_QRANGE_MULT;
- a = ranges | SCALAR_QRANGE_MIN;
- index = transition & ~RTE_ACL_NODE_INDEX;
- a -= (c & SCALAR_QRANGE_MASK);
- b = c & SCALAR_QRANGE_MIN;
- addr = transition ^ index;
- a &= SCALAR_QRANGE_MIN;
- a ^= (ranges ^ b) & (a ^ b);
- x = scan_forward(a, 32) >> 3;
- addr += (index == RTE_ACL_NODE_DFA) ? input : x;
-
-	/* pick up the next transition */
- transition = *(trans_table + addr);
- return transition;
-}
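
As a reading aid, the split performed at the top of scalar_transition()
condenses to the sketch below; the exact type-bit encoding (RTE_ACL_NODE_*)
lives in acl.h, which is not part of this diff:

    #include <limits.h>
    #include <stdint.h>

    struct transition_parts {
        uint32_t addr_and_type; /* node index ORed with the node-type bits */
        uint32_t ranges;        /* four packed signed 8-bit QRANGE boundaries */
    };

    static inline struct transition_parts
    split_transition(uint64_t transition)
    {
        struct transition_parts p;

        p.addr_and_type = (uint32_t)transition;
        p.ranges = (uint32_t)(transition >> (sizeof(uint32_t) * CHAR_BIT));
        return p;
    }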
-
-int
-rte_acl_classify_scalar(const struct rte_acl_ctx *ctx, const uint8_t **data,
- uint32_t *results, uint32_t num, uint32_t categories)
-{
- int n;
- uint64_t transition0, transition1;
- uint32_t input0, input1;
- struct acl_flow_data flows;
- uint64_t index_array[MAX_SEARCHES_SCALAR];
- struct completion cmplt[MAX_SEARCHES_SCALAR];
- struct parms parms[MAX_SEARCHES_SCALAR];
-
- if (categories != 1 &&
- ((RTE_ACL_RESULTS_MULTIPLIER - 1) & categories) != 0)
- return -EINVAL;
-
- acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results, num,
- categories, ctx->trans_table);
-
- for (n = 0; n < MAX_SEARCHES_SCALAR; n++) {
- cmplt[n].count = 0;
- index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
- }
-
- transition0 = index_array[0];
- transition1 = index_array[1];
-
- while (flows.started > 0) {
-
- input0 = GET_NEXT_4BYTES(parms, 0);
- input1 = GET_NEXT_4BYTES(parms, 1);
-
- for (n = 0; n < 4; n++) {
- if (likely((transition0 & RTE_ACL_NODE_MATCH) == 0))
- transition0 = scalar_transition(flows.trans,
- transition0, (uint8_t)input0);
-
- input0 >>= CHAR_BIT;
-
- if (likely((transition1 & RTE_ACL_NODE_MATCH) == 0))
- transition1 = scalar_transition(flows.trans,
- transition1, (uint8_t)input1);
-
- input1 >>= CHAR_BIT;
-
- }
- if ((transition0 | transition1) & RTE_ACL_NODE_MATCH) {
- transition0 = acl_match_check(transition0,
- 0, ctx, parms, &flows, resolve_priority_scalar);
- transition1 = acl_match_check(transition1,
- 1, ctx, parms, &flows, resolve_priority_scalar);
-
- }
- }
- return 0;
-}
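
A usage sketch for the entry point above: `ctx` is assumed to be a context
already built with rte_acl_build(), and pkt0/pkt1 are hypothetical pointers
to the field data the tries consume. Per the check at the top of the
function, categories must be 1 or a multiple of RTE_ACL_RESULTS_MULTIPLIER:

    enum { N_PKTS = 2, N_CATS = 1 };

    const uint8_t *bufs[N_PKTS] = { pkt0, pkt1 }; /* hypothetical buffers */
    uint32_t res[N_PKTS * N_CATS];   /* one slot per packet per category */

    int rc = rte_acl_classify_scalar(ctx, bufs, res, N_PKTS, N_CATS);
    /* rc == 0 on success; res[i] then holds the matched rule's userdata,
     * or 0 when nothing matched. */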
diff --git a/src/dpdk_lib18/librte_acl/acl_run_sse.c b/src/dpdk_lib18/librte_acl/acl_run_sse.c
deleted file mode 100755
index 69a9d775..00000000
--- a/src/dpdk_lib18/librte_acl/acl_run_sse.c
+++ /dev/null
@@ -1,626 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "acl_run.h"
-
-enum {
- SHUFFLE32_SLOT1 = 0xe5,
- SHUFFLE32_SLOT2 = 0xe6,
- SHUFFLE32_SLOT3 = 0xe7,
- SHUFFLE32_SWAP64 = 0x4e,
-};
-
-static const rte_xmm_t mm_type_quad_range = {
- .u32 = {
- RTE_ACL_NODE_QRANGE,
- RTE_ACL_NODE_QRANGE,
- RTE_ACL_NODE_QRANGE,
- RTE_ACL_NODE_QRANGE,
- },
-};
-
-static const rte_xmm_t mm_type_quad_range64 = {
- .u32 = {
- RTE_ACL_NODE_QRANGE,
- RTE_ACL_NODE_QRANGE,
- 0,
- 0,
- },
-};
-
-static const rte_xmm_t mm_shuffle_input = {
- .u32 = {0x00000000, 0x04040404, 0x08080808, 0x0c0c0c0c},
-};
-
-static const rte_xmm_t mm_shuffle_input64 = {
- .u32 = {0x00000000, 0x04040404, 0x80808080, 0x80808080},
-};
-
-static const rte_xmm_t mm_ones_16 = {
- .u16 = {1, 1, 1, 1, 1, 1, 1, 1},
-};
-
-static const rte_xmm_t mm_bytes = {
- .u32 = {UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX},
-};
-
-static const rte_xmm_t mm_bytes64 = {
- .u32 = {UINT8_MAX, UINT8_MAX, 0, 0},
-};
-
-static const rte_xmm_t mm_match_mask = {
- .u32 = {
- RTE_ACL_NODE_MATCH,
- RTE_ACL_NODE_MATCH,
- RTE_ACL_NODE_MATCH,
- RTE_ACL_NODE_MATCH,
- },
-};
-
-static const rte_xmm_t mm_match_mask64 = {
- .u32 = {
- RTE_ACL_NODE_MATCH,
- 0,
- RTE_ACL_NODE_MATCH,
- 0,
- },
-};
-
-static const rte_xmm_t mm_index_mask = {
- .u32 = {
- RTE_ACL_NODE_INDEX,
- RTE_ACL_NODE_INDEX,
- RTE_ACL_NODE_INDEX,
- RTE_ACL_NODE_INDEX,
- },
-};
-
-static const rte_xmm_t mm_index_mask64 = {
- .u32 = {
- RTE_ACL_NODE_INDEX,
- RTE_ACL_NODE_INDEX,
- 0,
- 0,
- },
-};
-
-
-/*
- * Resolve priority for multiple results (sse version).
- * This consists of comparing the priority of the current traversal with the
- * running set of results for the packet.
- * For each result, keep a running array of the result (rule number) and
- * its priority for each category.
- */
-static inline void
-resolve_priority_sse(uint64_t transition, int n, const struct rte_acl_ctx *ctx,
- struct parms *parms, const struct rte_acl_match_results *p,
- uint32_t categories)
-{
- uint32_t x;
- xmm_t results, priority, results1, priority1, selector;
- xmm_t *saved_results, *saved_priority;
-
- for (x = 0; x < categories; x += RTE_ACL_RESULTS_MULTIPLIER) {
-
- saved_results = (xmm_t *)(&parms[n].cmplt->results[x]);
- saved_priority =
- (xmm_t *)(&parms[n].cmplt->priority[x]);
-
- /* get results and priorities for completed trie */
- results = MM_LOADU((const xmm_t *)&p[transition].results[x]);
- priority = MM_LOADU((const xmm_t *)&p[transition].priority[x]);
-
- /* if this is not the first completed trie */
- if (parms[n].cmplt->count != ctx->num_tries) {
-
- /* get running best results and their priorities */
- results1 = MM_LOADU(saved_results);
- priority1 = MM_LOADU(saved_priority);
-
- /* select results that are highest priority */
- selector = MM_CMPGT32(priority1, priority);
- results = MM_BLENDV8(results, results1, selector);
- priority = MM_BLENDV8(priority, priority1, selector);
- }
-
- /* save running best results and their priorities */
- MM_STOREU(saved_results, results);
- MM_STOREU(saved_priority, priority);
- }
-}
-
-/*
- * Extract transitions from an XMM register and check for any matches
- */
-static void
-acl_process_matches(xmm_t *indices, int slot, const struct rte_acl_ctx *ctx,
- struct parms *parms, struct acl_flow_data *flows)
-{
- uint64_t transition1, transition2;
-
- /* extract transition from low 64 bits. */
- transition1 = MM_CVT64(*indices);
-
- /* extract transition from high 64 bits. */
- *indices = MM_SHUFFLE32(*indices, SHUFFLE32_SWAP64);
- transition2 = MM_CVT64(*indices);
-
- transition1 = acl_match_check(transition1, slot, ctx,
- parms, flows, resolve_priority_sse);
- transition2 = acl_match_check(transition2, slot + 1, ctx,
- parms, flows, resolve_priority_sse);
-
- /* update indices with new transitions. */
- *indices = MM_SET64(transition2, transition1);
-}
-
-/*
- * Check for a match in 2 transitions (contained in one SSE register)
- */
-static inline void
-acl_match_check_x2(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
- struct acl_flow_data *flows, xmm_t *indices, xmm_t match_mask)
-{
- xmm_t temp;
-
- temp = MM_AND(match_mask, *indices);
- while (!MM_TESTZ(temp, temp)) {
- acl_process_matches(indices, slot, ctx, parms, flows);
- temp = MM_AND(match_mask, *indices);
- }
-}
-
-/*
- * Check for any match in 4 transitions (contained in 2 SSE registers)
- */
-static inline void
-acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
- struct acl_flow_data *flows, xmm_t *indices1, xmm_t *indices2,
- xmm_t match_mask)
-{
- xmm_t temp;
-
- /* put low 32 bits of each transition into one register */
- temp = (xmm_t)MM_SHUFFLEPS((__m128)*indices1, (__m128)*indices2,
- 0x88);
- /* test for match node */
- temp = MM_AND(match_mask, temp);
-
- while (!MM_TESTZ(temp, temp)) {
- acl_process_matches(indices1, slot, ctx, parms, flows);
- acl_process_matches(indices2, slot + 2, ctx, parms, flows);
-
- temp = (xmm_t)MM_SHUFFLEPS((__m128)*indices1,
- (__m128)*indices2,
- 0x88);
- temp = MM_AND(match_mask, temp);
- }
-}
-
-/*
- * Calculate the address of the next transition for
- * all types of nodes. Note that only DFA nodes and range
- * nodes actually transition to another node. Match
- * nodes don't move.
- */
-static inline xmm_t
-acl_calc_addr(xmm_t index_mask, xmm_t next_input, xmm_t shuffle_input,
- xmm_t ones_16, xmm_t bytes, xmm_t type_quad_range,
- xmm_t *indices1, xmm_t *indices2)
-{
- xmm_t addr, node_types, temp;
-
- /*
- * Note that no transition is done for a match
- * node and therefore a stream freezes when
- * it reaches a match.
- */
-
- /* Shuffle low 32 into temp and high 32 into indices2 */
- temp = (xmm_t)MM_SHUFFLEPS((__m128)*indices1, (__m128)*indices2,
- 0x88);
- *indices2 = (xmm_t)MM_SHUFFLEPS((__m128)*indices1,
- (__m128)*indices2, 0xdd);
-
- /* Calc node type and node addr */
- node_types = MM_ANDNOT(index_mask, temp);
- addr = MM_AND(index_mask, temp);
-
- /*
- * Calc addr for DFAs - addr = dfa_index + input_byte
- */
-
- /* mask for DFA type (0) nodes */
- temp = MM_CMPEQ32(node_types, MM_XOR(node_types, node_types));
-
- /* add input byte to DFA position */
- temp = MM_AND(temp, bytes);
- temp = MM_AND(temp, next_input);
- addr = MM_ADD32(addr, temp);
-
- /*
- * Calc addr for Range nodes -> range_index + range(input)
- */
- node_types = MM_CMPEQ32(node_types, type_quad_range);
-
- /*
-	 * Calculate the number of range boundaries that are less than the
-	 * input value. Range boundaries for each node are signed 8-bit values,
-	 * ordered from -128 to 127, held in the indices2 register.
-	 * This is effectively a popcnt of the boundary bytes that compare
-	 * less than the input byte.
- */
-
- /* shuffle input byte to all 4 positions of 32 bit value */
- temp = MM_SHUFFLE8(next_input, shuffle_input);
-
- /* check ranges */
- temp = MM_CMPGT8(temp, *indices2);
-
-	/* convert -1 to 1 (boundary bytes less than the input byte) */
- temp = MM_SIGN8(temp, temp);
-
- /* horizontal add pairs of bytes into words */
- temp = MM_MADD8(temp, temp);
-
- /* horizontal add pairs of words into dwords */
- temp = MM_MADD16(temp, ones_16);
-
- /* mask to range type nodes */
- temp = MM_AND(temp, node_types);
-
- /* add index into node position */
- return MM_ADD32(addr, temp);
-}
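
The boundary-count trick above (CMPGT8 / SIGN8 / MADD8 / MADD16) condenses to
the scalar sketch below, shown for clarity only; it keeps the same signed
compare semantics as MM_CMPGT8:

    #include <limits.h>
    #include <stdint.h>

    /* Offset into a QRANGE node's fanout: the number of its (up to four)
     * signed 8-bit boundaries that compare less than the input byte. */
    static inline uint32_t
    qrange_offset(uint32_t ranges, uint8_t input)
    {
        uint32_t i, off = 0;

        for (i = 0; i != 4; i++) {
            int8_t bound = (int8_t)(ranges >> (i * CHAR_BIT));
            if ((int8_t)input > bound)
                off++;
        }
        return off;
    }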
-
-/*
- * Process 4 transitions (in 2 SIMD registers) in parallel
- */
-static inline xmm_t
-transition4(xmm_t index_mask, xmm_t next_input, xmm_t shuffle_input,
- xmm_t ones_16, xmm_t bytes, xmm_t type_quad_range,
- const uint64_t *trans, xmm_t *indices1, xmm_t *indices2)
-{
- xmm_t addr;
- uint64_t trans0, trans2;
-
- /* Calculate the address (array index) for all 4 transitions. */
-
- addr = acl_calc_addr(index_mask, next_input, shuffle_input, ones_16,
- bytes, type_quad_range, indices1, indices2);
-
- /* Gather 64 bit transitions and pack back into 2 registers. */
-
- trans0 = trans[MM_CVT32(addr)];
-
- /* get slot 2 */
-
- /* {x0, x1, x2, x3} -> {x2, x1, x2, x3} */
- addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT2);
- trans2 = trans[MM_CVT32(addr)];
-
- /* get slot 1 */
-
- /* {x2, x1, x2, x3} -> {x1, x1, x2, x3} */
- addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT1);
- *indices1 = MM_SET64(trans[MM_CVT32(addr)], trans0);
-
- /* get slot 3 */
-
- /* {x1, x1, x2, x3} -> {x3, x1, x2, x3} */
- addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT3);
- *indices2 = MM_SET64(trans[MM_CVT32(addr)], trans2);
-
- return MM_SRL32(next_input, 8);
-}
-
-/*
- * Execute trie traversal with 8 flows in parallel
- */
-static inline int
-search_sse_8(const struct rte_acl_ctx *ctx, const uint8_t **data,
- uint32_t *results, uint32_t total_packets, uint32_t categories)
-{
- int n;
- struct acl_flow_data flows;
- uint64_t index_array[MAX_SEARCHES_SSE8];
- struct completion cmplt[MAX_SEARCHES_SSE8];
- struct parms parms[MAX_SEARCHES_SSE8];
- xmm_t input0, input1;
- xmm_t indices1, indices2, indices3, indices4;
-
- acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
- total_packets, categories, ctx->trans_table);
-
- for (n = 0; n < MAX_SEARCHES_SSE8; n++) {
- cmplt[n].count = 0;
- index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
- }
-
- /*
- * indices1 contains index_array[0,1]
- * indices2 contains index_array[2,3]
- * indices3 contains index_array[4,5]
- * indices4 contains index_array[6,7]
- */
-
- indices1 = MM_LOADU((xmm_t *) &index_array[0]);
- indices2 = MM_LOADU((xmm_t *) &index_array[2]);
-
- indices3 = MM_LOADU((xmm_t *) &index_array[4]);
- indices4 = MM_LOADU((xmm_t *) &index_array[6]);
-
- /* Check for any matches. */
- acl_match_check_x4(0, ctx, parms, &flows,
- &indices1, &indices2, mm_match_mask.m);
- acl_match_check_x4(4, ctx, parms, &flows,
- &indices3, &indices4, mm_match_mask.m);
-
- while (flows.started > 0) {
-
- /* Gather 4 bytes of input data for each stream. */
- input0 = MM_INSERT32(mm_ones_16.m, GET_NEXT_4BYTES(parms, 0),
- 0);
- input1 = MM_INSERT32(mm_ones_16.m, GET_NEXT_4BYTES(parms, 4),
- 0);
-
- input0 = MM_INSERT32(input0, GET_NEXT_4BYTES(parms, 1), 1);
- input1 = MM_INSERT32(input1, GET_NEXT_4BYTES(parms, 5), 1);
-
- input0 = MM_INSERT32(input0, GET_NEXT_4BYTES(parms, 2), 2);
- input1 = MM_INSERT32(input1, GET_NEXT_4BYTES(parms, 6), 2);
-
- input0 = MM_INSERT32(input0, GET_NEXT_4BYTES(parms, 3), 3);
- input1 = MM_INSERT32(input1, GET_NEXT_4BYTES(parms, 7), 3);
-
- /* Process the 4 bytes of input on each stream. */
-
- input0 = transition4(mm_index_mask.m, input0,
- mm_shuffle_input.m, mm_ones_16.m,
- mm_bytes.m, mm_type_quad_range.m,
- flows.trans, &indices1, &indices2);
-
- input1 = transition4(mm_index_mask.m, input1,
- mm_shuffle_input.m, mm_ones_16.m,
- mm_bytes.m, mm_type_quad_range.m,
- flows.trans, &indices3, &indices4);
-
- input0 = transition4(mm_index_mask.m, input0,
- mm_shuffle_input.m, mm_ones_16.m,
- mm_bytes.m, mm_type_quad_range.m,
- flows.trans, &indices1, &indices2);
-
- input1 = transition4(mm_index_mask.m, input1,
- mm_shuffle_input.m, mm_ones_16.m,
- mm_bytes.m, mm_type_quad_range.m,
- flows.trans, &indices3, &indices4);
-
- input0 = transition4(mm_index_mask.m, input0,
- mm_shuffle_input.m, mm_ones_16.m,
- mm_bytes.m, mm_type_quad_range.m,
- flows.trans, &indices1, &indices2);
-
- input1 = transition4(mm_index_mask.m, input1,
- mm_shuffle_input.m, mm_ones_16.m,
- mm_bytes.m, mm_type_quad_range.m,
- flows.trans, &indices3, &indices4);
-
- input0 = transition4(mm_index_mask.m, input0,
- mm_shuffle_input.m, mm_ones_16.m,
- mm_bytes.m, mm_type_quad_range.m,
- flows.trans, &indices1, &indices2);
-
- input1 = transition4(mm_index_mask.m, input1,
- mm_shuffle_input.m, mm_ones_16.m,
- mm_bytes.m, mm_type_quad_range.m,
- flows.trans, &indices3, &indices4);
-
- /* Check for any matches. */
- acl_match_check_x4(0, ctx, parms, &flows,
- &indices1, &indices2, mm_match_mask.m);
- acl_match_check_x4(4, ctx, parms, &flows,
- &indices3, &indices4, mm_match_mask.m);
- }
-
- return 0;
-}
-
-/*
- * Execute trie traversal with 4 flows in parallel
- */
-static inline int
-search_sse_4(const struct rte_acl_ctx *ctx, const uint8_t **data,
- uint32_t *results, int total_packets, uint32_t categories)
-{
- int n;
- struct acl_flow_data flows;
- uint64_t index_array[MAX_SEARCHES_SSE4];
- struct completion cmplt[MAX_SEARCHES_SSE4];
- struct parms parms[MAX_SEARCHES_SSE4];
- xmm_t input, indices1, indices2;
-
- acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
- total_packets, categories, ctx->trans_table);
-
- for (n = 0; n < MAX_SEARCHES_SSE4; n++) {
- cmplt[n].count = 0;
- index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
- }
-
- indices1 = MM_LOADU((xmm_t *) &index_array[0]);
- indices2 = MM_LOADU((xmm_t *) &index_array[2]);
-
- /* Check for any matches. */
- acl_match_check_x4(0, ctx, parms, &flows,
- &indices1, &indices2, mm_match_mask.m);
-
- while (flows.started > 0) {
-
- /* Gather 4 bytes of input data for each stream. */
- input = MM_INSERT32(mm_ones_16.m, GET_NEXT_4BYTES(parms, 0), 0);
- input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 1), 1);
- input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 2), 2);
- input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 3), 3);
-
- /* Process the 4 bytes of input on each stream. */
- input = transition4(mm_index_mask.m, input,
- mm_shuffle_input.m, mm_ones_16.m,
- mm_bytes.m, mm_type_quad_range.m,
- flows.trans, &indices1, &indices2);
-
- input = transition4(mm_index_mask.m, input,
- mm_shuffle_input.m, mm_ones_16.m,
- mm_bytes.m, mm_type_quad_range.m,
- flows.trans, &indices1, &indices2);
-
- input = transition4(mm_index_mask.m, input,
- mm_shuffle_input.m, mm_ones_16.m,
- mm_bytes.m, mm_type_quad_range.m,
- flows.trans, &indices1, &indices2);
-
- input = transition4(mm_index_mask.m, input,
- mm_shuffle_input.m, mm_ones_16.m,
- mm_bytes.m, mm_type_quad_range.m,
- flows.trans, &indices1, &indices2);
-
- /* Check for any matches. */
- acl_match_check_x4(0, ctx, parms, &flows,
- &indices1, &indices2, mm_match_mask.m);
- }
-
- return 0;
-}
-
-static inline xmm_t
-transition2(xmm_t index_mask, xmm_t next_input, xmm_t shuffle_input,
- xmm_t ones_16, xmm_t bytes, xmm_t type_quad_range,
- const uint64_t *trans, xmm_t *indices1)
-{
- uint64_t t;
- xmm_t addr, indices2;
-
- indices2 = MM_XOR(ones_16, ones_16);
-
- addr = acl_calc_addr(index_mask, next_input, shuffle_input, ones_16,
- bytes, type_quad_range, indices1, &indices2);
-
- /* Gather 64 bit transitions and pack 2 per register. */
-
- t = trans[MM_CVT32(addr)];
-
- /* get slot 1 */
- addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT1);
- *indices1 = MM_SET64(trans[MM_CVT32(addr)], t);
-
- return MM_SRL32(next_input, 8);
-}
-
-/*
- * Execute trie traversal with 2 flows in parallel.
- */
-static inline int
-search_sse_2(const struct rte_acl_ctx *ctx, const uint8_t **data,
- uint32_t *results, uint32_t total_packets, uint32_t categories)
-{
- int n;
- struct acl_flow_data flows;
- uint64_t index_array[MAX_SEARCHES_SSE2];
- struct completion cmplt[MAX_SEARCHES_SSE2];
- struct parms parms[MAX_SEARCHES_SSE2];
- xmm_t input, indices;
-
- acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
- total_packets, categories, ctx->trans_table);
-
- for (n = 0; n < MAX_SEARCHES_SSE2; n++) {
- cmplt[n].count = 0;
- index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
- }
-
- indices = MM_LOADU((xmm_t *) &index_array[0]);
-
- /* Check for any matches. */
- acl_match_check_x2(0, ctx, parms, &flows, &indices, mm_match_mask64.m);
-
- while (flows.started > 0) {
-
- /* Gather 4 bytes of input data for each stream. */
- input = MM_INSERT32(mm_ones_16.m, GET_NEXT_4BYTES(parms, 0), 0);
- input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 1), 1);
-
- /* Process the 4 bytes of input on each stream. */
-
- input = transition2(mm_index_mask64.m, input,
- mm_shuffle_input64.m, mm_ones_16.m,
- mm_bytes64.m, mm_type_quad_range64.m,
- flows.trans, &indices);
-
- input = transition2(mm_index_mask64.m, input,
- mm_shuffle_input64.m, mm_ones_16.m,
- mm_bytes64.m, mm_type_quad_range64.m,
- flows.trans, &indices);
-
- input = transition2(mm_index_mask64.m, input,
- mm_shuffle_input64.m, mm_ones_16.m,
- mm_bytes64.m, mm_type_quad_range64.m,
- flows.trans, &indices);
-
- input = transition2(mm_index_mask64.m, input,
- mm_shuffle_input64.m, mm_ones_16.m,
- mm_bytes64.m, mm_type_quad_range64.m,
- flows.trans, &indices);
-
- /* Check for any matches. */
- acl_match_check_x2(0, ctx, parms, &flows, &indices,
- mm_match_mask64.m);
- }
-
- return 0;
-}
-
-int
-rte_acl_classify_sse(const struct rte_acl_ctx *ctx, const uint8_t **data,
- uint32_t *results, uint32_t num, uint32_t categories)
-{
- if (categories != 1 &&
- ((RTE_ACL_RESULTS_MULTIPLIER - 1) & categories) != 0)
- return -EINVAL;
-
- if (likely(num >= MAX_SEARCHES_SSE8))
- return search_sse_8(ctx, data, results, num, categories);
- else if (num >= MAX_SEARCHES_SSE4)
- return search_sse_4(ctx, data, results, num, categories);
- else
- return search_sse_2(ctx, data, results, num, categories);
-}
diff --git a/src/dpdk_lib18/librte_acl/acl_vect.h b/src/dpdk_lib18/librte_acl/acl_vect.h
deleted file mode 100755
index d8136003..00000000
--- a/src/dpdk_lib18/librte_acl/acl_vect.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_ACL_VECT_H_
-#define _RTE_ACL_VECT_H_
-
-/**
- * @file
- *
- * RTE ACL SSE/AVX related header.
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define MM_ADD16(a, b) _mm_add_epi16(a, b)
-#define MM_ADD32(a, b) _mm_add_epi32(a, b)
-#define MM_ALIGNR8(a, b, c) _mm_alignr_epi8(a, b, c)
-#define MM_AND(a, b) _mm_and_si128(a, b)
-#define MM_ANDNOT(a, b) _mm_andnot_si128(a, b)
-#define MM_BLENDV8(a, b, c) _mm_blendv_epi8(a, b, c)
-#define MM_CMPEQ16(a, b) _mm_cmpeq_epi16(a, b)
-#define MM_CMPEQ32(a, b) _mm_cmpeq_epi32(a, b)
-#define MM_CMPEQ8(a, b) _mm_cmpeq_epi8(a, b)
-#define MM_CMPGT32(a, b) _mm_cmpgt_epi32(a, b)
-#define MM_CMPGT8(a, b) _mm_cmpgt_epi8(a, b)
-#define MM_CVT(a) _mm_cvtsi32_si128(a)
-#define MM_CVT32(a) _mm_cvtsi128_si32(a)
-#define MM_CVTU32(a) _mm_cvtsi32_si128(a)
-#define MM_INSERT16(a, c, b) _mm_insert_epi16(a, c, b)
-#define MM_INSERT32(a, c, b) _mm_insert_epi32(a, c, b)
-#define MM_LOAD(a) _mm_load_si128(a)
-#define MM_LOADH_PI(a, b) _mm_loadh_pi(a, b)
-#define MM_LOADU(a) _mm_loadu_si128(a)
-#define MM_MADD16(a, b) _mm_madd_epi16(a, b)
-#define MM_MADD8(a, b) _mm_maddubs_epi16(a, b)
-#define MM_MOVEMASK8(a) _mm_movemask_epi8(a)
-#define MM_OR(a, b) _mm_or_si128(a, b)
-#define MM_SET1_16(a) _mm_set1_epi16(a)
-#define MM_SET1_32(a) _mm_set1_epi32(a)
-#define MM_SET1_64(a) _mm_set1_epi64(a)
-#define MM_SET1_8(a) _mm_set1_epi8(a)
-#define MM_SET32(a, b, c, d) _mm_set_epi32(a, b, c, d)
-#define MM_SHUFFLE32(a, b) _mm_shuffle_epi32(a, b)
-#define MM_SHUFFLE8(a, b) _mm_shuffle_epi8(a, b)
-#define MM_SHUFFLEPS(a, b, c) _mm_shuffle_ps(a, b, c)
-#define MM_SIGN8(a, b) _mm_sign_epi8(a, b)
-#define MM_SLL64(a, b) _mm_sll_epi64(a, b)
-#define MM_SRL128(a, b) _mm_srli_si128(a, b)
-#define MM_SRL16(a, b) _mm_srli_epi16(a, b)
-#define MM_SRL32(a, b) _mm_srli_epi32(a, b)
-#define MM_STORE(a, b) _mm_store_si128(a, b)
-#define MM_STOREU(a, b) _mm_storeu_si128(a, b)
-#define MM_TESTZ(a, b) _mm_testz_si128(a, b)
-#define MM_XOR(a, b) _mm_xor_si128(a, b)
-
-#define MM_SET16(a, b, c, d, e, f, g, h) \
- _mm_set_epi16(a, b, c, d, e, f, g, h)
-
-#define MM_SET8(c0, c1, c2, c3, c4, c5, c6, c7, \
- c8, c9, cA, cB, cC, cD, cE, cF) \
- _mm_set_epi8(c0, c1, c2, c3, c4, c5, c6, c7, \
- c8, c9, cA, cB, cC, cD, cE, cF)
-
-#ifdef RTE_ARCH_X86_64
-
-#define MM_CVT64(a) _mm_cvtsi128_si64(a)
-
-#else
-
-#define MM_CVT64(a) ({ \
- rte_xmm_t m; \
- m.m = (a); \
- (m.u64[0]); \
-})
-
-#endif /*RTE_ARCH_X86_64 */
-
-/*
- * Prior to version 12.1, icc doesn't support _mm_set_epi64x.
- */
-#if (defined(__ICC) && __ICC < 1210)
-
-#define MM_SET64(a, b) ({ \
- rte_xmm_t m; \
- m.u64[0] = b; \
- m.u64[1] = a; \
- (m.m); \
-})
-
-#else
-
-#define MM_SET64(a, b) _mm_set_epi64x(a, b)
-
-#endif /* (defined(__ICC) && __ICC < 1210) */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_ACL_VECT_H_ */
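
A two-line sanity check of the wrappers above, assuming this header and its
SSE prerequisites are in scope (xmm_t comes from rte_common_vect.h):
MM_SET64(a, b) places b in the low 64 bits and MM_CVT64() reads them back.

    xmm_t v = MM_SET64(/* high */ 1, /* low */ 2);
    uint64_t lo = MM_CVT64(v); /* lo == 2 */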
diff --git a/src/dpdk_lib18/librte_acl/rte_acl.c b/src/dpdk_lib18/librte_acl/rte_acl.c
deleted file mode 100755
index 547e6dae..00000000
--- a/src/dpdk_lib18/librte_acl/rte_acl.c
+++ /dev/null
@@ -1,516 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <rte_acl.h>
-#include "acl.h"
-
-#define BIT_SIZEOF(x) (sizeof(x) * CHAR_BIT)
-
-TAILQ_HEAD(rte_acl_list, rte_tailq_entry);
-
-static const rte_acl_classify_t classify_fns[] = {
- [RTE_ACL_CLASSIFY_DEFAULT] = rte_acl_classify_scalar,
- [RTE_ACL_CLASSIFY_SCALAR] = rte_acl_classify_scalar,
- [RTE_ACL_CLASSIFY_SSE] = rte_acl_classify_sse,
-};
-
-/* by default, use the always-available scalar code path. */
-static enum rte_acl_classify_alg rte_acl_default_classify =
- RTE_ACL_CLASSIFY_SCALAR;
-
-static void
-rte_acl_set_default_classify(enum rte_acl_classify_alg alg)
-{
- rte_acl_default_classify = alg;
-}
-
-extern int
-rte_acl_set_ctx_classify(struct rte_acl_ctx *ctx, enum rte_acl_classify_alg alg)
-{
- if (ctx == NULL || (uint32_t)alg >= RTE_DIM(classify_fns))
- return -EINVAL;
-
- ctx->alg = alg;
- return 0;
-}
-
-static void __attribute__((constructor))
-rte_acl_init(void)
-{
- enum rte_acl_classify_alg alg = RTE_ACL_CLASSIFY_DEFAULT;
-
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
- alg = RTE_ACL_CLASSIFY_SSE;
-
- rte_acl_set_default_classify(alg);
-}
-
-int
-rte_acl_classify(const struct rte_acl_ctx *ctx, const uint8_t **data,
- uint32_t *results, uint32_t num, uint32_t categories)
-{
- return classify_fns[ctx->alg](ctx, data, results, num, categories);
-}
-
-int
-rte_acl_classify_alg(const struct rte_acl_ctx *ctx, const uint8_t **data,
- uint32_t *results, uint32_t num, uint32_t categories,
- enum rte_acl_classify_alg alg)
-{
- return classify_fns[alg](ctx, data, results, num, categories);
-}
-
-struct rte_acl_ctx *
-rte_acl_find_existing(const char *name)
-{
- struct rte_acl_ctx *ctx = NULL;
- struct rte_acl_list *acl_list;
- struct rte_tailq_entry *te;
-
- /* check that we have an initialised tail queue */
- acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
- if (acl_list == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
-
- rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
- TAILQ_FOREACH(te, acl_list, next) {
- ctx = (struct rte_acl_ctx *) te->data;
- if (strncmp(name, ctx->name, sizeof(ctx->name)) == 0)
- break;
- }
- rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
-
- if (te == NULL) {
- rte_errno = ENOENT;
- return NULL;
- }
- return ctx;
-}
-
-void
-rte_acl_free(struct rte_acl_ctx *ctx)
-{
- struct rte_acl_list *acl_list;
- struct rte_tailq_entry *te;
-
- if (ctx == NULL)
- return;
-
- /* check that we have an initialised tail queue */
- acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
- if (acl_list == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return;
- }
-
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
-
- /* find our tailq entry */
- TAILQ_FOREACH(te, acl_list, next) {
- if (te->data == (void *) ctx)
- break;
- }
- if (te == NULL) {
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
- return;
- }
-
- TAILQ_REMOVE(acl_list, te, next);
-
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
-
- rte_free(ctx->mem);
- rte_free(ctx);
- rte_free(te);
-}
-
-struct rte_acl_ctx *
-rte_acl_create(const struct rte_acl_param *param)
-{
- size_t sz;
- struct rte_acl_ctx *ctx;
- struct rte_acl_list *acl_list;
- struct rte_tailq_entry *te;
- char name[sizeof(ctx->name)];
-
- /* check that we have an initialised tail queue */
- acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
- if (acl_list == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
-
- /* check that input parameters are valid. */
- if (param == NULL || param->name == NULL) {
- rte_errno = EINVAL;
- return NULL;
- }
-
- snprintf(name, sizeof(name), "ACL_%s", param->name);
-
- /* calculate amount of memory required for pattern set. */
- sz = sizeof(*ctx) + param->max_rule_num * param->rule_size;
-
- /* get EAL TAILQ lock. */
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
-
- /* if we already have one with that name */
- TAILQ_FOREACH(te, acl_list, next) {
- ctx = (struct rte_acl_ctx *) te->data;
- if (strncmp(param->name, ctx->name, sizeof(ctx->name)) == 0)
- break;
- }
-
-	/* if an ACL with that name doesn't exist, create a new one. */
- if (te == NULL) {
- ctx = NULL;
- te = rte_zmalloc("ACL_TAILQ_ENTRY", sizeof(*te), 0);
-
- if (te == NULL) {
- RTE_LOG(ERR, ACL, "Cannot allocate tailq entry!\n");
- goto exit;
- }
-
- ctx = rte_zmalloc_socket(name, sz, RTE_CACHE_LINE_SIZE, param->socket_id);
-
- if (ctx == NULL) {
- RTE_LOG(ERR, ACL,
- "allocation of %zu bytes on socket %d for %s failed\n",
- sz, param->socket_id, name);
- rte_free(te);
- goto exit;
- }
- /* init new allocated context. */
- ctx->rules = ctx + 1;
- ctx->max_rules = param->max_rule_num;
- ctx->rule_sz = param->rule_size;
- ctx->socket_id = param->socket_id;
- ctx->alg = rte_acl_default_classify;
- snprintf(ctx->name, sizeof(ctx->name), "%s", param->name);
-
- te->data = (void *) ctx;
-
- TAILQ_INSERT_TAIL(acl_list, te, next);
- }
-
-exit:
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
- return ctx;
-}
-
-static int
-acl_add_rules(struct rte_acl_ctx *ctx, const void *rules, uint32_t num)
-{
- uint8_t *pos;
-
- if (num + ctx->num_rules > ctx->max_rules)
- return -ENOMEM;
-
- pos = ctx->rules;
- pos += ctx->rule_sz * ctx->num_rules;
- memcpy(pos, rules, num * ctx->rule_sz);
- ctx->num_rules += num;
-
- return 0;
-}
-
-static int
-acl_check_rule(const struct rte_acl_rule_data *rd)
-{
- if ((rd->category_mask & LEN2MASK(RTE_ACL_MAX_CATEGORIES)) == 0 ||
- rd->priority > RTE_ACL_MAX_PRIORITY ||
- rd->priority < RTE_ACL_MIN_PRIORITY ||
- rd->userdata == RTE_ACL_INVALID_USERDATA)
- return -EINVAL;
- return 0;
-}
-
-int
-rte_acl_add_rules(struct rte_acl_ctx *ctx, const struct rte_acl_rule *rules,
- uint32_t num)
-{
- const struct rte_acl_rule *rv;
- uint32_t i;
- int32_t rc;
-
- if (ctx == NULL || rules == NULL || 0 == ctx->rule_sz)
- return -EINVAL;
-
- for (i = 0; i != num; i++) {
- rv = (const struct rte_acl_rule *)
- ((uintptr_t)rules + i * ctx->rule_sz);
- rc = acl_check_rule(&rv->data);
- if (rc != 0) {
- RTE_LOG(ERR, ACL, "%s(%s): rule #%u is invalid\n",
- __func__, ctx->name, i + 1);
- return rc;
- }
- }
-
- return acl_add_rules(ctx, rules, num);
-}
-
-/*
- * Reset all rules.
- * Note that RT structures are not affected.
- */
-void
-rte_acl_reset_rules(struct rte_acl_ctx *ctx)
-{
- if (ctx != NULL)
- ctx->num_rules = 0;
-}
-
-/*
- * Reset all rules and destroy RT structures.
- */
-void
-rte_acl_reset(struct rte_acl_ctx *ctx)
-{
- if (ctx != NULL) {
- rte_acl_reset_rules(ctx);
- rte_acl_build(ctx, &ctx->config);
- }
-}
-
-/*
- * Dump the ACL context to stdout.
- */
-void
-rte_acl_dump(const struct rte_acl_ctx *ctx)
-{
- if (!ctx)
- return;
- printf("acl context <%s>@%p\n", ctx->name, ctx);
- printf(" socket_id=%"PRId32"\n", ctx->socket_id);
- printf(" alg=%"PRId32"\n", ctx->alg);
- printf(" max_rules=%"PRIu32"\n", ctx->max_rules);
- printf(" rule_size=%"PRIu32"\n", ctx->rule_sz);
- printf(" num_rules=%"PRIu32"\n", ctx->num_rules);
- printf(" num_categories=%"PRIu32"\n", ctx->num_categories);
- printf(" num_tries=%"PRIu32"\n", ctx->num_tries);
-}
-
-/*
- * Dump all ACL contexts to stdout.
- */
-void
-rte_acl_list_dump(void)
-{
- struct rte_acl_ctx *ctx;
- struct rte_acl_list *acl_list;
- struct rte_tailq_entry *te;
-
- /* check that we have an initialised tail queue */
- acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
- if (acl_list == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return;
- }
-
- rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
- TAILQ_FOREACH(te, acl_list, next) {
- ctx = (struct rte_acl_ctx *) te->data;
- rte_acl_dump(ctx);
- }
- rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
-}
-
-/*
- * Support for legacy ipv4vlan rules.
- */
-
-RTE_ACL_RULE_DEF(acl_ipv4vlan_rule, RTE_ACL_IPV4VLAN_NUM_FIELDS);
-
-static int
-acl_ipv4vlan_check_rule(const struct rte_acl_ipv4vlan_rule *rule)
-{
- if (rule->src_port_low > rule->src_port_high ||
- rule->dst_port_low > rule->dst_port_high ||
- rule->src_mask_len > BIT_SIZEOF(rule->src_addr) ||
- rule->dst_mask_len > BIT_SIZEOF(rule->dst_addr))
- return -EINVAL;
-
- return acl_check_rule(&rule->data);
-}
-
-static void
-acl_ipv4vlan_convert_rule(const struct rte_acl_ipv4vlan_rule *ri,
- struct acl_ipv4vlan_rule *ro)
-{
- ro->data = ri->data;
-
- ro->field[RTE_ACL_IPV4VLAN_PROTO_FIELD].value.u8 = ri->proto;
- ro->field[RTE_ACL_IPV4VLAN_VLAN1_FIELD].value.u16 = ri->vlan;
- ro->field[RTE_ACL_IPV4VLAN_VLAN2_FIELD].value.u16 = ri->domain;
- ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].value.u32 = ri->src_addr;
- ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].value.u32 = ri->dst_addr;
- ro->field[RTE_ACL_IPV4VLAN_SRCP_FIELD].value.u16 = ri->src_port_low;
- ro->field[RTE_ACL_IPV4VLAN_DSTP_FIELD].value.u16 = ri->dst_port_low;
-
- ro->field[RTE_ACL_IPV4VLAN_PROTO_FIELD].mask_range.u8 = ri->proto_mask;
- ro->field[RTE_ACL_IPV4VLAN_VLAN1_FIELD].mask_range.u16 = ri->vlan_mask;
- ro->field[RTE_ACL_IPV4VLAN_VLAN2_FIELD].mask_range.u16 =
- ri->domain_mask;
- ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].mask_range.u32 =
- ri->src_mask_len;
- ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].mask_range.u32 = ri->dst_mask_len;
- ro->field[RTE_ACL_IPV4VLAN_SRCP_FIELD].mask_range.u16 =
- ri->src_port_high;
- ro->field[RTE_ACL_IPV4VLAN_DSTP_FIELD].mask_range.u16 =
- ri->dst_port_high;
-}
-
-int
-rte_acl_ipv4vlan_add_rules(struct rte_acl_ctx *ctx,
- const struct rte_acl_ipv4vlan_rule *rules,
- uint32_t num)
-{
- int32_t rc;
- uint32_t i;
- struct acl_ipv4vlan_rule rv;
-
- if (ctx == NULL || rules == NULL || ctx->rule_sz != sizeof(rv))
- return -EINVAL;
-
- /* check input rules. */
- for (i = 0; i != num; i++) {
- rc = acl_ipv4vlan_check_rule(rules + i);
- if (rc != 0) {
- RTE_LOG(ERR, ACL, "%s(%s): rule #%u is invalid\n",
- __func__, ctx->name, i + 1);
- return rc;
- }
- }
-
- if (num + ctx->num_rules > ctx->max_rules)
- return -ENOMEM;
-
- /* perform conversion to the internal format and add to the context. */
- for (i = 0, rc = 0; i != num && rc == 0; i++) {
- acl_ipv4vlan_convert_rule(rules + i, &rv);
- rc = acl_add_rules(ctx, &rv, 1);
- }
-
- return rc;
-}
-
-static void
-acl_ipv4vlan_config(struct rte_acl_config *cfg,
- const uint32_t layout[RTE_ACL_IPV4VLAN_NUM],
- uint32_t num_categories)
-{
- static const struct rte_acl_field_def
- ipv4_defs[RTE_ACL_IPV4VLAN_NUM_FIELDS] = {
- {
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint8_t),
- .field_index = RTE_ACL_IPV4VLAN_PROTO_FIELD,
- .input_index = RTE_ACL_IPV4VLAN_PROTO,
- },
- {
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint16_t),
- .field_index = RTE_ACL_IPV4VLAN_VLAN1_FIELD,
- .input_index = RTE_ACL_IPV4VLAN_VLAN,
- },
- {
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint16_t),
- .field_index = RTE_ACL_IPV4VLAN_VLAN2_FIELD,
- .input_index = RTE_ACL_IPV4VLAN_VLAN,
- },
- {
- .type = RTE_ACL_FIELD_TYPE_MASK,
- .size = sizeof(uint32_t),
- .field_index = RTE_ACL_IPV4VLAN_SRC_FIELD,
- .input_index = RTE_ACL_IPV4VLAN_SRC,
- },
- {
- .type = RTE_ACL_FIELD_TYPE_MASK,
- .size = sizeof(uint32_t),
- .field_index = RTE_ACL_IPV4VLAN_DST_FIELD,
- .input_index = RTE_ACL_IPV4VLAN_DST,
- },
- {
- .type = RTE_ACL_FIELD_TYPE_RANGE,
- .size = sizeof(uint16_t),
- .field_index = RTE_ACL_IPV4VLAN_SRCP_FIELD,
- .input_index = RTE_ACL_IPV4VLAN_PORTS,
- },
- {
- .type = RTE_ACL_FIELD_TYPE_RANGE,
- .size = sizeof(uint16_t),
- .field_index = RTE_ACL_IPV4VLAN_DSTP_FIELD,
- .input_index = RTE_ACL_IPV4VLAN_PORTS,
- },
- };
-
- memcpy(&cfg->defs, ipv4_defs, sizeof(ipv4_defs));
- cfg->num_fields = RTE_DIM(ipv4_defs);
-
- cfg->defs[RTE_ACL_IPV4VLAN_PROTO_FIELD].offset =
- layout[RTE_ACL_IPV4VLAN_PROTO];
- cfg->defs[RTE_ACL_IPV4VLAN_VLAN1_FIELD].offset =
- layout[RTE_ACL_IPV4VLAN_VLAN];
- cfg->defs[RTE_ACL_IPV4VLAN_VLAN2_FIELD].offset =
- layout[RTE_ACL_IPV4VLAN_VLAN] +
- cfg->defs[RTE_ACL_IPV4VLAN_VLAN1_FIELD].size;
- cfg->defs[RTE_ACL_IPV4VLAN_SRC_FIELD].offset =
- layout[RTE_ACL_IPV4VLAN_SRC];
- cfg->defs[RTE_ACL_IPV4VLAN_DST_FIELD].offset =
- layout[RTE_ACL_IPV4VLAN_DST];
- cfg->defs[RTE_ACL_IPV4VLAN_SRCP_FIELD].offset =
- layout[RTE_ACL_IPV4VLAN_PORTS];
- cfg->defs[RTE_ACL_IPV4VLAN_DSTP_FIELD].offset =
- layout[RTE_ACL_IPV4VLAN_PORTS] +
- cfg->defs[RTE_ACL_IPV4VLAN_SRCP_FIELD].size;
-
- cfg->num_categories = num_categories;
-}
-
-int
-rte_acl_ipv4vlan_build(struct rte_acl_ctx *ctx,
- const uint32_t layout[RTE_ACL_IPV4VLAN_NUM],
- uint32_t num_categories)
-{
- struct rte_acl_config cfg;
-
- if (ctx == NULL || layout == NULL)
- return -EINVAL;
-
- acl_ipv4vlan_config(&cfg, layout, num_categories);
- return rte_acl_build(ctx, &cfg);
-}
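
A condensed sketch of the legacy ipv4vlan flow defined above (error handling
trimmed; the layout offsets are hypothetical and must match where each input
field sits inside the buffers later passed to classify;
RTE_ACL_IPV4VLAN_RULE_SZ comes from the public rte_acl.h):

    struct rte_acl_param prm = {
        .name = "acl-demo",
        .socket_id = SOCKET_ID_ANY,
        .rule_size = RTE_ACL_IPV4VLAN_RULE_SZ,
        .max_rule_num = 8,
    };
    struct rte_acl_ctx *ctx = rte_acl_create(&prm);

    struct rte_acl_ipv4vlan_rule r = {
        .data = { .category_mask = 1, .priority = 1, .userdata = 1 },
        .proto = 17, .proto_mask = 0xff,            /* match UDP only */
        .src_port_low = 0, .src_port_high = UINT16_MAX,
        .dst_port_low = 0, .dst_port_high = UINT16_MAX,
    };
    uint32_t layout[RTE_ACL_IPV4VLAN_NUM] = { 0, 1, 3, 7, 11 }; /* hypothetical */

    rte_acl_ipv4vlan_add_rules(ctx, &r, 1);
    rte_acl_ipv4vlan_build(ctx, layout, 1);
    /* ... classify with buffers matching `layout`, then rte_acl_free(ctx) */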
diff --git a/src/dpdk_lib18/librte_acl/rte_acl_osdep_alone.h b/src/dpdk_lib18/librte_acl/rte_acl_osdep_alone.h
deleted file mode 100755
index a84b6f97..00000000
--- a/src/dpdk_lib18/librte_acl/rte_acl_osdep_alone.h
+++ /dev/null
@@ -1,278 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_ACL_OSDEP_ALONE_H_
-#define _RTE_ACL_OSDEP_ALONE_H_
-
-/**
- * @file
- *
- * RTE ACL OS dependent file.
- * An example of how to build/use the ACL library standalone
- * (without the rest of DPDK).
- * Don't include this file on its own; use <rte_acl_osdep.h>.
- */
-
-#if (defined(__ICC) || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
-
-#ifdef __SSE__
-#include <xmmintrin.h>
-#endif
-
-#ifdef __SSE2__
-#include <emmintrin.h>
-#endif
-
-#if defined(__SSE4_2__) || defined(__SSE4_1__)
-#include <smmintrin.h>
-#endif
-
-#else
-
-#include <x86intrin.h>
-
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define DUMMY_MACRO do {} while (0)
-
-/*
- * rte_common related.
- */
-#define __rte_unused __attribute__((__unused__))
-
-#define RTE_PTR_ADD(ptr, x) ((typeof(ptr))((uintptr_t)(ptr) + (x)))
-
-#define RTE_PTR_ALIGN_FLOOR(ptr, align) \
- (typeof(ptr))((uintptr_t)(ptr) & ~((uintptr_t)(align) - 1))
-
-#define RTE_PTR_ALIGN_CEIL(ptr, align) \
- RTE_PTR_ALIGN_FLOOR(RTE_PTR_ADD(ptr, (align) - 1), align)
-
-#define RTE_PTR_ALIGN(ptr, align) RTE_PTR_ALIGN_CEIL(ptr, align)
-
-#define RTE_ALIGN_FLOOR(val, align) \
- (typeof(val))((val) & (~((typeof(val))((align) - 1))))
-
-#define RTE_ALIGN_CEIL(val, align) \
- RTE_ALIGN_FLOOR(((val) + ((typeof(val))(align) - 1)), align)
-
-#define RTE_ALIGN(ptr, align) RTE_ALIGN_CEIL(ptr, align)
-
-#define RTE_MIN(a, b) ({ \
- typeof(a) _a = (a); \
- typeof(b) _b = (b); \
- _a < _b ? _a : _b; \
- })
-
-#define RTE_DIM(a) (sizeof(a) / sizeof((a)[0]))
-
-/**
- * Searches the input parameter for the least significant set bit
- * (starting from zero).
- * If a set bit is found, the index of the least significant one is returned.
- * If the content of the input parameter is zero, then the content of the return
- * value is undefined.
- * @param v
- * input parameter, should not be zero.
- * @return
- * least significant set bit in the input parameter.
- */
-static inline uint32_t
-rte_bsf32(uint32_t v)
-{
- asm("bsf %1,%0"
- : "=r" (v)
- : "rm" (v));
- return v;
-}
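
For example:

    uint32_t idx = rte_bsf32(0x10); /* idx == 4; undefined for a zero input */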
-
-/*
- * rte_common_vect related.
- */
-typedef __m128i xmm_t;
-
-#define XMM_SIZE (sizeof(xmm_t))
-#define XMM_MASK (XMM_SIZE - 1)
-
-typedef union rte_mmsse {
- xmm_t m;
- uint8_t u8[XMM_SIZE / sizeof(uint8_t)];
- uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
- uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
- uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
- double pd[XMM_SIZE / sizeof(double)];
-} rte_xmm_t;
-
-/*
- * rte_cycles related.
- */
-static inline uint64_t
-rte_rdtsc(void)
-{
- union {
- uint64_t tsc_64;
- struct {
- uint32_t lo_32;
- uint32_t hi_32;
- };
- } tsc;
-
- asm volatile("rdtsc" :
- "=a" (tsc.lo_32),
- "=d" (tsc.hi_32));
- return tsc.tsc_64;
-}
-
-/*
- * rte_lcore related.
- */
-#define rte_lcore_id() (0)
-
-/*
- * rte_errno related.
- */
-#define rte_errno errno
-#define E_RTE_NO_TAILQ (-1)
-
-/*
- * rte_rwlock related.
- */
-#define rte_rwlock_read_lock(x) DUMMY_MACRO
-#define rte_rwlock_read_unlock(x) DUMMY_MACRO
-#define rte_rwlock_write_lock(x) DUMMY_MACRO
-#define rte_rwlock_write_unlock(x) DUMMY_MACRO
-
-/*
- * rte_memory related.
- */
-#define SOCKET_ID_ANY -1 /**< Any NUMA socket. */
-#define RTE_CACHE_LINE_SIZE 64 /**< Cache line size. */
-#define RTE_CACHE_LINE_MASK (RTE_CACHE_LINE_SIZE-1) /**< Cache line mask. */
-
-/**
- * Force alignment to cache line.
- */
-#define __rte_cache_aligned __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)))
-
-
-/*
- * rte_byteorder related.
- */
-#define rte_le_to_cpu_16(x) (x)
-#define rte_le_to_cpu_32(x) (x)
-
-#define rte_cpu_to_be_16(x) \
- (((x) & UINT8_MAX) << CHAR_BIT | ((x) >> CHAR_BIT & UINT8_MAX))
-#define rte_cpu_to_be_32(x) __builtin_bswap32(x)
-
-/*
- * rte_branch_prediction related.
- */
-#ifndef likely
-#define likely(x) __builtin_expect((x), 1)
-#endif /* likely */
-
-#ifndef unlikely
-#define unlikely(x) __builtin_expect((x), 0)
-#endif /* unlikely */
-
-
-/*
- * rte_tailq related.
- */
-static inline void *
-rte_dummy_tailq(void)
-{
- static __thread TAILQ_HEAD(rte_dummy_head, rte_dummy) dummy_head;
- TAILQ_INIT(&dummy_head);
- return &dummy_head;
-}
-
-#define RTE_TAILQ_LOOKUP_BY_IDX(idx, struct_name) rte_dummy_tailq()
-
-#define RTE_EAL_TAILQ_REMOVE(idx, type, elm) DUMMY_MACRO
-
-/*
- * rte_string related
- */
-#define snprintf(str, len, frmt, args...) snprintf(str, len, frmt, ##args)
-
-/*
- * rte_log related
- */
-#define RTE_LOG(l, t, fmt, args...) printf(fmt, ##args)
-
-/*
- * rte_malloc related
- */
-#define rte_free(x) free(x)
-
-static inline void *
-rte_zmalloc_socket(__rte_unused const char *type, size_t size, unsigned align,
- __rte_unused int socket)
-{
- void *ptr;
- int rc;
-
- rc = posix_memalign(&ptr, align, size);
- if (rc != 0) {
- rte_errno = rc;
- return NULL;
- }
-
- memset(ptr, 0, size);
- return ptr;
-}
-
-/*
- * rte_debug related
- */
-#define rte_panic(fmt, args...) do { \
- RTE_LOG(CRIT, EAL, fmt, ##args); \
- abort(); \
-} while (0)
-
-#define rte_exit(err, fmt, args...) do { \
- RTE_LOG(CRIT, EAL, fmt, ##args); \
- exit(err); \
-} while (0)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_ACL_OSDEP_ALONE_H_ */
diff --git a/src/dpdk_lib18/librte_acl/tb_mem.c b/src/dpdk_lib18/librte_acl/tb_mem.c
deleted file mode 100755
index fdf3080b..00000000
--- a/src/dpdk_lib18/librte_acl/tb_mem.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "tb_mem.h"
-
-/*
- * Memory management routines for temporary memory.
- * This memory is used only during the build phase and is released
- * after the build is finished.
- */
-
-static struct tb_mem_block *
-tb_pool(struct tb_mem_pool *pool, size_t sz)
-{
- struct tb_mem_block *block;
- uint8_t *ptr;
- size_t size;
-
- size = sz + pool->alignment - 1;
- block = calloc(1, size + sizeof(*pool->block));
- if (block == NULL) {
-		RTE_LOG(ERR, MALLOC, "%s(%zu) failed, currently allocated "
- "by pool: %zu bytes\n", __func__, sz, pool->alloc);
- return NULL;
- }
-
- block->pool = pool;
-
- block->next = pool->block;
- pool->block = block;
-
- pool->alloc += size;
-
- ptr = (uint8_t *)(block + 1);
- block->mem = RTE_PTR_ALIGN_CEIL(ptr, pool->alignment);
- block->size = size - (block->mem - ptr);
-
- return block;
-}
-
-void *
-tb_alloc(struct tb_mem_pool *pool, size_t size)
-{
- struct tb_mem_block *block;
- void *ptr;
- size_t new_sz;
-
- size = RTE_ALIGN_CEIL(size, pool->alignment);
-
- block = pool->block;
- if (block == NULL || block->size < size) {
- new_sz = (size > pool->min_alloc) ? size : pool->min_alloc;
- block = tb_pool(pool, new_sz);
- if (block == NULL)
- return NULL;
- }
- ptr = block->mem;
- block->size -= size;
- block->mem += size;
- return ptr;
-}
-
-void
-tb_free_pool(struct tb_mem_pool *pool)
-{
- struct tb_mem_block *next, *block;
-
- for (block = pool->block; block != NULL; block = next) {
- next = block->next;
- free(block);
- }
- pool->block = NULL;
- pool->alloc = 0;
-}
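tb_mem implements a grow-only arena: tb_alloc() carves aligned chunks out of
calloc'd blocks, and tb_free_pool() releases every block in one pass. A
minimal sketch of the intended call pattern (pool sizes and the allocation
size are hypothetical; the struct fields come from tb_mem.h):

    #include <errno.h>
    #include "tb_mem.h"

    static int build_tables(void)
    {
        struct tb_mem_pool pool = {
            .alignment = 32,        /* tb_alloc() results are 32-byte aligned */
            .min_alloc = 64 * 1024, /* grow the arena in 64 KB blocks */
        };
        void *node;

        /* build phase: allocate many small nodes from the arena */
        node = tb_alloc(&pool, 128);
        if (node == NULL)
            return -ENOMEM;
        /* ... keep calling tb_alloc() while building ... */

        tb_free_pool(&pool);        /* one call frees the whole arena */
        return 0;
    }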
diff --git a/src/dpdk_lib18/librte_cfgfile/Makefile b/src/dpdk_lib18/librte_cfgfile/Makefile
deleted file mode 100755
index 55e87015..00000000
--- a/src/dpdk_lib18/librte_cfgfile/Makefile
+++ /dev/null
@@ -1,53 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_cfgfile.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-#
-# all source are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_CFGFILE) += rte_cfgfile.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_CFGFILE)-include += rte_cfgfile.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += lib/librte_eal
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_cmdline/Makefile b/src/dpdk_lib18/librte_cmdline/Makefile
deleted file mode 100755
index 7eae4493..00000000
--- a/src/dpdk_lib18/librte_cmdline/Makefile
+++ /dev/null
@@ -1,63 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_cmdline.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) := cmdline.c
-SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_cirbuf.c
-SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse.c
-SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_etheraddr.c
-SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_ipaddr.c
-SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_num.c
-SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_string.c
-SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_rdline.c
-SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_vt100.c
-SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_socket.c
-SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_portlist.c
-
-CFLAGS += -D_GNU_SOURCE
-
-# install includes
-INCS := cmdline.h cmdline_parse.h cmdline_parse_num.h cmdline_parse_ipaddr.h
-INCS += cmdline_parse_etheraddr.h cmdline_parse_string.h cmdline_rdline.h
-INCS += cmdline_vt100.h cmdline_socket.h cmdline_cirbuf.h cmdline_parse_portlist.h
-SYMLINK-$(CONFIG_RTE_LIBRTE_CMDLINE)-include := $(INCS)
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += lib/librte_eal
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline.c b/src/dpdk_lib18/librte_cmdline/cmdline.c
deleted file mode 100755
index e61c4f2c..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <inttypes.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <termios.h>
-#include <netinet/in.h>
-
-#include <rte_string_fns.h>
-
-#include "cmdline_parse.h"
-#include "cmdline_rdline.h"
-#include "cmdline.h"
-
-static void
-cmdline_valid_buffer(struct rdline *rdl, const char *buf,
- __attribute__((unused)) unsigned int size)
-{
- struct cmdline *cl = rdl->opaque;
- int ret;
- ret = cmdline_parse(cl, buf);
- if (ret == CMDLINE_PARSE_AMBIGUOUS)
- cmdline_printf(cl, "Ambiguous command\n");
- else if (ret == CMDLINE_PARSE_NOMATCH)
- cmdline_printf(cl, "Command not found\n");
- else if (ret == CMDLINE_PARSE_BAD_ARGS)
- cmdline_printf(cl, "Bad arguments\n");
-}
-
-static int
-cmdline_complete_buffer(struct rdline *rdl, const char *buf,
- char *dstbuf, unsigned int dstsize,
- int *state)
-{
- struct cmdline *cl = rdl->opaque;
- return cmdline_complete(cl, buf, state, dstbuf, dstsize);
-}
-
-int
-cmdline_write_char(struct rdline *rdl, char c)
-{
- int ret = -1;
- struct cmdline *cl;
-
- if (!rdl)
- return -1;
-
- cl = rdl->opaque;
-
- if (cl->s_out >= 0)
- ret = write(cl->s_out, &c, 1);
-
- return ret;
-}
-
-
-void
-cmdline_set_prompt(struct cmdline *cl, const char *prompt)
-{
- if (!cl || !prompt)
- return;
- snprintf(cl->prompt, sizeof(cl->prompt), "%s", prompt);
-}
-
-struct cmdline *
-cmdline_new(cmdline_parse_ctx_t *ctx, const char *prompt, int s_in, int s_out)
-{
- struct cmdline *cl;
-
- if (!ctx || !prompt)
- return NULL;
-
- cl = malloc(sizeof(struct cmdline));
- if (cl == NULL)
- return NULL;
- memset(cl, 0, sizeof(struct cmdline));
- cl->s_in = s_in;
- cl->s_out = s_out;
- cl->ctx = ctx;
-
- rdline_init(&cl->rdl, cmdline_write_char,
- cmdline_valid_buffer, cmdline_complete_buffer);
- cl->rdl.opaque = cl;
- cmdline_set_prompt(cl, prompt);
- rdline_newline(&cl->rdl, cl->prompt);
-
- return cl;
-}
-
-void
-cmdline_free(struct cmdline *cl)
-{
- dprintf("called\n");
-
- if (!cl)
- return;
-
- if (cl->s_in > 2)
- close(cl->s_in);
- if (cl->s_out != cl->s_in && cl->s_out > 2)
- close(cl->s_out);
- free(cl);
-}
-
-void
-cmdline_printf(const struct cmdline *cl, const char *fmt, ...)
-{
- va_list ap;
-
- if (!cl || !fmt)
- return;
-
-#ifdef _GNU_SOURCE
- if (cl->s_out < 0)
- return;
- va_start(ap, fmt);
- vdprintf(cl->s_out, fmt, ap);
- va_end(ap);
-#else
- int ret;
- char *buf;
-
- if (cl->s_out < 0)
- return;
-
- buf = malloc(BUFSIZ);
- if (buf == NULL)
- return;
- va_start(ap, fmt);
- ret = vsnprintf(buf, BUFSIZ, fmt, ap);
- va_end(ap);
-	if (ret < 0) {
-		free(buf);
-		return;
-	}
- if (ret >= BUFSIZ)
- ret = BUFSIZ - 1;
- write(cl->s_out, buf, ret);
- free(buf);
-#endif
-}
-
-int
-cmdline_in(struct cmdline *cl, const char *buf, int size)
-{
- const char *history, *buffer;
- size_t histlen, buflen;
- int ret = 0;
- int i, same;
-
- if (!cl || !buf)
- return -1;
-
- for (i=0; i<size; i++) {
- ret = rdline_char_in(&cl->rdl, buf[i]);
-
- if (ret == RDLINE_RES_VALIDATED) {
- buffer = rdline_get_buffer(&cl->rdl);
- history = rdline_get_history_item(&cl->rdl, 0);
- if (history) {
- histlen = strnlen(history, RDLINE_BUF_SIZE);
- same = !memcmp(buffer, history, histlen) &&
- buffer[histlen] == '\n';
- }
- else
- same = 0;
- buflen = strnlen(buffer, RDLINE_BUF_SIZE);
- if (buflen > 1 && !same)
- rdline_add_history(&cl->rdl, buffer);
- rdline_newline(&cl->rdl, cl->prompt);
- }
- else if (ret == RDLINE_RES_EOF)
- return -1;
- else if (ret == RDLINE_RES_EXITED)
- return -1;
- }
- return i;
-}
-
-void
-cmdline_quit(struct cmdline *cl)
-{
- if (!cl)
- return;
- rdline_quit(&cl->rdl);
-}
-
-void
-cmdline_interact(struct cmdline *cl)
-{
- char c;
-
- if (!cl)
- return;
-
- c = -1;
- while (1) {
- if (read(cl->s_in, &c, 1) <= 0)
- break;
- if (cmdline_in(cl, &c, 1) < 0)
- break;
- }
-}
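The file above wires the rdline callbacks (write, validate, complete) into
the parser; driving it needs only cmdline_new()/cmdline_interact(). A minimal
sketch, assuming a NULL-terminated instruction list main_ctx built elsewhere
with the cmdline token helpers (the context name and prompt are hypothetical):

    #include "cmdline_parse.h"
    #include "cmdline.h"

    extern cmdline_parse_ctx_t main_ctx[];  /* hypothetical inst list */

    static int run_cli(void)
    {
        /* read from stdin (fd 0), echo to stdout (fd 1) */
        struct cmdline *cl = cmdline_new(main_ctx, "prompt> ", 0, 1);
        if (cl == NULL)
            return -1;
        cmdline_interact(cl);   /* blocks until EOF or cmdline_quit() */
        cmdline_free(cl);
        return 0;
    }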
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline.h b/src/dpdk_lib18/librte_cmdline/cmdline.h
deleted file mode 100755
index 06ae0866..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _CMDLINE_H_
-#define _CMDLINE_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct cmdline {
- int s_in;
- int s_out;
- cmdline_parse_ctx_t *ctx;
- struct rdline rdl;
- char prompt[RDLINE_PROMPT_SIZE];
- struct termios oldterm;
-};
-
-struct cmdline *cmdline_new(cmdline_parse_ctx_t *ctx, const char *prompt, int s_in, int s_out);
-void cmdline_set_prompt(struct cmdline *cl, const char *prompt);
-void cmdline_free(struct cmdline *cl);
-void cmdline_printf(const struct cmdline *cl, const char *fmt, ...)
- __attribute__((format(printf,2,3)));
-int cmdline_in(struct cmdline *cl, const char *buf, int size);
-int cmdline_write_char(struct rdline *rdl, char c);
-void cmdline_interact(struct cmdline *cl);
-void cmdline_quit(struct cmdline *cl);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _CMDLINE_H_ */
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_cirbuf.c b/src/dpdk_lib18/librte_cmdline/cmdline_cirbuf.c
deleted file mode 100755
index b9f9f4bc..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_cirbuf.c
+++ /dev/null
@@ -1,467 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <errno.h>
-#include <stdio.h>
-
-#include "cmdline_cirbuf.h"
-
-
-int
-cirbuf_init(struct cirbuf *cbuf, char *buf, unsigned int start, unsigned int maxlen)
-{
- if (!cbuf || !buf)
- return -EINVAL;
- cbuf->maxlen = maxlen;
- cbuf->len = 0;
- cbuf->start = start;
- cbuf->end = start;
- cbuf->buf = buf;
- return 0;
-}
-
-/* multiple add */
-
-int
-cirbuf_add_buf_head(struct cirbuf *cbuf, const char *c, unsigned int n)
-{
- unsigned int e;
-
- if (!cbuf || !c || !n || n > CIRBUF_GET_FREELEN(cbuf))
- return -EINVAL;
-
- e = CIRBUF_IS_EMPTY(cbuf) ? 1 : 0;
-
- if (n < cbuf->start + e) {
- dprintf("s[%d] -> d[%d] (%d)\n", 0, cbuf->start - n + e, n);
- memcpy(cbuf->buf + cbuf->start - n + e, c, n);
- }
- else {
- dprintf("s[%d] -> d[%d] (%d)\n", + n - (cbuf->start + e), 0,
- cbuf->start + e);
- dprintf("s[%d] -> d[%d] (%d)\n", cbuf->maxlen - n +
- (cbuf->start + e), 0, n - (cbuf->start + e));
- memcpy(cbuf->buf, c + n - (cbuf->start + e) , cbuf->start + e);
- memcpy(cbuf->buf + cbuf->maxlen - n + (cbuf->start + e), c,
- n - (cbuf->start + e));
- }
- cbuf->len += n;
- cbuf->start += (cbuf->maxlen - n + e);
- cbuf->start %= cbuf->maxlen;
- return n;
-}
-
-/* multiple add */
-
-int
-cirbuf_add_buf_tail(struct cirbuf *cbuf, const char *c, unsigned int n)
-{
- unsigned int e;
-
- if (!cbuf || !c || !n || n > CIRBUF_GET_FREELEN(cbuf))
- return -EINVAL;
-
- e = CIRBUF_IS_EMPTY(cbuf) ? 1 : 0;
-
- if (n < cbuf->maxlen - cbuf->end - 1 + e) {
- dprintf("s[%d] -> d[%d] (%d)\n", 0, cbuf->end + !e, n);
- memcpy(cbuf->buf + cbuf->end + !e, c, n);
- }
- else {
- dprintf("s[%d] -> d[%d] (%d)\n", cbuf->end + !e, 0,
- cbuf->maxlen - cbuf->end - 1 + e);
- dprintf("s[%d] -> d[%d] (%d)\n", cbuf->maxlen - cbuf->end - 1 +
- e, 0, n - cbuf->maxlen + cbuf->end + 1 - e);
- memcpy(cbuf->buf + cbuf->end + !e, c, cbuf->maxlen -
- cbuf->end - 1 + e);
- memcpy(cbuf->buf, c + cbuf->maxlen - cbuf->end - 1 + e,
- n - cbuf->maxlen + cbuf->end + 1 - e);
- }
- cbuf->len += n;
- cbuf->end += n - e;
- cbuf->end %= cbuf->maxlen;
- return n;
-}
-
-/* add at head */
-
-static inline void
-__cirbuf_add_head(struct cirbuf * cbuf, char c)
-{
- if (!CIRBUF_IS_EMPTY(cbuf)) {
- cbuf->start += (cbuf->maxlen - 1);
- cbuf->start %= cbuf->maxlen;
- }
- cbuf->buf[cbuf->start] = c;
- cbuf->len ++;
-}
-
-int
-cirbuf_add_head_safe(struct cirbuf * cbuf, char c)
-{
- if (cbuf && !CIRBUF_IS_FULL(cbuf)) {
- __cirbuf_add_head(cbuf, c);
- return 0;
- }
- return -EINVAL;
-}
-
-void
-cirbuf_add_head(struct cirbuf * cbuf, char c)
-{
- __cirbuf_add_head(cbuf, c);
-}
-
-/* add at tail */
-
-static inline void
-__cirbuf_add_tail(struct cirbuf * cbuf, char c)
-{
- if (!CIRBUF_IS_EMPTY(cbuf)) {
- cbuf->end ++;
- cbuf->end %= cbuf->maxlen;
- }
- cbuf->buf[cbuf->end] = c;
- cbuf->len ++;
-}
-
-int
-cirbuf_add_tail_safe(struct cirbuf * cbuf, char c)
-{
- if (cbuf && !CIRBUF_IS_FULL(cbuf)) {
- __cirbuf_add_tail(cbuf, c);
- return 0;
- }
- return -EINVAL;
-}
-
-void
-cirbuf_add_tail(struct cirbuf * cbuf, char c)
-{
- __cirbuf_add_tail(cbuf, c);
-}
-
-
-static inline void
-__cirbuf_shift_left(struct cirbuf *cbuf)
-{
- unsigned int i;
- char tmp = cbuf->buf[cbuf->start];
-
- for (i=0 ; i<cbuf->len ; i++) {
- cbuf->buf[(cbuf->start+i)%cbuf->maxlen] =
- cbuf->buf[(cbuf->start+i+1)%cbuf->maxlen];
- }
- cbuf->buf[(cbuf->start-1+cbuf->maxlen)%cbuf->maxlen] = tmp;
- cbuf->start += (cbuf->maxlen - 1);
- cbuf->start %= cbuf->maxlen;
- cbuf->end += (cbuf->maxlen - 1);
- cbuf->end %= cbuf->maxlen;
-}
-
-static inline void
-__cirbuf_shift_right(struct cirbuf *cbuf)
-{
- unsigned int i;
- char tmp = cbuf->buf[cbuf->end];
-
- for (i=0 ; i<cbuf->len ; i++) {
- cbuf->buf[(cbuf->end+cbuf->maxlen-i)%cbuf->maxlen] =
- cbuf->buf[(cbuf->end+cbuf->maxlen-i-1)%cbuf->maxlen];
- }
- cbuf->buf[(cbuf->end+1)%cbuf->maxlen] = tmp;
- cbuf->start += 1;
- cbuf->start %= cbuf->maxlen;
- cbuf->end += 1;
- cbuf->end %= cbuf->maxlen;
-}
-
-/* XXX we could do a better algorithm here... */
-int
-cirbuf_align_left(struct cirbuf * cbuf)
-{
- if (!cbuf)
- return -EINVAL;
-
- if (cbuf->start < cbuf->maxlen/2) {
- while (cbuf->start != 0) {
- __cirbuf_shift_left(cbuf);
- }
- }
- else {
- while (cbuf->start != 0) {
- __cirbuf_shift_right(cbuf);
- }
- }
-
- return 0;
-}
-
-/* XXX we could do a better algorithm here... */
-int
-cirbuf_align_right(struct cirbuf * cbuf)
-{
- if (!cbuf)
- return -EINVAL;
-
- if (cbuf->start >= cbuf->maxlen/2) {
- while (cbuf->end != cbuf->maxlen-1) {
- __cirbuf_shift_left(cbuf);
- }
- }
- else {
- while (cbuf->start != cbuf->maxlen-1) {
- __cirbuf_shift_right(cbuf);
- }
- }
-
- return 0;
-}
-
-/* buffer del */
-
-int
-cirbuf_del_buf_head(struct cirbuf *cbuf, unsigned int size)
-{
- if (!cbuf || !size || size > CIRBUF_GET_LEN(cbuf))
- return -EINVAL;
-
- cbuf->len -= size;
- if (CIRBUF_IS_EMPTY(cbuf)) {
- cbuf->start += size - 1;
- cbuf->start %= cbuf->maxlen;
- }
- else {
- cbuf->start += size;
- cbuf->start %= cbuf->maxlen;
- }
- return 0;
-}
-
-/* buffer del */
-
-int
-cirbuf_del_buf_tail(struct cirbuf *cbuf, unsigned int size)
-{
- if (!cbuf || !size || size > CIRBUF_GET_LEN(cbuf))
- return -EINVAL;
-
- cbuf->len -= size;
- if (CIRBUF_IS_EMPTY(cbuf)) {
- cbuf->end += (cbuf->maxlen - size + 1);
- cbuf->end %= cbuf->maxlen;
- }
- else {
- cbuf->end += (cbuf->maxlen - size);
- cbuf->end %= cbuf->maxlen;
- }
- return 0;
-}
-
-/* del at head */
-
-static inline void
-__cirbuf_del_head(struct cirbuf * cbuf)
-{
- cbuf->len --;
- if (!CIRBUF_IS_EMPTY(cbuf)) {
- cbuf->start ++;
- cbuf->start %= cbuf->maxlen;
- }
-}
-
-int
-cirbuf_del_head_safe(struct cirbuf * cbuf)
-{
- if (cbuf && !CIRBUF_IS_EMPTY(cbuf)) {
- __cirbuf_del_head(cbuf);
- return 0;
- }
- return -EINVAL;
-}
-
-void
-cirbuf_del_head(struct cirbuf * cbuf)
-{
- __cirbuf_del_head(cbuf);
-}
-
-/* del at tail */
-
-static inline void
-__cirbuf_del_tail(struct cirbuf * cbuf)
-{
- cbuf->len --;
- if (!CIRBUF_IS_EMPTY(cbuf)) {
- cbuf->end += (cbuf->maxlen - 1);
- cbuf->end %= cbuf->maxlen;
- }
-}
-
-int
-cirbuf_del_tail_safe(struct cirbuf * cbuf)
-{
- if (cbuf && !CIRBUF_IS_EMPTY(cbuf)) {
- __cirbuf_del_tail(cbuf);
- return 0;
- }
- return -EINVAL;
-}
-
-void
-cirbuf_del_tail(struct cirbuf * cbuf)
-{
- __cirbuf_del_tail(cbuf);
-}
-
-/* convert to buffer */
-
-int
-cirbuf_get_buf_head(struct cirbuf *cbuf, char *c, unsigned int size)
-{
- unsigned int n;
-
- if (!cbuf || !c)
- return -EINVAL;
-
- n = (size < CIRBUF_GET_LEN(cbuf)) ? size : CIRBUF_GET_LEN(cbuf);
-
- if (!n)
- return 0;
-
- if (cbuf->start <= cbuf->end) {
- dprintf("s[%d] -> d[%d] (%d)\n", cbuf->start, 0, n);
- memcpy(c, cbuf->buf + cbuf->start , n);
- }
- else {
- /* check if we need to go from end to the beginning */
- if (n <= cbuf->maxlen - cbuf->start) {
- dprintf("s[%d] -> d[%d] (%d)\n", 0, cbuf->start, n);
- memcpy(c, cbuf->buf + cbuf->start , n);
- }
- else {
- dprintf("s[%d] -> d[%d] (%d)\n", cbuf->start, 0,
- cbuf->maxlen - cbuf->start);
- dprintf("s[%d] -> d[%d] (%d)\n", 0, cbuf->maxlen - cbuf->start,
- n - cbuf->maxlen + cbuf->start);
- memcpy(c, cbuf->buf + cbuf->start , cbuf->maxlen - cbuf->start);
- memcpy(c + cbuf->maxlen - cbuf->start, cbuf->buf,
- n - cbuf->maxlen + cbuf->start);
- }
- }
- return n;
-}
-
-/* convert to buffer */
-
-int
-cirbuf_get_buf_tail(struct cirbuf *cbuf, char *c, unsigned int size)
-{
- unsigned int n;
-
- if (!cbuf || !c)
- return -EINVAL;
-
- n = (size < CIRBUF_GET_LEN(cbuf)) ? size : CIRBUF_GET_LEN(cbuf);
-
- if (!n)
- return 0;
-
- if (cbuf->start <= cbuf->end) {
- dprintf("s[%d] -> d[%d] (%d)\n", cbuf->end - n + 1, 0, n);
- memcpy(c, cbuf->buf + cbuf->end - n + 1, n);
- }
- else {
- /* check if we need to go from end to the beginning */
- if (n <= cbuf->end + 1) {
- dprintf("s[%d] -> d[%d] (%d)\n", 0, cbuf->end - n + 1, n);
- memcpy(c, cbuf->buf + cbuf->end - n + 1, n);
- }
- else {
- dprintf("s[%d] -> d[%d] (%d)\n", 0,
- cbuf->maxlen - cbuf->start, cbuf->end + 1);
- dprintf("s[%d] -> d[%d] (%d)\n",
- cbuf->maxlen - n + cbuf->end + 1, 0, n - cbuf->end - 1);
- memcpy(c + cbuf->maxlen - cbuf->start,
- cbuf->buf, cbuf->end + 1);
- memcpy(c, cbuf->buf + cbuf->maxlen - n + cbuf->end +1,
- n - cbuf->end - 1);
- }
- }
- return n;
-}
-
-/* get head or get tail */
-
-char
-cirbuf_get_head(struct cirbuf * cbuf)
-{
- return cbuf->buf[cbuf->start];
-}
-
-/* get head or get tail */
-
-char
-cirbuf_get_tail(struct cirbuf * cbuf)
-{
- return cbuf->buf[cbuf->end];
-}
-
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_cirbuf.h b/src/dpdk_lib18/librte_cmdline/cmdline_cirbuf.h
deleted file mode 100755
index 6321dec5..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_cirbuf.h
+++ /dev/null
@@ -1,245 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _CIRBUF_H_
-#define _CIRBUF_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * This structure is the header of a cirbuf type.
- */
-struct cirbuf {
-	unsigned int maxlen;	/**< total length of the fifo (number of elements) */
-	unsigned int start;	/**< index of the first element */
-	unsigned int end;	/**< index of the last element */
-	unsigned int len;	/**< current length of the fifo */
- char *buf;
-};
-
-#ifdef RTE_LIBRTE_CMDLINE_DEBUG
-#define dprintf_(fmt, ...) printf("line %3.3d - " fmt "%.0s", __LINE__, __VA_ARGS__)
-#define dprintf(...) dprintf_(__VA_ARGS__, "dummy")
-#else
-#define dprintf(...) (void)0
-#endif
-
-
-/**
- * Init the circular buffer
- */
-int cirbuf_init(struct cirbuf *cbuf, char *buf, unsigned int start, unsigned int maxlen);
-
-
-/**
- * Return 1 if the circular buffer is full
- */
-#define CIRBUF_IS_FULL(cirbuf) ((cirbuf)->maxlen == (cirbuf)->len)
-
-/**
- * Return 1 if the circular buffer is empty
- */
-#define CIRBUF_IS_EMPTY(cirbuf) ((cirbuf)->len == 0)
-
-/**
- * return current size of the circular buffer (number of used elements)
- */
-#define CIRBUF_GET_LEN(cirbuf) ((cirbuf)->len)
-
-/**
- * return size of the circular buffer (used + free elements)
- */
-#define CIRBUF_GET_MAXLEN(cirbuf) ((cirbuf)->maxlen)
-
-/**
- * return the number of free elts
- */
-#define CIRBUF_GET_FREELEN(cirbuf) ((cirbuf)->maxlen - (cirbuf)->len)
-
-/**
- * Iterator for a circular buffer
- * c: struct cirbuf pointer
- * i: an integer type internally used in the macro
- * e: char that takes the value for each iteration
- */
-#define CIRBUF_FOREACH(c, i, e) \
- for ( i=0, e=(c)->buf[(c)->start] ; \
- i<((c)->len) ; \
- i ++, e=(c)->buf[((c)->start+i)%((c)->maxlen)])
-
-
-/**
- * Add a character at head of the circular buffer. Return 0 on success, or
- * a negative value on error.
- */
-int cirbuf_add_head_safe(struct cirbuf *cbuf, char c);
-
-/**
- * Add a character at head of the circular buffer. You _must_ check that you
- * have enough free space in the buffer before calling this func.
- */
-void cirbuf_add_head(struct cirbuf *cbuf, char c);
-
-/**
- * Add a character at tail of the circular buffer. Return 0 on success, or
- * a negative value on error.
- */
-int cirbuf_add_tail_safe(struct cirbuf *cbuf, char c);
-
-/**
- * Add a character at tail of the circular buffer. You _must_ check that you
- * have enough free space in the buffer before calling this func.
- */
-void cirbuf_add_tail(struct cirbuf *cbuf, char c);
-
-/**
- * Remove a char at the head of the circular buffer. Return 0 on
- * success, or a negative value on error.
- */
-int cirbuf_del_head_safe(struct cirbuf *cbuf);
-
-/**
- * Remove a char at the head of the circular buffer. You _must_ check
- * that buffer is not empty before calling the function.
- */
-void cirbuf_del_head(struct cirbuf *cbuf);
-
-/**
- * Remove a char at the tail of the circular buffer. Return 0 on
- * success, or a negative value on error.
- */
-int cirbuf_del_tail_safe(struct cirbuf *cbuf);
-
-/**
- * Remove a char at the tail of the circular buffer. You _must_ check
- * that buffer is not empty before calling the function.
- */
-void cirbuf_del_tail(struct cirbuf *cbuf);
-
-/**
- * Return the head of the circular buffer. You _must_ check that
- * buffer is not empty before calling the function.
- */
-char cirbuf_get_head(struct cirbuf *cbuf);
-
-/**
- * Return the tail of the circular buffer. You _must_ check that
- * buffer is not empty before calling the function.
- */
-char cirbuf_get_tail(struct cirbuf *cbuf);
-
-/**
- * Add a buffer at head of the circular buffer. 'c' is a pointer to a
- * buffer, and n is the number of char to add. Return the number of
- * copied bytes on success, or a negative value on error.
- */
-int cirbuf_add_buf_head(struct cirbuf *cbuf, const char *c, unsigned int n);
-
-/**
- * Add a buffer at tail of the circular buffer. 'c' is a pointer to a
- * buffer, and n is the number of char to add. Return the number of
- * copied bytes on success, or a negative value on error.
- */
-int cirbuf_add_buf_tail(struct cirbuf *cbuf, const char *c, unsigned int n);
-
-/**
- * Remove chars at the head of the circular buffer. Return 0 on
- * success, or a negative value on error.
- */
-int cirbuf_del_buf_head(struct cirbuf *cbuf, unsigned int size);
-
-/**
- * Remove chars at the tail of the circular buffer. Return 0 on
- * success, or a negative value on error.
- */
-int cirbuf_del_buf_tail(struct cirbuf *cbuf, unsigned int size);
-
-/**
- * Copy a maximum of 'size' characters from the head of the circular
- * buffer to a flat one pointed by 'c'. Return the number of copied
- * chars.
- */
-int cirbuf_get_buf_head(struct cirbuf *cbuf, char *c, unsigned int size);
-
-/**
- * Copy a maximum of 'size' characters from the tail of the circular
- * buffer to a flat one pointed by 'c'. Return the number of copied
- * chars.
- */
-int cirbuf_get_buf_tail(struct cirbuf *cbuf, char *c, unsigned int size);
-
-
-/**
- * Set the start of the data to the index 0 of the internal buffer.
- */
-int cirbuf_align_left(struct cirbuf *cbuf);
-
-/**
- * Set the end of the data to the last index of the internal buffer.
- */
-int cirbuf_align_right(struct cirbuf *cbuf);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _CIRBUF_H_ */
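The cirbuf API above operates on a caller-supplied flat buffer, so a complete
round trip fits in a few lines. A minimal sketch (buffer sizes are
hypothetical):

    #include "cmdline_cirbuf.h"

    char storage[64];
    char out[16];
    struct cirbuf cb;
    int n;

    cirbuf_init(&cb, storage, 0, sizeof(storage));
    cirbuf_add_buf_tail(&cb, "hello", 5);   /* enqueue at the tail */
    cirbuf_add_head_safe(&cb, '>');         /* prepend, checking free space */

    /* copy out the 6 bytes ">hello" (not NUL-terminated); the data
     * stays in the buffer until it is explicitly deleted */
    n = cirbuf_get_buf_head(&cb, out, sizeof(out));
    cirbuf_del_buf_head(&cb, n);            /* now consume it */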
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse.c b/src/dpdk_lib18/librte_cmdline/cmdline_parse.c
deleted file mode 100755
index dfc885cf..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_parse.c
+++ /dev/null
@@ -1,564 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <stdarg.h>
-#include <errno.h>
-#include <string.h>
-#include <inttypes.h>
-#include <ctype.h>
-#include <termios.h>
-
-#include <netinet/in.h>
-
-#include <rte_string_fns.h>
-
-#include "cmdline_rdline.h"
-#include "cmdline_parse.h"
-#include "cmdline.h"
-
-#ifdef RTE_LIBRTE_CMDLINE_DEBUG
-#define debug_printf printf
-#else
-#define debug_printf(args...) do {} while(0)
-#endif
-
-#define CMDLINE_BUFFER_SIZE 64
-
-/* isblank() needs _XOPEN_SOURCE >= 600 || _ISOC99_SOURCE, so use our
- * own. */
-static int
-isblank2(char c)
-{
- if (c == ' ' ||
- c == '\t' )
- return 1;
- return 0;
-}
-
-static int
-isendofline(char c)
-{
- if (c == '\n' ||
- c == '\r' )
- return 1;
- return 0;
-}
-
-static int
-iscomment(char c)
-{
- if (c == '#')
- return 1;
- return 0;
-}
-
-int
-cmdline_isendoftoken(char c)
-{
- if (!c || iscomment(c) || isblank2(c) || isendofline(c))
- return 1;
- return 0;
-}
-
-static unsigned int
-nb_common_chars(const char * s1, const char * s2)
-{
- unsigned int i=0;
-
- while (*s1==*s2 && *s1) {
- s1++;
- s2++;
- i++;
- }
- return i;
-}
-
-/**
- * Try to match the buffer with an instruction (only the first
- * nb_match_token tokens if != 0). Return 0 if all the tokens
- * match, the number of matched tokens if the match is partial,
- * or -1 if no token matches.
- */
-static int
-match_inst(cmdline_parse_inst_t *inst, const char *buf,
- unsigned int nb_match_token, void *resbuf, unsigned resbuf_size)
-{
- unsigned int token_num=0;
- cmdline_parse_token_hdr_t * token_p;
- unsigned int i=0;
- int n = 0;
- struct cmdline_token_hdr token_hdr;
-
- token_p = inst->tokens[token_num];
- if (token_p)
- memcpy(&token_hdr, token_p, sizeof(token_hdr));
-
- /* check if we match all tokens of inst */
- while (token_p && (!nb_match_token || i<nb_match_token)) {
- debug_printf("TK\n");
- /* skip spaces */
- while (isblank2(*buf)) {
- buf++;
- }
-
- /* end of buf */
- if ( isendofline(*buf) || iscomment(*buf) )
- break;
-
- if (resbuf == NULL) {
- n = token_hdr.ops->parse(token_p, buf, NULL, 0);
- } else {
- unsigned rb_sz;
-
- if (token_hdr.offset > resbuf_size) {
- printf("Parse error(%s:%d): Token offset(%u) "
- "exceeds maximum size(%u)\n",
- __FILE__, __LINE__,
- token_hdr.offset, resbuf_size);
- return -ENOBUFS;
- }
- rb_sz = resbuf_size - token_hdr.offset;
-
- n = token_hdr.ops->parse(token_p, buf, (char *)resbuf +
- token_hdr.offset, rb_sz);
- }
-
- if (n < 0)
- break;
-
- debug_printf("TK parsed (len=%d)\n", n);
- i++;
- buf += n;
-
- token_num ++;
- token_p = inst->tokens[token_num];
- if (token_p)
- memcpy(&token_hdr, token_p, sizeof(token_hdr));
- }
-
- /* does not match */
- if (i==0)
- return -1;
-
- /* in case we want to match a specific num of token */
- if (nb_match_token) {
- if (i == nb_match_token) {
- return 0;
- }
- return i;
- }
-
- /* we don't match all the tokens */
- if (token_p) {
- return i;
- }
-
-	/* are there any tokens left? */
- while (isblank2(*buf)) {
- buf++;
- }
-
- /* end of buf */
- if ( isendofline(*buf) || iscomment(*buf) )
- return 0;
-
- /* garbage after inst */
- return i;
-}
-
-
-int
-cmdline_parse(struct cmdline *cl, const char * buf)
-{
- unsigned int inst_num=0;
- cmdline_parse_inst_t *inst;
- const char *curbuf;
- char result_buf[CMDLINE_PARSE_RESULT_BUFSIZE];
- void (*f)(void *, struct cmdline *, void *) = NULL;
- void *data = NULL;
- int comment = 0;
- int linelen = 0;
- int parse_it = 0;
- int err = CMDLINE_PARSE_NOMATCH;
- int tok;
- cmdline_parse_ctx_t *ctx;
-#ifdef RTE_LIBRTE_CMDLINE_DEBUG
- char debug_buf[BUFSIZ];
-#endif
-
- if (!cl || !buf)
- return CMDLINE_PARSE_BAD_ARGS;
-
- ctx = cl->ctx;
-
- /*
- * - look if the buffer contains at least one line
- * - look if line contains only spaces or comments
- * - count line length
- */
- curbuf = buf;
- while (! isendofline(*curbuf)) {
- if ( *curbuf == '\0' ) {
- debug_printf("Incomplete buf (len=%d)\n", linelen);
- return 0;
- }
- if ( iscomment(*curbuf) ) {
- comment = 1;
- }
- if ( ! isblank2(*curbuf) && ! comment) {
- parse_it = 1;
- }
- curbuf++;
- linelen++;
- }
-
- /* skip all endofline chars */
- while (isendofline(buf[linelen])) {
- linelen++;
- }
-
- /* empty line */
- if ( parse_it == 0 ) {
- debug_printf("Empty line (len=%d)\n", linelen);
- return linelen;
- }
-
-#ifdef RTE_LIBRTE_CMDLINE_DEBUG
- snprintf(debug_buf, (linelen>64 ? 64 : linelen), "%s", buf);
- debug_printf("Parse line : len=%d, <%s>\n", linelen, debug_buf);
-#endif
-
- /* parse it !! */
- inst = ctx[inst_num];
- while (inst) {
- debug_printf("INST %d\n", inst_num);
-
- /* fully parsed */
- tok = match_inst(inst, buf, 0, result_buf, sizeof(result_buf));
-
- if (tok > 0) /* we matched at least one token */
- err = CMDLINE_PARSE_BAD_ARGS;
-
- else if (!tok) {
- debug_printf("INST fully parsed\n");
- /* skip spaces */
- while (isblank2(*curbuf)) {
- curbuf++;
- }
-
- /* if end of buf -> there is no garbage after inst */
- if (isendofline(*curbuf) || iscomment(*curbuf)) {
- if (!f) {
- memcpy(&f, &inst->f, sizeof(f));
- memcpy(&data, &inst->data, sizeof(data));
- }
- else {
- /* more than 1 inst matches */
- err = CMDLINE_PARSE_AMBIGUOUS;
- f=NULL;
- debug_printf("Ambiguous cmd\n");
- break;
- }
- }
- }
-
- inst_num ++;
- inst = ctx[inst_num];
- }
-
- /* call func */
- if (f) {
- f(result_buf, cl, data);
- }
-
- /* no match */
- else {
- debug_printf("No match err=%d\n", err);
- return err;
- }
-
- return linelen;
-}
-
-int
-cmdline_complete(struct cmdline *cl, const char *buf, int *state,
- char *dst, unsigned int size)
-{
- const char *partial_tok = buf;
- unsigned int inst_num = 0;
- cmdline_parse_inst_t *inst;
- cmdline_parse_token_hdr_t *token_p;
- struct cmdline_token_hdr token_hdr;
- char tmpbuf[CMDLINE_BUFFER_SIZE], comp_buf[CMDLINE_BUFFER_SIZE];
- unsigned int partial_tok_len;
- int comp_len = -1;
- int tmp_len = -1;
- int nb_token = 0;
- unsigned int i, n;
- int l;
- unsigned int nb_completable;
- unsigned int nb_non_completable;
- int local_state = 0;
- const char *help_str;
- cmdline_parse_ctx_t *ctx;
-
- if (!cl || !buf || !state || !dst)
- return -1;
-
- ctx = cl->ctx;
-
- debug_printf("%s called\n", __func__);
- memset(&token_hdr, 0, sizeof(token_hdr));
-
- /* count the number of complete token to parse */
- for (i=0 ; buf[i] ; i++) {
- if (!isblank2(buf[i]) && isblank2(buf[i+1]))
- nb_token++;
- if (isblank2(buf[i]) && !isblank2(buf[i+1]))
- partial_tok = buf+i+1;
- }
- partial_tok_len = strnlen(partial_tok, RDLINE_BUF_SIZE);
-
- /* first call -> do a first pass */
- if (*state <= 0) {
- debug_printf("try complete <%s>\n", buf);
- debug_printf("there is %d complete tokens, <%s> is incomplete\n",
- nb_token, partial_tok);
-
- nb_completable = 0;
- nb_non_completable = 0;
-
- inst = ctx[inst_num];
- while (inst) {
- /* parse the first tokens of the inst */
- if (nb_token && match_inst(inst, buf, nb_token, NULL, 0))
- goto next;
-
- debug_printf("instruction match\n");
- token_p = inst->tokens[nb_token];
- if (token_p)
- memcpy(&token_hdr, token_p, sizeof(token_hdr));
-
- /* non completable */
- if (!token_p ||
- !token_hdr.ops->complete_get_nb ||
- !token_hdr.ops->complete_get_elt ||
- (n = token_hdr.ops->complete_get_nb(token_p)) == 0) {
- nb_non_completable++;
- goto next;
- }
-
- debug_printf("%d choices for this token\n", n);
- for (i=0 ; i<n ; i++) {
- if (token_hdr.ops->complete_get_elt(token_p, i,
- tmpbuf,
- sizeof(tmpbuf)) < 0)
- continue;
-
- /* we have at least room for one char */
- tmp_len = strnlen(tmpbuf, sizeof(tmpbuf));
- if (tmp_len < CMDLINE_BUFFER_SIZE - 1) {
- tmpbuf[tmp_len] = ' ';
- tmpbuf[tmp_len+1] = 0;
- }
-
- debug_printf(" choice <%s>\n", tmpbuf);
-
- /* does the completion match the
- * beginning of the word ? */
- if (!strncmp(partial_tok, tmpbuf,
- partial_tok_len)) {
- if (comp_len == -1) {
- snprintf(comp_buf, sizeof(comp_buf),
- "%s", tmpbuf + partial_tok_len);
- comp_len =
- strnlen(tmpbuf + partial_tok_len,
- sizeof(tmpbuf) - partial_tok_len);
-
- }
- else {
- comp_len =
- nb_common_chars(comp_buf,
- tmpbuf+partial_tok_len);
- comp_buf[comp_len] = 0;
- }
- nb_completable++;
- }
- }
- next:
- debug_printf("next\n");
- inst_num ++;
- inst = ctx[inst_num];
- }
-
- debug_printf("total choices %d for this completion\n",
- nb_completable);
-
- /* no possible completion */
- if (nb_completable == 0 && nb_non_completable == 0)
- return 0;
-
- /* if multichoice is not required */
- if (*state == 0 && partial_tok_len > 0) {
- /* one or several choices starting with the
- same chars */
- if (comp_len > 0) {
- if ((unsigned)(comp_len + 1) > size)
- return 0;
-
- snprintf(dst, size, "%s", comp_buf);
- dst[comp_len] = 0;
- return 2;
- }
- }
- }
-
- /* init state correctly */
- if (*state == -1)
- *state = 0;
-
- debug_printf("Multiple choice STATE=%d\n", *state);
-
- inst_num = 0;
- inst = ctx[inst_num];
- while (inst) {
- /* we need to redo it */
- inst = ctx[inst_num];
-
- if (nb_token && match_inst(inst, buf, nb_token, NULL, 0))
- goto next2;
-
- token_p = inst->tokens[nb_token];
- if (token_p)
- memcpy(&token_hdr, token_p, sizeof(token_hdr));
-
- /* one choice for this token */
- if (!token_p ||
- !token_hdr.ops->complete_get_nb ||
- !token_hdr.ops->complete_get_elt ||
- (n = token_hdr.ops->complete_get_nb(token_p)) == 0) {
- if (local_state < *state) {
- local_state++;
- goto next2;
- }
- (*state)++;
- if (token_p && token_hdr.ops->get_help) {
- token_hdr.ops->get_help(token_p, tmpbuf,
- sizeof(tmpbuf));
- help_str = inst->help_str;
- if (help_str)
- snprintf(dst, size, "[%s]: %s", tmpbuf,
- help_str);
- else
- snprintf(dst, size, "[%s]: No help",
- tmpbuf);
- }
- else {
- snprintf(dst, size, "[RETURN]");
- }
- return 1;
- }
-
- /* several choices */
- for (i=0 ; i<n ; i++) {
- if (token_hdr.ops->complete_get_elt(token_p, i, tmpbuf,
- sizeof(tmpbuf)) < 0)
- continue;
- /* we have at least room for one char */
- tmp_len = strnlen(tmpbuf, sizeof(tmpbuf));
- if (tmp_len < CMDLINE_BUFFER_SIZE - 1) {
- tmpbuf[tmp_len] = ' ';
- tmpbuf[tmp_len + 1] = 0;
- }
-
- debug_printf(" choice <%s>\n", tmpbuf);
-
- /* does the completion match the beginning of
- * the word ? */
- if (!strncmp(partial_tok, tmpbuf,
- partial_tok_len)) {
- if (local_state < *state) {
- local_state++;
- continue;
- }
- (*state)++;
- l=snprintf(dst, size, "%s", tmpbuf);
- if (l>=0 && token_hdr.ops->get_help) {
- token_hdr.ops->get_help(token_p, tmpbuf,
- sizeof(tmpbuf));
- help_str = inst->help_str;
- if (help_str)
- snprintf(dst+l, size-l, "[%s]: %s",
- tmpbuf, help_str);
- else
- snprintf(dst+l, size-l,
- "[%s]: No help", tmpbuf);
- }
-
- return 1;
- }
- }
- next2:
- inst_num ++;
- inst = ctx[inst_num];
- }
- return 0;
-}
-
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse.h b/src/dpdk_lib18/librte_cmdline/cmdline_parse.h
deleted file mode 100755
index 4b25c456..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_parse.h
+++ /dev/null
@@ -1,191 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _CMDLINE_PARSE_H_
-#define _CMDLINE_PARSE_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef offsetof
-#define offsetof(type, field) ((size_t) &( ((type *)0)->field) )
-#endif
-
-/* return status for parsing */
-#define CMDLINE_PARSE_SUCCESS 0
-#define CMDLINE_PARSE_AMBIGUOUS -1
-#define CMDLINE_PARSE_NOMATCH -2
-#define CMDLINE_PARSE_BAD_ARGS -3
-
-/* return status for completion */
-#define CMDLINE_PARSE_COMPLETE_FINISHED 0
-#define CMDLINE_PARSE_COMPLETE_AGAIN 1
-#define CMDLINE_PARSE_COMPLETED_BUFFER 2
-
-/* maximum buffer size for parsed result */
-#define CMDLINE_PARSE_RESULT_BUFSIZE 8192
-
-/**
- * Stores a pointer to the ops struct, and the offset: the place to
- * write the parsed result in the destination structure.
- */
-struct cmdline_token_hdr {
- struct cmdline_token_ops *ops;
- unsigned int offset;
-};
-typedef struct cmdline_token_hdr cmdline_parse_token_hdr_t;
-
-/**
- * A token is defined by this structure.
- *
- * parse() takes the token as first argument, then the source buffer
- * starting at the token we want to parse. The 3rd arg is a pointer
- * where we store the parsed data (as binary). It returns the number of
- * parsed chars on success and a negative value on error.
- *
- * complete_get_nb() returns the number of possible values for this
- * token if completion is possible. If it is NULL or if it returns 0,
- * no completion is possible.
- *
- * complete_get_elt() copies into dstbuf (whose size is given by the
- * last parameter) the i-th possible completion for this token. It
- * returns 0 on success or a negative value on error.
- *
- * get_help() fills the dstbuf with the help for the token. It returns
- * -1 on error and 0 on success.
- */
-struct cmdline_token_ops {
-	/** parse(token ptr, buf, res ptr, buf len) */
- int (*parse)(cmdline_parse_token_hdr_t *, const char *, void *,
- unsigned int);
- /** return the num of possible choices for this token */
- int (*complete_get_nb)(cmdline_parse_token_hdr_t *);
- /** return the elt x for this token (token, idx, dstbuf, size) */
- int (*complete_get_elt)(cmdline_parse_token_hdr_t *, int, char *,
- unsigned int);
- /** get help for this token (token, dstbuf, size) */
- int (*get_help)(cmdline_parse_token_hdr_t *, char *, unsigned int);
-};
-
-struct cmdline;
-/**
- * Stores an instruction: a pointer to a callback function (and its
- * parameter) that is called when the instruction is parsed, a help
- * string, and the list of tokens composing this instruction.
- */
-struct cmdline_inst {
-	/* f(parsed_result, cl, data) */
- void (*f)(void *, struct cmdline *, void *);
- void *data;
- const char *help_str;
- cmdline_parse_token_hdr_t *tokens[];
-};
-typedef struct cmdline_inst cmdline_parse_inst_t;
-
-/**
- * A context contains a list of instructions (an array of pointers
- * to instructions).
- *
- */
-typedef cmdline_parse_inst_t *cmdline_parse_ctx_t;
-
-/**
- * Try to parse a buffer according to the specified context. The
- * argument buf must end with "\n\0". The function returns
- * CMDLINE_PARSE_AMBIGUOUS, CMDLINE_PARSE_NOMATCH or
- * CMDLINE_PARSE_BAD_ARGS on error. Else it calls the associated
- * function (defined in the context) and returns 0
- * (CMDLINE_PARSE_SUCCESS).
- */
-int cmdline_parse(struct cmdline *cl, const char *buf);
-
-/**
- * complete() must be called with *state==0 (try to complete) or
- * with *state==-1 (just display choices), then called without
- * modifying *state until it returns CMDLINE_PARSE_COMPLETE_FINISHED
- * or CMDLINE_PARSE_COMPLETED_BUFFER.
- *
- * It returns < 0 on error.
- *
- * Else it returns:
- * - CMDLINE_PARSE_COMPLETE_FINISHED on completion (one possible
- * choice). In this case, the chars are appended in the dst buffer.
- * - CMDLINE_PARSE_COMPLETE_AGAIN if there are several possible
- * choices. In this case, you must call the function again,
- * keeping the value of state intact.
- * - CMDLINE_PARSE_COMPLETED_BUFFER when the iteration is
- * finished. The dst is not valid for this last call.
- *
- * The returned dst buf ends with \0.
- */
-int cmdline_complete(struct cmdline *cl, const char *buf, int *state,
- char *dst, unsigned int size);
-
-
-/* return true if(!c || iscomment(c) || isblank(c) ||
- * isendofline(c)) */
-int cmdline_isendoftoken(char c);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _CMDLINE_PARSE_H_ */
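
For orientation, a minimal sketch of how the types above fit together in an application. It assumes the string-token helpers from cmdline_parse_string.h and a struct cmdline created elsewhere (e.g. with cmdline_stdin_new()); the command name, result struct and callback below are hypothetical:

struct cmd_quit_result {
	cmdline_fixed_string_t quit;      /* written at the hdr offset */
};

/* hypothetical callback, matching the f(parsed_result, cl, data)
 * signature of struct cmdline_inst */
static void cmd_quit_parsed(void *parsed_result, struct cmdline *cl,
			    void *data)
{
	/* reached only when the whole line matched the token list */
}

static cmdline_parse_token_string_t cmd_quit_tok =
	TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");

static cmdline_parse_inst_t cmd_quit = {
	.f = cmd_quit_parsed,
	.data = NULL,
	.help_str = "exit the application",
	.tokens = { (void *)&cmd_quit_tok, NULL },
};

/* the NULL-terminated array used to create the struct cmdline that
 * is later passed to cmdline_parse() */
static cmdline_parse_ctx_t main_ctx[] = { &cmd_quit, NULL };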
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.c b/src/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.c
deleted file mode 100755
index 64ae86c7..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.c
+++ /dev/null
@@ -1,180 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <errno.h>
-#include <inttypes.h>
-#include <ctype.h>
-#include <string.h>
-#include <sys/types.h>
-#include <net/ethernet.h>
-
-#include <rte_string_fns.h>
-
-#include "cmdline_parse.h"
-#include "cmdline_parse_etheraddr.h"
-
-struct cmdline_token_ops cmdline_token_etheraddr_ops = {
- .parse = cmdline_parse_etheraddr,
- .complete_get_nb = NULL,
- .complete_get_elt = NULL,
- .get_help = cmdline_get_help_etheraddr,
-};
-
-/* the format can be either XX:XX:XX:XX:XX:XX or XXXX:XXXX:XXXX */
-#define ETHER_ADDRSTRLENLONG 18
-#define ETHER_ADDRSTRLENSHORT 15
-
-#ifdef __linux__
-#define ea_oct ether_addr_octet
-#else
-#define ea_oct octet
-#endif
-
-
-static struct ether_addr *
-my_ether_aton(const char *a)
-{
- int i;
- char *end;
- unsigned long o[ETHER_ADDR_LEN];
- static struct ether_addr ether_addr;
-
- i = 0;
- do {
- errno = 0;
- o[i] = strtoul(a, &end, 16);
- if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
- return (NULL);
- a = end + 1;
- } while (++i != sizeof (o) / sizeof (o[0]) && end[0] != 0);
-
- /* Junk at the end of line */
- if (end[0] != 0)
- return (NULL);
-
- /* Support the format XX:XX:XX:XX:XX:XX */
- if (i == ETHER_ADDR_LEN) {
- while (i-- != 0) {
- if (o[i] > UINT8_MAX)
- return (NULL);
- ether_addr.ea_oct[i] = (uint8_t)o[i];
- }
- /* Support the format XXXX:XXXX:XXXX */
- } else if (i == ETHER_ADDR_LEN / 2) {
- while (i-- != 0) {
- if (o[i] > UINT16_MAX)
- return (NULL);
- ether_addr.ea_oct[i * 2] = (uint8_t)(o[i] >> 8);
- ether_addr.ea_oct[i * 2 + 1] = (uint8_t)(o[i] & 0xff);
- }
- /* unknown format */
- } else
- return (NULL);
-
- return (struct ether_addr *)&ether_addr;
-}
-
-int
-cmdline_parse_etheraddr(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
- const char *buf, void *res, unsigned ressize)
-{
- unsigned int token_len = 0;
- char ether_str[ETHER_ADDRSTRLENLONG+1];
- struct ether_addr *tmp;
-
- if (res && ressize < sizeof(struct ether_addr))
- return -1;
-
- if (!buf || ! *buf)
- return -1;
-
- while (!cmdline_isendoftoken(buf[token_len]))
- token_len++;
-
- /* if token doesn't match possible string lengths... */
- if ((token_len != ETHER_ADDRSTRLENLONG - 1) &&
- (token_len != ETHER_ADDRSTRLENSHORT - 1))
- return -1;
-
- snprintf(ether_str, token_len+1, "%s", buf);
-
- tmp = my_ether_aton(ether_str);
- if (tmp == NULL)
- return -1;
- if (res)
- memcpy(res, tmp, sizeof(struct ether_addr));
- return token_len;
-}
-
-int
-cmdline_get_help_etheraddr(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
- char *dstbuf, unsigned int size)
-{
- int ret;
-
- ret = snprintf(dstbuf, size, "Ethernet address");
- if (ret < 0)
- return -1;
- return 0;
-}
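
An illustrative direct call to the parser above (this implementation never dereferences tk, so NULL is passed; the function name is hypothetical):

static int example_parse_mac(void)
{
	struct ether_addr mac;

	/* returns the token length (17 here) and fills mac on success,
	 * or -1 on malformed input */
	return cmdline_parse_etheraddr(NULL, "00:11:22:33:44:55",
				       &mac, sizeof(mac));
}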
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.h b/src/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.h
deleted file mode 100755
index 0085bb3b..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _PARSE_ETHERADDR_H_
-#define _PARSE_ETHERADDR_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct cmdline_token_etheraddr {
- struct cmdline_token_hdr hdr;
-};
-typedef struct cmdline_token_etheraddr cmdline_parse_token_etheraddr_t;
-
-extern struct cmdline_token_ops cmdline_token_etheraddr_ops;
-
-int cmdline_parse_etheraddr(cmdline_parse_token_hdr_t *tk, const char *srcbuf,
- void *res, unsigned ressize);
-int cmdline_get_help_etheraddr(cmdline_parse_token_hdr_t *tk, char *dstbuf,
- unsigned int size);
-
-#define TOKEN_ETHERADDR_INITIALIZER(structure, field) \
-{ \
- /* hdr */ \
- { \
- &cmdline_token_etheraddr_ops, /* ops */ \
- offsetof(structure, field), /* offset */ \
- }, \
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-
-#endif /* _PARSE_ETHERADDR_H_ */
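
An illustrative use of the initializer above inside a command definition (the result struct and field name are hypothetical):

struct cmd_set_mac_result {
	/* filled via offsetof(struct cmd_set_mac_result, mac) */
	struct ether_addr mac;
};

static cmdline_parse_token_etheraddr_t cmd_set_mac_tok =
	TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_mac_result, mac);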
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.c b/src/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.c
deleted file mode 100755
index 7f335994..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.c
+++ /dev/null
@@ -1,408 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * For inet_ntop() functions:
- *
- * Copyright (c) 1996 by Internet Software Consortium.
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
- * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
- * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
- * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
- * SOFTWARE.
- */
-
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <inttypes.h>
-#include <ctype.h>
-#include <string.h>
-#include <errno.h>
-#include <netinet/in.h>
-#ifndef __linux__
-#ifndef __FreeBSD__
-#include <net/socket.h>
-#else
-#include <sys/socket.h>
-#endif
-#endif
-
-#include <rte_string_fns.h>
-
-#include "cmdline_parse.h"
-#include "cmdline_parse_ipaddr.h"
-
-struct cmdline_token_ops cmdline_token_ipaddr_ops = {
- .parse = cmdline_parse_ipaddr,
- .complete_get_nb = NULL,
- .complete_get_elt = NULL,
- .get_help = cmdline_get_help_ipaddr,
-};
-
-#define INADDRSZ 4
-#define IN6ADDRSZ 16
-#define PREFIXMAX 128
-#define V4PREFIXMAX 32
-
-/*
- * WARNING: Don't even consider trying to compile this on a system where
- * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX.
- */
-
-static int inet_pton4(const char *src, unsigned char *dst);
-static int inet_pton6(const char *src, unsigned char *dst);
-
-/* int
- * inet_pton(af, src, dst)
- * convert from presentation format (which usually means ASCII printable)
- * to network format (which is usually some kind of binary format).
- * return:
- * 1 if the address was valid for the specified address family
- * 0 if the address wasn't valid (`dst' is untouched in this case)
- * -1 if some other error occurred (`dst' is untouched in this case, too)
- * author:
- * Paul Vixie, 1996.
- */
-static int
-my_inet_pton(int af, const char *src, void *dst)
-{
- switch (af) {
- case AF_INET:
- return (inet_pton4(src, dst));
- case AF_INET6:
- return (inet_pton6(src, dst));
- default:
- errno = EAFNOSUPPORT;
- return (-1);
- }
- /* NOTREACHED */
-}
-
-/* int
- * inet_pton4(src, dst)
- * like inet_aton() but without all the hexadecimal and shorthand.
- * return:
- * 1 if `src' is a valid dotted quad, else 0.
- * notice:
- * does not touch `dst' unless it's returning 1.
- * author:
- * Paul Vixie, 1996.
- */
-static int
-inet_pton4(const char *src, unsigned char *dst)
-{
- static const char digits[] = "0123456789";
- int saw_digit, octets, ch;
- unsigned char tmp[INADDRSZ], *tp;
-
- saw_digit = 0;
- octets = 0;
- *(tp = tmp) = 0;
- while ((ch = *src++) != '\0') {
- const char *pch;
-
- if ((pch = strchr(digits, ch)) != NULL) {
- unsigned int new = *tp * 10 + (pch - digits);
-
- if (new > 255)
- return (0);
- if (! saw_digit) {
- if (++octets > 4)
- return (0);
- saw_digit = 1;
- }
- *tp = (unsigned char)new;
- } else if (ch == '.' && saw_digit) {
- if (octets == 4)
- return (0);
- *++tp = 0;
- saw_digit = 0;
- } else
- return (0);
- }
- if (octets < 4)
- return (0);
-
- memcpy(dst, tmp, INADDRSZ);
- return (1);
-}
-
-/* int
- * inet_pton6(src, dst)
- * convert presentation level address to network order binary form.
- * return:
- * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
- * notice:
- * (1) does not touch `dst' unless it's returning 1.
- * (2) :: in a full address is silently ignored.
- * credit:
- * inspired by Mark Andrews.
- * author:
- * Paul Vixie, 1996.
- */
-static int
-inet_pton6(const char *src, unsigned char *dst)
-{
- static const char xdigits_l[] = "0123456789abcdef",
- xdigits_u[] = "0123456789ABCDEF";
- unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0;
- const char *xdigits = 0, *curtok = 0;
- int ch = 0, saw_xdigit = 0, count_xdigit = 0;
- unsigned int val = 0;
- unsigned dbloct_count = 0;
-
- memset((tp = tmp), '\0', IN6ADDRSZ);
- endp = tp + IN6ADDRSZ;
- colonp = NULL;
- /* Leading :: requires some special handling. */
- if (*src == ':')
- if (*++src != ':')
- return (0);
- curtok = src;
- saw_xdigit = count_xdigit = 0;
- val = 0;
-
- while ((ch = *src++) != '\0') {
- const char *pch;
-
- if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL)
- pch = strchr((xdigits = xdigits_u), ch);
- if (pch != NULL) {
- if (count_xdigit >= 4)
- return (0);
- val <<= 4;
- val |= (pch - xdigits);
- if (val > 0xffff)
- return (0);
- saw_xdigit = 1;
- count_xdigit++;
- continue;
- }
- if (ch == ':') {
- curtok = src;
- if (!saw_xdigit) {
- if (colonp)
- return (0);
- colonp = tp;
- continue;
- } else if (*src == '\0') {
- return (0);
- }
- if (tp + sizeof(int16_t) > endp)
- return (0);
- *tp++ = (unsigned char) ((val >> 8) & 0xff);
- *tp++ = (unsigned char) (val & 0xff);
- saw_xdigit = 0;
- count_xdigit = 0;
- val = 0;
- dbloct_count++;
- continue;
- }
- if (ch == '.' && ((tp + INADDRSZ) <= endp) &&
- inet_pton4(curtok, tp) > 0) {
- tp += INADDRSZ;
- saw_xdigit = 0;
- dbloct_count += 2;
- break; /* '\0' was seen by inet_pton4(). */
- }
- return (0);
- }
- if (saw_xdigit) {
- if (tp + sizeof(int16_t) > endp)
- return (0);
- *tp++ = (unsigned char) ((val >> 8) & 0xff);
- *tp++ = (unsigned char) (val & 0xff);
- dbloct_count++;
- }
- if (colonp != NULL) {
- /* if we already have 8 double octets, having a colon means error */
- if (dbloct_count == 8)
- return 0;
-
- /*
- * Since some memmove()'s erroneously fail to handle
- * overlapping regions, we'll do the shift by hand.
- */
- const int n = tp - colonp;
- int i;
-
- for (i = 1; i <= n; i++) {
- endp[- i] = colonp[n - i];
- colonp[n - i] = 0;
- }
- tp = endp;
- }
- if (tp != endp)
- return (0);
- memcpy(dst, tmp, IN6ADDRSZ);
- return (1);
-}
-
-int
-cmdline_parse_ipaddr(cmdline_parse_token_hdr_t *tk, const char *buf, void *res,
- unsigned ressize)
-{
- struct cmdline_token_ipaddr *tk2;
- unsigned int token_len = 0;
- char ip_str[INET6_ADDRSTRLEN+4+1]; /* '+4' is for prefixlen (if any) */
- cmdline_ipaddr_t ipaddr;
- char *prefix, *prefix_end;
- long prefixlen = 0;
-
- if (res && ressize < sizeof(cmdline_ipaddr_t))
- return -1;
-
- if (!buf || !tk || ! *buf)
- return -1;
-
- tk2 = (struct cmdline_token_ipaddr *)tk;
-
- while (!cmdline_isendoftoken(buf[token_len]))
- token_len++;
-
- /* if token is too big... */
- if (token_len >= INET6_ADDRSTRLEN+4)
- return -1;
-
- snprintf(ip_str, token_len+1, "%s", buf);
-
- /* convert the network prefix */
- if (tk2->ipaddr_data.flags & CMDLINE_IPADDR_NETWORK) {
- prefix = strrchr(ip_str, '/');
- if (prefix == NULL)
- return -1;
- *prefix = '\0';
- prefix ++;
- errno = 0;
- prefixlen = strtol(prefix, &prefix_end, 10);
- if (errno || (*prefix_end != '\0')
- || prefixlen < 0 || prefixlen > PREFIXMAX)
- return -1;
- ipaddr.prefixlen = prefixlen;
- }
- else {
- ipaddr.prefixlen = 0;
- }
-
- /* convert the IP addr */
- if ((tk2->ipaddr_data.flags & CMDLINE_IPADDR_V4) &&
- my_inet_pton(AF_INET, ip_str, &ipaddr.addr.ipv4) == 1 &&
- prefixlen <= V4PREFIXMAX) {
- ipaddr.family = AF_INET;
- if (res)
- memcpy(res, &ipaddr, sizeof(ipaddr));
- return token_len;
- }
- if ((tk2->ipaddr_data.flags & CMDLINE_IPADDR_V6) &&
- my_inet_pton(AF_INET6, ip_str, &ipaddr.addr.ipv6) == 1) {
- ipaddr.family = AF_INET6;
- if (res)
- memcpy(res, &ipaddr, sizeof(ipaddr));
- return token_len;
- }
- return -1;
-
-}
-
-int cmdline_get_help_ipaddr(cmdline_parse_token_hdr_t *tk, char *dstbuf,
- unsigned int size)
-{
- struct cmdline_token_ipaddr *tk2;
-
- if (!tk || !dstbuf)
- return -1;
-
- tk2 = (struct cmdline_token_ipaddr *)tk;
-
- switch (tk2->ipaddr_data.flags) {
- case CMDLINE_IPADDR_V4:
- snprintf(dstbuf, size, "IPv4");
- break;
- case CMDLINE_IPADDR_V6:
- snprintf(dstbuf, size, "IPv6");
- break;
- case CMDLINE_IPADDR_V4|CMDLINE_IPADDR_V6:
- snprintf(dstbuf, size, "IPv4/IPv6");
- break;
- case CMDLINE_IPADDR_NETWORK|CMDLINE_IPADDR_V4:
- snprintf(dstbuf, size, "IPv4 network");
- break;
- case CMDLINE_IPADDR_NETWORK|CMDLINE_IPADDR_V6:
- snprintf(dstbuf, size, "IPv6 network");
- break;
- case CMDLINE_IPADDR_NETWORK|CMDLINE_IPADDR_V4|CMDLINE_IPADDR_V6:
- snprintf(dstbuf, size, "IPv4/IPv6 network");
- break;
- default:
- snprintf(dstbuf, size, "IPaddr (bad flags)");
- break;
- }
- return 0;
-}
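
A sketch of a direct call to the parser above, with a token configured to accept only IPv4 networks; the names are illustrative:

static int example_parse_prefix(void)
{
	struct cmdline_token_ipaddr tok = {
		.hdr = {
			.ops = &cmdline_token_ipaddr_ops,
			.offset = 0,
		},
		.ipaddr_data = {
			.flags = CMDLINE_IPADDR_V4 | CMDLINE_IPADDR_NETWORK,
		},
	};
	cmdline_ipaddr_t ip;

	/* fills ip.family (AF_INET), ip.addr.ipv4 and ip.prefixlen (8),
	 * and returns the token length (10); -1 on malformed input */
	return cmdline_parse_ipaddr(&tok.hdr, "10.0.0.0/8", &ip, sizeof(ip));
}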
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.h b/src/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.h
deleted file mode 100755
index 296c374a..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _PARSE_IPADDR_H_
-#define _PARSE_IPADDR_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define CMDLINE_IPADDR_V4 0x01
-#define CMDLINE_IPADDR_V6 0x02
-#define CMDLINE_IPADDR_NETWORK 0x04
-
-struct cmdline_ipaddr {
- uint8_t family;
- union {
- struct in_addr ipv4;
- struct in6_addr ipv6;
- } addr;
- unsigned int prefixlen; /* in case of network only */
-};
-typedef struct cmdline_ipaddr cmdline_ipaddr_t;
-
-struct cmdline_token_ipaddr_data {
- uint8_t flags;
-};
-
-struct cmdline_token_ipaddr {
- struct cmdline_token_hdr hdr;
- struct cmdline_token_ipaddr_data ipaddr_data;
-};
-typedef struct cmdline_token_ipaddr cmdline_parse_token_ipaddr_t;
-
-extern struct cmdline_token_ops cmdline_token_ipaddr_ops;
-
-int cmdline_parse_ipaddr(cmdline_parse_token_hdr_t *tk, const char *srcbuf,
- void *res, unsigned ressize);
-int cmdline_get_help_ipaddr(cmdline_parse_token_hdr_t *tk, char *dstbuf,
- unsigned int size);
-
-#define TOKEN_IPADDR_INITIALIZER(structure, field) \
-{ \
- /* hdr */ \
- { \
- &cmdline_token_ipaddr_ops, /* ops */ \
- offsetof(structure, field), /* offset */ \
- }, \
- /* ipaddr_data */ \
- { \
- CMDLINE_IPADDR_V4 | /* flags */ \
- CMDLINE_IPADDR_V6, \
- }, \
-}
-
-#define TOKEN_IPV4_INITIALIZER(structure, field) \
-{ \
- /* hdr */ \
- { \
- &cmdline_token_ipaddr_ops, /* ops */ \
- offsetof(structure, field), /* offset */ \
- }, \
- /* ipaddr_data */ \
- { \
- CMDLINE_IPADDR_V4, /* flags */ \
- }, \
-}
-
-#define TOKEN_IPV6_INITIALIZER(structure, field) \
-{ \
- /* hdr */ \
- { \
- &cmdline_token_ipaddr_ops, /* ops */ \
- offsetof(structure, field), /* offset */ \
- }, \
- /* ipaddr_data */ \
- { \
- CMDLINE_IPADDR_V6, /* flags */ \
- }, \
-}
-
-#define TOKEN_IPNET_INITIALIZER(structure, field) \
-{ \
- /* hdr */ \
- { \
- &cmdline_token_ipaddr_ops, /* ops */ \
- offsetof(structure, field), /* offset */ \
- }, \
- /* ipaddr_data */ \
- { \
- CMDLINE_IPADDR_V4 | /* flags */ \
- CMDLINE_IPADDR_V6 | \
- CMDLINE_IPADDR_NETWORK, \
- }, \
-}
-
-#define TOKEN_IPV4NET_INITIALIZER(structure, field) \
-{ \
- /* hdr */ \
- { \
- &cmdline_token_ipaddr_ops, /* ops */ \
- offsetof(structure, field), /* offset */ \
- }, \
- /* ipaddr_data */ \
- { \
- CMDLINE_IPADDR_V4 | /* flags */ \
- CMDLINE_IPADDR_NETWORK, \
- }, \
-}
-
-#define TOKEN_IPV6NET_INITIALIZER(structure, field) \
-{ \
- /* hdr */ \
- { \
- &cmdline_token_ipaddr_ops, /* ops */ \
- offsetof(structure, field), /* offset */ \
- }, \
- /* ipaddr_data */ \
- { \
-		CMDLINE_IPADDR_V6 |     /* flags */                     \
- CMDLINE_IPADDR_NETWORK, \
- }, \
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _PARSE_IPADDR_H_ */
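
For example, a token declared with TOKEN_IPNET_INITIALIZER accepts both "1.2.3.0/24" and "2001:db8::/32" and leaves the result in a cmdline_ipaddr_t; the struct and field names below are hypothetical:

struct cmd_route_result {
	cmdline_ipaddr_t dst;
};

static cmdline_parse_token_ipaddr_t cmd_route_dst =
	TOKEN_IPNET_INITIALIZER(struct cmd_route_result, dst);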
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_num.c b/src/dpdk_lib18/librte_cmdline/cmdline_parse_num.c
deleted file mode 100755
index d8cf37f0..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_parse_num.c
+++ /dev/null
@@ -1,402 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <stdint.h>
-#include <inttypes.h>
-#include <ctype.h>
-#include <string.h>
-#include <stdarg.h>
-#include <errno.h>
-#include <rte_string_fns.h>
-
-#include "cmdline_parse.h"
-#include "cmdline_parse_num.h"
-
-#ifdef RTE_LIBRTE_CMDLINE_DEBUG
-#define debug_printf(args...) printf(args)
-#else
-#define debug_printf(args...) do {} while(0)
-#endif
-
-struct cmdline_token_ops cmdline_token_num_ops = {
- .parse = cmdline_parse_num,
- .complete_get_nb = NULL,
- .complete_get_elt = NULL,
- .get_help = cmdline_get_help_num,
-};
-
-
-enum num_parse_state_t {
- START,
- DEC_NEG,
- BIN,
- HEX,
-
- ERROR,
-
- FIRST_OK, /* not used */
- ZERO_OK,
- HEX_OK,
- OCTAL_OK,
- BIN_OK,
- DEC_NEG_OK,
- DEC_POS_OK,
-};
-
-/* Keep in sync with the enum in cmdline_parse_num.h */
-static const char * num_help[] = {
- "UINT8", "UINT16", "UINT32", "UINT64",
- "INT8", "INT16", "INT32", "INT64",
-};
-
-static inline int
-add_to_res(unsigned int c, uint64_t *res, unsigned int base)
-{
- /* overflow */
- if ( (UINT64_MAX - c) / base < *res ) {
- return -1;
- }
-
- *res = (uint64_t) (*res * base + c);
- return 0;
-}
-
-static int
-check_res_size(struct cmdline_token_num_data *nd, unsigned ressize)
-{
- switch (nd->type) {
- case INT8:
- case UINT8:
- if (ressize < sizeof(int8_t))
- return -1;
- break;
- case INT16:
- case UINT16:
- if (ressize < sizeof(int16_t))
- return -1;
- break;
- case INT32:
- case UINT32:
- if (ressize < sizeof(int32_t))
- return -1;
- break;
- case INT64:
- case UINT64:
- if (ressize < sizeof(int64_t))
- return -1;
- break;
- default:
- return -1;
- }
- return 0;
-}
-
-/* parse an int */
-int
-cmdline_parse_num(cmdline_parse_token_hdr_t *tk, const char *srcbuf, void *res,
- unsigned ressize)
-{
- struct cmdline_token_num_data nd;
- enum num_parse_state_t st = START;
- const char * buf;
- char c;
- uint64_t res1 = 0;
-
- if (!tk)
- return -1;
-
- if (!srcbuf || !*srcbuf)
- return -1;
-
- buf = srcbuf;
- c = *buf;
-
- memcpy(&nd, &((struct cmdline_token_num *)tk)->num_data, sizeof(nd));
-
- /* check that we have enough room in res */
- if (res) {
- if (check_res_size(&nd, ressize) < 0)
- return -1;
- }
-
- while ( st != ERROR && c && ! cmdline_isendoftoken(c) ) {
- debug_printf("%c %x -> ", c, c);
- switch (st) {
- case START:
- if (c == '-') {
- st = DEC_NEG;
- }
- else if (c == '0') {
- st = ZERO_OK;
- }
- else if (c >= '1' && c <= '9') {
- if (add_to_res(c - '0', &res1, 10) < 0)
- st = ERROR;
- else
- st = DEC_POS_OK;
- }
- else {
- st = ERROR;
- }
- break;
-
- case ZERO_OK:
- if (c == 'x') {
- st = HEX;
- }
- else if (c == 'b') {
- st = BIN;
- }
- else if (c >= '0' && c <= '7') {
-				if (add_to_res(c - '0', &res1, 8) < 0)
- st = ERROR;
- else
- st = OCTAL_OK;
- }
- else {
- st = ERROR;
- }
- break;
-
- case DEC_NEG:
- if (c >= '0' && c <= '9') {
- if (add_to_res(c - '0', &res1, 10) < 0)
- st = ERROR;
- else
- st = DEC_NEG_OK;
- }
- else {
- st = ERROR;
- }
- break;
-
- case DEC_NEG_OK:
- if (c >= '0' && c <= '9') {
- if (add_to_res(c - '0', &res1, 10) < 0)
- st = ERROR;
- }
- else {
- st = ERROR;
- }
- break;
-
- case DEC_POS_OK:
- if (c >= '0' && c <= '9') {
- if (add_to_res(c - '0', &res1, 10) < 0)
- st = ERROR;
- }
- else {
- st = ERROR;
- }
- break;
-
- case HEX:
- st = HEX_OK;
- /* no break */
- case HEX_OK:
- if (c >= '0' && c <= '9') {
- if (add_to_res(c - '0', &res1, 16) < 0)
- st = ERROR;
- }
- else if (c >= 'a' && c <= 'f') {
- if (add_to_res(c - 'a' + 10, &res1, 16) < 0)
- st = ERROR;
- }
- else if (c >= 'A' && c <= 'F') {
- if (add_to_res(c - 'A' + 10, &res1, 16) < 0)
- st = ERROR;
- }
- else {
- st = ERROR;
- }
- break;
-
-
- case OCTAL_OK:
- if (c >= '0' && c <= '7') {
- if (add_to_res(c - '0', &res1, 8) < 0)
- st = ERROR;
- }
- else {
- st = ERROR;
- }
- break;
-
- case BIN:
- st = BIN_OK;
- /* no break */
- case BIN_OK:
- if (c >= '0' && c <= '1') {
- if (add_to_res(c - '0', &res1, 2) < 0)
- st = ERROR;
- }
- else {
- st = ERROR;
- }
- break;
- default:
- debug_printf("not impl ");
-
- }
-
- debug_printf("(%"PRIu64")\n", res1);
-
- buf ++;
- c = *buf;
-
- /* token too long */
- if (buf-srcbuf > 127)
- return -1;
- }
-
- switch (st) {
- case ZERO_OK:
- case DEC_POS_OK:
- case HEX_OK:
- case OCTAL_OK:
- case BIN_OK:
- if ( nd.type == INT8 && res1 <= INT8_MAX ) {
- if (res) *(int8_t *)res = (int8_t) res1;
- return (buf-srcbuf);
- }
- else if ( nd.type == INT16 && res1 <= INT16_MAX ) {
- if (res) *(int16_t *)res = (int16_t) res1;
- return (buf-srcbuf);
- }
- else if ( nd.type == INT32 && res1 <= INT32_MAX ) {
- if (res) *(int32_t *)res = (int32_t) res1;
- return (buf-srcbuf);
- }
- else if ( nd.type == INT64 && res1 <= INT64_MAX ) {
- if (res) *(int64_t *)res = (int64_t) res1;
- return (buf-srcbuf);
- }
- else if ( nd.type == UINT8 && res1 <= UINT8_MAX ) {
- if (res) *(uint8_t *)res = (uint8_t) res1;
- return (buf-srcbuf);
- }
- else if (nd.type == UINT16 && res1 <= UINT16_MAX ) {
- if (res) *(uint16_t *)res = (uint16_t) res1;
- return (buf-srcbuf);
- }
- else if ( nd.type == UINT32 && res1 <= UINT32_MAX ) {
- if (res) *(uint32_t *)res = (uint32_t) res1;
- return (buf-srcbuf);
- }
- else if ( nd.type == UINT64 ) {
- if (res) *(uint64_t *)res = res1;
- return (buf-srcbuf);
- }
- else {
- return -1;
- }
- break;
-
- case DEC_NEG_OK:
- if ( nd.type == INT8 && res1 <= INT8_MAX + 1 ) {
- if (res) *(int8_t *)res = (int8_t) (-res1);
- return (buf-srcbuf);
- }
- else if ( nd.type == INT16 && res1 <= (uint16_t)INT16_MAX + 1 ) {
- if (res) *(int16_t *)res = (int16_t) (-res1);
- return (buf-srcbuf);
- }
- else if ( nd.type == INT32 && res1 <= (uint32_t)INT32_MAX + 1 ) {
- if (res) *(int32_t *)res = (int32_t) (-res1);
- return (buf-srcbuf);
- }
- else if ( nd.type == INT64 && res1 <= (uint64_t)INT64_MAX + 1 ) {
- if (res) *(int64_t *)res = (int64_t) (-res1);
- return (buf-srcbuf);
- }
- else {
- return -1;
- }
- break;
- default:
- debug_printf("error\n");
- return -1;
- }
-}
-
-
-/* parse an int */
-int
-cmdline_get_help_num(cmdline_parse_token_hdr_t *tk, char *dstbuf, unsigned int size)
-{
- struct cmdline_token_num_data nd;
- int ret;
-
- if (!tk)
- return -1;
-
- memcpy(&nd, &((struct cmdline_token_num *)tk)->num_data, sizeof(nd));
-
-	/* should not happen, so don't do this test */
- /* if (nd.type >= (sizeof(num_help)/sizeof(const char *))) */
- /* return -1; */
-
- ret = snprintf(dstbuf, size, "%s", num_help[nd.type]);
- if (ret < 0)
- return -1;
- dstbuf[size-1] = '\0';
- return 0;
-}
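
The state machine above accepts decimal, negative decimal, hexadecimal (0x), octal (leading 0) and binary (0b) forms, and range-checks the result against nd.type. A sketch of a direct call; the names are illustrative:

static int example_parse_u32(void)
{
	struct cmdline_token_num tok = {
		.hdr = {
			.ops = &cmdline_token_num_ops,
			.offset = 0,
		},
		.num_data = { .type = UINT32 },
	};
	uint32_t v;

	/* "0x1a", "032", "0b11010" and "26" all store 26 in v;
	 * the return value is the number of characters consumed */
	return cmdline_parse_num(&tok.hdr, "0x1a", &v, sizeof(v));
}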
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_num.h b/src/dpdk_lib18/librte_cmdline/cmdline_parse_num.h
deleted file mode 100755
index 5376806f..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_parse_num.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _PARSE_NUM_H_
-#define _PARSE_NUM_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-enum cmdline_numtype {
- UINT8 = 0,
- UINT16,
- UINT32,
- UINT64,
- INT8,
- INT16,
- INT32,
- INT64
-};
-
-struct cmdline_token_num_data {
- enum cmdline_numtype type;
-};
-
-struct cmdline_token_num {
- struct cmdline_token_hdr hdr;
- struct cmdline_token_num_data num_data;
-};
-typedef struct cmdline_token_num cmdline_parse_token_num_t;
-
-extern struct cmdline_token_ops cmdline_token_num_ops;
-
-int cmdline_parse_num(cmdline_parse_token_hdr_t *tk,
- const char *srcbuf, void *res, unsigned ressize);
-int cmdline_get_help_num(cmdline_parse_token_hdr_t *tk,
- char *dstbuf, unsigned int size);
-
-#define TOKEN_NUM_INITIALIZER(structure, field, numtype) \
-{ \
- /* hdr */ \
- { \
- &cmdline_token_num_ops, /* ops */ \
- offsetof(structure, field), /* offset */ \
- }, \
- /* num_data */ \
- { \
- numtype, /* type */ \
- }, \
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _PARSE_NUM_H_ */
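
A typical use of the initializer above; the UINT16 type means out-of-range input is rejected at parse time (the struct and field names are hypothetical):

struct cmd_port_result {
	uint16_t port_id;
};

static cmdline_parse_token_num_t cmd_port_tok =
	TOKEN_NUM_INITIALIZER(struct cmd_port_result, port_id, UINT16);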
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.c b/src/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.c
deleted file mode 100755
index 834f2e6e..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2010, Keith Wiles <keith.wiles@windriver.com>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <inttypes.h>
-#include <ctype.h>
-#include <string.h>
-#include <errno.h>
-#include <stdarg.h>
-
-#include <rte_string_fns.h>
-#include "cmdline_parse.h"
-#include "cmdline_parse_portlist.h"
-
-struct cmdline_token_ops cmdline_token_portlist_ops = {
- .parse = cmdline_parse_portlist,
- .complete_get_nb = NULL,
- .complete_get_elt = NULL,
- .get_help = cmdline_get_help_portlist,
-};
-
-static void
-parse_set_list(cmdline_portlist_t * pl, int low, int high)
-{
- do {
- pl->map |= (1 << low++);
- } while (low <= high);
-}
-
-static int
-parse_ports(cmdline_portlist_t * pl, const char * str)
-{
- size_t ps, pe;
- const char *first, *last;
- char *end;
-
- for (first = str, last = first;
- first != NULL && last != NULL;
- first = last + 1) {
-
- last = strchr(first, ',');
-
- errno = 0;
- ps = strtoul(first, &end, 10);
- if (errno != 0 || end == first ||
- (end[0] != '-' && end[0] != 0 && end != last))
- return (-1);
-
- /* Support for N-M portlist format */
- if (end[0] == '-') {
- errno = 0;
- first = end + 1;
- pe = strtoul(first, &end, 10);
- if (errno != 0 || end == first ||
- (end[0] != 0 && end != last))
- return (-1);
- } else {
- pe = ps;
- }
-
- if (ps > pe || pe >= sizeof (pl->map) * 8)
- return (-1);
-
- parse_set_list(pl, ps, pe);
- }
-
- return (0);
-}
-
-int
-cmdline_parse_portlist(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
- const char *buf, void *res, unsigned ressize)
-{
- unsigned int token_len = 0;
- char portlist_str[PORTLIST_TOKEN_SIZE+1];
- cmdline_portlist_t *pl;
-
- if (!buf || ! *buf)
- return (-1);
-
-	if (res && ressize < sizeof(cmdline_portlist_t))
- return -1;
-
- pl = res;
-
- while (!cmdline_isendoftoken(buf[token_len]) &&
- (token_len < PORTLIST_TOKEN_SIZE))
- token_len++;
-
- if (token_len >= PORTLIST_TOKEN_SIZE)
- return (-1);
-
- snprintf(portlist_str, token_len+1, "%s", buf);
-
- if (pl) {
- pl->map = 0;
- if (strcmp("all", portlist_str) == 0)
- pl->map = UINT32_MAX;
- else if (parse_ports(pl, portlist_str) != 0)
- return (-1);
- }
-
- return token_len;
-}
-
-int
-cmdline_get_help_portlist(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
- char *dstbuf, unsigned int size)
-{
- int ret;
- ret = snprintf(dstbuf, size, "range of ports as 3,4-6,8-19,20");
- if (ret < 0)
- return -1;
- return 0;
-}
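
Called directly, the parser above produces a bitmap; a small illustrative sketch (tk is unused by this implementation, so NULL is passed, and ressize must be at least sizeof(cmdline_portlist_t)):

static int example_parse_portlist(void)
{
	cmdline_portlist_t pl;

	/* "3,4-6,8" sets bits 3, 4, 5, 6 and 8 of pl.map;
	 * "all" sets every bit; returns -1 on malformed input */
	return cmdline_parse_portlist(NULL, "3,4-6,8", &pl, sizeof(pl));
}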
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.h b/src/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.h
deleted file mode 100755
index 85050595..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2010, Keith Wiles <keith.wiles@windriver.com>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _PARSE_PORTLIST_H_
-#define _PARSE_PORTLIST_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* size of a parsed string */
-#define PORTLIST_TOKEN_SIZE 128
-#define PORTLIST_MAX_TOKENS 32
-
-typedef struct cmdline_portlist {
- uint32_t map;
-} cmdline_portlist_t;
-
-struct cmdline_token_portlist {
- struct cmdline_token_hdr hdr;
-};
-typedef struct cmdline_token_portlist cmdline_parse_token_portlist_t;
-
-extern struct cmdline_token_ops cmdline_token_portlist_ops;
-
-int cmdline_parse_portlist(cmdline_parse_token_hdr_t *tk,
- const char *srcbuf, void *res, unsigned ressize);
-int cmdline_get_help_portlist(cmdline_parse_token_hdr_t *tk,
- char *dstbuf, unsigned int size);
-
-#define TOKEN_PORTLIST_INITIALIZER(structure, field) \
-{ \
- /* hdr */ \
- { \
- &cmdline_token_portlist_ops, /* ops */ \
- offsetof(structure, field), /* offset */ \
- }, \
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _PARSE_PORTLIST_H_ */
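
Usage mirrors the other token types; since the map is a 32-bit field, port IDs are limited to 0-31 (the names below are hypothetical):

struct cmd_start_result {
	cmdline_portlist_t ports;
};

static cmdline_parse_token_portlist_t cmd_start_ports =
	TOKEN_PORTLIST_INITIALIZER(struct cmd_start_result, ports);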
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_string.c b/src/dpdk_lib18/librte_cmdline/cmdline_parse_string.c
deleted file mode 100755
index 45883b3e..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_parse_string.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <inttypes.h>
-#include <ctype.h>
-#include <string.h>
-#include <stdarg.h>
-#include <errno.h>
-#include <rte_string_fns.h>
-
-#include "cmdline_parse.h"
-#include "cmdline_parse_string.h"
-
-struct cmdline_token_ops cmdline_token_string_ops = {
- .parse = cmdline_parse_string,
- .complete_get_nb = cmdline_complete_get_nb_string,
- .complete_get_elt = cmdline_complete_get_elt_string,
- .get_help = cmdline_get_help_string,
-};
-
-#define MULTISTRING_HELP "Mul-choice STRING"
-#define ANYSTRING_HELP "Any STRING"
-#define FIXEDSTRING_HELP "Fixed STRING"
-
-static unsigned int
-get_token_len(const char *s)
-{
- char c;
- unsigned int i=0;
-
- c = s[i];
- while (c!='#' && c!='\0') {
- i++;
- c = s[i];
- }
- return i;
-}
-
-static const char *
-get_next_token(const char *s)
-{
- unsigned int i;
- i = get_token_len(s);
- if (s[i] == '#')
- return s+i+1;
- return NULL;
-}
-
-int
-cmdline_parse_string(cmdline_parse_token_hdr_t *tk, const char *buf, void *res,
- unsigned ressize)
-{
- struct cmdline_token_string *tk2;
- struct cmdline_token_string_data *sd;
- unsigned int token_len;
- const char *str;
-
- if (res && ressize < STR_TOKEN_SIZE)
- return -1;
-
- if (!tk || !buf || ! *buf)
- return -1;
-
- tk2 = (struct cmdline_token_string *)tk;
-
- sd = &tk2->string_data;
-
- /* fixed string */
- if (sd->str) {
- str = sd->str;
- do {
- token_len = get_token_len(str);
-
- /* if token is too big... */
- if (token_len >= STR_TOKEN_SIZE - 1) {
- continue;
- }
-
- if ( strncmp(buf, str, token_len) ) {
- continue;
- }
-
- if ( !cmdline_isendoftoken(*(buf+token_len)) ) {
- continue;
- }
-
- break;
- } while ( (str = get_next_token(str)) != NULL );
-
- if (!str)
- return -1;
- }
- /* unspecified string */
- else {
- token_len = 0;
- while(!cmdline_isendoftoken(buf[token_len]) &&
- token_len < (STR_TOKEN_SIZE-1))
- token_len++;
-
- /* return if token too long */
- if (token_len >= STR_TOKEN_SIZE - 1) {
- return -1;
- }
- }
-
- if (res) {
- /* we are sure that token_len is < STR_TOKEN_SIZE-1 */
- snprintf(res, STR_TOKEN_SIZE, "%s", buf);
- *((char *)res + token_len) = 0;
- }
-
-
- return token_len;
-}
-
-int cmdline_complete_get_nb_string(cmdline_parse_token_hdr_t *tk)
-{
- struct cmdline_token_string *tk2;
- struct cmdline_token_string_data *sd;
- const char *str;
- int ret = 1;
-
- if (!tk)
- return -1;
-
- tk2 = (struct cmdline_token_string *)tk;
- sd = &tk2->string_data;
-
- if (!sd->str)
- return 0;
-
- str = sd->str;
- while( (str = get_next_token(str)) != NULL ) {
- ret++;
- }
- return ret;
-}
-
-int cmdline_complete_get_elt_string(cmdline_parse_token_hdr_t *tk, int idx,
- char *dstbuf, unsigned int size)
-{
- struct cmdline_token_string *tk2;
- struct cmdline_token_string_data *sd;
- const char *s;
- unsigned int len;
-
- if (!tk || !dstbuf || idx < 0)
- return -1;
-
- tk2 = (struct cmdline_token_string *)tk;
- sd = &tk2->string_data;
-
- s = sd->str;
-
- while (idx-- && s)
- s = get_next_token(s);
-
- if (!s)
- return -1;
-
- len = get_token_len(s);
- if (len > size - 1)
- return -1;
-
- memcpy(dstbuf, s, len);
- dstbuf[len] = '\0';
- return 0;
-}
-
-
-int cmdline_get_help_string(cmdline_parse_token_hdr_t *tk, char *dstbuf,
- unsigned int size)
-{
- struct cmdline_token_string *tk2;
- struct cmdline_token_string_data *sd;
- const char *s;
-
- if (!tk || !dstbuf)
- return -1;
-
- tk2 = (struct cmdline_token_string *)tk;
- sd = &tk2->string_data;
-
- s = sd->str;
-
- if (s) {
- if (get_next_token(s))
- snprintf(dstbuf, size, MULTISTRING_HELP);
- else
- snprintf(dstbuf, size, FIXEDSTRING_HELP);
- } else
- snprintf(dstbuf, size, ANYSTRING_HELP);
-
- return 0;
-}
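The get_token_len()/get_next_token() helpers above are what give fixed-string tokens their multi-choice form: string_data.str may hold several alternatives separated by '#', and cmdline_parse_string() accepts whichever one matches. A minimal sketch driving the parser directly, with the token built by hand instead of via TOKEN_STRING_INITIALIZER (for illustration only):

    #include <stdio.h>
    #include <cmdline_parse.h>
    #include <cmdline_parse_string.h>

    int main(void)
    {
        /* "show#clear" accepts either the word "show" or the word "clear" */
        struct cmdline_token_string tok = {
            .hdr = { .ops = &cmdline_token_string_ops, .offset = 0 },
            .string_data = { .str = "show#clear" },
        };
        char res[STR_TOKEN_SIZE];
        int len;

        len = cmdline_parse_string(&tok.hdr, "clear stats", res, sizeof(res));
        if (len >= 0)
            printf("matched %d chars: '%s'\n", len, res); /* 5, 'clear' */
        return 0;
    }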
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_string.h b/src/dpdk_lib18/librte_cmdline/cmdline_parse_string.h
deleted file mode 100755
index c2056226..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_parse_string.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _PARSE_STRING_H_
-#define _PARSE_STRING_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* size of a parsed string */
-#define STR_TOKEN_SIZE 128
-
-typedef char cmdline_fixed_string_t[STR_TOKEN_SIZE];
-
-struct cmdline_token_string_data {
- const char *str;
-};
-
-struct cmdline_token_string {
- struct cmdline_token_hdr hdr;
- struct cmdline_token_string_data string_data;
-};
-typedef struct cmdline_token_string cmdline_parse_token_string_t;
-
-extern struct cmdline_token_ops cmdline_token_string_ops;
-
-int cmdline_parse_string(cmdline_parse_token_hdr_t *tk, const char *srcbuf,
- void *res, unsigned ressize);
-int cmdline_complete_get_nb_string(cmdline_parse_token_hdr_t *tk);
-int cmdline_complete_get_elt_string(cmdline_parse_token_hdr_t *tk, int idx,
- char *dstbuf, unsigned int size);
-int cmdline_get_help_string(cmdline_parse_token_hdr_t *tk, char *dstbuf,
- unsigned int size);
-
-#define TOKEN_STRING_INITIALIZER(structure, field, string) \
-{ \
- /* hdr */ \
- { \
- &cmdline_token_string_ops, /* ops */ \
- offsetof(structure, field), /* offset */ \
- }, \
- /* string_data */ \
- { \
- string, /* str */ \
- }, \
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _PARSE_STRING_H_ */
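One token type, three behaviors, as the header above encodes: a single fixed word, a '#'-separated multi-choice word, or (with a NULL string) any word copied into a cmdline_fixed_string_t. A declaration sketch with hypothetical names:

    #include <cmdline_parse.h>
    #include <cmdline_parse_string.h>

    struct cmd_stats_result {            /* hypothetical */
        cmdline_fixed_string_t action;   /* receives "show" or "clear" */
        cmdline_fixed_string_t name;     /* receives any single word */
    };

    static cmdline_parse_token_string_t cmd_stats_action =
        TOKEN_STRING_INITIALIZER(struct cmd_stats_result, action, "show#clear");

    static cmdline_parse_token_string_t cmd_stats_name =
        TOKEN_STRING_INITIALIZER(struct cmd_stats_result, name, NULL);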
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_rdline.c b/src/dpdk_lib18/librte_cmdline/cmdline_rdline.c
deleted file mode 100755
index f79ebe31..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_rdline.c
+++ /dev/null
@@ -1,698 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <string.h>
-#include <stdarg.h>
-#include <errno.h>
-#include <ctype.h>
-
-#include "cmdline_cirbuf.h"
-#include "cmdline_rdline.h"
-
-static void rdline_puts(struct rdline *rdl, const char *buf);
-static void rdline_miniprintf(struct rdline *rdl,
- const char *buf, unsigned int val);
-
-static void rdline_remove_old_history_item(struct rdline *rdl);
-static void rdline_remove_first_history_item(struct rdline *rdl);
-static unsigned int rdline_get_history_size(struct rdline *rdl);
-
-
-/* isblank() needs _XOPEN_SOURCE >= 600 || _ISOC99_SOURCE, so use our
- * own. */
-static int
-isblank2(char c)
-{
- if (c == ' ' ||
- c == '\t' )
- return 1;
- return 0;
-}
-
-int
-rdline_init(struct rdline *rdl,
- rdline_write_char_t *write_char,
- rdline_validate_t *validate,
- rdline_complete_t *complete)
-{
- if (!rdl || !write_char || !validate || !complete)
- return -EINVAL;
- memset(rdl, 0, sizeof(*rdl));
- rdl->validate = validate;
- rdl->complete = complete;
- rdl->write_char = write_char;
- rdl->status = RDLINE_INIT;
- return cirbuf_init(&rdl->history, rdl->history_buf, 0, RDLINE_HISTORY_BUF_SIZE);
-}
-
-void
-rdline_newline(struct rdline *rdl, const char *prompt)
-{
- unsigned int i;
-
- if (!rdl || !prompt)
- return;
-
- vt100_init(&rdl->vt100);
- cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE);
- cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE);
-
- rdl->prompt_size = strnlen(prompt, RDLINE_PROMPT_SIZE-1);
- if (prompt != rdl->prompt)
- memcpy(rdl->prompt, prompt, rdl->prompt_size);
- rdl->prompt[RDLINE_PROMPT_SIZE-1] = '\0';
-
- for (i=0 ; i<rdl->prompt_size ; i++)
- rdl->write_char(rdl, rdl->prompt[i]);
- rdl->status = RDLINE_RUNNING;
-
- rdl->history_cur_line = -1;
-}
-
-void
-rdline_stop(struct rdline *rdl)
-{
- if (!rdl)
- return;
- rdl->status = RDLINE_INIT;
-}
-
-void
-rdline_quit(struct rdline *rdl)
-{
- if (!rdl)
- return;
- rdl->status = RDLINE_EXITED;
-}
-
-void
-rdline_restart(struct rdline *rdl)
-{
- if (!rdl)
- return;
- rdl->status = RDLINE_RUNNING;
-}
-
-void
-rdline_reset(struct rdline *rdl)
-{
- if (!rdl)
- return;
- vt100_init(&rdl->vt100);
- cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE);
- cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE);
-
- rdl->status = RDLINE_RUNNING;
-
- rdl->history_cur_line = -1;
-}
-
-const char *
-rdline_get_buffer(struct rdline *rdl)
-{
- if (!rdl)
- return NULL;
- unsigned int len_l, len_r;
- cirbuf_align_left(&rdl->left);
- cirbuf_align_left(&rdl->right);
-
- len_l = CIRBUF_GET_LEN(&rdl->left);
- len_r = CIRBUF_GET_LEN(&rdl->right);
- memcpy(rdl->left_buf+len_l, rdl->right_buf, len_r);
-
- rdl->left_buf[len_l + len_r] = '\n';
- rdl->left_buf[len_l + len_r + 1] = '\0';
- return rdl->left_buf;
-}
-
-static void
-display_right_buffer(struct rdline *rdl, int force)
-{
- unsigned int i;
- char tmp;
-
- if (!force && CIRBUF_IS_EMPTY(&rdl->right))
- return;
-
- rdline_puts(rdl, vt100_clear_right);
- CIRBUF_FOREACH(&rdl->right, i, tmp) {
- rdl->write_char(rdl, tmp);
- }
- if (!CIRBUF_IS_EMPTY(&rdl->right))
- rdline_miniprintf(rdl, vt100_multi_left,
- CIRBUF_GET_LEN(&rdl->right));
-}
-
-void
-rdline_redisplay(struct rdline *rdl)
-{
- unsigned int i;
- char tmp;
-
- if (!rdl)
- return;
-
- rdline_puts(rdl, vt100_home);
- for (i=0 ; i<rdl->prompt_size ; i++)
- rdl->write_char(rdl, rdl->prompt[i]);
- CIRBUF_FOREACH(&rdl->left, i, tmp) {
- rdl->write_char(rdl, tmp);
- }
- display_right_buffer(rdl, 1);
-}
-
-int
-rdline_char_in(struct rdline *rdl, char c)
-{
- unsigned int i;
- int cmd;
- char tmp;
- char *buf;
-
- if (!rdl)
- return -EINVAL;
-
- if (rdl->status == RDLINE_EXITED)
- return RDLINE_RES_EXITED;
- if (rdl->status != RDLINE_RUNNING)
- return RDLINE_RES_NOT_RUNNING;
-
- cmd = vt100_parser(&rdl->vt100, c);
- if (cmd == -2)
- return RDLINE_RES_SUCCESS;
-
- if (cmd >= 0) {
- switch (cmd) {
- /* move caret 1 char to the left */
- case CMDLINE_KEY_CTRL_B:
- case CMDLINE_KEY_LEFT_ARR:
- if (CIRBUF_IS_EMPTY(&rdl->left))
- break;
- tmp = cirbuf_get_tail(&rdl->left);
- cirbuf_del_tail(&rdl->left);
- cirbuf_add_head(&rdl->right, tmp);
- rdline_puts(rdl, vt100_left_arr);
- break;
-
- /* move caret 1 char to the right */
- case CMDLINE_KEY_CTRL_F:
- case CMDLINE_KEY_RIGHT_ARR:
- if (CIRBUF_IS_EMPTY(&rdl->right))
- break;
- tmp = cirbuf_get_head(&rdl->right);
- cirbuf_del_head(&rdl->right);
- cirbuf_add_tail(&rdl->left, tmp);
- rdline_puts(rdl, vt100_right_arr);
- break;
-
- /* move caret 1 word to the left */
- /* keyboard equivalent: Alt+B */
- case CMDLINE_KEY_WLEFT:
- while (! CIRBUF_IS_EMPTY(&rdl->left) &&
- (tmp = cirbuf_get_tail(&rdl->left)) &&
- isblank2(tmp)) {
- rdline_puts(rdl, vt100_left_arr);
- cirbuf_del_tail(&rdl->left);
- cirbuf_add_head(&rdl->right, tmp);
- }
- while (! CIRBUF_IS_EMPTY(&rdl->left) &&
- (tmp = cirbuf_get_tail(&rdl->left)) &&
- !isblank2(tmp)) {
- rdline_puts(rdl, vt100_left_arr);
- cirbuf_del_tail(&rdl->left);
- cirbuf_add_head(&rdl->right, tmp);
- }
- break;
-
- /* move caret 1 word to the right */
- /* keyboard equivalent: Alt+F */
- case CMDLINE_KEY_WRIGHT:
- while (! CIRBUF_IS_EMPTY(&rdl->right) &&
- (tmp = cirbuf_get_head(&rdl->right)) &&
- isblank2(tmp)) {
- rdline_puts(rdl, vt100_right_arr);
- cirbuf_del_head(&rdl->right);
- cirbuf_add_tail(&rdl->left, tmp);
- }
- while (! CIRBUF_IS_EMPTY(&rdl->right) &&
- (tmp = cirbuf_get_head(&rdl->right)) &&
- !isblank2(tmp)) {
- rdline_puts(rdl, vt100_right_arr);
- cirbuf_del_head(&rdl->right);
- cirbuf_add_tail(&rdl->left, tmp);
- }
- break;
-
- /* move caret to the left */
- case CMDLINE_KEY_CTRL_A:
- if (CIRBUF_IS_EMPTY(&rdl->left))
- break;
- rdline_miniprintf(rdl, vt100_multi_left,
- CIRBUF_GET_LEN(&rdl->left));
- while (! CIRBUF_IS_EMPTY(&rdl->left)) {
- tmp = cirbuf_get_tail(&rdl->left);
- cirbuf_del_tail(&rdl->left);
- cirbuf_add_head(&rdl->right, tmp);
- }
- break;
-
- /* move caret to the right */
- case CMDLINE_KEY_CTRL_E:
- if (CIRBUF_IS_EMPTY(&rdl->right))
- break;
- rdline_miniprintf(rdl, vt100_multi_right,
- CIRBUF_GET_LEN(&rdl->right));
- while (! CIRBUF_IS_EMPTY(&rdl->right)) {
- tmp = cirbuf_get_head(&rdl->right);
- cirbuf_del_head(&rdl->right);
- cirbuf_add_tail(&rdl->left, tmp);
- }
- break;
-
- /* delete 1 char from the left */
- case CMDLINE_KEY_BKSPACE:
- if(!cirbuf_del_tail_safe(&rdl->left)) {
- rdline_puts(rdl, vt100_bs);
- display_right_buffer(rdl, 1);
- }
- break;
-
- /* delete 1 char from the right */
- case CMDLINE_KEY_SUPPR:
- case CMDLINE_KEY_CTRL_D:
- if (cmd == CMDLINE_KEY_CTRL_D &&
- CIRBUF_IS_EMPTY(&rdl->left) &&
- CIRBUF_IS_EMPTY(&rdl->right)) {
- return RDLINE_RES_EOF;
- }
- if (!cirbuf_del_head_safe(&rdl->right)) {
- display_right_buffer(rdl, 1);
- }
- break;
-
- /* delete 1 word from the left */
- case CMDLINE_KEY_META_BKSPACE:
- case CMDLINE_KEY_CTRL_W:
- while (! CIRBUF_IS_EMPTY(&rdl->left) && isblank2(cirbuf_get_tail(&rdl->left))) {
- rdline_puts(rdl, vt100_bs);
- cirbuf_del_tail(&rdl->left);
- }
- while (! CIRBUF_IS_EMPTY(&rdl->left) && !isblank2(cirbuf_get_tail(&rdl->left))) {
- rdline_puts(rdl, vt100_bs);
- cirbuf_del_tail(&rdl->left);
- }
- display_right_buffer(rdl, 1);
- break;
-
- /* delete 1 word from the right */
- case CMDLINE_KEY_META_D:
- while (! CIRBUF_IS_EMPTY(&rdl->right) && isblank2(cirbuf_get_head(&rdl->right)))
- cirbuf_del_head(&rdl->right);
- while (! CIRBUF_IS_EMPTY(&rdl->right) && !isblank2(cirbuf_get_head(&rdl->right)))
- cirbuf_del_head(&rdl->right);
- display_right_buffer(rdl, 1);
- break;
-
- /* set kill buffer to contents on the right side of caret */
- case CMDLINE_KEY_CTRL_K:
- cirbuf_get_buf_head(&rdl->right, rdl->kill_buf, RDLINE_BUF_SIZE);
- rdl->kill_size = CIRBUF_GET_LEN(&rdl->right);
- cirbuf_del_buf_head(&rdl->right, rdl->kill_size);
- rdline_puts(rdl, vt100_clear_right);
- break;
-
- /* paste contents of kill buffer to the left side of caret */
- case CMDLINE_KEY_CTRL_Y:
- i=0;
- while(CIRBUF_GET_LEN(&rdl->right) + CIRBUF_GET_LEN(&rdl->left) <
- RDLINE_BUF_SIZE &&
- i < rdl->kill_size) {
- cirbuf_add_tail(&rdl->left, rdl->kill_buf[i]);
- rdl->write_char(rdl, rdl->kill_buf[i]);
- i++;
- }
- display_right_buffer(rdl, 0);
- break;
-
- /* clear and newline */
- case CMDLINE_KEY_CTRL_C:
- rdline_puts(rdl, "\r\n");
- rdline_newline(rdl, rdl->prompt);
- break;
-
- /* redisplay (helps when prompt is lost in other output) */
- case CMDLINE_KEY_CTRL_L:
- rdline_redisplay(rdl);
- break;
-
- /* autocomplete */
- case CMDLINE_KEY_TAB:
- case CMDLINE_KEY_HELP:
- cirbuf_align_left(&rdl->left);
- rdl->left_buf[CIRBUF_GET_LEN(&rdl->left)] = '\0';
- if (rdl->complete) {
- char tmp_buf[BUFSIZ];
- int complete_state;
- int ret;
- unsigned int tmp_size;
-
- if (cmd == CMDLINE_KEY_TAB)
- complete_state = 0;
- else
- complete_state = -1;
-
- /* see in parse.h for help on complete() */
- ret = rdl->complete(rdl, rdl->left_buf,
- tmp_buf, sizeof(tmp_buf),
- &complete_state);
- /* no completion or error */
- if (ret <= 0) {
- return RDLINE_RES_COMPLETE;
- }
-
- tmp_size = strnlen(tmp_buf, sizeof(tmp_buf));
- /* add chars */
- if (ret == RDLINE_RES_COMPLETE) {
- i=0;
- while(CIRBUF_GET_LEN(&rdl->right) + CIRBUF_GET_LEN(&rdl->left) <
- RDLINE_BUF_SIZE &&
- i < tmp_size) {
- cirbuf_add_tail(&rdl->left, tmp_buf[i]);
- rdl->write_char(rdl, tmp_buf[i]);
- i++;
- }
- display_right_buffer(rdl, 1);
- return RDLINE_RES_COMPLETE; /* ?? */
- }
-
- /* choice */
- rdline_puts(rdl, "\r\n");
- while (ret) {
- rdl->write_char(rdl, ' ');
- for (i=0 ; tmp_buf[i] ; i++)
- rdl->write_char(rdl, tmp_buf[i]);
- rdline_puts(rdl, "\r\n");
- ret = rdl->complete(rdl, rdl->left_buf,
- tmp_buf, sizeof(tmp_buf),
- &complete_state);
- }
-
- rdline_redisplay(rdl);
- }
- return RDLINE_RES_COMPLETE;
-
- /* complete buffer */
- case CMDLINE_KEY_RETURN:
- case CMDLINE_KEY_RETURN2:
- rdline_get_buffer(rdl);
- rdl->status = RDLINE_INIT;
- rdline_puts(rdl, "\r\n");
- if (rdl->history_cur_line != -1)
- rdline_remove_first_history_item(rdl);
-
- if (rdl->validate)
- rdl->validate(rdl, rdl->left_buf, CIRBUF_GET_LEN(&rdl->left)+2);
- /* user may have stopped rdline */
- if (rdl->status == RDLINE_EXITED)
- return RDLINE_RES_EXITED;
- return RDLINE_RES_VALIDATED;
-
- /* previous element in history */
- case CMDLINE_KEY_UP_ARR:
- case CMDLINE_KEY_CTRL_P:
- if (rdl->history_cur_line == 0) {
- rdline_remove_first_history_item(rdl);
- }
- if (rdl->history_cur_line <= 0) {
- rdline_add_history(rdl, rdline_get_buffer(rdl));
- rdl->history_cur_line = 0;
- }
-
- buf = rdline_get_history_item(rdl, rdl->history_cur_line + 1);
- if (!buf)
- break;
-
- rdl->history_cur_line ++;
- vt100_init(&rdl->vt100);
- cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE);
- cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE);
- cirbuf_add_buf_tail(&rdl->left, buf, strnlen(buf, RDLINE_BUF_SIZE));
- rdline_redisplay(rdl);
- break;
-
- /* next element in history */
- case CMDLINE_KEY_DOWN_ARR:
- case CMDLINE_KEY_CTRL_N:
- if (rdl->history_cur_line - 1 < 0)
- break;
-
- rdl->history_cur_line --;
- buf = rdline_get_history_item(rdl, rdl->history_cur_line);
- if (!buf)
- break;
- vt100_init(&rdl->vt100);
- cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE);
- cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE);
- cirbuf_add_buf_tail(&rdl->left, buf, strnlen(buf, RDLINE_BUF_SIZE));
- rdline_redisplay(rdl);
-
- break;
-
-
- default:
- break;
- }
-
- return RDLINE_RES_SUCCESS;
- }
-
- if (!isprint((int)c))
- return RDLINE_RES_SUCCESS;
-
- /* standard chars */
- if (CIRBUF_GET_LEN(&rdl->left) + CIRBUF_GET_LEN(&rdl->right) >= RDLINE_BUF_SIZE)
- return RDLINE_RES_SUCCESS;
-
- if (cirbuf_add_tail_safe(&rdl->left, c))
- return RDLINE_RES_SUCCESS;
-
- rdl->write_char(rdl, c);
- display_right_buffer(rdl, 0);
-
- return RDLINE_RES_SUCCESS;
-}
-
-
-/* HISTORY */
-
-static void
-rdline_remove_old_history_item(struct rdline * rdl)
-{
- char tmp;
-
- while (! CIRBUF_IS_EMPTY(&rdl->history) ) {
- tmp = cirbuf_get_head(&rdl->history);
- cirbuf_del_head(&rdl->history);
- if (!tmp)
- break;
- }
-}
-
-static void
-rdline_remove_first_history_item(struct rdline * rdl)
-{
- char tmp;
-
- if ( CIRBUF_IS_EMPTY(&rdl->history) ) {
- return;
- }
- else {
- cirbuf_del_tail(&rdl->history);
- }
-
- while (! CIRBUF_IS_EMPTY(&rdl->history) ) {
- tmp = cirbuf_get_tail(&rdl->history);
- if (!tmp)
- break;
- cirbuf_del_tail(&rdl->history);
- }
-}
-
-static unsigned int
-rdline_get_history_size(struct rdline * rdl)
-{
- unsigned int i, tmp, ret=0;
-
- CIRBUF_FOREACH(&rdl->history, i, tmp) {
- if (tmp == 0)
- ret ++;
- }
-
- return ret;
-}
-
-char *
-rdline_get_history_item(struct rdline * rdl, unsigned int idx)
-{
- unsigned int len, i, tmp;
-
- if (!rdl)
- return NULL;
-
- len = rdline_get_history_size(rdl);
- if ( idx >= len ) {
- return NULL;
- }
-
- cirbuf_align_left(&rdl->history);
-
- CIRBUF_FOREACH(&rdl->history, i, tmp) {
- if ( idx == len - 1) {
- return rdl->history_buf + i;
- }
- if (tmp == 0)
- len --;
- }
-
- return NULL;
-}
-
-int
-rdline_add_history(struct rdline * rdl, const char * buf)
-{
- unsigned int len, i;
-
- if (!rdl || !buf)
- return -EINVAL;
-
- len = strnlen(buf, RDLINE_BUF_SIZE);
- for (i=0; i<len ; i++) {
- if (buf[i] == '\n') {
- len = i;
- break;
- }
- }
-
- if ( len >= RDLINE_HISTORY_BUF_SIZE )
- return -1;
-
- while ( len >= CIRBUF_GET_FREELEN(&rdl->history) ) {
- rdline_remove_old_history_item(rdl);
- }
-
- cirbuf_add_buf_tail(&rdl->history, buf, len);
- cirbuf_add_tail(&rdl->history, 0);
-
- return 0;
-}
-
-void
-rdline_clear_history(struct rdline * rdl)
-{
- if (!rdl)
- return;
- cirbuf_init(&rdl->history, rdl->history_buf, 0, RDLINE_HISTORY_BUF_SIZE);
-}
-
-
-/* STATIC USEFUL FUNCS */
-
-static void
-rdline_puts(struct rdline * rdl, const char * buf)
-{
- char c;
- while ( (c = *(buf++)) != '\0' ) {
- rdl->write_char(rdl, c);
- }
-}
-
-/* a very very basic printf with one arg and one format 'u' */
-static void
-rdline_miniprintf(struct rdline *rdl, const char * buf, unsigned int val)
-{
- char c, started=0, div=100;
-
- while ( (c=*(buf++)) ) {
- if (c != '%') {
- rdl->write_char(rdl, c);
- continue;
- }
- c = *(buf++);
- if (c != 'u') {
- rdl->write_char(rdl, '%');
- rdl->write_char(rdl, c);
- continue;
- }
- /* val is never more than 255 */
- while (div) {
- c = (char)(val / div);
- if (c || started) {
- rdl->write_char(rdl, (char)(c+'0'));
- started = 1;
- }
- val %= div;
- div /= 10;
- }
- }
-}
-
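A note on the history implementation removed above: lines are stored back to back in one circular buffer, each terminated by a NUL, so rdline_get_history_size() simply counts terminators and rdline_add_history() evicts the oldest entries until the new line fits. A minimal sketch exercising just the history calls (the three callbacks are do-nothing stubs, required because rdline_init() rejects NULL pointers):

    #include <stdio.h>
    #include <cmdline_rdline.h>

    static int wr(struct rdline *rdl, char c)
    { (void)rdl; return putchar(c); }
    static void val(struct rdline *rdl, const char *buf, unsigned int sz)
    { (void)rdl; (void)buf; (void)sz; }
    static int cmpl(struct rdline *rdl, const char *buf, char *dst,
                    unsigned int dstsize, int *state)
    { (void)rdl; (void)buf; (void)dst; (void)dstsize; (void)state; return 0; }

    int main(void)
    {
        struct rdline rdl;

        rdline_init(&rdl, wr, val, cmpl);
        rdline_add_history(&rdl, "show stats");
        rdline_add_history(&rdl, "clear stats");
        /* index 0 is the most recently added line */
        printf("%s\n", rdline_get_history_item(&rdl, 0)); /* clear stats */
        return 0;
    }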
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_rdline.h b/src/dpdk_lib18/librte_cmdline/cmdline_rdline.h
deleted file mode 100755
index ae6e24e8..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_rdline.h
+++ /dev/null
@@ -1,254 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDLINE_H_
-#define _RDLINE_H_
-
-/**
- * This file is a small equivalent to the GNU readline library, but it
- * was originally designed for small systems, like Atmel AVR
- * microcontrollers (8 bits). Indeed, it does not use malloc(), which is
- * sometimes not implemented (or just not recommended) on such
- * systems.
- *
- * Obviously, it does not support as many things as the GNU readline,
- * but at least it supports some interesting features like a kill
- * buffer and a command history.
- *
- * It also has a feature that the GNU readline does not (as far as I
- * know): several instances of it can run at the same time, even in a
- * single-threaded program, since it works with callbacks.
- *
- * The lib is designed for client-side or server-side use:
- * - server-side: the server receives all data from a socket, including
- *   control chars such as arrows and tabs. The client can be very
- *   simple, e.g. a telnet or minicom session over a serial line.
- * - client-side: the client receives its data through its stdin, for
- *   instance.
- */
-
-#include <cmdline_cirbuf.h>
-#include <cmdline_vt100.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* configuration */
-#define RDLINE_BUF_SIZE 256
-#define RDLINE_PROMPT_SIZE 32
-#define RDLINE_VT100_BUF_SIZE 8
-#define RDLINE_HISTORY_BUF_SIZE BUFSIZ
-#define RDLINE_HISTORY_MAX_LINE 64
-
-enum rdline_status {
- RDLINE_INIT,
- RDLINE_RUNNING,
- RDLINE_EXITED
-};
-
-struct rdline;
-
-typedef int (rdline_write_char_t)(struct rdline *rdl, char);
-typedef void (rdline_validate_t)(struct rdline *rdl,
- const char *buf, unsigned int size);
-typedef int (rdline_complete_t)(struct rdline *rdl, const char *buf,
- char *dstbuf, unsigned int dstsize,
- int *state);
-
-struct rdline {
- enum rdline_status status;
- /* rdline bufs */
- struct cirbuf left;
- struct cirbuf right;
- char left_buf[RDLINE_BUF_SIZE+2]; /* reserve 2 chars for the \n\0 */
- char right_buf[RDLINE_BUF_SIZE];
-
- char prompt[RDLINE_PROMPT_SIZE];
- unsigned int prompt_size;
-
- char kill_buf[RDLINE_BUF_SIZE];
- unsigned int kill_size;
-
- /* history */
- struct cirbuf history;
- char history_buf[RDLINE_HISTORY_BUF_SIZE];
- int history_cur_line;
-
- /* callbacks and func pointers */
- rdline_write_char_t *write_char;
- rdline_validate_t *validate;
- rdline_complete_t *complete;
-
- /* vt100 parser */
- struct cmdline_vt100 vt100;
-
- /* opaque pointer */
- void *opaque;
-};
-
-/**
- * Init fields for a struct rdline. Call this only once at the beginning
- * of your program.
- * \param rdl A pointer to an uninitialized struct rdline
- * \param write_char The function used by rdline to write a character
- * \param validate A pointer to the function to execute when the
- * user validates the buffer.
- * \param complete A pointer to the function to execute when the
- * user completes the buffer.
- */
-int rdline_init(struct rdline *rdl,
- rdline_write_char_t *write_char,
- rdline_validate_t *validate,
- rdline_complete_t *complete);
-
-
-/**
- * Init the current buffer, and display a prompt.
- * \param rdl A pointer to a struct rdline
- * \param prompt A string containing the prompt
- */
-void rdline_newline(struct rdline *rdl, const char *prompt);
-
-/**
- * Once called, all received chars will be ignored.
- * \param rdl A pointer to a struct rdline
- */
-void rdline_stop(struct rdline *rdl);
-
-/**
- * Same as rdline_stop(), except that subsequent calls to rdline_char_in()
- * will return RDLINE_RES_EXITED.
- * \param rdl A pointer to a struct rdline
- */
-void rdline_quit(struct rdline *rdl);
-
-/**
- * Restart after a call to rdline_stop() or rdline_quit()
- * \param rdl A pointer to a struct rdline
- */
-void rdline_restart(struct rdline *rdl);
-
-/**
- * Redisplay the current buffer
- * \param rdl A pointer to a struct rdline
- */
-void rdline_redisplay(struct rdline *rdl);
-
-/**
- * Reset the current buffer and setup for a new line.
- * \param rdl A pointer to a struct rdline
- */
-void rdline_reset(struct rdline *rdl);
-
-
-/* return status for rdline_char_in() */
-#define RDLINE_RES_SUCCESS 0
-#define RDLINE_RES_VALIDATED 1
-#define RDLINE_RES_COMPLETE 2
-#define RDLINE_RES_NOT_RUNNING -1
-#define RDLINE_RES_EOF -2
-#define RDLINE_RES_EXITED -3
-
-/**
- * Append a char to the readline buffer.
- * Return RDLINE_RES_VALIDATED when the line has been validated.
- * Return RDLINE_RES_COMPLETE when the user asked to complete the buffer.
- * Return RDLINE_RES_NOT_RUNNING if it is not running.
- * Return RDLINE_RES_EOF if EOF (ctrl-d on an empty line).
- * Else return RDLINE_RES_SUCCESS.
- * XXX error case when the buffer is full ?
- *
- * \param rdl A pointer to a struct rdline
- * \param c The character to append
- */
-int rdline_char_in(struct rdline *rdl, char c);
-
-/**
- * Return the current buffer, terminated by '\0'.
- * \param rdl A pointer to a struct rdline
- */
-const char *rdline_get_buffer(struct rdline *rdl);
-
-
-/**
- * Add the buffer to history.
- * Return < 0 on error.
- * \param rdl A pointer to a struct rdline
- * \param buf A buffer that is terminated by '\0'
- */
-int rdline_add_history(struct rdline *rdl, const char *buf);
-
-/**
- * Clear current history
- * \param rdl A pointer to a struct rdline
- */
-void rdline_clear_history(struct rdline *rdl);
-
-/**
- * Get the i-th history item
- */
-char *rdline_get_history_item(struct rdline *rdl, unsigned int i);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RDLINE_H_ */
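Since the library is callback-driven rather than blocking, the caller owns the input loop: each received byte goes through rdline_char_in(), whose return code says whether the line is complete. A minimal sketch feeding a canned string (stub callbacks as in the previous sketch; here the validate callback reports the finished line):

    #include <stdio.h>
    #include <cmdline_rdline.h>

    static int wr(struct rdline *rdl, char c)
    { (void)rdl; return putchar(c); }
    static void on_line(struct rdline *rdl, const char *buf, unsigned int size)
    { (void)rdl; printf("validated %u bytes: %s", size, buf); }
    static int cmpl(struct rdline *rdl, const char *buf, char *dst,
                    unsigned int dstsize, int *state)
    { (void)rdl; (void)buf; (void)dst; (void)dstsize; (void)state; return 0; }

    int main(void)
    {
        struct rdline rdl;
        const char *p;

        rdline_init(&rdl, wr, on_line, cmpl);
        rdline_newline(&rdl, "> ");
        for (p = "hello\n"; *p; p++)
            if (rdline_char_in(&rdl, *p) == RDLINE_RES_VALIDATED)
                break;              /* on_line() has already run */
        return 0;
    }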
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_socket.c b/src/dpdk_lib18/librte_cmdline/cmdline_socket.c
deleted file mode 100755
index 6820b6df..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_socket.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <inttypes.h>
-#include <fcntl.h>
-#include <termios.h>
-
-#include "cmdline_parse.h"
-#include "cmdline_rdline.h"
-#include "cmdline_socket.h"
-#include "cmdline.h"
-
-struct cmdline *
-cmdline_file_new(cmdline_parse_ctx_t *ctx, const char *prompt, const char *path)
-{
- int fd;
-
- /* everything else is checked in cmdline_new() */
- if (!path)
- return NULL;
-
- fd = open(path, O_RDONLY, 0);
- if (fd < 0) {
- dprintf("open() failed\n");
- return NULL;
- }
- return (cmdline_new(ctx, prompt, fd, -1));
-}
-
-struct cmdline *
-cmdline_stdin_new(cmdline_parse_ctx_t *ctx, const char *prompt)
-{
- struct cmdline *cl;
- struct termios oldterm, term;
-
- tcgetattr(0, &oldterm);
- memcpy(&term, &oldterm, sizeof(term));
- term.c_lflag &= ~(ICANON | ECHO | ISIG);
- tcsetattr(0, TCSANOW, &term);
- setbuf(stdin, NULL);
-
- cl = cmdline_new(ctx, prompt, 0, 1);
-
- if (cl)
- memcpy(&cl->oldterm, &oldterm, sizeof(term));
-
- return cl;
-}
-
-void
-cmdline_stdin_exit(struct cmdline *cl)
-{
- if (!cl)
- return;
-
- tcsetattr(fileno(stdin), TCSANOW, &cl->oldterm);
-}
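The two constructors above differ only in where bytes come from: cmdline_file_new() replays a command file, while cmdline_stdin_new() first switches the terminal to raw, no-echo mode (hence the termios snapshot restored by cmdline_stdin_exit()). A typical call sequence, assuming a parse context built elsewhere and cmdline_interact() from cmdline.h:

    #include <cmdline.h>
    #include <cmdline_socket.h>

    void run_cli(cmdline_parse_ctx_t *ctx)  /* ctx: command table, built elsewhere */
    {
        struct cmdline *cl = cmdline_stdin_new(ctx, "example> ");

        if (cl == NULL)
            return;
        cmdline_interact(cl);   /* loops on stdin until the user quits */
        cmdline_stdin_exit(cl); /* restore the saved terminal settings */
    }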
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_socket.h b/src/dpdk_lib18/librte_cmdline/cmdline_socket.h
deleted file mode 100755
index 8cc2dfbc..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_socket.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _CMDLINE_SOCKET_H_
-#define _CMDLINE_SOCKET_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct cmdline *cmdline_file_new(cmdline_parse_ctx_t *ctx, const char *prompt, const char *path);
-struct cmdline *cmdline_stdin_new(cmdline_parse_ctx_t *ctx, const char *prompt);
-void cmdline_stdin_exit(struct cmdline *cl);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _CMDLINE_SOCKET_H_ */
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_vt100.c b/src/dpdk_lib18/librte_cmdline/cmdline_vt100.c
deleted file mode 100755
index a253e8b6..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_vt100.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdlib.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <string.h>
-#include <stdarg.h>
-#include <ctype.h>
-#include <termios.h>
-
-#include "cmdline_vt100.h"
-
-const char *cmdline_vt100_commands[] = {
- vt100_up_arr,
- vt100_down_arr,
- vt100_right_arr,
- vt100_left_arr,
- "\177",
- "\n",
- "\001",
- "\005",
- "\013",
- "\031",
- "\003",
- "\006",
- "\002",
- vt100_suppr,
- vt100_tab,
- "\004",
- "\014",
- "\r",
- "\033\177",
- vt100_word_left,
- vt100_word_right,
- "?",
- "\027",
- "\020",
- "\016",
- "\033\144",
-};
-
-void
-vt100_init(struct cmdline_vt100 *vt)
-{
- if (!vt)
- return;
- vt->state = CMDLINE_VT100_INIT;
-}
-
-
-static int
-match_command(char *buf, unsigned int size)
-{
- const char *cmd;
- size_t cmdlen;
- unsigned int i = 0;
-
- for (i=0 ; i<sizeof(cmdline_vt100_commands)/sizeof(const char *) ; i++) {
- cmd = *(cmdline_vt100_commands + i);
-
- cmdlen = strnlen(cmd, CMDLINE_VT100_BUF_SIZE);
- if (size == cmdlen &&
- !strncmp(buf, cmd, cmdlen)) {
- return i;
- }
- }
-
- return -1;
-}
-
-int
-vt100_parser(struct cmdline_vt100 *vt, char ch)
-{
- unsigned int size;
- uint8_t c = (uint8_t) ch;
-
- if (!vt)
- return -1;
-
- if (vt->bufpos >= CMDLINE_VT100_BUF_SIZE) {
- vt->state = CMDLINE_VT100_INIT;
- vt->bufpos = 0;
- }
-
- vt->buf[vt->bufpos++] = c;
- size = vt->bufpos;
-
- switch (vt->state) {
- case CMDLINE_VT100_INIT:
- if (c == 033) {
- vt->state = CMDLINE_VT100_ESCAPE;
- }
- else {
- vt->bufpos = 0;
- goto match_command;
- }
- break;
-
- case CMDLINE_VT100_ESCAPE:
- if (c == 0133) {
- vt->state = CMDLINE_VT100_ESCAPE_CSI;
- }
- else if (c >= 060 && c <= 0177) { /* XXX 0177 ? */
- vt->bufpos = 0;
- vt->state = CMDLINE_VT100_INIT;
- goto match_command;
- }
- break;
-
- case CMDLINE_VT100_ESCAPE_CSI:
- if (c >= 0100 && c <= 0176) {
- vt->bufpos = 0;
- vt->state = CMDLINE_VT100_INIT;
- goto match_command;
- }
- break;
-
- default:
- vt->bufpos = 0;
- break;
- }
-
- return -2;
-
- match_command:
- return match_command(vt->buf, size);
-}
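The parser above is a three-state machine over an 8-byte buffer: each byte either completes a known sequence (returning its index in cmdline_vt100_commands[]), fails to match (-1), or asks for more input (-2). A minimal sketch feeding the up-arrow escape sequence byte by byte (the struct is zero-initialized because vt100_init() only resets the state field):

    #include <stdio.h>
    #include <stdint.h>
    #include <cmdline_vt100.h>

    int main(void)
    {
        struct cmdline_vt100 vt = { 0 };  /* ensures bufpos starts at 0 */
        const char *seq = vt100_up_arr;   /* "\033[A" */
        int cmd = -2;

        vt100_init(&vt);
        while (*seq && cmd == -2)
            cmd = vt100_parser(&vt, *seq++);

        if (cmd == CMDLINE_KEY_UP_ARR)
            printf("up arrow recognized\n");
        return 0;
    }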
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_vt100.h b/src/dpdk_lib18/librte_cmdline/cmdline_vt100.h
deleted file mode 100755
index b9840f6c..00000000
--- a/src/dpdk_lib18/librte_cmdline/cmdline_vt100.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
- * All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the University of California, Berkeley nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _CMDLINE_VT100_H_
-#define _CMDLINE_VT100_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define vt100_bell "\007"
-#define vt100_bs "\010"
-#define vt100_bs_clear "\010 \010"
-#define vt100_tab "\011"
-#define vt100_crnl "\012\015"
-#define vt100_clear_right "\033[0K"
-#define vt100_clear_left "\033[1K"
-#define vt100_clear_down "\033[0J"
-#define vt100_clear_up "\033[1J"
-#define vt100_clear_line "\033[2K"
-#define vt100_clear_screen "\033[2J"
-#define vt100_up_arr "\033\133\101"
-#define vt100_down_arr "\033\133\102"
-#define vt100_right_arr "\033\133\103"
-#define vt100_left_arr "\033\133\104"
-#define vt100_multi_right "\033\133%uC"
-#define vt100_multi_left "\033\133%uD"
-#define vt100_suppr "\033\133\063\176"
-#define vt100_home "\033M\033E"
-#define vt100_word_left "\033\142"
-#define vt100_word_right "\033\146"
-
-/* Result of parsing: it must be synchronized with
- * cmdline_vt100_commands[] in cmdline_vt100.c */
-#define CMDLINE_KEY_UP_ARR 0
-#define CMDLINE_KEY_DOWN_ARR 1
-#define CMDLINE_KEY_RIGHT_ARR 2
-#define CMDLINE_KEY_LEFT_ARR 3
-#define CMDLINE_KEY_BKSPACE 4
-#define CMDLINE_KEY_RETURN 5
-#define CMDLINE_KEY_CTRL_A 6
-#define CMDLINE_KEY_CTRL_E 7
-#define CMDLINE_KEY_CTRL_K 8
-#define CMDLINE_KEY_CTRL_Y 9
-#define CMDLINE_KEY_CTRL_C 10
-#define CMDLINE_KEY_CTRL_F 11
-#define CMDLINE_KEY_CTRL_B 12
-#define CMDLINE_KEY_SUPPR 13
-#define CMDLINE_KEY_TAB 14
-#define CMDLINE_KEY_CTRL_D 15
-#define CMDLINE_KEY_CTRL_L 16
-#define CMDLINE_KEY_RETURN2 17
-#define CMDLINE_KEY_META_BKSPACE 18
-#define CMDLINE_KEY_WLEFT 19
-#define CMDLINE_KEY_WRIGHT 20
-#define CMDLINE_KEY_HELP 21
-#define CMDLINE_KEY_CTRL_W 22
-#define CMDLINE_KEY_CTRL_P 23
-#define CMDLINE_KEY_CTRL_N 24
-#define CMDLINE_KEY_META_D 25
-
-extern const char *cmdline_vt100_commands[];
-
-enum cmdline_vt100_parser_state {
- CMDLINE_VT100_INIT,
- CMDLINE_VT100_ESCAPE,
- CMDLINE_VT100_ESCAPE_CSI
-};
-
-#define CMDLINE_VT100_BUF_SIZE 8
-struct cmdline_vt100 {
- uint8_t bufpos;
- char buf[CMDLINE_VT100_BUF_SIZE];
- enum cmdline_vt100_parser_state state;
-};
-
-/**
- * Initialize the vt100 parser state.
- */
-void vt100_init(struct cmdline_vt100 *vt);
-
-/**
- * Input a new character.
- * Returns -1 if the character is not part of a control sequence.
- * Returns -2 if c is part of, but not the last char of, a control sequence.
- * Otherwise, returns the index in cmdline_vt100_commands[].
- */
-int vt100_parser(struct cmdline_vt100 *vt, char c);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
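A usage illustration (a sketch, not part of the original sources): the parser is driven one character at a time, and the three documented return classes are handled separately.

#include <stdio.h>
#include "cmdline_vt100.h"	/* the header above */

int main(void)
{
	struct cmdline_vt100 vt;
	int c, cmd;

	vt100_init(&vt);
	while ((c = getchar()) != EOF) {
		cmd = vt100_parser(&vt, (char)c);
		if (cmd == -1)
			printf("plain character: %c\n", (char)c);
		else if (cmd == -2)
			continue;	/* escape sequence not complete yet */
		else if (cmd == CMDLINE_KEY_UP_ARR)
			printf("up arrow\n");
		else
			printf("command index %d\n", cmd);	/* index into cmdline_vt100_commands[] */
	}
	return 0;
}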
diff --git a/src/dpdk_lib18/librte_distributor/Makefile b/src/dpdk_lib18/librte_distributor/Makefile
deleted file mode 100755
index 36699f84..00000000
--- a/src/dpdk_lib18/librte_distributor/Makefile
+++ /dev/null
@@ -1,50 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_distributor.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) := rte_distributor.c
-
-# install this header file
-SYMLINK-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR)-include := rte_distributor.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += lib/librte_mbuf
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_distributor/rte_distributor.c b/src/dpdk_lib18/librte_distributor/rte_distributor.c
deleted file mode 100755
index e0fdb4c1..00000000
--- a/src/dpdk_lib18/librte_distributor/rte_distributor.c
+++ /dev/null
@@ -1,488 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <sys/queue.h>
-#include <string.h>
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_errno.h>
-#include <rte_string_fns.h>
-#include <rte_tailq.h>
-#include <rte_eal_memconfig.h>
-#include "rte_distributor.h"
-
-#define NO_FLAGS 0
-#define RTE_DISTRIB_PREFIX "DT_"
-
-/* We will use the bottom four bits of the pointer for flags, shifting out
- * the top four bits to make room (since a 64-bit pointer actually only uses
- * 48 bits). An arithmetic-right-shift will then appropriately restore the
- * original pointer value with proper sign extension into the top bits. */
-#define RTE_DISTRIB_FLAG_BITS 4
-#define RTE_DISTRIB_FLAGS_MASK (0x0F)
-#define RTE_DISTRIB_NO_BUF 0 /**< empty flags: no buffer requested */
-#define RTE_DISTRIB_GET_BUF (1) /**< worker requests a buffer, returns old */
-#define RTE_DISTRIB_RETURN_BUF (2) /**< worker returns a buffer, no request */
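As a worked example of the packing scheme above (a standalone sketch, with local copies of the two constants), a pointer and a flag round-trip through a single int64_t as follows; like the surrounding code, this relies on right shifts of signed values being arithmetic:

#include <assert.h>
#include <stdint.h>

#define FLAG_BITS	4	/* local copy of RTE_DISTRIB_FLAG_BITS */
#define FLAGS_MASK	0x0F	/* local copy of RTE_DISTRIB_FLAGS_MASK */

int main(void)
{
	void *pkt = (void *)(uintptr_t)0x7f0012345670;	/* example 48-bit address */

	/* pack: shift the pointer up, store the flag in the low bits */
	int64_t slot = (((int64_t)(uintptr_t)pkt) << FLAG_BITS) | 0x1;

	/* unpack: the arithmetic right shift restores the sign extension
	 * in the top bits, recovering the original pointer value */
	void *out = (void *)(uintptr_t)(slot >> FLAG_BITS);
	int flags = slot & FLAGS_MASK;

	assert(out == pkt && flags == 0x1);
	return 0;
}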
-
-#define RTE_DISTRIB_BACKLOG_SIZE 8
-#define RTE_DISTRIB_BACKLOG_MASK (RTE_DISTRIB_BACKLOG_SIZE - 1)
-
-#define RTE_DISTRIB_MAX_RETURNS 128
-#define RTE_DISTRIB_RETURNS_MASK (RTE_DISTRIB_MAX_RETURNS - 1)
-
-/**
- * Maximum number of workers allowed.
- * Take care when increasing this limit, because it is constrained by how we
- * track in-flight tags. See @in_flight_bitmask and @rte_distributor_process.
- */
-#define RTE_DISTRIB_MAX_WORKERS 64
-
-/**
- * Buffer structure used to pass the pointer data between cores. This is cache
- * line aligned, but to improve performance and prevent adjacent cache-line
- * prefetches of buffers for other workers, e.g. when worker 1's buffer is on
- * the next cache line to worker 0, we pad this out to three cache lines.
- * Only 64 bits of the memory are actually used, though.
- */
-union rte_distributor_buffer {
- volatile int64_t bufptr64;
- char pad[RTE_CACHE_LINE_SIZE*3];
-} __rte_cache_aligned;
-
-struct rte_distributor_backlog {
- unsigned start;
- unsigned count;
- int64_t pkts[RTE_DISTRIB_BACKLOG_SIZE];
-};
-
-struct rte_distributor_returned_pkts {
- unsigned start;
- unsigned count;
- struct rte_mbuf *mbufs[RTE_DISTRIB_MAX_RETURNS];
-};
-
-struct rte_distributor {
- TAILQ_ENTRY(rte_distributor) next; /**< Next in list. */
-
- char name[RTE_DISTRIBUTOR_NAMESIZE]; /**< Name of the ring. */
- unsigned num_workers; /**< Number of workers polling */
-
- uint32_t in_flight_tags[RTE_DISTRIB_MAX_WORKERS];
- /**< Tracks the tag being processed per core */
- uint64_t in_flight_bitmask;
- /**< on/off bits for in-flight tags.
- * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64 then
- * the bitmask has to expand.
- */
-
- struct rte_distributor_backlog backlog[RTE_DISTRIB_MAX_WORKERS];
-
- union rte_distributor_buffer bufs[RTE_DISTRIB_MAX_WORKERS];
-
- struct rte_distributor_returned_pkts returns;
-};
-
-TAILQ_HEAD(rte_distributor_list, rte_distributor);
-
-/**** APIs called by workers ****/
-
-void
-rte_distributor_request_pkt(struct rte_distributor *d,
- unsigned worker_id, struct rte_mbuf *oldpkt)
-{
- union rte_distributor_buffer *buf = &d->bufs[worker_id];
- int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
- | RTE_DISTRIB_GET_BUF;
- while (unlikely(buf->bufptr64 & RTE_DISTRIB_FLAGS_MASK))
- rte_pause();
- buf->bufptr64 = req;
-}
-
-struct rte_mbuf *
-rte_distributor_poll_pkt(struct rte_distributor *d,
- unsigned worker_id)
-{
- union rte_distributor_buffer *buf = &d->bufs[worker_id];
- if (buf->bufptr64 & RTE_DISTRIB_GET_BUF)
- return NULL;
-
- /* since bufptr64 is signed, this should be an arithmetic shift */
- int64_t ret = buf->bufptr64 >> RTE_DISTRIB_FLAG_BITS;
- return (struct rte_mbuf *)((uintptr_t)ret);
-}
-
-struct rte_mbuf *
-rte_distributor_get_pkt(struct rte_distributor *d,
- unsigned worker_id, struct rte_mbuf *oldpkt)
-{
- struct rte_mbuf *ret;
- rte_distributor_request_pkt(d, worker_id, oldpkt);
- while ((ret = rte_distributor_poll_pkt(d, worker_id)) == NULL)
- rte_pause();
- return ret;
-}
-
-int
-rte_distributor_return_pkt(struct rte_distributor *d,
- unsigned worker_id, struct rte_mbuf *oldpkt)
-{
- union rte_distributor_buffer *buf = &d->bufs[worker_id];
- uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
- | RTE_DISTRIB_RETURN_BUF;
- buf->bufptr64 = req;
- return 0;
-}
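A sketch of the worker-side call pattern these three functions support; d, worker_id and the quit flag are assumed to come from the application:

#include <rte_distributor.h>

/* hypothetical worker body */
static void
worker_loop(struct rte_distributor *d, unsigned worker_id, volatile int *quit)
{
	struct rte_mbuf *pkt = NULL;

	while (!*quit) {
		/* hand back the previous packet (if any) and spin until
		 * the distributor assigns a new one */
		pkt = rte_distributor_get_pkt(d, worker_id, pkt);
		/* ... process pkt here ... */
	}
	/* on shutdown, return the last packet without requesting another */
	rte_distributor_return_pkt(d, worker_id, pkt);
}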
-
-/**** APIs called on distributor core ***/
-
-/* As the name suggests, adds a packet to the backlog for a particular worker */
-static int
-add_to_backlog(struct rte_distributor_backlog *bl, int64_t item)
-{
- if (bl->count == RTE_DISTRIB_BACKLOG_SIZE)
- return -1;
-
- bl->pkts[(bl->start + bl->count++) & (RTE_DISTRIB_BACKLOG_MASK)]
- = item;
- return 0;
-}
-
-/* takes the next packet for a worker off the backlog */
-static int64_t
-backlog_pop(struct rte_distributor_backlog *bl)
-{
- bl->count--;
- return bl->pkts[bl->start++ & RTE_DISTRIB_BACKLOG_MASK];
-}
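Both the backlog and the returns array depend on their sizes being powers of two, so wrapping an index is a bitwise AND with size-1 instead of a modulo. A self-contained sketch of the idiom:

#include <assert.h>

#define RING_SIZE 8			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

int main(void)
{
	unsigned start = 6, count = 0;
	int ring[RING_SIZE];

	/* push three items; index 6, then 7, then wrap to 0 */
	for (int i = 0; i < 3; i++)
		ring[(start + count++) & RING_MASK] = i;

	assert(ring[6] == 0 && ring[7] == 1 && ring[0] == 2);
	return 0;
}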
-
-/* stores a packet returned from a worker inside the returns array */
-static inline void
-store_return(uintptr_t oldbuf, struct rte_distributor *d,
- unsigned *ret_start, unsigned *ret_count)
-{
- /* store returns in a circular buffer - code is branch-free */
- d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
- = (void *)oldbuf;
- *ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
- *ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
-}
-
-static inline void
-handle_worker_shutdown(struct rte_distributor *d, unsigned wkr)
-{
- d->in_flight_tags[wkr] = 0;
- d->in_flight_bitmask &= ~(1UL << wkr);
- d->bufs[wkr].bufptr64 = 0;
- if (unlikely(d->backlog[wkr].count != 0)) {
- /* On return of a packet, we need to move the
- * queued packets for this core elsewhere.
- * Easiest solution is to set things up for
-		 * The easiest solution is to set things up for
- * packets to be queued up for the next free
- * core, i.e. it will return as soon as a
- * core becomes free to accept the first
- * packet, as subsequent ones will be added to
- * the backlog for that core.
- */
- struct rte_mbuf *pkts[RTE_DISTRIB_BACKLOG_SIZE];
- unsigned i;
- struct rte_distributor_backlog *bl = &d->backlog[wkr];
-
- for (i = 0; i < bl->count; i++) {
- unsigned idx = (bl->start + i) &
- RTE_DISTRIB_BACKLOG_MASK;
- pkts[i] = (void *)((uintptr_t)(bl->pkts[idx] >>
- RTE_DISTRIB_FLAG_BITS));
- }
- /* recursive call.
- * Note that the tags were set before first level call
- * to rte_distributor_process.
- */
- rte_distributor_process(d, pkts, i);
- bl->count = bl->start = 0;
- }
-}
-
-/* This function is called when process() is invoked without any new
- * packets. It goes through all the workers and collects any returned packets
- * to do a partial flush.
- */
-static int
-process_returns(struct rte_distributor *d)
-{
- unsigned wkr;
- unsigned flushed = 0;
- unsigned ret_start = d->returns.start,
- ret_count = d->returns.count;
-
- for (wkr = 0; wkr < d->num_workers; wkr++) {
-
- const int64_t data = d->bufs[wkr].bufptr64;
- uintptr_t oldbuf = 0;
-
- if (data & RTE_DISTRIB_GET_BUF) {
- flushed++;
- if (d->backlog[wkr].count)
- d->bufs[wkr].bufptr64 =
- backlog_pop(&d->backlog[wkr]);
- else {
- d->bufs[wkr].bufptr64 = RTE_DISTRIB_GET_BUF;
- d->in_flight_tags[wkr] = 0;
- d->in_flight_bitmask &= ~(1UL << wkr);
- }
- oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
- } else if (data & RTE_DISTRIB_RETURN_BUF) {
- handle_worker_shutdown(d, wkr);
- oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
- }
-
- store_return(oldbuf, d, &ret_start, &ret_count);
- }
-
- d->returns.start = ret_start;
- d->returns.count = ret_count;
-
- return flushed;
-}
-
-/* process a set of packets to distribute them to workers */
-int
-rte_distributor_process(struct rte_distributor *d,
- struct rte_mbuf **mbufs, unsigned num_mbufs)
-{
- unsigned next_idx = 0;
- unsigned wkr = 0;
- struct rte_mbuf *next_mb = NULL;
- int64_t next_value = 0;
- uint32_t new_tag = 0;
- unsigned ret_start = d->returns.start,
- ret_count = d->returns.count;
-
- if (unlikely(num_mbufs == 0))
- return process_returns(d);
-
- while (next_idx < num_mbufs || next_mb != NULL) {
-
- int64_t data = d->bufs[wkr].bufptr64;
- uintptr_t oldbuf = 0;
-
- if (!next_mb) {
- next_mb = mbufs[next_idx++];
- next_value = (((int64_t)(uintptr_t)next_mb)
- << RTE_DISTRIB_FLAG_BITS);
- /*
-			 * Users are advised to set the tag value for each
-			 * mbuf before calling rte_distributor_process.
-			 * User-defined tags are used to identify flows
-			 * or sessions.
- */
- new_tag = next_mb->hash.usr;
-
- /*
- * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64
- * then the size of match has to be expanded.
- */
- uint64_t match = 0;
- unsigned i;
- /*
-			 * To scan for a match, use "xor" and "not" to get a 0/1
-			 * value, then shift and OR to merge into a single "match"
-			 * variable, where a set bit indicates a match for the
-			 * worker given by the bit position.
- */
- for (i = 0; i < d->num_workers; i++)
- match |= (!(d->in_flight_tags[i] ^ new_tag)
- << i);
-
-			/* Only workers with an in-flight tag can count as a match */
- match &= d->in_flight_bitmask;
-
- if (match) {
- next_mb = NULL;
- unsigned worker = __builtin_ctzl(match);
- if (add_to_backlog(&d->backlog[worker],
- next_value) < 0)
- next_idx--;
- }
- }
-
- if ((data & RTE_DISTRIB_GET_BUF) &&
- (d->backlog[wkr].count || next_mb)) {
-
- if (d->backlog[wkr].count)
- d->bufs[wkr].bufptr64 =
- backlog_pop(&d->backlog[wkr]);
-
- else {
- d->bufs[wkr].bufptr64 = next_value;
- d->in_flight_tags[wkr] = new_tag;
- d->in_flight_bitmask |= (1UL << wkr);
- next_mb = NULL;
- }
- oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
- } else if (data & RTE_DISTRIB_RETURN_BUF) {
- handle_worker_shutdown(d, wkr);
- oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
- }
-
- /* store returns in a circular buffer */
- store_return(oldbuf, d, &ret_start, &ret_count);
-
- if (++wkr == d->num_workers)
- wkr = 0;
- }
- /* to finish, check all workers for backlog and schedule work for them
- * if they are ready */
- for (wkr = 0; wkr < d->num_workers; wkr++)
- if (d->backlog[wkr].count &&
- (d->bufs[wkr].bufptr64 & RTE_DISTRIB_GET_BUF)) {
-
- int64_t oldbuf = d->bufs[wkr].bufptr64 >>
- RTE_DISTRIB_FLAG_BITS;
- store_return(oldbuf, d, &ret_start, &ret_count);
-
- d->bufs[wkr].bufptr64 = backlog_pop(&d->backlog[wkr]);
- }
-
- d->returns.start = ret_start;
- d->returns.count = ret_count;
- return num_mbufs;
-}
-
-/* return to the caller, packets returned from workers */
-int
-rte_distributor_returned_pkts(struct rte_distributor *d,
- struct rte_mbuf **mbufs, unsigned max_mbufs)
-{
- struct rte_distributor_returned_pkts *returns = &d->returns;
- unsigned retval = (max_mbufs < returns->count) ?
- max_mbufs : returns->count;
- unsigned i;
-
- for (i = 0; i < retval; i++) {
- unsigned idx = (returns->start + i) & RTE_DISTRIB_RETURNS_MASK;
- mbufs[i] = returns->mbufs[idx];
- }
- returns->start += i;
- returns->count -= i;
-
- return retval;
-}
-
-/* Return the number of packets in flight in a distributor, i.e. packets
- * being worked on or queued up in a backlog. */
-static inline unsigned
-total_outstanding(const struct rte_distributor *d)
-{
- unsigned wkr, total_outstanding;
-
- total_outstanding = __builtin_popcountl(d->in_flight_bitmask);
-
- for (wkr = 0; wkr < d->num_workers; wkr++)
- total_outstanding += d->backlog[wkr].count;
-
- return total_outstanding;
-}
-
-/* flush the distributor, so that there are no outstanding packets in flight or
- * queued up. */
-int
-rte_distributor_flush(struct rte_distributor *d)
-{
- const unsigned flushed = total_outstanding(d);
-
- while (total_outstanding(d) > 0)
- rte_distributor_process(d, NULL, 0);
-
- return flushed;
-}
-
-/* clears the internal returns array in the distributor */
-void
-rte_distributor_clear_returns(struct rte_distributor *d)
-{
- d->returns.start = d->returns.count = 0;
-#ifndef __OPTIMIZE__
- memset(d->returns.mbufs, 0, sizeof(d->returns.mbufs));
-#endif
-}
-
-/* creates a distributor instance */
-struct rte_distributor *
-rte_distributor_create(const char *name,
- unsigned socket_id,
- unsigned num_workers)
-{
- struct rte_distributor *d;
- struct rte_distributor_list *distributor_list;
- char mz_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- /* compilation-time checks */
- RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
- RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
- RTE_BUILD_BUG_ON(RTE_DISTRIB_MAX_WORKERS >
- sizeof(d->in_flight_bitmask) * CHAR_BIT);
-
- if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
- rte_errno = EINVAL;
- return NULL;
- }
-
- /* check that we have an initialised tail queue */
- distributor_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_DISTRIBUTOR,
- rte_distributor_list);
- if (distributor_list == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
-
- snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
- mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
- if (mz == NULL) {
- rte_errno = ENOMEM;
- return NULL;
- }
-
- d = mz->addr;
- snprintf(d->name, sizeof(d->name), "%s", name);
- d->num_workers = num_workers;
-
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
- TAILQ_INSERT_TAIL(distributor_list, d, next);
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
-
- return d;
-}
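A sketch of the distributor-core side tying the lifecycle functions together; rx_burst() is a placeholder for the application's packet source:

#include <rte_distributor.h>

/* placeholder for the application's packet source */
extern unsigned rx_burst(struct rte_mbuf **bufs, unsigned n);

#define BURST 32

static struct rte_distributor *
run_distributor(const char *name, unsigned socket_id, unsigned num_workers,
		volatile int *quit)
{
	struct rte_mbuf *bufs[BURST], *done[BURST];
	struct rte_distributor *d;

	d = rte_distributor_create(name, socket_id, num_workers);
	if (d == NULL)
		return NULL;	/* rte_errno holds the reason */

	while (!*quit) {
		unsigned n = rx_burst(bufs, BURST);
		/* tags (mbuf->hash.usr) should already be set per flow */
		rte_distributor_process(d, bufs, n);
		unsigned nb_ret = rte_distributor_returned_pkts(d, done, BURST);
		/* ... free or forward the nb_ret completed mbufs ... */
		(void)nb_ret;
	}
	rte_distributor_flush(d);	/* drain in-flight work before teardown */
	return d;
}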
diff --git a/src/dpdk_lib18/librte_eal/Makefile b/src/dpdk_lib18/librte_eal/Makefile
deleted file mode 100755
index 69003cfe..00000000
--- a/src/dpdk_lib18/librte_eal/Makefile
+++ /dev/null
@@ -1,39 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += common
-DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += linuxapp
-DIRS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += common
-DIRS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += bsdapp
-
-include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/Makefile b/src/dpdk_lib18/librte_eal/bsdapp/Makefile
deleted file mode 100755
index 57548203..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/Makefile
+++ /dev/null
@@ -1,38 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-DIRS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal
-DIRS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += contigmem
-DIRS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += nic_uio
-
-include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/contigmem/BSDmakefile b/src/dpdk_lib18/librte_eal/bsdapp/contigmem/BSDmakefile
deleted file mode 100755
index f64374c6..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/contigmem/BSDmakefile
+++ /dev/null
@@ -1,36 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-KMOD= contigmem
-SRCS= contigmem.c device_if.h bus_if.h
-
-.include <bsd.kmod.mk>
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/contigmem/Makefile b/src/dpdk_lib18/librte_eal/bsdapp/contigmem/Makefile
deleted file mode 100755
index bab005fd..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/contigmem/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# module name and path
-#
-MODULE = contigmem
-
-#
-# CFLAGS
-#
-MODULE_CFLAGS += -I$(SRCDIR)
-MODULE_CFLAGS += -I$(RTE_OUTPUT)/include
-MODULE_CFLAGS += -Winline -Wall -Werror
-MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
-
-#
-# all source are stored in SRCS-y
-#
-SRCS-y := contigmem.c
-
-include $(RTE_SDK)/mk/rte.bsdmodule.mk
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/contigmem/contigmem.c b/src/dpdk_lib18/librte_eal/bsdapp/contigmem/contigmem.c
deleted file mode 100755
index b1a23fa6..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/contigmem/contigmem.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/param.h>
-#include <sys/bio.h>
-#include <sys/bus.h>
-#include <sys/conf.h>
-#include <sys/kernel.h>
-#include <sys/malloc.h>
-#include <sys/module.h>
-#include <sys/proc.h>
-#include <sys/rwlock.h>
-#include <sys/systm.h>
-#include <sys/sysctl.h>
-
-#include <machine/bus.h>
-
-#include <vm/vm.h>
-#include <vm/pmap.h>
-#include <vm/vm_object.h>
-#include <vm/vm_page.h>
-#include <vm/vm_pager.h>
-
-static int contigmem_load(void);
-static int contigmem_unload(void);
-static int contigmem_physaddr(SYSCTL_HANDLER_ARGS);
-
-static d_mmap_t contigmem_mmap;
-static d_mmap_single_t contigmem_mmap_single;
-static d_open_t contigmem_open;
-
-static int contigmem_num_buffers = RTE_CONTIGMEM_DEFAULT_NUM_BUFS;
-static int contigmem_buffer_size = RTE_CONTIGMEM_DEFAULT_BUF_SIZE;
-
-static eventhandler_tag contigmem_eh_tag;
-static void *contigmem_buffers[RTE_CONTIGMEM_MAX_NUM_BUFS];
-static struct cdev *contigmem_cdev = NULL;
-
-TUNABLE_INT("hw.contigmem.num_buffers", &contigmem_num_buffers);
-TUNABLE_INT("hw.contigmem.buffer_size", &contigmem_buffer_size);
-
-static SYSCTL_NODE(_hw, OID_AUTO, contigmem, CTLFLAG_RD, 0, "contigmem");
-
-SYSCTL_INT(_hw_contigmem, OID_AUTO, num_buffers, CTLFLAG_RD,
- &contigmem_num_buffers, 0, "Number of contigmem buffers allocated");
-SYSCTL_INT(_hw_contigmem, OID_AUTO, buffer_size, CTLFLAG_RD,
- &contigmem_buffer_size, 0, "Size of each contiguous buffer");
-
-static SYSCTL_NODE(_hw_contigmem, OID_AUTO, physaddr, CTLFLAG_RD, 0,
- "physaddr");
-
-MALLOC_DEFINE(M_CONTIGMEM, "contigmem", "contigmem(4) allocations");
-
-static int contigmem_modevent(module_t mod, int type, void *arg)
-{
- int error = 0;
-
- switch (type) {
- case MOD_LOAD:
- error = contigmem_load();
- break;
- case MOD_UNLOAD:
- error = contigmem_unload();
- break;
- default:
- break;
- }
-
- return (error);
-}
-
-moduledata_t contigmem_mod = {
- "contigmem",
- (modeventhand_t)contigmem_modevent,
- 0
-};
-
-DECLARE_MODULE(contigmem, contigmem_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
-MODULE_VERSION(contigmem, 1);
-
-static struct cdevsw contigmem_ops = {
- .d_name = "contigmem",
- .d_version = D_VERSION,
- .d_mmap = contigmem_mmap,
- .d_mmap_single = contigmem_mmap_single,
- .d_open = contigmem_open,
-};
-
-static int
-contigmem_load()
-{
- char index_string[8], description[32];
- int i;
-
- if (contigmem_num_buffers > RTE_CONTIGMEM_MAX_NUM_BUFS) {
- printf("%d buffers requested is greater than %d allowed\n",
- contigmem_num_buffers, RTE_CONTIGMEM_MAX_NUM_BUFS);
- return (EINVAL);
- }
-
- if (contigmem_buffer_size < PAGE_SIZE ||
- (contigmem_buffer_size & (contigmem_buffer_size - 1)) != 0) {
-		printf("buffer size 0x%x must be at least PAGE_SIZE and a "
- "power of two\n", contigmem_buffer_size);
- return (EINVAL);
- }
-
- for (i = 0; i < contigmem_num_buffers; i++) {
- contigmem_buffers[i] =
- contigmalloc(contigmem_buffer_size, M_CONTIGMEM, M_ZERO, 0,
- BUS_SPACE_MAXADDR, contigmem_buffer_size, 0);
-
- if (contigmem_buffers[i] == NULL) {
- printf("contigmalloc failed for buffer %d\n", i);
- return (ENOMEM);
- }
-
- printf("%2u: virt=%p phys=%p\n", i, contigmem_buffers[i],
- (void *)pmap_kextract((vm_offset_t)contigmem_buffers[i]));
-
- snprintf(index_string, sizeof(index_string), "%d", i);
- snprintf(description, sizeof(description),
- "phys addr for buffer %d", i);
- SYSCTL_ADD_PROC(NULL,
- &SYSCTL_NODE_CHILDREN(_hw_contigmem, physaddr), OID_AUTO,
- index_string, CTLTYPE_U64 | CTLFLAG_RD,
- (void *)(uintptr_t)i, 0, contigmem_physaddr, "LU",
- description);
- }
-
- contigmem_cdev = make_dev_credf(0, &contigmem_ops, 0, NULL, UID_ROOT,
- GID_WHEEL, 0600, "contigmem");
-
- return (0);
-}
-
-static int
-contigmem_unload()
-{
- int i;
-
- if (contigmem_cdev != NULL)
- destroy_dev(contigmem_cdev);
-
- if (contigmem_eh_tag != NULL)
- EVENTHANDLER_DEREGISTER(process_exit, contigmem_eh_tag);
-
- for (i = 0; i < RTE_CONTIGMEM_MAX_NUM_BUFS; i++)
- if (contigmem_buffers[i] != NULL)
- contigfree(contigmem_buffers[i], contigmem_buffer_size,
- M_CONTIGMEM);
-
- return (0);
-}
-
-static int
-contigmem_physaddr(SYSCTL_HANDLER_ARGS)
-{
- uint64_t physaddr;
- int index = (int)(uintptr_t)arg1;
-
- physaddr = (uint64_t)vtophys(contigmem_buffers[index]);
- return (sysctl_handle_64(oidp, &physaddr, 0, req));
-}
-
-static int
-contigmem_open(struct cdev *cdev, int fflags, int devtype,
- struct thread *td)
-{
- return (0);
-}
-
-static int
-contigmem_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
- int prot, vm_memattr_t *memattr)
-{
-
- *paddr = offset;
- return (0);
-}
-
-static int
-contigmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
- struct vm_object **obj, int nprot)
-{
- /*
- * The buffer index is encoded in the offset. Divide the offset by
- * PAGE_SIZE to get the index of the buffer requested by the user
- * app.
- */
- if ((*offset/PAGE_SIZE) >= contigmem_num_buffers)
- return (EINVAL);
-
- *offset = (vm_ooffset_t)vtophys(contigmem_buffers[*offset/PAGE_SIZE]);
- *obj = vm_pager_allocate(OBJT_DEVICE, cdev, size, nprot, *offset,
- curthread->td_ucred);
-
- return (0);
-}
-
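From user space, the index-in-offset convention above means buffer i is requested by passing i * PAGE_SIZE as the mmap offset. A hedged sketch (in practice buffer_size comes from the hw.contigmem.buffer_size sysctl rather than being hard-coded):

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t buffer_size = 1 << 20;	/* assumption: matches the sysctl */
	int fd = open("/dev/contigmem", O_RDWR);
	if (fd < 0)
		return 1;

	/* buffer index 0 is encoded as offset 0 * PAGE_SIZE */
	void *va = mmap(NULL, buffer_size, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0 * page);
	if (va == MAP_FAILED)
		return 1;

	printf("mapped buffer 0 at %p\n", va);
	munmap(va, buffer_size);
	close(fd);
	return 0;
}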
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/Makefile b/src/dpdk_lib18/librte_eal/bsdapp/eal/Makefile
deleted file mode 100755
index d4348822..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/eal/Makefile
+++ /dev/null
@@ -1,97 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-LIB = librte_eal.a
-
-VPATH += $(RTE_SDK)/lib/librte_eal/common
-
-CFLAGS += -I$(SRCDIR)/include
-CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
-CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
-CFLAGS += -I$(RTE_SDK)/lib/librte_ring
-CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
-CFLAGS += -I$(RTE_SDK)/lib/librte_malloc
-CFLAGS += -I$(RTE_SDK)/lib/librte_ether
-CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_ring
-CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_pcap
-CFLAGS += $(WERROR_FLAGS) -O3
-
-# specific to bsdapp exec-env
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) := eal.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_memory.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_hugepage_info.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_thread.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_log.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_pci.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_debug.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_lcore.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_timer.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_interrupts.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_alarm.c
-
-# from common dir
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_memzone.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_log.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_launch.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_pci.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_memory.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_tailqs.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_errno.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_cpuflags.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_string_fns.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_hexdump.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_devargs.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_dev.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_options.c
-
-CFLAGS_eal.o := -D_GNU_SOURCE
-#CFLAGS_eal_thread.o := -D_GNU_SOURCE
-CFLAGS_eal_log.o := -D_GNU_SOURCE
-CFLAGS_eal_common_log.o := -D_GNU_SOURCE
-
-# workaround for a gcc bug with noreturn attribute
-# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
-ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
-CFLAGS_eal_thread.o += -Wno-return-type
-CFLAGS_eal_hpet.o += -Wno-return-type
-endif
-
-INC := rte_interrupts.h
-
-SYMLINK-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP)-include/exec-env := \
- $(addprefix include/exec-env/,$(INC))
-
-DEPDIRS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += lib/librte_eal/common
-
-include $(RTE_SDK)/mk/rte.lib.mk
-
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal.c
deleted file mode 100755
index 69f3c034..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal.c
+++ /dev/null
@@ -1,563 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 6WIND S.A.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <string.h>
-#include <stdarg.h>
-#include <unistd.h>
-#include <pthread.h>
-#include <syslog.h>
-#include <getopt.h>
-#include <sys/file.h>
-#include <stddef.h>
-#include <errno.h>
-#include <limits.h>
-#include <sys/mman.h>
-#include <sys/queue.h>
-
-#include <rte_common.h>
-#include <rte_debug.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_launch.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_eal_memconfig.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_log.h>
-#include <rte_random.h>
-#include <rte_cycles.h>
-#include <rte_string_fns.h>
-#include <rte_cpuflags.h>
-#include <rte_interrupts.h>
-#include <rte_pci.h>
-#include <rte_dev.h>
-#include <rte_devargs.h>
-#include <rte_version.h>
-#include <rte_atomic.h>
-#include <malloc_heap.h>
-#include <rte_eth_ring.h>
-
-#include "eal_private.h"
-#include "eal_thread.h"
-#include "eal_internal_cfg.h"
-#include "eal_filesystem.h"
-#include "eal_hugepages.h"
-#include "eal_options.h"
-
-#define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL)
-
-/* Allow the application to print its usage message too if set */
-static rte_usage_hook_t rte_application_usage_hook = NULL;
-/* early configuration structure, when memory config is not mmapped */
-static struct rte_mem_config early_mem_config;
-
-/* Define the fd variable here, because the file needs to be kept open for the
- * duration of the program, as we hold a write lock on it in the primary process */
-static int mem_cfg_fd = -1;
-
-static struct flock wr_lock = {
- .l_type = F_WRLCK,
- .l_whence = SEEK_SET,
- .l_start = offsetof(struct rte_mem_config, memseg),
- .l_len = sizeof(early_mem_config.memseg),
-};
-
-/* Address of global and public configuration */
-static struct rte_config rte_config = {
- .mem_config = &early_mem_config,
-};
-
-/* internal configuration (per-core) */
-struct lcore_config lcore_config[RTE_MAX_LCORE];
-
-/* internal configuration */
-struct internal_config internal_config;
-
-/* used by rte_rdtsc() */
-int rte_cycles_vmware_tsc_map;
-
-/* Return a pointer to the configuration structure */
-struct rte_config *
-rte_eal_get_configuration(void)
-{
- return &rte_config;
-}
-
-/* parse a sysfs (or other) file containing one integer value */
-int
-eal_parse_sysfs_value(const char *filename, unsigned long *val)
-{
- FILE *f;
- char buf[BUFSIZ];
- char *end = NULL;
-
- if ((f = fopen(filename, "r")) == NULL) {
- RTE_LOG(ERR, EAL, "%s(): cannot open sysfs value %s\n",
- __func__, filename);
- return -1;
- }
-
- if (fgets(buf, sizeof(buf), f) == NULL) {
- RTE_LOG(ERR, EAL, "%s(): cannot read sysfs value %s\n",
- __func__, filename);
- fclose(f);
- return -1;
- }
- *val = strtoul(buf, &end, 0);
- if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
- RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs value %s\n",
- __func__, filename);
- fclose(f);
- return -1;
- }
- fclose(f);
- return 0;
-}
-
-
-/* create memory configuration in shared/mmap memory. Take out
- * a write lock on the memsegs, so we can auto-detect primary/secondary.
- * This means we never close the file while running (auto-close on exit).
- * We also don't lock the whole file, so that in the future we can use read-locks
- * on other parts, e.g. memzones, to detect if there are running secondary
- * processes. */
-static void
-rte_eal_config_create(void)
-{
- void *rte_mem_cfg_addr;
- int retval;
-
- const char *pathname = eal_runtime_config_path();
-
- if (internal_config.no_shconf)
- return;
-
- if (mem_cfg_fd < 0){
- mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
- if (mem_cfg_fd < 0)
- rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
- }
-
- retval = ftruncate(mem_cfg_fd, sizeof(*rte_config.mem_config));
- if (retval < 0){
- close(mem_cfg_fd);
- rte_panic("Cannot resize '%s' for rte_mem_config\n", pathname);
- }
-
- retval = fcntl(mem_cfg_fd, F_SETLK, &wr_lock);
- if (retval < 0){
- close(mem_cfg_fd);
- rte_exit(EXIT_FAILURE, "Cannot create lock on '%s'. Is another primary "
- "process running?\n", pathname);
- }
-
- rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
- PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
-
- if (rte_mem_cfg_addr == MAP_FAILED){
- rte_panic("Cannot mmap memory for rte_config\n");
- }
- memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
- rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;
-}
-
-/* attach to an existing shared memory config */
-static void
-rte_eal_config_attach(void)
-{
- void *rte_mem_cfg_addr;
- const char *pathname = eal_runtime_config_path();
-
- if (internal_config.no_shconf)
- return;
-
- if (mem_cfg_fd < 0){
- mem_cfg_fd = open(pathname, O_RDWR);
- if (mem_cfg_fd < 0)
- rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
- }
-
- rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
- PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
- close(mem_cfg_fd);
- if (rte_mem_cfg_addr == MAP_FAILED)
- rte_panic("Cannot mmap memory for rte_config\n");
-
- rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;
-}
-
-/* Detect if we are a primary or a secondary process */
-enum rte_proc_type_t
-eal_proc_type_detect(void)
-{
- enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
- const char *pathname = eal_runtime_config_path();
-
- /* if we can open the file but not get a write-lock we are a secondary
- * process. NOTE: if we get a file handle back, we keep that open
- * and don't close it to prevent a race condition between multiple opens */
- if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) &&
- (fcntl(mem_cfg_fd, F_SETLK, &wr_lock) < 0))
- ptype = RTE_PROC_SECONDARY;
-
- RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n",
- ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY");
-
- return ptype;
-}
-
-/* Sets up rte_config structure with the pointer to shared memory config.*/
-static void
-rte_config_init(void)
-{
- rte_config.process_type = internal_config.process_type;
-
- switch (rte_config.process_type){
- case RTE_PROC_PRIMARY:
- rte_eal_config_create();
- break;
- case RTE_PROC_SECONDARY:
- rte_eal_config_attach();
- rte_eal_mcfg_wait_complete(rte_config.mem_config);
- break;
- case RTE_PROC_AUTO:
- case RTE_PROC_INVALID:
- rte_panic("Invalid process type\n");
- }
-}
-
-/* display usage */
-static void
-eal_usage(const char *prgname)
-{
- printf("\nUsage: %s ", prgname);
- eal_common_usage();
- /* Allow the application to print its usage message too if hook is set */
- if ( rte_application_usage_hook ) {
- printf("===== Application Usage =====\n\n");
- rte_application_usage_hook(prgname);
- }
-}
-
-/* Set a per-application usage message */
-rte_usage_hook_t
-rte_set_application_usage_hook( rte_usage_hook_t usage_func )
-{
- rte_usage_hook_t old_func;
-
- /* Will be NULL on the first call to denote the last usage routine. */
- old_func = rte_application_usage_hook;
- rte_application_usage_hook = usage_func;
-
- return old_func;
-}
-
-static inline size_t
-eal_get_hugepage_mem_size(void)
-{
- uint64_t size = 0;
- unsigned i, j;
-
- for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
- struct hugepage_info *hpi = &internal_config.hugepage_info[i];
- if (hpi->hugedir != NULL) {
- for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
- size += hpi->hugepage_sz * hpi->num_pages[j];
- }
- }
- }
-
- return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
-}
-
-/* Parse the argument given in the command line of the application */
-static int
-eal_parse_args(int argc, char **argv)
-{
- int opt, ret;
- char **argvopt;
- int option_index;
- char *prgname = argv[0];
-
- argvopt = argv;
-
- eal_reset_internal_config(&internal_config);
-
- while ((opt = getopt_long(argc, argvopt, eal_short_options,
- eal_long_options, &option_index)) != EOF) {
-
- int ret;
-
- /* getopt is not happy, stop right now */
- if (opt == '?')
- return -1;
-
- ret = eal_parse_common_option(opt, optarg, &internal_config);
- /* common parser is not happy */
- if (ret < 0) {
- eal_usage(prgname);
- return -1;
- }
- /* common parser handled this option */
- if (ret == 0)
- continue;
-
- switch (opt) {
- default:
- if (opt < OPT_LONG_MIN_NUM && isprint(opt)) {
- RTE_LOG(ERR, EAL, "Option %c is not supported "
- "on FreeBSD\n", opt);
- } else if (opt >= OPT_LONG_MIN_NUM &&
- opt < OPT_LONG_MAX_NUM) {
- RTE_LOG(ERR, EAL, "Option %s is not supported "
- "on FreeBSD\n",
- eal_long_options[option_index].name);
- } else {
- RTE_LOG(ERR, EAL, "Option %d is not supported "
- "on FreeBSD\n", opt);
- }
- eal_usage(prgname);
- return -1;
- }
- }
-
- if (eal_adjust_config(&internal_config) != 0)
- return -1;
-
- /* sanity checks */
- if (eal_check_common_options(&internal_config) != 0) {
- eal_usage(prgname);
- return -1;
- }
-
- if (optind >= 0)
- argv[optind-1] = prgname;
- ret = optind-1;
- optind = 0; /* reset getopt lib */
- return ret;
-}
-
-static void
-eal_check_mem_on_local_socket(void)
-{
- const struct rte_memseg *ms;
- int i, socket_id;
-
- socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
-
- ms = rte_eal_get_physmem_layout();
-
- for (i = 0; i < RTE_MAX_MEMSEG; i++)
- if (ms[i].socket_id == socket_id &&
- ms[i].len > 0)
- return;
-
- RTE_LOG(WARNING, EAL, "WARNING: Master core has no "
- "memory on local socket!\n");
-}
-
-static int
-sync_func(__attribute__((unused)) void *arg)
-{
- return 0;
-}
-
-inline static void
-rte_eal_mcfg_complete(void)
-{
- /* ALL shared mem_config related INIT DONE */
- if (rte_config.process_type == RTE_PROC_PRIMARY)
- rte_config.mem_config->magic = RTE_MAGIC;
-}
-
-/* return non-zero if hugepages are enabled. */
-int rte_eal_has_hugepages(void)
-{
- return !internal_config.no_hugetlbfs;
-}
-
-/* Abstraction for port I/O privilege */
-int
-rte_eal_iopl_init(void)
-{
- int fd = -1;
- fd = open("/dev/io", O_RDWR);
- if (fd < 0)
- return -1;
- close(fd);
- return 0;
-}
-
-/* Launch threads, called at application init(). */
-int
-rte_eal_init(int argc, char **argv)
-{
- int i, fctret, ret;
- pthread_t thread_id;
- static rte_atomic32_t run_once = RTE_ATOMIC32_INIT(0);
-
- if (!rte_atomic32_test_and_set(&run_once))
- return -1;
-
- thread_id = pthread_self();
-
- if (rte_eal_log_early_init() < 0)
- rte_panic("Cannot init early logs\n");
-
- if (rte_eal_cpu_init() < 0)
- rte_panic("Cannot detect lcores\n");
-
- fctret = eal_parse_args(argc, argv);
- if (fctret < 0)
- exit(1);
-
- /* set log level as early as possible */
- rte_set_log_level(internal_config.log_level);
-
- if (internal_config.no_hugetlbfs == 0 &&
- internal_config.process_type != RTE_PROC_SECONDARY &&
- eal_hugepage_info_init() < 0)
- rte_panic("Cannot get hugepage information\n");
-
- if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
- if (internal_config.no_hugetlbfs)
- internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
- else
- internal_config.memory = eal_get_hugepage_mem_size();
- }
-
- if (internal_config.vmware_tsc_map == 1) {
-#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
- rte_cycles_vmware_tsc_map = 1;
- RTE_LOG (DEBUG, EAL, "Using VMWARE TSC MAP, "
- "you must have monitor_control.pseudo_perfctr = TRUE\n");
-#else
- RTE_LOG (WARNING, EAL, "Ignoring --vmware-tsc-map because "
- "RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT is not set\n");
-#endif
- }
-
- rte_srand(rte_rdtsc());
-
- rte_config_init();
-
- if (rte_eal_memory_init() < 0)
- rte_panic("Cannot init memory\n");
-
- if (rte_eal_memzone_init() < 0)
- rte_panic("Cannot init memzone\n");
-
- if (rte_eal_tailqs_init() < 0)
- rte_panic("Cannot init tail queues for objects\n");
-
-/* if (rte_eal_log_init(argv[0], internal_config.syslog_facility) < 0)
- rte_panic("Cannot init logs\n");*/
-
- if (rte_eal_alarm_init() < 0)
- rte_panic("Cannot init interrupt-handling thread\n");
-
- if (rte_eal_intr_init() < 0)
- rte_panic("Cannot init interrupt-handling thread\n");
-
- if (rte_eal_timer_init() < 0)
- rte_panic("Cannot init HPET or TSC timers\n");
-
- if (rte_eal_pci_init() < 0)
- rte_panic("Cannot init PCI\n");
-
- RTE_LOG(DEBUG, EAL, "Master core %u is ready (tid=%p)\n",
- rte_config.master_lcore, thread_id);
-
- eal_check_mem_on_local_socket();
-
- rte_eal_mcfg_complete();
-
- if (rte_eal_dev_init() < 0)
- rte_panic("Cannot init pmd devices\n");
-
- RTE_LCORE_FOREACH_SLAVE(i) {
-
- /*
- * create communication pipes between master thread
- * and children
- */
- if (pipe(lcore_config[i].pipe_master2slave) < 0)
- rte_panic("Cannot create pipe\n");
- if (pipe(lcore_config[i].pipe_slave2master) < 0)
- rte_panic("Cannot create pipe\n");
-
- lcore_config[i].state = WAIT;
-
- /* create a thread for each lcore */
- ret = pthread_create(&lcore_config[i].thread_id, NULL,
- eal_thread_loop, NULL);
- if (ret != 0)
- rte_panic("Cannot create thread\n");
- }
-
- eal_thread_init_master(rte_config.master_lcore);
-
- /*
- * Launch a dummy function on all slave lcores, so that master lcore
- * knows they are all ready when this function returns.
- */
- rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
- rte_eal_mp_wait_lcore();
-
- /* Probe & Initialize PCI devices */
- if (rte_eal_pci_probe())
- rte_panic("Cannot probe PCI\n");
-
- return fctret;
-}
-
-/* get core role */
-enum rte_lcore_role_t
-rte_eal_lcore_role(unsigned lcore_id)
-{
- return (rte_config.lcore_role[lcore_id]);
-}
-
-enum rte_proc_type_t
-rte_eal_process_type(void)
-{
- return (rte_config.process_type);
-}
-
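The primary/secondary auto-detection in eal_proc_type_detect() is plain POSIX advisory locking; stripped of the EAL specifics it reduces to this sketch (the path is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct flock wr_lock;
	memset(&wr_lock, 0, sizeof(wr_lock));
	wr_lock.l_type = F_WRLCK;
	wr_lock.l_whence = SEEK_SET;	/* l_start/l_len of 0: lock from offset 0 */

	int fd = open("/tmp/example.config", O_RDWR | O_CREAT, 0660);
	if (fd < 0)
		return 1;

	if (fcntl(fd, F_SETLK, &wr_lock) < 0)
		printf("lock held elsewhere: we are a secondary process\n");
	else
		printf("lock acquired: we are the primary process\n");

	/* keep fd open for the process lifetime, as the EAL does,
	 * so the lock persists until exit */
	return 0;
}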
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_hugepage_info.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_hugepage_info.c
deleted file mode 100755
index 24248fbc..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_hugepage_info.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <sys/types.h>
-#include <sys/sysctl.h>
-#include <sys/mman.h>
-#include <string.h>
-
-#include <rte_log.h>
-#include <fcntl.h>
-#include "eal_hugepages.h"
-#include "eal_internal_cfg.h"
-#include "eal_filesystem.h"
-
-#define CONTIGMEM_DEV "/dev/contigmem"
-
-/*
- * Uses mmap to create a shared memory area for storage of data.
- * Used in this file to store the hugepage file map on disk.
- */
-static void *
-create_shared_memory(const char *filename, const size_t mem_size)
-{
- void *retval;
- int fd = open(filename, O_CREAT | O_RDWR, 0666);
- if (fd < 0)
- return NULL;
- if (ftruncate(fd, mem_size) < 0) {
- close(fd);
- return NULL;
- }
- retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
- close(fd);
- return retval;
-}
-
-/*
- * There is no hugepage support on FreeBSD, so we emulate it using the contigmem driver.
- */
-int
-eal_hugepage_info_init(void)
-{
- size_t sysctl_size;
- int buffer_size, num_buffers, fd, error;
- /* re-use the linux "internal config" structure for our memory data */
- struct hugepage_info *hpi = &internal_config.hugepage_info[0];
- struct hugepage_info *tmp_hpi;
-
- sysctl_size = sizeof(num_buffers);
- error = sysctlbyname("hw.contigmem.num_buffers", &num_buffers,
- &sysctl_size, NULL, 0);
-
- if (error != 0) {
- RTE_LOG(ERR, EAL, "could not read sysctl hw.contigmem.num_buffers");
- return -1;
- }
-
- sysctl_size = sizeof(buffer_size);
- error = sysctlbyname("hw.contigmem.buffer_size", &buffer_size,
- &sysctl_size, NULL, 0);
-
- if (error != 0) {
- RTE_LOG(ERR, EAL, "could not read sysctl hw.contigmem.buffer_size");
- return -1;
- }
-
- fd = open(CONTIGMEM_DEV, O_RDWR);
- if (fd < 0) {
- RTE_LOG(ERR, EAL, "could not open "CONTIGMEM_DEV"\n");
- return -1;
- }
-
- if (buffer_size >= 1<<30)
- RTE_LOG(INFO, EAL, "Contigmem driver has %d buffers, each of size %dGB\n",
- num_buffers, buffer_size>>30);
- else if (buffer_size >= 1<<20)
- RTE_LOG(INFO, EAL, "Contigmem driver has %d buffers, each of size %dMB\n",
- num_buffers, buffer_size>>20);
- else
- RTE_LOG(INFO, EAL, "Contigmem driver has %d buffers, each of size %dKB\n",
- num_buffers, buffer_size>>10);
-
- internal_config.num_hugepage_sizes = 1;
- hpi->hugedir = CONTIGMEM_DEV;
- hpi->hugepage_sz = buffer_size;
- hpi->num_pages[0] = num_buffers;
- hpi->lock_descriptor = fd;
-
- tmp_hpi = create_shared_memory(eal_hugepage_info_path(),
- sizeof(struct hugepage_info));
-	if (tmp_hpi == NULL) {
- RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
- return -1;
- }
-
- memcpy(tmp_hpi, hpi, sizeof(struct hugepage_info));
-
-	if (munmap(tmp_hpi, sizeof(struct hugepage_info)) < 0) {
- RTE_LOG(ERR, EAL, "Failed to unmap shared memory!\n");
- return -1;
- }
-
- return 0;
-}
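create_shared_memory() above is a generic open/ftruncate/mmap pattern worth keeping in mind when reading the memory attach path below. A self-contained sketch of the same idea (the name map_shared_file and its arguments are illustrative, not part of DPDK):

    #include <fcntl.h>
    #include <stddef.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Map a file-backed shared region; returns NULL on any failure. */
    static void *
    map_shared_file(const char *path, size_t len)
    {
    	void *p;
    	int fd = open(path, O_CREAT | O_RDWR, 0666);

    	if (fd < 0)
    		return NULL;
    	if (ftruncate(fd, len) < 0) {	/* size the backing file first */
    		close(fd);
    		return NULL;
    	}
    	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    	close(fd);			/* the mapping survives the close */
    	return p == MAP_FAILED ? NULL : p;
    }

Note that mmap() signals failure with MAP_FAILED rather than NULL; folding that into NULL, as above, lets callers keep a single error check.
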
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_log.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_log.c
deleted file mode 100755
index a425f7a8..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_log.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <rte_common.h>
-#include <rte_log.h>
-
-#include <eal_private.h>
-
-/*
- * set the log to default function, called during eal init process,
- * once memzones are available.
- */
-int
-rte_eal_log_init(const char *id __rte_unused, int facility __rte_unused)
-{
- if (rte_eal_common_log_init(stderr) < 0)
- return -1;
- return 0;
-}
-
-int
-rte_eal_log_early_init(void)
-{
- rte_openlog_stream(stderr);
- return 0;
-}
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_memory.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_memory.c
deleted file mode 100755
index 65ee87d8..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_memory.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <sys/mman.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/sysctl.h>
-#include <inttypes.h>
-#include <fcntl.h>
-
-#include <rte_eal.h>
-#include <rte_eal_memconfig.h>
-#include <rte_log.h>
-#include <rte_string_fns.h>
-#include "eal_private.h"
-#include "eal_internal_cfg.h"
-#include "eal_filesystem.h"
-
-#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
-
-/*
- * Get physical address of any mapped virtual address in the current process.
- */
-phys_addr_t
-rte_mem_virt2phy(const void *virtaddr)
-{
- /* XXX not implemented. This function is only used by
- * rte_mempool_virt2phy() when hugepages are disabled. */
- (void)virtaddr;
- return RTE_BAD_PHYS_ADDR;
-}
-
-static int
-rte_eal_contigmem_init(void)
-{
- struct rte_mem_config *mcfg;
- uint64_t total_mem = 0;
- void *addr;
- unsigned i, j, seg_idx = 0;
-
- /* get pointer to global configuration */
- mcfg = rte_eal_get_configuration()->mem_config;
-
- /* for debug purposes, hugetlbfs can be disabled */
- if (internal_config.no_hugetlbfs) {
- addr = malloc(internal_config.memory);
- mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
- mcfg->memseg[0].addr = addr;
- mcfg->memseg[0].len = internal_config.memory;
- mcfg->memseg[0].socket_id = 0;
- return 0;
- }
-
- /* map all hugepages and sort them */
- for (i = 0; i < internal_config.num_hugepage_sizes; i ++){
- struct hugepage_info *hpi;
-
- hpi = &internal_config.hugepage_info[i];
- for (j = 0; j < hpi->num_pages[0]; j++) {
- struct rte_memseg *seg;
- uint64_t physaddr;
- int error;
- size_t sysctl_size = sizeof(physaddr);
- char physaddr_str[64];
-
- addr = mmap(NULL, hpi->hugepage_sz, PROT_READ|PROT_WRITE,
- MAP_SHARED, hpi->lock_descriptor, j * PAGE_SIZE);
- if (addr == MAP_FAILED) {
- RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
- j, hpi->hugedir);
- return -1;
- }
-
- snprintf(physaddr_str, sizeof(physaddr_str), "hw.contigmem"
- ".physaddr.%d", j);
- error = sysctlbyname(physaddr_str, &physaddr, &sysctl_size,
- NULL, 0);
- if (error < 0) {
- RTE_LOG(ERR, EAL, "Failed to get physical addr for buffer %u "
- "from %s\n", j, hpi->hugedir);
- return -1;
- }
-
- seg = &mcfg->memseg[seg_idx++];
- seg->addr = addr;
- seg->phys_addr = physaddr;
- seg->hugepage_sz = hpi->hugepage_sz;
- seg->len = hpi->hugepage_sz;
- seg->nchannel = mcfg->nchannel;
- seg->nrank = mcfg->nrank;
- seg->socket_id = 0;
-
- RTE_LOG(INFO, EAL, "Mapped memory segment %u @ %p: physaddr:0x%"
- PRIx64", len %zu\n",
- seg_idx, addr, physaddr, hpi->hugepage_sz);
-			total_mem += seg->len;
-			if (total_mem >= internal_config.memory ||
-					seg_idx >= RTE_MAX_MEMSEG)
-				break;
- }
- }
- return 0;
-}
-
-static int
-rte_eal_contigmem_attach(void)
-{
- const struct hugepage_info *hpi;
- int fd_hugepage_info, fd_hugepage = -1;
- unsigned i = 0;
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
-
- /* Obtain a file descriptor for hugepage_info */
- fd_hugepage_info = open(eal_hugepage_info_path(), O_RDONLY);
- if (fd_hugepage_info < 0) {
- RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
- return -1;
- }
-
- /* Map the shared hugepage_info into the process address spaces */
- hpi = mmap(NULL, sizeof(struct hugepage_info), PROT_READ, MAP_PRIVATE,
- fd_hugepage_info, 0);
-	if (hpi == MAP_FAILED) {
- RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
- goto error;
- }
-
- /* Obtain a file descriptor for contiguous memory */
- fd_hugepage = open(hpi->hugedir, O_RDWR);
- if (fd_hugepage < 0) {
- RTE_LOG(ERR, EAL, "Could not open %s\n", hpi->hugedir);
- goto error;
- }
-
- /* Map the contiguous memory into each memory segment */
- for (i = 0; i < hpi->num_pages[0]; i++) {
-
- void *addr;
- struct rte_memseg *seg = &mcfg->memseg[i];
-
- addr = mmap(seg->addr, hpi->hugepage_sz, PROT_READ|PROT_WRITE,
- MAP_SHARED|MAP_FIXED, fd_hugepage, i * PAGE_SIZE);
- if (addr == MAP_FAILED || addr != seg->addr) {
- RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
- i, hpi->hugedir);
- goto error;
- }
-
- }
-
- /* hugepage_info is no longer required */
- munmap((void *)(uintptr_t)hpi, sizeof(struct hugepage_info));
- close(fd_hugepage_info);
- close(fd_hugepage);
- return 0;
-
-error:
- if (fd_hugepage_info >= 0)
- close(fd_hugepage_info);
- if (fd_hugepage >= 0)
- close(fd_hugepage);
- return -1;
-}
-
-
-static int
-rte_eal_memdevice_init(void)
-{
- struct rte_config *config;
-
- if (rte_eal_process_type() == RTE_PROC_SECONDARY)
- return 0;
-
- config = rte_eal_get_configuration();
- config->mem_config->nchannel = internal_config.force_nchannel;
- config->mem_config->nrank = internal_config.force_nrank;
-
- return 0;
-}
-
-/* init memory subsystem */
-int
-rte_eal_memory_init(void)
-{
- RTE_LOG(INFO, EAL, "Setting up physically contiguous memory...\n");
- const int retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
- rte_eal_contigmem_init() :
- rte_eal_contigmem_attach();
- if (retval < 0)
- return -1;
-
- if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
- return -1;
-
- return 0;
-}
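The attach path above relies on every secondary process mapping each segment at exactly the virtual address recorded by the primary; MAP_FIXED plus the comparison against the recorded address enforces that contract. A reduced sketch of that contract (attach_segment and its parameters are illustrative):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/types.h>

    /* Re-map one segment at the address the primary process recorded. */
    static int
    attach_segment(int fd, void *seg_addr, size_t seg_len, off_t offset)
    {
    	void *addr = mmap(seg_addr, seg_len, PROT_READ | PROT_WRITE,
    			  MAP_SHARED | MAP_FIXED, fd, offset);

    	if (addr == MAP_FAILED || addr != seg_addr) {
    		fprintf(stderr, "segment failed to map at recorded address\n");
    		return -1;
    	}
    	return 0;	/* pointers shared with the primary are now valid */
    }
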
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_pci.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_pci.c
deleted file mode 100755
index 74ecce75..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_pci.c
+++ /dev/null
@@ -1,510 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <ctype.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdarg.h>
-#include <unistd.h>
-#include <inttypes.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <stdarg.h>
-#include <errno.h>
-#include <dirent.h>
-#include <limits.h>
-#include <sys/queue.h>
-#include <sys/mman.h>
-#include <sys/ioctl.h>
-#include <sys/pciio.h>
-#include <dev/pci/pcireg.h>
-
-#include <rte_interrupts.h>
-#include <rte_log.h>
-#include <rte_pci.h>
-#include <rte_common.h>
-#include <rte_launch.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_eal_memconfig.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_malloc.h>
-#include <rte_string_fns.h>
-#include <rte_debug.h>
-#include <rte_devargs.h>
-
-#include "rte_pci_dev_ids.h"
-#include "eal_filesystem.h"
-#include "eal_private.h"
-
-/**
- * @file
- * PCI probing under BSD
- *
- * This code fills the device list by scanning the PCI bus through
- * /dev/pci ioctls. When a registered driver needs resource mapping,
- * the BARs of the device, as exported by the nic_uio kernel module,
- * are mapped into the process via the /dev/uio@pci:b:d:f node.
- */
-
-struct uio_map {
- void *addr;
- uint64_t offset;
- uint64_t size;
- uint64_t phaddr;
-};
-
-/*
- * For multi-process we need to reproduce all PCI mappings in secondary
- * processes, so save them in a tailq.
- */
-struct uio_resource {
- TAILQ_ENTRY(uio_resource) next;
-
- struct rte_pci_addr pci_addr;
- char path[PATH_MAX];
- size_t nb_maps;
- struct uio_map maps[PCI_MAX_RESOURCE];
-};
-
-TAILQ_HEAD(uio_res_list, uio_resource);
-
-static struct uio_res_list *uio_res_list = NULL;
-
-/* unbind kernel driver for this device */
-static int
-pci_unbind_kernel_driver(struct rte_pci_device *dev __rte_unused)
-{
- RTE_LOG(ERR, EAL, "RTE_PCI_DRV_FORCE_UNBIND flag is not implemented "
- "for BSD\n");
- return -ENOTSUP;
-}
-
-/* map a particular resource from a file */
-static void *
-pci_map_resource(void *requested_addr, const char *devname, off_t offset,
- size_t size)
-{
- int fd;
- void *mapaddr;
-
- /*
- * open devname, to mmap it
- */
- fd = open(devname, O_RDWR);
- if (fd < 0) {
- RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
- devname, strerror(errno));
- goto fail;
- }
-
- /* Map the PCI memory resource of device */
- mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE,
- MAP_SHARED, fd, offset);
- close(fd);
- if (mapaddr == MAP_FAILED ||
- (requested_addr != NULL && mapaddr != requested_addr)) {
- RTE_LOG(ERR, EAL, "%s(): cannot mmap(%s(%d), %p, 0x%lx, 0x%lx):"
- " %s (%p)\n", __func__, devname, fd, requested_addr,
- (unsigned long)size, (unsigned long)offset,
- strerror(errno), mapaddr);
- goto fail;
- }
-
- RTE_LOG(DEBUG, EAL, " PCI memory mapped at %p\n", mapaddr);
-
- return mapaddr;
-
-fail:
- return NULL;
-}
-
-static int
-pci_uio_map_secondary(struct rte_pci_device *dev)
-{
- size_t i;
- struct uio_resource *uio_res;
-
- TAILQ_FOREACH(uio_res, uio_res_list, next) {
-
- /* skip this element if it doesn't match our PCI address */
- if (memcmp(&uio_res->pci_addr, &dev->addr, sizeof(dev->addr)))
- continue;
-
- for (i = 0; i != uio_res->nb_maps; i++) {
- if (pci_map_resource(uio_res->maps[i].addr,
- uio_res->path,
- (off_t)uio_res->maps[i].offset,
- (size_t)uio_res->maps[i].size)
- != uio_res->maps[i].addr) {
- RTE_LOG(ERR, EAL,
- "Cannot mmap device resource\n");
- return (-1);
- }
- }
- return (0);
- }
-
- RTE_LOG(ERR, EAL, "Cannot find resource for device\n");
- return 1;
-}
-
-/* map the PCI resource of a PCI device in virtual memory */
-static int
-pci_uio_map_resource(struct rte_pci_device *dev)
-{
- int i, j;
-	char devname[PATH_MAX]; /* contains /dev/uio@pci:b:d:f */
- void *mapaddr;
- uint64_t phaddr;
- uint64_t offset;
- uint64_t pagesz;
- struct rte_pci_addr *loc = &dev->addr;
- struct uio_resource *uio_res;
- struct uio_map *maps;
-
- dev->intr_handle.fd = -1;
- dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
-
- /* secondary processes - use already recorded details */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return (pci_uio_map_secondary(dev));
-
- snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
- dev->addr.bus, dev->addr.devid, dev->addr.function);
-
- if (access(devname, O_RDWR) < 0) {
- RTE_LOG(WARNING, EAL, " "PCI_PRI_FMT" not managed by UIO driver, "
- "skipping\n", loc->domain, loc->bus, loc->devid, loc->function);
- return 1;
- }
-
- /* save fd if in primary process */
- dev->intr_handle.fd = open(devname, O_RDWR);
- if (dev->intr_handle.fd < 0) {
- RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
- devname, strerror(errno));
- return -1;
- }
- dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
-
- /* allocate the mapping details for secondary processes*/
- if ((uio_res = rte_zmalloc("UIO_RES", sizeof (*uio_res), 0)) == NULL) {
- RTE_LOG(ERR, EAL,
- "%s(): cannot store uio mmap details\n", __func__);
- return (-1);
- }
-
- snprintf(uio_res->path, sizeof(uio_res->path), "%s", devname);
- memcpy(&uio_res->pci_addr, &dev->addr, sizeof(uio_res->pci_addr));
-
-
- /* Map all BARs */
- pagesz = sysconf(_SC_PAGESIZE);
-
- maps = uio_res->maps;
- for (i = uio_res->nb_maps = 0; i != PCI_MAX_RESOURCE; i++) {
-
- j = uio_res->nb_maps;
- /* skip empty BAR */
- if ((phaddr = dev->mem_resource[i].phys_addr) == 0)
- continue;
-
- /* if matching map is found, then use it */
- offset = i * pagesz;
- maps[j].offset = offset;
- maps[j].phaddr = dev->mem_resource[i].phys_addr;
- maps[j].size = dev->mem_resource[i].len;
- if (maps[j].addr != NULL ||
- (mapaddr = pci_map_resource(NULL, devname, (off_t)offset,
- (size_t)maps[j].size)
- ) == NULL) {
- rte_free(uio_res);
- return (-1);
- }
-
- maps[j].addr = mapaddr;
- uio_res->nb_maps++;
- dev->mem_resource[i].addr = mapaddr;
- }
-
- TAILQ_INSERT_TAIL(uio_res_list, uio_res, next);
-
- return (0);
-}
-
-/* Compare two PCI device addresses. */
-static int
-pci_addr_comparison(struct rte_pci_addr *addr, struct rte_pci_addr *addr2)
-{
- uint64_t dev_addr = (addr->domain << 24) + (addr->bus << 16) + (addr->devid << 8) + addr->function;
- uint64_t dev_addr2 = (addr2->domain << 24) + (addr2->bus << 16) + (addr2->devid << 8) + addr2->function;
-
- if (dev_addr > dev_addr2)
- return 1;
- else
- return 0;
-}
-
-
-/* Scan one pci sysfs entry, and fill the devices list from it. */
-static int
-pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
-{
- struct rte_pci_device *dev;
- struct pci_bar_io bar;
- unsigned i, max;
-
- dev = malloc(sizeof(*dev));
- if (dev == NULL) {
- return -1;
- }
-
- memset(dev, 0, sizeof(*dev));
- dev->addr.domain = conf->pc_sel.pc_domain;
- dev->addr.bus = conf->pc_sel.pc_bus;
- dev->addr.devid = conf->pc_sel.pc_dev;
- dev->addr.function = conf->pc_sel.pc_func;
-
- /* get vendor id */
- dev->id.vendor_id = conf->pc_vendor;
-
- /* get device id */
- dev->id.device_id = conf->pc_device;
-
- /* get subsystem_vendor id */
- dev->id.subsystem_vendor_id = conf->pc_subvendor;
-
- /* get subsystem_device id */
- dev->id.subsystem_device_id = conf->pc_subdevice;
-
- /* TODO: get max_vfs */
- dev->max_vfs = 0;
-
- /* FreeBSD has no NUMA support (yet) */
- dev->numa_node = 0;
-
-	/* parse resources */
- switch (conf->pc_hdr & PCIM_HDRTYPE) {
- case PCIM_HDRTYPE_NORMAL:
- max = PCIR_MAX_BAR_0;
- break;
- case PCIM_HDRTYPE_BRIDGE:
- max = PCIR_MAX_BAR_1;
- break;
- case PCIM_HDRTYPE_CARDBUS:
- max = PCIR_MAX_BAR_2;
- break;
- default:
- goto skipdev;
- }
-
- for (i = 0; i <= max; i++) {
- bar.pbi_sel = conf->pc_sel;
- bar.pbi_reg = PCIR_BAR(i);
- if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
- continue;
-
- dev->mem_resource[i].len = bar.pbi_length;
- if (PCI_BAR_IO(bar.pbi_base)) {
- dev->mem_resource[i].addr = (void *)(bar.pbi_base & ~((uint64_t)0xf));
- continue;
- }
- dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
- }
-
- /* device is valid, add in list (sorted) */
- if (TAILQ_EMPTY(&pci_device_list)) {
- TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
- }
- else {
- struct rte_pci_device *dev2 = NULL;
-
- TAILQ_FOREACH(dev2, &pci_device_list, next) {
- if (pci_addr_comparison(&dev->addr, &dev2->addr))
- continue;
- else {
- TAILQ_INSERT_BEFORE(dev2, dev, next);
- return 0;
- }
- }
- TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
- }
-
- return 0;
-
-skipdev:
- free(dev);
- return 0;
-}
-
-/*
- * Scan the content of the PCI bus, and add the devices in the devices
- * list. Call pci_scan_one() for each pci entry found.
- */
-static int
-pci_scan(void)
-{
- int fd = -1;
- unsigned dev_count = 0;
- struct pci_conf matches[16];
- struct pci_conf_io conf_io = {
- .pat_buf_len = 0,
- .num_patterns = 0,
- .patterns = NULL,
- .match_buf_len = sizeof(matches),
- .matches = &matches[0],
- };
-
- fd = open("/dev/pci", O_RDONLY);
- if (fd < 0) {
- RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
- goto error;
- }
-
- do {
- unsigned i;
- if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
- RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
- __func__, strerror(errno));
- goto error;
- }
-
- for (i = 0; i < conf_io.num_matches; i++)
- if (pci_scan_one(fd, &matches[i]) < 0)
- goto error;
-
- dev_count += conf_io.num_matches;
- } while(conf_io.status == PCI_GETCONF_MORE_DEVS);
-
- close(fd);
-
- RTE_LOG(ERR, EAL, "PCI scan found %u devices\n", dev_count);
- return 0;
-
-error:
- if (fd >= 0)
- close(fd);
- return -1;
-}
-
-/*
- * If vendor/device ID match, call the devinit() function of the
- * driver.
- */
-int
-rte_eal_pci_probe_one_driver(struct rte_pci_driver *dr, struct rte_pci_device *dev)
-{
- struct rte_pci_id *id_table;
- int ret;
-
- for (id_table = dr->id_table ; id_table->vendor_id != 0; id_table++) {
-
- /* check if device's identifiers match the driver's ones */
- if (id_table->vendor_id != dev->id.vendor_id &&
- id_table->vendor_id != PCI_ANY_ID)
- continue;
- if (id_table->device_id != dev->id.device_id &&
- id_table->device_id != PCI_ANY_ID)
- continue;
- if (id_table->subsystem_vendor_id != dev->id.subsystem_vendor_id &&
- id_table->subsystem_vendor_id != PCI_ANY_ID)
- continue;
- if (id_table->subsystem_device_id != dev->id.subsystem_device_id &&
- id_table->subsystem_device_id != PCI_ANY_ID)
- continue;
-
- struct rte_pci_addr *loc = &dev->addr;
-
- RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
- loc->domain, loc->bus, loc->devid, loc->function,
- dev->numa_node);
-
- RTE_LOG(DEBUG, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
- dev->id.device_id, dr->name);
-
- /* no initialization when blacklisted, return without error */
- if (dev->devargs != NULL &&
- dev->devargs->type == RTE_DEVTYPE_BLACKLISTED_PCI) {
-
- RTE_LOG(DEBUG, EAL, " Device is blacklisted, not initializing\n");
- return 0;
- }
-
- if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
- /* map resources for devices that use igb_uio */
- ret = pci_uio_map_resource(dev);
- if (ret != 0)
- return ret;
- } else if (dr->drv_flags & RTE_PCI_DRV_FORCE_UNBIND &&
- rte_eal_process_type() == RTE_PROC_PRIMARY) {
- /* unbind current driver */
- if (pci_unbind_kernel_driver(dev) < 0)
- return -1;
- }
-
- /* reference driver structure */
- dev->driver = dr;
-
- /* call the driver devinit() function */
- return dr->devinit(dr, dev);
- }
- /* return positive value if driver is not found */
- return 1;
-}
-
-/* Init the PCI EAL subsystem */
-int
-rte_eal_pci_init(void)
-{
- TAILQ_INIT(&pci_driver_list);
- TAILQ_INIT(&pci_device_list);
- uio_res_list = RTE_TAILQ_RESERVE_BY_IDX(RTE_TAILQ_PCI, uio_res_list);
-
- /* for debug purposes, PCI can be disabled */
- if (internal_config.no_pci)
- return 0;
-
- if (pci_scan() < 0) {
- RTE_LOG(ERR, EAL, "%s(): Cannot scan PCI bus\n", __func__);
- return -1;
- }
- return 0;
-}
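Note the small protocol between this file and the nic_uio kernel module further below: userspace encodes the BAR index in the mmap file offset as index * page size, and the driver recovers it by dividing the offset by PAGE_SIZE. A hypothetical userspace helper showing that encoding (map_bar is illustrative, not a DPDK function):

    #include <fcntl.h>
    #include <stddef.h>
    #include <sys/mman.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Map BAR 'idx' of a nic_uio device: the BAR index rides in the offset. */
    static void *
    map_bar(const char *devname, unsigned idx, size_t len)
    {
    	long pagesz = sysconf(_SC_PAGESIZE);
    	int fd;
    	void *p;

    	if (pagesz <= 0)
    		return NULL;
    	fd = open(devname, O_RDWR);
    	if (fd < 0)
    		return NULL;
    	/* the nic_uio driver decodes: bar = offset / PAGE_SIZE */
    	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
    		 fd, (off_t)idx * pagesz);
    	close(fd);
    	return p == MAP_FAILED ? NULL : p;
    }
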
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_thread.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_thread.c
deleted file mode 100755
index ab05368d..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_thread.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <sched.h>
-#include <pthread_np.h>
-#include <sys/queue.h>
-
-#include <rte_debug.h>
-#include <rte_atomic.h>
-#include <rte_launch.h>
-#include <rte_log.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_per_lcore.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-
-#include "eal_private.h"
-#include "eal_thread.h"
-
-RTE_DEFINE_PER_LCORE(unsigned, _lcore_id);
-
-/*
- * Send a message to a slave lcore identified by slave_id to call a
- * function f with argument arg. Once the execution is done, the
- * remote lcore switches to the FINISHED state.
- */
-int
-rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned slave_id)
-{
- int n;
- char c = 0;
- int m2s = lcore_config[slave_id].pipe_master2slave[1];
- int s2m = lcore_config[slave_id].pipe_slave2master[0];
-
- if (lcore_config[slave_id].state != WAIT)
- return -EBUSY;
-
- lcore_config[slave_id].f = f;
- lcore_config[slave_id].arg = arg;
-
- /* send message */
- n = 0;
- while (n == 0 || (n < 0 && errno == EINTR))
- n = write(m2s, &c, 1);
- if (n < 0)
- rte_panic("cannot write on configuration pipe\n");
-
- /* wait ack */
- do {
- n = read(s2m, &c, 1);
- } while (n < 0 && errno == EINTR);
-
- if (n <= 0)
- rte_panic("cannot read on configuration pipe\n");
-
- return 0;
-}
-
-/* set affinity for current thread */
-static int
-eal_thread_set_affinity(void)
-{
- int s;
- pthread_t thread;
-
-/*
- * According to the section VERSIONS of the CPU_ALLOC man page:
- *
- * The CPU_ZERO(), CPU_SET(), CPU_CLR(), and CPU_ISSET() macros were added
- * in glibc 2.3.3.
- *
- * CPU_COUNT() first appeared in glibc 2.6.
- *
- * CPU_AND(), CPU_OR(), CPU_XOR(), CPU_EQUAL(), CPU_ALLOC(),
- * CPU_ALLOC_SIZE(), CPU_FREE(), CPU_ZERO_S(), CPU_SET_S(), CPU_CLR_S(),
- * CPU_ISSET_S(), CPU_AND_S(), CPU_OR_S(), CPU_XOR_S(), and CPU_EQUAL_S()
- * first appeared in glibc 2.7.
- */
-#if defined(CPU_ALLOC)
- size_t size;
- cpu_set_t *cpusetp;
-
- cpusetp = CPU_ALLOC(RTE_MAX_LCORE);
- if (cpusetp == NULL) {
- RTE_LOG(ERR, EAL, "CPU_ALLOC failed\n");
- return -1;
- }
-
- size = CPU_ALLOC_SIZE(RTE_MAX_LCORE);
- CPU_ZERO_S(size, cpusetp);
- CPU_SET_S(rte_lcore_id(), size, cpusetp);
-
- thread = pthread_self();
- s = pthread_setaffinity_np(thread, size, cpusetp);
- if (s != 0) {
- RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
- CPU_FREE(cpusetp);
- return -1;
- }
-
- CPU_FREE(cpusetp);
-#else /* CPU_ALLOC */
- cpuset_t cpuset;
- CPU_ZERO( &cpuset );
- CPU_SET( rte_lcore_id(), &cpuset );
-
- thread = pthread_self();
- s = pthread_setaffinity_np(thread, sizeof( cpuset ), &cpuset);
- if (s != 0) {
- RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
- return -1;
- }
-#endif
- return 0;
-}
-
-void eal_thread_init_master(unsigned lcore_id)
-{
- /* set the lcore ID in per-lcore memory area */
- RTE_PER_LCORE(_lcore_id) = lcore_id;
-
- /* set CPU affinity */
- if (eal_thread_set_affinity() < 0)
- rte_panic("cannot set affinity\n");
-}
-
-/* main loop of threads */
-__attribute__((noreturn)) void *
-eal_thread_loop(__attribute__((unused)) void *arg)
-{
- char c;
- int n, ret;
- unsigned lcore_id;
- pthread_t thread_id;
- int m2s, s2m;
-
- thread_id = pthread_self();
-
- /* retrieve our lcore_id from the configuration structure */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (thread_id == lcore_config[lcore_id].thread_id)
- break;
- }
- if (lcore_id == RTE_MAX_LCORE)
- rte_panic("cannot retrieve lcore id\n");
-
- RTE_LOG(DEBUG, EAL, "Core %u is ready (tid=%p)\n",
- lcore_id, thread_id);
-
- m2s = lcore_config[lcore_id].pipe_master2slave[0];
- s2m = lcore_config[lcore_id].pipe_slave2master[1];
-
- /* set the lcore ID in per-lcore memory area */
- RTE_PER_LCORE(_lcore_id) = lcore_id;
-
- /* set CPU affinity */
- if (eal_thread_set_affinity() < 0)
- rte_panic("cannot set affinity\n");
-
- /* read on our pipe to get commands */
- while (1) {
- void *fct_arg;
-
- /* wait command */
- do {
- n = read(m2s, &c, 1);
- } while (n < 0 && errno == EINTR);
-
- if (n <= 0)
- rte_panic("cannot read on configuration pipe\n");
-
- lcore_config[lcore_id].state = RUNNING;
-
- /* send ack */
- n = 0;
- while (n == 0 || (n < 0 && errno == EINTR))
- n = write(s2m, &c, 1);
- if (n < 0)
- rte_panic("cannot write on configuration pipe\n");
-
- if (lcore_config[lcore_id].f == NULL)
- rte_panic("NULL function pointer\n");
-
- /* call the function and store the return value */
- fct_arg = lcore_config[lcore_id].arg;
- ret = lcore_config[lcore_id].f(fct_arg);
- lcore_config[lcore_id].ret = ret;
- rte_wmb();
- lcore_config[lcore_id].state = FINISHED;
- }
-
- /* never reached */
- /* pthread_exit(NULL); */
- /* return NULL; */
-}
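The pipe handshake above is what makes rte_eal_remote_launch() synchronous up to the acknowledgement: the call returns once the slave has acked the command, and the return value is collected separately once the slave reaches FINISHED. Roughly typical usage (the worker function and the choice of lcore 1 are illustrative):

    #include <rte_launch.h>
    #include <rte_lcore.h>

    static int
    worker(void *arg)
    {
    	unsigned *count = arg;	/* per-lcore work goes here */
    	(*count)++;
    	return 0;
    }

    /* Launch on slave lcore 1, then block until it reaches FINISHED. */
    static int
    run_once(unsigned *count)
    {
    	if (rte_eal_remote_launch(worker, count, 1) != 0)
    		return -1;	/* slave was not in WAIT state */
    	return rte_eal_wait_lcore(1);
    }
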
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_timer.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_timer.c
deleted file mode 100755
index 3e698647..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_timer.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#if 0
-
-#include <string.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <inttypes.h>
-//#include <sys/types.h>
-//#include <sys/sysctl.h>
-#include <errno.h>
-
-
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_cycles.h>
-#include <rte_tailq.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_eal.h>
-#include <rte_debug.h>
-
-#include "eal_private.h"
-#include "eal_internal_cfg.h"
-
-#ifdef RTE_LIBEAL_USE_HPET
-#error "should not be enabled"
-//#warning HPET is not supported in FreeBSD
-#endif
-
-enum timer_source eal_timer_source = EAL_TIMER_TSC;
-
-/* The frequency of the RDTSC timer resolution */
-static uint64_t eal_tsc_resolution_hz = 0;
-
-void
-rte_delay_us(unsigned us)
-{
- const uint64_t start = rte_get_timer_cycles();
- const uint64_t ticks = (uint64_t)us * rte_get_timer_hz() / 1E6;
- while ((rte_get_timer_cycles() - start) < ticks)
- rte_pause();
-}
-
-uint64_t
-rte_get_tsc_hz(void)
-{
- return eal_tsc_resolution_hz;
-}
-
-#if 0
-static int
-set_tsc_freq_from_sysctl(void)
-{
- size_t sz;
- int tmp;
-
- sz = sizeof(tmp);
- tmp = 0;
-
- if (sysctlbyname("kern.timecounter.smp_tsc", &tmp, &sz, NULL, 0))
- RTE_LOG(WARNING, EAL, "%s\n", strerror(errno));
- else if (tmp != 1)
- RTE_LOG(WARNING, EAL, "TSC is not safe to use in SMP mode\n");
-
- tmp = 0;
-
- if (sysctlbyname("kern.timecounter.invariant_tsc", &tmp, &sz, NULL, 0))
- RTE_LOG(WARNING, EAL, "%s\n", strerror(errno));
- else if (tmp != 1)
- RTE_LOG(WARNING, EAL, "TSC is not invariant\n");
-
- sz = sizeof(eal_tsc_resolution_hz);
- if (sysctlbyname("machdep.tsc_freq", &eal_tsc_resolution_hz, &sz, NULL, 0)) {
- RTE_LOG(WARNING, EAL, "%s\n", strerror(errno));
- return -1;
- }
-
- return 0;
-}
-#endif
-
-static void
-set_tsc_freq_fallback(void)
-{
- RTE_LOG(WARNING, EAL, "WARNING: clock_gettime cannot use "
- "CLOCK_MONOTONIC_RAW and HPET is not available"
- " - clock timings may be less accurate.\n");
- /* assume that the sleep(1) will sleep for 1 second */
- uint64_t start = rte_rdtsc();
- sleep(1);
- eal_tsc_resolution_hz = rte_rdtsc() - start;
-}
-
-/*
- * This function measures the TSC frequency. It uses a variety of approaches.
- *
- * 1. Read the TSC frequency value provided by the kernel
- * 2. If above does not work, just sleep for 1 second and tune off that,
- * printing a warning about inaccuracy of timing
- */
-static void
-set_tsc_freq(void)
-{
- //if (set_tsc_freq_from_sysctl() < 0)
- set_tsc_freq_fallback();
-
- RTE_LOG(INFO, EAL, "TSC frequency is ~%"PRIu64" KHz\n",
- eal_tsc_resolution_hz/1000);
-}
-
-int
-rte_eal_timer_init(void)
-{
- set_tsc_freq();
- return 0;
-}
-#endif
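Although this whole file is compiled out with #if 0 in this tree, the busy-wait arithmetic in rte_delay_us() is worth spelling out: the microsecond count is converted to a tick budget as ticks = us * hz / 10^6. A worked example, assuming a 2.4 GHz TSC (the frequency is an assumption for illustration):

    #include <stdint.h>

    /* Tick budget for a 10 us delay on an assumed 2.4 GHz TSC. */
    static uint64_t
    example_ticks(void)
    {
    	uint64_t hz = 2400000000ULL;	/* assumption, not measured */
    	return 10ULL * hz / 1000000ULL;	/* = 24000 cycles */
    }
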
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_dom0_common.h b/src/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_dom0_common.h
deleted file mode 100755
index 99a33432..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_dom0_common.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*-
- * This file is provided under a dual BSD/LGPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GNU LESSER GENERAL PUBLIC LICENSE
- *
- * Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Contact Information:
- * Intel Corporation
- *
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef _RTE_DOM0_COMMON_H_
-#define _RTE_DOM0_COMMON_H_
-
-#ifdef __KERNEL__
-#include <linux/if.h>
-#endif
-
-#define DOM0_NAME_MAX 256
-#define DOM0_MM_DEV "/dev/dom0_mm"
-
-#define DOM0_CONTIG_NUM_ORDER 9 /**< 2M order */
-#define DOM0_NUM_MEMSEG 512 /**< Maximum nb. of memory segment. */
-#define DOM0_MEMBLOCK_SIZE 0x200000 /**< Maximum nb. of memory block(2M). */
-#define DOM0_CONFIG_MEMSIZE 4096 /**< Maximum config memory size(4G). */
-#define DOM0_NUM_MEMBLOCK (DOM0_CONFIG_MEMSIZE / 2) /**< Maximum nb. of 2M memory block. */
-
-#define RTE_DOM0_IOCTL_PREPARE_MEMSEG _IOWR(0, 1 , struct memory_info)
-#define RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG _IOWR(0, 2 , char *)
-#define RTE_DOM0_IOCTL_GET_NUM_MEMSEG _IOWR(0, 3, int)
-#define RTE_DOM0_IOCTL_GET_MEMSEG_INFO _IOWR(0, 4, void *)
-
-/**
- * A structure used to store memory information.
- */
-struct memory_info {
- char name[DOM0_NAME_MAX];
- uint64_t size;
-};
-
-/**
- * A structure used to store memory segment information.
- */
-struct memseg_info {
- uint32_t idx;
- uint64_t pfn;
- uint64_t size;
- uint64_t mfn[DOM0_NUM_MEMBLOCK];
-};
-
-/**
- * A structure used to store memory block information.
- */
-struct memblock_info {
- uint8_t exchange_flag;
- uint64_t vir_addr;
- uint64_t pfn;
- uint64_t mfn;
-};
-#endif /* _RTE_DOM0_COMMON_H_ */
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h b/src/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h
deleted file mode 100755
index 87a9cf69..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_INTERRUPTS_H_
-#error "don't include this file directly, please include generic <rte_interrupts.h>"
-#endif
-
-#ifndef _RTE_BSDAPP_INTERRUPTS_H_
-#define _RTE_BSDAPP_INTERRUPTS_H_
-
-enum rte_intr_handle_type {
- RTE_INTR_HANDLE_UNKNOWN = 0,
- RTE_INTR_HANDLE_UIO, /**< uio device handle */
- RTE_INTR_HANDLE_ALARM, /**< alarm handle */
- RTE_INTR_HANDLE_MAX
-};
-
-/** Handle for interrupts. */
-struct rte_intr_handle {
- int fd; /**< file descriptor */
- enum rte_intr_handle_type type; /**< handle type */
-};
-
-#endif /* _RTE_BSDAPP_INTERRUPTS_H_ */
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/BSDmakefile b/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/BSDmakefile
deleted file mode 100755
index 5454ed85..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/BSDmakefile
+++ /dev/null
@@ -1,36 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-KMOD= nic_uio
-SRCS= nic_uio.c device_if.h bus_if.h pci_if.h
-
-.include <bsd.kmod.mk>
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/Makefile b/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/Makefile
deleted file mode 100755
index 89957615..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# module name and path
-#
-MODULE = nic_uio
-
-#
-# CFLAGS
-#
-MODULE_CFLAGS += -I$(SRCDIR)
-MODULE_CFLAGS += -I$(RTE_OUTPUT)/include
-MODULE_CFLAGS += -Winline -Wall -Werror
-MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
-
-#
-# all source are stored in SRCS-y
-#
-SRCS-y := nic_uio.c
-
-include $(RTE_SDK)/mk/rte.bsdmodule.mk
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/nic_uio.c b/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/nic_uio.c
deleted file mode 100755
index ed11d845..00000000
--- a/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/nic_uio.c
+++ /dev/null
@@ -1,329 +0,0 @@
-/* -
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/param.h> /* defines used in kernel.h */
-#include <sys/module.h>
-#include <sys/kernel.h> /* types used in module initialization */
-#include <sys/conf.h> /* cdevsw struct */
-#include <sys/bus.h> /* structs, prototypes for pci bus stuff and DEVMETHOD */
-#include <sys/rman.h>
-#include <sys/systm.h>
-#include <sys/rwlock.h>
-#include <sys/proc.h>
-
-#include <machine/bus.h>
-#include <dev/pci/pcivar.h> /* For pci_get macros! */
-#include <dev/pci/pcireg.h> /* The softc holds our per-instance data. */
-#include <vm/vm.h>
-#include <vm/uma.h>
-#include <vm/vm_object.h>
-#include <vm/vm_page.h>
-#include <vm/vm_pager.h>
-
-
-#define MAX_BARS (PCIR_MAX_BAR_0 + 1)
-
-
-struct nic_uio_softc {
- device_t dev_t;
- struct cdev *my_cdev;
- int bar_id[MAX_BARS];
- struct resource *bar_res[MAX_BARS];
- u_long bar_start[MAX_BARS];
- u_long bar_size[MAX_BARS];
-};
-
-/* Function prototypes */
-static d_open_t nic_uio_open;
-static d_close_t nic_uio_close;
-static d_mmap_t nic_uio_mmap;
-static d_mmap_single_t nic_uio_mmap_single;
-static int nic_uio_probe(device_t dev);
-static int nic_uio_attach(device_t dev);
-static int nic_uio_detach(device_t dev);
-static int nic_uio_shutdown(void);
-static int nic_uio_modevent(module_t mod, int type, void *arg);
-
-static struct cdevsw uio_cdevsw = {
- .d_name = "nic_uio",
- .d_version = D_VERSION,
- .d_open = nic_uio_open,
- .d_close = nic_uio_close,
- .d_mmap = nic_uio_mmap,
- .d_mmap_single = nic_uio_mmap_single,
-};
-
-static device_method_t nic_uio_methods[] = {
- DEVMETHOD(device_probe, nic_uio_probe),
- DEVMETHOD(device_attach, nic_uio_attach),
- DEVMETHOD(device_detach, nic_uio_detach),
- DEVMETHOD_END
-};
-
-struct device {
- int vend;
- int dev;
-};
-
-struct pci_bdf {
- uint32_t bus;
- uint32_t devid;
- uint32_t function;
-};
-
-
-#define RTE_PCI_DEV_ID_DECL_EM(vend, dev) {vend, dev},
-#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {vend, dev},
-#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {vend, dev},
-#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {vend, dev},
-#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {vend, dev},
-#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {vend, dev},
-#define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {vend, dev},
-#define RTE_PCI_DEV_ID_DECL_VIRTIO(vend, dev) {vend, dev},
-#define RTE_PCI_DEV_ID_DECL_VMXNET3(vend, dev) {vend, dev},
-
-const struct device devices[] = {
-#include <rte_pci_dev_ids.h>
-};
-#define NUM_DEVICES (sizeof(devices)/sizeof(devices[0]))
-
-
-static devclass_t nic_uio_devclass;
-
-DEFINE_CLASS_0(nic_uio, nic_uio_driver, nic_uio_methods, sizeof(struct nic_uio_softc));
-DRIVER_MODULE(nic_uio, pci, nic_uio_driver, nic_uio_devclass, nic_uio_modevent, 0);
-
-static int
-nic_uio_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
- int prot, vm_memattr_t *memattr)
-{
- *paddr = offset;
- return (0);
-}
-
-static int
-nic_uio_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
- struct vm_object **obj, int nprot)
-{
- /*
- * The BAR index is encoded in the offset. Divide the offset by
- * PAGE_SIZE to get the index of the bar requested by the user
- * app.
- */
- unsigned bar = *offset/PAGE_SIZE;
- struct nic_uio_softc *sc = cdev->si_drv1;
-
- if (bar >= MAX_BARS)
- return EINVAL;
-
- if (sc->bar_res[bar] == NULL) {
- sc->bar_id[bar] = PCIR_BAR(bar);
-
- if (PCI_BAR_IO(pci_read_config(sc->dev_t, sc->bar_id[bar], 4)))
- sc->bar_res[bar] = bus_alloc_resource_any(sc->dev_t, SYS_RES_IOPORT,
- &sc->bar_id[bar], RF_ACTIVE);
- else
- sc->bar_res[bar] = bus_alloc_resource_any(sc->dev_t, SYS_RES_MEMORY,
- &sc->bar_id[bar], RF_ACTIVE);
- }
- if (sc->bar_res[bar] == NULL)
- return ENXIO;
-
- sc->bar_start[bar] = rman_get_start(sc->bar_res[bar]);
- sc->bar_size[bar] = rman_get_size(sc->bar_res[bar]);
-
- device_printf(sc->dev_t, "Bar %u @ %lx, size %lx\n", bar,
- sc->bar_start[bar], sc->bar_size[bar]);
-
- *offset = sc->bar_start[bar];
- *obj = vm_pager_allocate(OBJT_DEVICE, cdev, size, nprot, *offset,
- curthread->td_ucred);
- return 0;
-}
-
-
-int
-nic_uio_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
-{
- return 0;
-}
-
-int
-nic_uio_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
-{
- return 0;
-}
-
-static int
-nic_uio_probe (device_t dev)
-{
- int i;
-
- for (i = 0; i < NUM_DEVICES; i++)
- if (pci_get_vendor(dev) == devices[i].vend &&
- pci_get_device(dev) == devices[i].dev) {
-
- device_set_desc(dev, "Intel(R) DPDK PCI Device");
- return (BUS_PROBE_SPECIFIC);
- }
-
- return (ENXIO);
-}
-
-static int
-nic_uio_attach(device_t dev)
-{
- int i;
- struct nic_uio_softc *sc;
-
- sc = device_get_softc(dev);
- sc->dev_t = dev;
- sc->my_cdev = make_dev(&uio_cdevsw, device_get_unit(dev),
- UID_ROOT, GID_WHEEL, 0600, "uio@pci:%u:%u:%u",
- pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
- if (sc->my_cdev == NULL)
- return ENXIO;
- sc->my_cdev->si_drv1 = sc;
-
- for (i = 0; i < MAX_BARS; i++)
- sc->bar_res[i] = NULL;
-
- pci_enable_busmaster(dev);
-
- return 0;
-}
-
-static int
-nic_uio_detach(device_t dev)
-{
- int i;
- struct nic_uio_softc *sc;
- sc = device_get_softc(dev);
-
- for (i = 0; i < MAX_BARS; i++)
- if (sc->bar_res[i] != NULL) {
-
- if (PCI_BAR_IO(pci_read_config(dev, sc->bar_id[i], 4)))
- bus_release_resource(dev, SYS_RES_IOPORT, sc->bar_id[i],
- sc->bar_res[i]);
- else
- bus_release_resource(dev, SYS_RES_MEMORY, sc->bar_id[i],
- sc->bar_res[i]);
- }
-
- if (sc->my_cdev != NULL)
- destroy_dev(sc->my_cdev);
- return 0;
-}
-
-static void
-nic_uio_load(void)
-{
- uint32_t bus, device, function;
- int i;
- device_t dev;
- char bdf_str[256];
- char *token, *remaining;
-
- memset(bdf_str, 0, sizeof(bdf_str));
- TUNABLE_STR_FETCH("hw.nic_uio.bdfs", bdf_str, sizeof(bdf_str));
- remaining = bdf_str;
- /*
- * Users should specify PCI BDFs in the format "b:d:f,b:d:f,b:d:f".
-	 * The code below does not try to differentiate between ':' and ','
-	 * and just blindly consumes 3 tokens at a time to construct a
-	 * bus/device/function tuple.
-	 *
-	 * There is no checking of strtol() return values, but this should
-	 * be OK. Worst case, a token cannot be converted and strtol()
-	 * returns 0; that could give us a different BDF than intended, but
-	 * as long as the PCI device/vendor ID does not match, it will not
-	 * matter.
- */
- while (1) {
- if (remaining == NULL || remaining[0] == '\0')
- break;
- token = strsep(&remaining, ",:");
- if (token == NULL)
- break;
- bus = strtol(token, NULL, 10);
- token = strsep(&remaining, ",:");
- if (token == NULL)
- break;
- device = strtol(token, NULL, 10);
- token = strsep(&remaining, ",:");
- if (token == NULL)
- break;
- function = strtol(token, NULL, 10);
-
- dev = pci_find_bsf(bus, device, function);
- if (dev != NULL)
- for (i = 0; i < NUM_DEVICES; i++)
- if (pci_get_vendor(dev) == devices[i].vend &&
- pci_get_device(dev) == devices[i].dev)
- device_detach(dev);
- }
-}
-
-static void
-nic_uio_unload(void)
-{
-}
-
-static int
-nic_uio_shutdown(void)
-{
- return (0);
-}
-
-static int
-nic_uio_modevent(module_t mod, int type, void *arg)
-{
-
- switch (type) {
- case MOD_LOAD:
- nic_uio_load();
- break;
- case MOD_UNLOAD:
- nic_uio_unload();
- break;
- case MOD_SHUTDOWN:
- nic_uio_shutdown();
- break;
- default:
- break;
- }
-
- return (0);
-}
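The tokenizer in nic_uio_load() treats ':' and ',' identically and simply consumes three tokens per device, as its comment warns. A standalone sketch of the same parse (parse_bdfs and the sample input are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Parse "b:d:f,b:d:f" the way nic_uio_load() does: 3 tokens at a time. */
    static void
    parse_bdfs(char *s)
    {
    	char *tok[3];
    	int i;

    	while (s != NULL && s[0] != '\0') {
    		for (i = 0; i < 3; i++)
    			if ((tok[i] = strsep(&s, ",:")) == NULL)
    				return;
    		printf("bus %ld dev %ld func %ld\n",
    		       strtol(tok[0], NULL, 10),
    		       strtol(tok[1], NULL, 10),
    		       strtol(tok[2], NULL, 10));
    	}
    }

Applied to a writable copy of "1:0:0,2:0:0", this prints two tuples; a malformed token silently becomes 0, mirroring the module's documented behaviour.
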
diff --git a/src/dpdk_lib18/librte_eal/common/Makefile b/src/dpdk_lib18/librte_eal/common/Makefile
deleted file mode 100755
index 52c1a5f3..00000000
--- a/src/dpdk_lib18/librte_eal/common/Makefile
+++ /dev/null
@@ -1,61 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-INC := rte_branch_prediction.h rte_common.h
-INC += rte_debug.h rte_eal.h rte_errno.h rte_launch.h rte_lcore.h
-INC += rte_log.h rte_memory.h rte_memzone.h rte_pci.h
-INC += rte_pci_dev_ids.h rte_per_lcore.h rte_random.h
-INC += rte_rwlock.h rte_tailq.h rte_interrupts.h rte_alarm.h
-INC += rte_string_fns.h rte_version.h rte_tailq_elem.h
-INC += rte_eal_memconfig.h rte_malloc_heap.h
-INC += rte_hexdump.h rte_devargs.h rte_dev.h
-INC += rte_common_vect.h
-INC += rte_pci_dev_feature_defs.h rte_pci_dev_features.h
-
-ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y)
-INC += rte_warnings.h
-endif
-
-GENERIC_INC := rte_atomic.h rte_byteorder.h rte_cycles.h rte_prefetch.h
-GENERIC_INC += rte_spinlock.h rte_memcpy.h rte_cpuflags.h
-# defined in mk/arch/$(RTE_ARCH)/rte.vars.mk
-ARCH_DIR ?= $(RTE_ARCH)
-ARCH_INC := $(notdir $(wildcard $(RTE_SDK)/lib/librte_eal/common/include/arch/$(ARCH_DIR)/*.h))
-
-SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include := $(addprefix include/,$(INC))
-SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include += \
- $(addprefix include/arch/$(ARCH_DIR)/,$(ARCH_INC))
-SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include/generic := \
- $(addprefix include/generic/,$(GENERIC_INC))
-
-include $(RTE_SDK)/mk/rte.install.mk
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_options.c b/src/dpdk_lib18/librte_eal/common/eal_common_options.c
deleted file mode 100755
index e2810ab9..00000000
--- a/src/dpdk_lib18/librte_eal/common/eal_common_options.c
+++ /dev/null
@@ -1,611 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 6WIND S.A.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-#include <syslog.h>
-#include <ctype.h>
-#include <limits.h>
-#include <errno.h>
-#include <getopt.h>
-
-#include <rte_eal.h>
-#include <rte_log.h>
-#include <rte_lcore.h>
-#include <rte_version.h>
-#include <rte_devargs.h>
-
-#include "eal_internal_cfg.h"
-#include "eal_options.h"
-#include "eal_filesystem.h"
-
-#define BITS_PER_HEX 4
-
-const char
-eal_short_options[] =
- "b:" /* pci-blacklist */
- "w:" /* pci-whitelist */
- "c:" /* coremask */
- "d:"
- "l:" /* corelist */
- "m:"
- "n:"
- "r:"
- "v";
-
-const struct option
-eal_long_options[] = {
- {OPT_HUGE_DIR, 1, 0, OPT_HUGE_DIR_NUM},
- {OPT_MASTER_LCORE, 1, 0, OPT_MASTER_LCORE_NUM},
- {OPT_PROC_TYPE, 1, 0, OPT_PROC_TYPE_NUM},
- {OPT_NO_SHCONF, 0, 0, OPT_NO_SHCONF_NUM},
- {OPT_NO_HPET, 0, 0, OPT_NO_HPET_NUM},
- {OPT_VMWARE_TSC_MAP, 0, 0, OPT_VMWARE_TSC_MAP_NUM},
- {OPT_NO_PCI, 0, 0, OPT_NO_PCI_NUM},
- {OPT_NO_HUGE, 0, 0, OPT_NO_HUGE_NUM},
- {OPT_FILE_PREFIX, 1, 0, OPT_FILE_PREFIX_NUM},
- {OPT_SOCKET_MEM, 1, 0, OPT_SOCKET_MEM_NUM},
- {OPT_PCI_WHITELIST, 1, 0, OPT_PCI_WHITELIST_NUM},
- {OPT_PCI_BLACKLIST, 1, 0, OPT_PCI_BLACKLIST_NUM},
- {OPT_VDEV, 1, 0, OPT_VDEV_NUM},
- {OPT_SYSLOG, 1, NULL, OPT_SYSLOG_NUM},
- {OPT_LOG_LEVEL, 1, NULL, OPT_LOG_LEVEL_NUM},
- {OPT_BASE_VIRTADDR, 1, 0, OPT_BASE_VIRTADDR_NUM},
- {OPT_XEN_DOM0, 0, 0, OPT_XEN_DOM0_NUM},
- {OPT_CREATE_UIO_DEV, 1, NULL, OPT_CREATE_UIO_DEV_NUM},
- {OPT_VFIO_INTR, 1, NULL, OPT_VFIO_INTR_NUM},
- {0, 0, 0, 0}
-};
-
-static int lcores_parsed;
-static int master_lcore_parsed;
-static int mem_parsed;
-
-void
-eal_reset_internal_config(struct internal_config *internal_cfg)
-{
- int i;
-
- internal_cfg->memory = 0;
- internal_cfg->force_nrank = 0;
- internal_cfg->force_nchannel = 0;
- internal_cfg->hugefile_prefix = HUGEFILE_PREFIX_DEFAULT;
- internal_cfg->hugepage_dir = NULL;
- internal_cfg->force_sockets = 0;
- /* zero out the NUMA config */
- for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
- internal_cfg->socket_mem[i] = 0;
- /* zero out hugedir descriptors */
- for (i = 0; i < MAX_HUGEPAGE_SIZES; i++)
- internal_cfg->hugepage_info[i].lock_descriptor = -1;
- internal_cfg->base_virtaddr = 0;
-
- internal_cfg->syslog_facility = LOG_DAEMON;
- /* default value from build option */
- internal_cfg->log_level = RTE_LOG_LEVEL;
-
- internal_cfg->xen_dom0_support = 0;
-
- /* if set to NONE, interrupt mode is determined automatically */
- internal_cfg->vfio_intr_mode = RTE_INTR_MODE_NONE;
-
-#ifdef RTE_LIBEAL_USE_HPET
- internal_cfg->no_hpet = 0;
-#else
- internal_cfg->no_hpet = 1;
-#endif
- internal_cfg->vmware_tsc_map = 0;
-}
-
-/*
- * Parse the coremask given as argument (hexadecimal string) and fill
- * the global configuration (core role and core count) with the parsed
- * value.
- */
-static int xdigit2val(unsigned char c)
-{
- int val;
-
- if (isdigit(c))
- val = c - '0';
- else if (isupper(c))
- val = c - 'A' + 10;
- else
- val = c - 'a' + 10;
- return val;
-}
-
-static int
-eal_parse_coremask(const char *coremask)
-{
- struct rte_config *cfg = rte_eal_get_configuration();
- int i, j, idx = 0;
- unsigned count = 0;
- char c;
- int val;
-
- if (coremask == NULL)
- return -1;
- /* Remove leading and trailing blank characters.
-  * Remove the 0x/0X prefix if it exists.
-  */
- while (isblank(*coremask))
- coremask++;
- if (coremask[0] == '0' && ((coremask[1] == 'x')
- || (coremask[1] == 'X')))
- coremask += 2;
- i = strnlen(coremask, PATH_MAX);
- while ((i > 0) && isblank(coremask[i - 1]))
- i--;
- if (i == 0)
- return -1;
-
- for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
- c = coremask[i];
- if (isxdigit(c) == 0) {
- /* invalid characters */
- return -1;
- }
- val = xdigit2val(c);
- for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE; j++, idx++)
- {
- if ((1 << j) & val) {
- if (!lcore_config[idx].detected) {
- RTE_LOG(ERR, EAL, "lcore %u "
- "unavailable\n", idx);
- return -1;
- }
- cfg->lcore_role[idx] = ROLE_RTE;
- lcore_config[idx].core_index = count;
- count++;
- } else {
- cfg->lcore_role[idx] = ROLE_OFF;
- lcore_config[idx].core_index = -1;
- }
- }
- }
- for (; i >= 0; i--)
- if (coremask[i] != '0')
- return -1;
- for (; idx < RTE_MAX_LCORE; idx++) {
- cfg->lcore_role[idx] = ROLE_OFF;
- lcore_config[idx].core_index = -1;
- }
- if (count == 0)
- return -1;
- /* Update the count of enabled logical cores of the EAL configuration */
- cfg->lcore_count = count;
- lcores_parsed = 1;
- return 0;
-}
-
-static int
-eal_parse_corelist(const char *corelist)
-{
- struct rte_config *cfg = rte_eal_get_configuration();
- int i, idx = 0;
- unsigned count = 0;
- char *end = NULL;
- int min, max;
-
- if (corelist == NULL)
- return -1;
-
- /* Remove leading and trailing blank characters */
- while (isblank(*corelist))
- corelist++;
- i = strnlen(corelist, sysconf(_SC_ARG_MAX));
- while ((i > 0) && isblank(corelist[i - 1]))
- i--;
-
- /* Reset config */
- for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
- cfg->lcore_role[idx] = ROLE_OFF;
- lcore_config[idx].core_index = -1;
- }
-
- /* Get list of cores */
- min = RTE_MAX_LCORE;
- do {
- while (isblank(*corelist))
- corelist++;
- if (*corelist == '\0')
- return -1;
- errno = 0;
- idx = strtoul(corelist, &end, 10);
- if (errno || end == NULL)
- return -1;
- while (isblank(*end))
- end++;
- if (*end == '-') {
- min = idx;
- } else if ((*end == ',') || (*end == '\0')) {
- max = idx;
- if (min == RTE_MAX_LCORE)
- min = idx;
- for (idx = min; idx <= max; idx++) {
- cfg->lcore_role[idx] = ROLE_RTE;
- lcore_config[idx].core_index = count;
- count++;
- }
- min = RTE_MAX_LCORE;
- } else
- return -1;
- corelist = end + 1;
- } while (*end != '\0');
-
- if (count == 0)
- return -1;
-
- lcores_parsed = 1;
- return 0;
-}
-
-/* Changes the lcore id of the master thread */
-static int
-eal_parse_master_lcore(const char *arg)
-{
- char *parsing_end;
- struct rte_config *cfg = rte_eal_get_configuration();
-
- errno = 0;
- cfg->master_lcore = (uint32_t) strtol(arg, &parsing_end, 0);
- if (errno || parsing_end[0] != 0)
- return -1;
- if (cfg->master_lcore >= RTE_MAX_LCORE)
- return -1;
- master_lcore_parsed = 1;
- return 0;
-}
-
-static int
-eal_parse_syslog(const char *facility, struct internal_config *conf)
-{
- int i;
- static struct {
- const char *name;
- int value;
- } map[] = {
- { "auth", LOG_AUTH },
- { "cron", LOG_CRON },
- { "daemon", LOG_DAEMON },
- { "ftp", LOG_FTP },
- { "kern", LOG_KERN },
- { "lpr", LOG_LPR },
- { "mail", LOG_MAIL },
- { "news", LOG_NEWS },
- { "syslog", LOG_SYSLOG },
- { "user", LOG_USER },
- { "uucp", LOG_UUCP },
- { "local0", LOG_LOCAL0 },
- { "local1", LOG_LOCAL1 },
- { "local2", LOG_LOCAL2 },
- { "local3", LOG_LOCAL3 },
- { "local4", LOG_LOCAL4 },
- { "local5", LOG_LOCAL5 },
- { "local6", LOG_LOCAL6 },
- { "local7", LOG_LOCAL7 },
- { NULL, 0 }
- };
-
- for (i = 0; map[i].name; i++) {
- if (!strcmp(facility, map[i].name)) {
- conf->syslog_facility = map[i].value;
- return 0;
- }
- }
- return -1;
-}
-
-static int
-eal_parse_log_level(const char *level, uint32_t *log_level)
-{
- char *end;
- unsigned long tmp;
-
- errno = 0;
- tmp = strtoul(level, &end, 0);
-
- /* check for errors */
- if ((errno != 0) || (level[0] == '\0') ||
- end == NULL || (*end != '\0'))
- return -1;
-
- /* log_level is a uint32_t */
- if (tmp >= UINT32_MAX)
- return -1;
-
- *log_level = tmp;
- return 0;
-}
-
-static enum rte_proc_type_t
-eal_parse_proc_type(const char *arg)
-{
- if (strncasecmp(arg, "primary", sizeof("primary")) == 0)
- return RTE_PROC_PRIMARY;
- if (strncasecmp(arg, "secondary", sizeof("secondary")) == 0)
- return RTE_PROC_SECONDARY;
- if (strncasecmp(arg, "auto", sizeof("auto")) == 0)
- return RTE_PROC_AUTO;
-
- return RTE_PROC_INVALID;
-}
-
-int
-eal_parse_common_option(int opt, const char *optarg,
- struct internal_config *conf)
-{
- switch (opt) {
- /* blacklist */
- case 'b':
- if (rte_eal_devargs_add(RTE_DEVTYPE_BLACKLISTED_PCI,
- optarg) < 0) {
- return -1;
- }
- break;
- /* whitelist */
- case 'w':
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI,
- optarg) < 0) {
- return -1;
- }
- break;
- /* coremask */
- case 'c':
- if (eal_parse_coremask(optarg) < 0) {
- RTE_LOG(ERR, EAL, "invalid coremask\n");
- return -1;
- }
- break;
- /* corelist */
- case 'l':
- if (eal_parse_corelist(optarg) < 0) {
- RTE_LOG(ERR, EAL, "invalid core list\n");
- return -1;
- }
- break;
- /* size of memory */
- case 'm':
- conf->memory = atoi(optarg);
- conf->memory *= 1024ULL;
- conf->memory *= 1024ULL;
- mem_parsed = 1;
- break;
- /* force number of channels */
- case 'n':
- conf->force_nchannel = atoi(optarg);
- if (conf->force_nchannel == 0 ||
- conf->force_nchannel > 4) {
- RTE_LOG(ERR, EAL, "invalid channel number\n");
- return -1;
- }
- break;
- /* force number of ranks */
- case 'r':
- conf->force_nrank = atoi(optarg);
- if (conf->force_nrank == 0 ||
- conf->force_nrank > 16) {
- RTE_LOG(ERR, EAL, "invalid rank number\n");
- return -1;
- }
- break;
- case 'v':
- /* since the message is explicitly requested by the user, write it
-  * at the highest log level so it can always be seen, even if info
-  * or warning messages are disabled */
- RTE_LOG(CRIT, EAL, "RTE Version: '%s'\n", rte_version());
- break;
-
- /* long options */
- case OPT_NO_HUGE_NUM:
- conf->no_hugetlbfs = 1;
- break;
-
- case OPT_NO_PCI_NUM:
- conf->no_pci = 1;
- break;
-
- case OPT_NO_HPET_NUM:
- conf->no_hpet = 1;
- break;
-
- case OPT_VMWARE_TSC_MAP_NUM:
- conf->vmware_tsc_map = 1;
- break;
-
- case OPT_NO_SHCONF_NUM:
- conf->no_shconf = 1;
- break;
-
- case OPT_PROC_TYPE_NUM:
- conf->process_type = eal_parse_proc_type(optarg);
- break;
-
- case OPT_MASTER_LCORE_NUM:
- if (eal_parse_master_lcore(optarg) < 0) {
- RTE_LOG(ERR, EAL, "invalid parameter for --"
- OPT_MASTER_LCORE "\n");
- return -1;
- }
- break;
-
- case OPT_VDEV_NUM:
- if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL,
- optarg) < 0) {
- return -1;
- }
- break;
-
- case OPT_SYSLOG_NUM:
- if (eal_parse_syslog(optarg, conf) < 0) {
- RTE_LOG(ERR, EAL, "invalid parameters for --"
- OPT_SYSLOG "\n");
- return -1;
- }
- break;
-
- case OPT_LOG_LEVEL_NUM: {
- uint32_t log;
-
- if (eal_parse_log_level(optarg, &log) < 0) {
- RTE_LOG(ERR, EAL,
- "invalid parameters for --"
- OPT_LOG_LEVEL "\n");
- return -1;
- }
- conf->log_level = log;
- break;
- }
-
- /* don't know what to do, leave this to caller */
- default:
- return 1;
-
- }
-
- return 0;
-}
-
-int
-eal_adjust_config(struct internal_config *internal_cfg)
-{
- int i;
- struct rte_config *cfg = rte_eal_get_configuration();
-
- if (internal_config.process_type == RTE_PROC_AUTO)
- internal_config.process_type = eal_proc_type_detect();
-
- /* default master lcore is the first one */
- if (!master_lcore_parsed)
- cfg->master_lcore = rte_get_next_lcore(-1, 0, 0);
-
- /* if no memory amounts were requested, this will result in 0 and
- * will be overridden later, right after eal_hugepage_info_init() */
- for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
- internal_cfg->memory += internal_cfg->socket_mem[i];
-
- return 0;
-}
-
-int
-eal_check_common_options(struct internal_config *internal_cfg)
-{
- struct rte_config *cfg = rte_eal_get_configuration();
-
- if (!lcores_parsed) {
- RTE_LOG(ERR, EAL, "CPU cores must be enabled with options "
- "-c or -l\n");
- return -1;
- }
- if (cfg->lcore_role[cfg->master_lcore] != ROLE_RTE) {
- RTE_LOG(ERR, EAL, "Master lcore is not enabled for DPDK\n");
- return -1;
- }
-
- if (internal_cfg->process_type == RTE_PROC_INVALID) {
- RTE_LOG(ERR, EAL, "Invalid process type specified\n");
- return -1;
- }
- if (internal_cfg->process_type == RTE_PROC_PRIMARY &&
- internal_cfg->force_nchannel == 0) {
- RTE_LOG(ERR, EAL, "Number of memory channels (-n) not "
- "specified\n");
- return -1;
- }
- if (index(internal_cfg->hugefile_prefix, '%') != NULL) {
- RTE_LOG(ERR, EAL, "Invalid char, '%%', in --"OPT_FILE_PREFIX" "
- "option\n");
- return -1;
- }
- if (mem_parsed && internal_cfg->force_sockets == 1) {
- RTE_LOG(ERR, EAL, "Options -m and --"OPT_SOCKET_MEM" cannot "
- "be specified at the same time\n");
- return -1;
- }
- if (internal_cfg->no_hugetlbfs &&
- (mem_parsed || internal_cfg->force_sockets == 1)) {
- RTE_LOG(ERR, EAL, "Options -m or --"OPT_SOCKET_MEM" cannot "
- "be specified together with --"OPT_NO_HUGE"\n");
- return -1;
- }
-
- if (rte_eal_devargs_type_count(RTE_DEVTYPE_WHITELISTED_PCI) != 0 &&
- rte_eal_devargs_type_count(RTE_DEVTYPE_BLACKLISTED_PCI) != 0) {
- RTE_LOG(ERR, EAL, "Options blacklist (-b) and whitelist (-w) "
- "cannot be used at the same time\n");
- return -1;
- }
-
- return 0;
-}
-
-void
-eal_common_usage(void)
-{
- printf("-c COREMASK -n NUM [-m NB] [-r NUM] [-b <domain:bus:devid.func>]"
- "[--proc-type primary|secondary|auto]\n\n"
- "EAL common options:\n"
- " -c COREMASK : A hexadecimal bitmask of cores to run on\n"
- " -l CORELIST : List of cores to run on\n"
- " The argument format is <c1>[-c2][,c3[-c4],...]\n"
- " where c1, c2, etc are core indexes between 0 and %d\n"
- " --"OPT_MASTER_LCORE" ID: Core ID that is used as master\n"
- " -n NUM : Number of memory channels\n"
- " -v : Display version information on startup\n"
- " -m MB : memory to allocate (see also --"OPT_SOCKET_MEM")\n"
- " -r NUM : force number of memory ranks (don't detect)\n"
- " --"OPT_SYSLOG" : set syslog facility\n"
- " --"OPT_LOG_LEVEL" : set default log level\n"
- " --"OPT_PROC_TYPE" : type of this process\n"
- " --"OPT_PCI_BLACKLIST", -b: add a PCI device in black list.\n"
- " Prevent EAL from using this PCI device. The argument\n"
- " format is <domain:bus:devid.func>.\n"
- " --"OPT_PCI_WHITELIST", -w: add a PCI device in white list.\n"
- " Only use the specified PCI devices. The argument format\n"
- " is <[domain:]bus:devid.func>. This option can be present\n"
- " several times (once per device).\n"
- " [NOTE: PCI whitelist cannot be used with -b option]\n"
- " --"OPT_VDEV": add a virtual device.\n"
- " The argument format is <driver><id>[,key=val,...]\n"
- " (ex: --vdev=eth_pcap0,iface=eth2).\n"
- " --"OPT_VMWARE_TSC_MAP": use VMware TSC map instead of native RDTSC\n"
- "\nEAL options for DEBUG use only:\n"
- " --"OPT_NO_HUGE" : use malloc instead of hugetlbfs\n"
- " --"OPT_NO_PCI" : disable pci\n"
- " --"OPT_NO_HPET" : disable hpet\n"
- " --"OPT_NO_SHCONF": no shared config (mmap'd files)\n"
- "\n", RTE_MAX_LCORE);
-}
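
A standalone sketch of the coremask bit-walk that eal_parse_coremask() performs: the hex string is scanned from its least-significant digit, each digit covering BITS_PER_HEX (4) lcore ids. MAX_LCORE stands in for RTE_MAX_LCORE, and the lcore_config bookkeeping is dropped.

    /* Minimal sketch of the coremask bit-walk, assuming MAX_LCORE = 128. */
    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    #define BITS_PER_HEX 4
    #define MAX_LCORE 128

    static int xdigit2val(unsigned char c)
    {
        if (isdigit(c))
            return c - '0';
        return tolower(c) - 'a' + 10;
    }

    int main(void)
    {
        const char *coremask = "f5";     /* enables lcores 0, 2, 4, 5, 6, 7 */
        int i, j, idx = 0;

        for (i = strlen(coremask) - 1; i >= 0 && idx < MAX_LCORE; i--) {
            int val = xdigit2val((unsigned char)coremask[i]);
            for (j = 0; j < BITS_PER_HEX && idx < MAX_LCORE; j++, idx++)
                if (val & (1 << j))
                    printf("lcore %d enabled\n", idx);
        }
        return 0;
    }
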
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_pci.c b/src/dpdk_lib18/librte_eal/common/eal_common_pci.c
deleted file mode 100755
index f3c7f71a..00000000
--- a/src/dpdk_lib18/librte_eal/common/eal_common_pci.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-/* BSD LICENSE
- *
- * Copyright 2013-2014 6WIND S.A.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <inttypes.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/queue.h>
-
-#include <rte_interrupts.h>
-#include <rte_log.h>
-#include <rte_pci.h>
-#include <rte_per_lcore.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_string_fns.h>
-#include <rte_common.h>
-#include <rte_devargs.h>
-
-#include "eal_private.h"
-
-struct pci_driver_list pci_driver_list;
-struct pci_device_list pci_device_list;
-
-static struct rte_devargs *pci_devargs_lookup(struct rte_pci_device *dev)
-{
- struct rte_devargs *devargs;
-
- TAILQ_FOREACH(devargs, &devargs_list, next) {
- if (devargs->type != RTE_DEVTYPE_BLACKLISTED_PCI &&
- devargs->type != RTE_DEVTYPE_WHITELISTED_PCI)
- continue;
- if (!memcmp(&dev->addr, &devargs->pci.addr, sizeof(dev->addr)))
- return devargs;
- }
- return NULL;
-}
-
-/*
- * If the vendor/device IDs match, call the devinit() function of all
- * registered drivers for the given device. Return -1 if initialization
- * failed; return 1 if no driver is found for this device.
- */
-static int
-pci_probe_all_drivers(struct rte_pci_device *dev)
-{
- struct rte_pci_driver *dr = NULL;
- int rc;
-
- TAILQ_FOREACH(dr, &pci_driver_list, next) {
- rc = rte_eal_pci_probe_one_driver(dr, dev);
- if (rc < 0)
- /* negative value is an error */
- return -1;
- if (rc > 0)
- /* positive value means driver not found */
- continue;
- return 0;
- }
- return 1;
-}
-
-/*
- * Scan the content of the PCI bus, and call the devinit() function for
- * all registered drivers that have a matching entry in their id_table
- * for discovered devices.
- */
-int
-rte_eal_pci_probe(void)
-{
- struct rte_pci_device *dev = NULL;
- struct rte_devargs *devargs;
- int probe_all = 0;
- int ret = 0;
-
- if (rte_eal_devargs_type_count(RTE_DEVTYPE_WHITELISTED_PCI) == 0)
- probe_all = 1;
-
- TAILQ_FOREACH(dev, &pci_device_list, next) {
-
- /* set devargs in PCI structure */
- devargs = pci_devargs_lookup(dev);
- if (devargs != NULL)
- dev->devargs = devargs;
-
- /* probe all or only whitelisted devices */
- if (probe_all)
- ret = pci_probe_all_drivers(dev);
- else if (devargs != NULL &&
- devargs->type == RTE_DEVTYPE_WHITELISTED_PCI)
- ret = pci_probe_all_drivers(dev);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Requested device " PCI_PRI_FMT
- " cannot be used\n", dev->addr.domain, dev->addr.bus,
- dev->addr.devid, dev->addr.function);
- }
-
- return 0;
-}
-
-/* dump one device */
-static int
-pci_dump_one_device(FILE *f, struct rte_pci_device *dev)
-{
- int i;
-
- fprintf(f, PCI_PRI_FMT, dev->addr.domain, dev->addr.bus,
- dev->addr.devid, dev->addr.function);
- fprintf(f, " - vendor:%x device:%x\n", dev->id.vendor_id,
- dev->id.device_id);
-
- for (i = 0; i != sizeof(dev->mem_resource) /
- sizeof(dev->mem_resource[0]); i++) {
- fprintf(f, " %16.16"PRIx64" %16.16"PRIx64"\n",
- dev->mem_resource[i].phys_addr,
- dev->mem_resource[i].len);
- }
- return 0;
-}
-
-/* dump devices on the bus */
-void
-rte_eal_pci_dump(FILE *f)
-{
- struct rte_pci_device *dev = NULL;
-
- TAILQ_FOREACH(dev, &pci_device_list, next) {
- pci_dump_one_device(f, dev);
- }
-}
-
-/* register a driver */
-void
-rte_eal_pci_register(struct rte_pci_driver *driver)
-{
- TAILQ_INSERT_TAIL(&pci_driver_list, driver, next);
-}
-
-/* unregister a driver */
-void
-rte_eal_pci_unregister(struct rte_pci_driver *driver)
-{
- TAILQ_REMOVE(&pci_driver_list, driver, next);
-}
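
A sketch of the registration/probe pattern this file implements: drivers link themselves onto a tail queue and are probed in registration order until one accepts the device. The types below are simplified stand-ins, not the real rte_pci_driver/rte_pci_device structures.

    /* Driver registry sketch using sys/queue.h, mirroring
     * rte_eal_pci_register() and pci_probe_all_drivers(). */
    #include <stdio.h>
    #include <sys/queue.h>

    struct drv {
        const char *name;
        int (*probe)(int vendor_id);   /* 0 = taken, >0 = not mine, <0 = error */
        TAILQ_ENTRY(drv) next;
    };

    TAILQ_HEAD(drv_list, drv);
    static struct drv_list drivers = TAILQ_HEAD_INITIALIZER(drivers);

    static int probe_all(int vendor_id)
    {
        struct drv *d;

        TAILQ_FOREACH(d, &drivers, next) {
            int rc = d->probe(vendor_id);
            if (rc < 0)
                return -1;             /* hard error */
            if (rc == 0) {
                printf("%s took device %#x\n", d->name, vendor_id);
                return 0;
            }
        }
        return 1;                      /* no driver found */
    }

    static int igb_probe(int v) { return v == 0x8086 ? 0 : 1; }

    int main(void)
    {
        static struct drv igb = { .name = "igb", .probe = igb_probe };
        TAILQ_INSERT_TAIL(&drivers, &igb, next);
        return probe_all(0x8086);
    }
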
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_atomic.h b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_atomic.h
deleted file mode 100755
index fb7af2bd..00000000
--- a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_atomic.h
+++ /dev/null
@@ -1,426 +0,0 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) IBM Corporation 2014.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of IBM Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/*
- * Inspired from FreeBSD src/sys/powerpc/include/atomic.h
- * Copyright (c) 2008 Marcel Moolenaar
- * Copyright (c) 2001 Benno Rice
- * Copyright (c) 2001 David E. O'Brien
- * Copyright (c) 1998 Doug Rabson
- * All rights reserved.
- */
-
-#ifndef _RTE_ATOMIC_PPC_64_H_
-#define _RTE_ATOMIC_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_atomic.h"
-
-/**
- * General memory barrier.
- *
- * Guarantees that the LOAD and STORE operations generated before the
- * barrier occur before the LOAD and STORE operations generated after.
- */
-#define rte_mb() {asm volatile("sync" : : : "memory"); }
-
-/**
- * Write memory barrier.
- *
- * Guarantees that the STORE operations generated before the barrier
- * occur before the STORE operations generated after.
- */
-#define rte_wmb() {asm volatile("sync" : : : "memory"); }
-
-/**
- * Read memory barrier.
- *
- * Guarantees that the LOAD operations generated before the barrier
- * occur before the LOAD operations generated after.
- */
-#define rte_rmb() {asm volatile("sync" : : : "memory"); }
-
-/*------------------------- 16 bit atomic operations -------------------------*/
-/* To be compatible with Power7, use GCC built-in functions for 16 bit
- * operations */
-
-#ifndef RTE_FORCE_INTRINSICS
-static inline int
-rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
-{
- return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
- __ATOMIC_ACQUIRE) ? 1 : 0;
-}
-
-static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
-{
- return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
-}
-
-static inline void
-rte_atomic16_inc(rte_atomic16_t *v)
-{
- __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
-}
-
-static inline void
-rte_atomic16_dec(rte_atomic16_t *v)
-{
- __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
-}
-
-static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
-{
- return (__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0);
-}
-
-static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
-{
- return (__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0);
-}
-
-/*------------------------- 32 bit atomic operations -------------------------*/
-
-static inline int
-rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
-{
- unsigned int ret = 0;
-
- asm volatile(
- "\tlwsync\n"
- "1:\tlwarx %[ret], 0, %[dst]\n"
- "cmplw %[exp], %[ret]\n"
- "bne 2f\n"
- "stwcx. %[src], 0, %[dst]\n"
- "bne- 1b\n"
- "li %[ret], 1\n"
- "b 3f\n"
- "2:\n"
- "stwcx. %[ret], 0, %[dst]\n"
- "li %[ret], 0\n"
- "3:\n"
- "isync\n"
- : [ret] "=&r" (ret), "=m" (*dst)
- : [dst] "r" (dst),
- [exp] "r" (exp),
- [src] "r" (src),
- "m" (*dst)
- : "cc", "memory");
-
- return ret;
-}
-
-static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
-{
- return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
-}
-
-static inline void
-rte_atomic32_inc(rte_atomic32_t *v)
-{
- int t;
-
- asm volatile(
- "1: lwarx %[t],0,%[cnt]\n"
- "addic %[t],%[t],1\n"
- "stwcx. %[t],0,%[cnt]\n"
- "bne- 1b\n"
- : [t] "=&r" (t), "=m" (v->cnt)
- : [cnt] "r" (&v->cnt), "m" (v->cnt)
- : "cc", "xer", "memory");
-}
-
-static inline void
-rte_atomic32_dec(rte_atomic32_t *v)
-{
- int t;
-
- asm volatile(
- "1: lwarx %[t],0,%[cnt]\n"
- "addic %[t],%[t],-1\n"
- "stwcx. %[t],0,%[cnt]\n"
- "bne- 1b\n"
- : [t] "=&r" (t), "=m" (v->cnt)
- : [cnt] "r" (&v->cnt), "m" (v->cnt)
- : "cc", "xer", "memory");
-}
-
-static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
-{
- int ret;
-
- asm volatile(
- "\n\tlwsync\n"
- "1: lwarx %[ret],0,%[cnt]\n"
- "addic %[ret],%[ret],1\n"
- "stwcx. %[ret],0,%[cnt]\n"
- "bne- 1b\n"
- "isync\n"
- : [ret] "=&r" (ret)
- : [cnt] "r" (&v->cnt)
- : "cc", "xer", "memory");
-
- return (ret == 0);
-}
-
-static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
-{
- int ret;
-
- asm volatile(
- "\n\tlwsync\n"
- "1: lwarx %[ret],0,%[cnt]\n"
- "addic %[ret],%[ret],-1\n"
- "stwcx. %[ret],0,%[cnt]\n"
- "bne- 1b\n"
- "isync\n"
- : [ret] "=&r" (ret)
- : [cnt] "r" (&v->cnt)
- : "cc", "xer", "memory");
-
- return (ret == 0);
-}
-/*------------------------- 64 bit atomic operations -------------------------*/
-
-static inline int
-rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
-{
- unsigned int ret = 0;
-
- asm volatile (
- "\tlwsync\n"
- "1: ldarx %[ret], 0, %[dst]\n"
- "cmpld %[exp], %[ret]\n"
- "bne 2f\n"
- "stdcx. %[src], 0, %[dst]\n"
- "bne- 1b\n"
- "li %[ret], 1\n"
- "b 3f\n"
- "2:\n"
- "stdcx. %[ret], 0, %[dst]\n"
- "li %[ret], 0\n"
- "3:\n"
- "isync\n"
- : [ret] "=&r" (ret), "=m" (*dst)
- : [dst] "r" (dst),
- [exp] "r" (exp),
- [src] "r" (src),
- "m" (*dst)
- : "cc", "memory");
- return ret;
-}
-
-static inline void
-rte_atomic64_init(rte_atomic64_t *v)
-{
- v->cnt = 0;
-}
-
-static inline int64_t
-rte_atomic64_read(rte_atomic64_t *v)
-{
- long ret;
-
- asm volatile("ld%U1%X1 %[ret],%[cnt]"
- : [ret] "=r"(ret)
- : [cnt] "m"(v->cnt));
-
- return ret;
-}
-
-static inline void
-rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
-{
- asm volatile("std%U0%X0 %[new_value],%[cnt]"
- : [cnt] "=m"(v->cnt)
- : [new_value] "r"(new_value));
-}
-
-static inline void
-rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
-{
- long t;
-
- asm volatile(
- "1: ldarx %[t],0,%[cnt]\n"
- "add %[t],%[inc],%[t]\n"
- "stdcx. %[t],0,%[cnt]\n"
- "bne- 1b\n"
- : [t] "=&r" (t), "=m" (v->cnt)
- : [cnt] "r" (&v->cnt), [inc] "r" (inc), "m" (v->cnt)
- : "cc", "memory");
-}
-
-static inline void
-rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
-{
- long t;
-
- asm volatile(
- "1: ldarx %[t],0,%[cnt]\n"
- "subf %[t],%[dec],%[t]\n"
- "stdcx. %[t],0,%[cnt]\n"
- "bne- 1b\n"
- : [t] "=&r" (t), "+m" (v->cnt)
- : [cnt] "r" (&v->cnt), [dec] "r" (dec), "m" (v->cnt)
- : "cc", "memory");
-}
-
-static inline void
-rte_atomic64_inc(rte_atomic64_t *v)
-{
- long t;
-
- asm volatile(
- "1: ldarx %[t],0,%[cnt]\n"
- "addic %[t],%[t],1\n"
- "stdcx. %[t],0,%[cnt]\n"
- "bne- 1b\n"
- : [t] "=&r" (t), "+m" (v->cnt)
- : [cnt] "r" (&v->cnt), "m" (v->cnt)
- : "cc", "xer", "memory");
-}
-
-static inline void
-rte_atomic64_dec(rte_atomic64_t *v)
-{
- long t;
-
- asm volatile(
- "1: ldarx %[t],0,%[cnt]\n"
- "addic %[t],%[t],-1\n"
- "stdcx. %[t],0,%[cnt]\n"
- "bne- 1b\n"
- : [t] "=&r" (t), "+m" (v->cnt)
- : [cnt] "r" (&v->cnt), "m" (v->cnt)
- : "cc", "xer", "memory");
-}
-
-static inline int64_t
-rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
-{
- long ret;
-
- asm volatile(
- "\n\tlwsync\n"
- "1: ldarx %[ret],0,%[cnt]\n"
- "add %[ret],%[inc],%[ret]\n"
- "stdcx. %[ret],0,%[cnt]\n"
- "bne- 1b\n"
- "isync\n"
- : [ret] "=&r" (ret)
- : [inc] "r" (inc), [cnt] "r" (&v->cnt)
- : "cc", "memory");
-
- return ret;
-}
-
-static inline int64_t
-rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
-{
- long ret;
-
- asm volatile(
- "\n\tlwsync\n"
- "1: ldarx %[ret],0,%[cnt]\n"
- "subf %[ret],%[dec],%[ret]\n"
- "stdcx. %[ret],0,%[cnt]\n"
- "bne- 1b\n"
- "isync\n"
- : [ret] "=&r" (ret)
- : [dec] "r" (dec), [cnt] "r" (&v->cnt)
- : "cc", "memory");
-
- return ret;
-}
-
-static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
-{
- long ret;
-
- asm volatile(
- "\n\tlwsync\n"
- "1: ldarx %[ret],0,%[cnt]\n"
- "addic %[ret],%[ret],1\n"
- "stdcx. %[ret],0,%[cnt]\n"
- "bne- 1b\n"
- "isync\n"
- : [ret] "=&r" (ret)
- : [cnt] "r" (&v->cnt)
- : "cc", "xer", "memory");
-
- return (ret == 0);
-}
-
-static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
-{
- long ret;
-
- asm volatile(
- "\n\tlwsync\n"
- "1: ldarx %[ret],0,%[cnt]\n"
- "addic %[ret],%[ret],-1\n"
- "stdcx. %[ret],0,%[cnt]\n"
- "bne- 1b\n"
- "isync\n"
- : [ret] "=&r" (ret)
- : [cnt] "r" (&v->cnt)
- : "cc", "xer", "memory");
-
- return (ret == 0);
-}
-
-static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
-{
- return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
-}
-
-/**
- * Atomically set a 64-bit counter to 0.
- *
- * @param v
- * A pointer to the atomic counter.
- */
-static inline void rte_atomic64_clear(rte_atomic64_t *v)
-{
- v->cnt = 0;
-}
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_ATOMIC_PPC_64_H_ */
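
A portable C11 sketch of the compare-and-set semantics that the lwarx/stwcx. loops above implement in assembly: return 1 and store src if *dst equals exp, otherwise return 0. This is an illustration, not the DPDK API itself; the acquire ordering mirrors the __ATOMIC_ACQUIRE used in the 16-bit helpers.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static inline int
    atomic32_cmpset(_Atomic uint32_t *dst, uint32_t exp, uint32_t src)
    {
        /* strong CAS: no spurious failure, like the lwarx/stwcx. retry loop */
        return atomic_compare_exchange_strong_explicit(dst, &exp, src,
                memory_order_acquire, memory_order_acquire);
    }

    int main(void)
    {
        _Atomic uint32_t v = 0;

        printf("%d\n", atomic32_cmpset(&v, 0, 1));  /* 1: swapped in */
        printf("%d\n", atomic32_cmpset(&v, 0, 2));  /* 0: v is now 1 */
        return 0;
    }
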
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_byteorder.h b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_byteorder.h
deleted file mode 100755
index 80436f24..00000000
--- a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_byteorder.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) IBM Corporation 2014.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of IBM Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/* Inspired from FreeBSD src/sys/powerpc/include/endian.h
- * Copyright (c) 1987, 1991, 1993
- * The Regents of the University of California. All rights reserved.
-*/
-
-#ifndef _RTE_BYTEORDER_PPC_64_H_
-#define _RTE_BYTEORDER_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_byteorder.h"
-
-/*
- * An architecture-optimized byte swap for a 16-bit value.
- *
- * Do not use this function directly. The preferred function is rte_bswap16().
- */
-static inline uint16_t rte_arch_bswap16(uint16_t _x)
-{
- return ((_x >> 8) | ((_x << 8) & 0xff00));
-}
-
-/*
- * An architecture-optimized byte swap for a 32-bit value.
- *
- * Do not use this function directly. The preferred function is rte_bswap32().
- */
-static inline uint32_t rte_arch_bswap32(uint32_t _x)
-{
- return ((_x >> 24) | ((_x >> 8) & 0xff00) | ((_x << 8) & 0xff0000) |
- ((_x << 24) & 0xff000000));
-}
-
-/*
- * An architecture-optimized byte swap for a 64-bit value.
- *
- * Do not use this function directly. The preferred function is rte_bswap64().
- */
-/* 64-bit mode */
-static inline uint64_t rte_arch_bswap64(uint64_t _x)
-{
- return ((_x >> 56) | ((_x >> 40) & 0xff00) | ((_x >> 24) & 0xff0000) |
- ((_x >> 8) & 0xff000000) | ((_x << 8) & (0xffULL << 32)) |
- ((_x << 24) & (0xffULL << 40)) |
- ((_x << 40) & (0xffULL << 48)) | ((_x << 56)));
-}
-
-#ifndef RTE_FORCE_INTRINSICS
-#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ? \
- rte_constant_bswap16(x) : \
- rte_arch_bswap16(x)))
-
-#define rte_bswap32(x) ((uint32_t)(__builtin_constant_p(x) ? \
- rte_constant_bswap32(x) : \
- rte_arch_bswap32(x)))
-
-#define rte_bswap64(x) ((uint64_t)(__builtin_constant_p(x) ? \
- rte_constant_bswap64(x) : \
- rte_arch_bswap64(x)))
-#else
-/*
- * __builtin_bswap16 is only available in gcc 4.8 and upwards
- */
-#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
-#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ? \
- rte_constant_bswap16(x) : \
- rte_arch_bswap16(x)))
-#endif
-#endif
-
-/* Power 8 has both little endian and big endian modes;
- * Power 7 only supports big endian.
- */
-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
-
-#define rte_cpu_to_le_16(x) (x)
-#define rte_cpu_to_le_32(x) (x)
-#define rte_cpu_to_le_64(x) (x)
-
-#define rte_cpu_to_be_16(x) rte_bswap16(x)
-#define rte_cpu_to_be_32(x) rte_bswap32(x)
-#define rte_cpu_to_be_64(x) rte_bswap64(x)
-
-#define rte_le_to_cpu_16(x) (x)
-#define rte_le_to_cpu_32(x) (x)
-#define rte_le_to_cpu_64(x) (x)
-
-#define rte_be_to_cpu_16(x) rte_bswap16(x)
-#define rte_be_to_cpu_32(x) rte_bswap32(x)
-#define rte_be_to_cpu_64(x) rte_bswap64(x)
-
-#else /* RTE_BIG_ENDIAN */
-
-#define rte_cpu_to_le_16(x) rte_bswap16(x)
-#define rte_cpu_to_le_32(x) rte_bswap32(x)
-#define rte_cpu_to_le_64(x) rte_bswap64(x)
-
-#define rte_cpu_to_be_16(x) (x)
-#define rte_cpu_to_be_32(x) (x)
-#define rte_cpu_to_be_64(x) (x)
-
-#define rte_le_to_cpu_16(x) rte_bswap16(x)
-#define rte_le_to_cpu_32(x) rte_bswap32(x)
-#define rte_le_to_cpu_64(x) rte_bswap64(x)
-
-#define rte_be_to_cpu_16(x) (x)
-#define rte_be_to_cpu_32(x) (x)
-#define rte_be_to_cpu_64(x) (x)
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_BYTEORDER_PPC_64_H_ */
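
A standalone check of the shift-and-mask swap implemented by rte_arch_bswap32() above; the cpu_to_be/le macros then reduce to either this swap or the identity, depending on RTE_BYTE_ORDER.

    #include <stdint.h>
    #include <stdio.h>

    static inline uint32_t bswap32(uint32_t x)
    {
        /* same shift-and-mask form as rte_arch_bswap32() */
        return (x >> 24) | ((x >> 8) & 0xff00) |
               ((x << 8) & 0xff0000) | ((x << 24) & 0xff000000);
    }

    int main(void)
    {
        printf("%08x\n", bswap32(0x12345678));   /* prints 78563412 */
        return 0;
    }
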
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cpuflags.h b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cpuflags.h
deleted file mode 100755
index df450470..00000000
--- a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cpuflags.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) IBM Corporation 2014.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of IBM Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef _RTE_CPUFLAGS_PPC_64_H_
-#define _RTE_CPUFLAGS_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <elf.h>
-#include <fcntl.h>
-#include <assert.h>
-#include <unistd.h>
-
-#include "generic/rte_cpuflags.h"
-
-/* Symbolic values for the entries in the auxiliary table */
-#define AT_HWCAP 16
-#define AT_HWCAP2 26
-
-/* software based registers */
-enum cpu_register_t {
- REG_HWCAP = 0,
- REG_HWCAP2,
-};
-
-/**
- * Enumeration of all CPU features supported
- */
-enum rte_cpu_flag_t {
- RTE_CPUFLAG_PPC_LE = 0,
- RTE_CPUFLAG_TRUE_LE,
- RTE_CPUFLAG_PSERIES_PERFMON_COMPAT,
- RTE_CPUFLAG_VSX,
- RTE_CPUFLAG_ARCH_2_06,
- RTE_CPUFLAG_POWER6_EXT,
- RTE_CPUFLAG_DFP,
- RTE_CPUFLAG_PA6T,
- RTE_CPUFLAG_ARCH_2_05,
- RTE_CPUFLAG_ICACHE_SNOOP,
- RTE_CPUFLAG_SMT,
- RTE_CPUFLAG_BOOKE,
- RTE_CPUFLAG_CELLBE,
- RTE_CPUFLAG_POWER5_PLUS,
- RTE_CPUFLAG_POWER5,
- RTE_CPUFLAG_POWER4,
- RTE_CPUFLAG_NOTB,
- RTE_CPUFLAG_EFP_DOUBLE,
- RTE_CPUFLAG_EFP_SINGLE,
- RTE_CPUFLAG_SPE,
- RTE_CPUFLAG_UNIFIED_CACHE,
- RTE_CPUFLAG_4xxMAC,
- RTE_CPUFLAG_MMU,
- RTE_CPUFLAG_FPU,
- RTE_CPUFLAG_ALTIVEC,
- RTE_CPUFLAG_PPC601,
- RTE_CPUFLAG_PPC64,
- RTE_CPUFLAG_PPC32,
- RTE_CPUFLAG_TAR,
- RTE_CPUFLAG_LSEL,
- RTE_CPUFLAG_EBB,
- RTE_CPUFLAG_DSCR,
- RTE_CPUFLAG_HTM,
- RTE_CPUFLAG_ARCH_2_07,
- /* The last item */
- RTE_CPUFLAG_NUMFLAGS,/**< This should always be the last! */
-};
-
-static const struct feature_entry cpu_feature_table[] = {
- FEAT_DEF(PPC_LE, 0x00000001, 0, REG_HWCAP, 0)
- FEAT_DEF(TRUE_LE, 0x00000001, 0, REG_HWCAP, 1)
- FEAT_DEF(PSERIES_PERFMON_COMPAT, 0x00000001, 0, REG_HWCAP, 6)
- FEAT_DEF(VSX, 0x00000001, 0, REG_HWCAP, 7)
- FEAT_DEF(ARCH_2_06, 0x00000001, 0, REG_HWCAP, 8)
- FEAT_DEF(POWER6_EXT, 0x00000001, 0, REG_HWCAP, 9)
- FEAT_DEF(DFP, 0x00000001, 0, REG_HWCAP, 10)
- FEAT_DEF(PA6T, 0x00000001, 0, REG_HWCAP, 11)
- FEAT_DEF(ARCH_2_05, 0x00000001, 0, REG_HWCAP, 12)
- FEAT_DEF(ICACHE_SNOOP, 0x00000001, 0, REG_HWCAP, 13)
- FEAT_DEF(SMT, 0x00000001, 0, REG_HWCAP, 14)
- FEAT_DEF(BOOKE, 0x00000001, 0, REG_HWCAP, 15)
- FEAT_DEF(CELLBE, 0x00000001, 0, REG_HWCAP, 16)
- FEAT_DEF(POWER5_PLUS, 0x00000001, 0, REG_HWCAP, 17)
- FEAT_DEF(POWER5, 0x00000001, 0, REG_HWCAP, 18)
- FEAT_DEF(POWER4, 0x00000001, 0, REG_HWCAP, 19)
- FEAT_DEF(NOTB, 0x00000001, 0, REG_HWCAP, 20)
- FEAT_DEF(EFP_DOUBLE, 0x00000001, 0, REG_HWCAP, 21)
- FEAT_DEF(EFP_SINGLE, 0x00000001, 0, REG_HWCAP, 22)
- FEAT_DEF(SPE, 0x00000001, 0, REG_HWCAP, 23)
- FEAT_DEF(UNIFIED_CACHE, 0x00000001, 0, REG_HWCAP, 24)
- FEAT_DEF(4xxMAC, 0x00000001, 0, REG_HWCAP, 25)
- FEAT_DEF(MMU, 0x00000001, 0, REG_HWCAP, 26)
- FEAT_DEF(FPU, 0x00000001, 0, REG_HWCAP, 27)
- FEAT_DEF(ALTIVEC, 0x00000001, 0, REG_HWCAP, 28)
- FEAT_DEF(PPC601, 0x00000001, 0, REG_HWCAP, 29)
- FEAT_DEF(PPC64, 0x00000001, 0, REG_HWCAP, 30)
- FEAT_DEF(PPC32, 0x00000001, 0, REG_HWCAP, 31)
- FEAT_DEF(TAR, 0x00000001, 0, REG_HWCAP2, 26)
- FEAT_DEF(LSEL, 0x00000001, 0, REG_HWCAP2, 27)
- FEAT_DEF(EBB, 0x00000001, 0, REG_HWCAP2, 28)
- FEAT_DEF(DSCR, 0x00000001, 0, REG_HWCAP2, 29)
- FEAT_DEF(HTM, 0x00000001, 0, REG_HWCAP2, 30)
- FEAT_DEF(ARCH_2_07, 0x00000001, 0, REG_HWCAP2, 31)
-};
-
-/*
- * Read the AUXV software registers and get the CPU features for Power
- */
-static inline void
-rte_cpu_get_features(__attribute__((unused)) uint32_t leaf,
- __attribute__((unused)) uint32_t subleaf, cpuid_registers_t out)
-{
- int auxv_fd;
- Elf64_auxv_t auxv;
-
- auxv_fd = open("/proc/self/auxv", O_RDONLY);
-	assert(auxv_fd >= 0);
- while (read(auxv_fd, &auxv,
- sizeof(Elf64_auxv_t)) == sizeof(Elf64_auxv_t)) {
- if (auxv.a_type == AT_HWCAP)
- out[REG_HWCAP] = auxv.a_un.a_val;
- else if (auxv.a_type == AT_HWCAP2)
- out[REG_HWCAP2] = auxv.a_un.a_val;
- }
-}
-
-/*
- * Checks if a particular flag is available on the current machine.
- */
-static inline int
-rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature)
-{
- const struct feature_entry *feat;
- cpuid_registers_t regs = {0};
-
- if (feature >= RTE_CPUFLAG_NUMFLAGS)
- /* Flag does not match anything in the feature tables */
- return -ENOENT;
-
- feat = &cpu_feature_table[feature];
-
- if (!feat->leaf)
- /* This entry in the table wasn't filled out! */
- return -EFAULT;
-
- /* get the cpuid leaf containing the desired feature */
- rte_cpu_get_features(feat->leaf, feat->subleaf, regs);
-
- /* check if the feature is enabled */
- return (regs[feat->reg] >> feat->bit) & 1;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_CPUFLAGS_PPC_64_H_ */
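
A Linux-only sketch of the /proc/self/auxv scan performed by rte_cpu_get_features() above. Note that open() is checked against -1 here, which the original assert did not do (open() returns -1 on failure, and assert(-1) passes).

    #include <elf.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        Elf64_auxv_t auxv;
        int fd = open("/proc/self/auxv", O_RDONLY);

        if (fd < 0)
            return 1;
        /* scan fixed-size auxv entries until EOF */
        while (read(fd, &auxv, sizeof(auxv)) == sizeof(auxv))
            if (auxv.a_type == AT_HWCAP)
                printf("AT_HWCAP = %#lx\n", (unsigned long)auxv.a_un.a_val);
        close(fd);
        return 0;
    }
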
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cycles.h b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cycles.h
deleted file mode 100755
index fd26e8e7..00000000
--- a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cycles.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) IBM Corporation 2014.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of IBM Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef _RTE_CYCLES_PPC_64_H_
-#define _RTE_CYCLES_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_cycles.h"
-
-/**
- * Read the time base register.
- *
- * @return
- * The time base for this lcore.
- */
-static inline uint64_t
-rte_rdtsc(void)
-{
- union {
- uint64_t tsc_64;
- struct {
- uint32_t hi_32;
- uint32_t lo_32;
- };
- } tsc;
- uint32_t tmp;
-
- asm volatile(
- "0:\n"
- "mftbu %[hi32]\n"
- "mftb %[lo32]\n"
- "mftbu %[tmp]\n"
- "cmpw %[tmp],%[hi32]\n"
- "bne 0b\n"
- : [hi32] "=r"(tsc.hi_32), [lo32] "=r"(tsc.lo_32),
- [tmp] "=r"(tmp)
- );
- return tsc.tsc_64;
-}
-
-static inline uint64_t
-rte_rdtsc_precise(void)
-{
- rte_mb();
- return rte_rdtsc();
-}
-
-static inline uint64_t
-rte_get_tsc_cycles(void) { return rte_rdtsc(); }
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_CYCLES_PPC_64_H_ */
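
A sketch of the torn-read guard behind rte_rdtsc() above: a 64-bit timebase read as two 32-bit halves must be retried whenever the high half changes between the two reads. read_hi()/read_lo() are hypothetical stand-ins for the mftbu/mftb instructions, sampling a simulated counter so the sketch runs anywhere.

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t sim_tb = 0xfffffffeULL;   /* near a high-half rollover */

    static uint32_t read_hi(void) { return (uint32_t)(sim_tb >> 32); }
    static uint32_t read_lo(void) { return (uint32_t)(sim_tb++); }   /* ticks */

    static uint64_t read_timebase(void)
    {
        uint32_t hi, lo, check;

        do {
            hi = read_hi();       /* mftbu */
            lo = read_lo();       /* mftb  */
            check = read_hi();    /* mftbu again */
        } while (check != hi);    /* high half moved: retry */

        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)read_timebase());
        return 0;
    }
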
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_memcpy.h b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_memcpy.h
deleted file mode 100755
index acf7aac2..00000000
--- a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_memcpy.h
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) IBM Corporation 2014.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of IBM Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef _RTE_MEMCPY_PPC_64_H_
-#define _RTE_MEMCPY_PPC_64_H_
-
-#include <stdint.h>
-#include <string.h>
-/* To include altivec.h, the GCC version must be >= 4.8 */
-#include <altivec.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_memcpy.h"
-
-static inline void
-rte_mov16(uint8_t *dst, const uint8_t *src)
-{
- vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
-}
-
-static inline void
-rte_mov32(uint8_t *dst, const uint8_t *src)
-{
- vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
- vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
-}
-
-static inline void
-rte_mov48(uint8_t *dst, const uint8_t *src)
-{
- vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
- vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
- vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
-}
-
-static inline void
-rte_mov64(uint8_t *dst, const uint8_t *src)
-{
- vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
- vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
- vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
- vec_vsx_st(vec_vsx_ld(48, src), 48, dst);
-}
-
-static inline void
-rte_mov128(uint8_t *dst, const uint8_t *src)
-{
- vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
- vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
- vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
- vec_vsx_st(vec_vsx_ld(48, src), 48, dst);
- vec_vsx_st(vec_vsx_ld(64, src), 64, dst);
- vec_vsx_st(vec_vsx_ld(80, src), 80, dst);
- vec_vsx_st(vec_vsx_ld(96, src), 96, dst);
- vec_vsx_st(vec_vsx_ld(112, src), 112, dst);
-}
-
-static inline void
-rte_mov256(uint8_t *dst, const uint8_t *src)
-{
- rte_mov128(dst, src);
- rte_mov128(dst + 128, src + 128);
-}
-
-#define rte_memcpy(dst, src, n) \
- ({ (__builtin_constant_p(n)) ? \
- memcpy((dst), (src), (n)) : \
- rte_memcpy_func((dst), (src), (n)); })
-
-static inline void *
-rte_memcpy_func(void *dst, const void *src, size_t n)
-{
- void *ret = dst;
-
-	/* We can't copy < 16 bytes using vector registers, so do it manually. */
- if (n < 16) {
- if (n & 0x01) {
- *(uint8_t *)dst = *(const uint8_t *)src;
- dst = (uint8_t *)dst + 1;
- src = (const uint8_t *)src + 1;
- }
- if (n & 0x02) {
- *(uint16_t *)dst = *(const uint16_t *)src;
- dst = (uint16_t *)dst + 1;
- src = (const uint16_t *)src + 1;
- }
- if (n & 0x04) {
- *(uint32_t *)dst = *(const uint32_t *)src;
- dst = (uint32_t *)dst + 1;
- src = (const uint32_t *)src + 1;
- }
- if (n & 0x08)
- *(uint64_t *)dst = *(const uint64_t *)src;
- return ret;
- }
-
- /* Special fast cases for <= 128 bytes */
- if (n <= 32) {
- rte_mov16((uint8_t *)dst, (const uint8_t *)src);
- rte_mov16((uint8_t *)dst - 16 + n,
- (const uint8_t *)src - 16 + n);
- return ret;
- }
-
- if (n <= 64) {
- rte_mov32((uint8_t *)dst, (const uint8_t *)src);
- rte_mov32((uint8_t *)dst - 32 + n,
- (const uint8_t *)src - 32 + n);
- return ret;
- }
-
- if (n <= 128) {
- rte_mov64((uint8_t *)dst, (const uint8_t *)src);
- rte_mov64((uint8_t *)dst - 64 + n,
- (const uint8_t *)src - 64 + n);
- return ret;
- }
-
- /*
- * For large copies > 128 bytes. This combination of 256, 64 and 16 byte
- * copies was found to be faster than doing 128 and 32 byte copies as
- * well.
- */
- for ( ; n >= 256; n -= 256) {
- rte_mov256((uint8_t *)dst, (const uint8_t *)src);
- dst = (uint8_t *)dst + 256;
- src = (const uint8_t *)src + 256;
- }
-
- /*
- * We split the remaining bytes (which will be less than 256) into
-	 * 64-byte (2^6) chunks.
-	 * Using incrementing integers in the case labels of a switch statement
-	 * encourages the compiler to use a jump table. To get incrementing
- * integers, we shift the 2 relevant bits to the LSB position to first
- * get decrementing integers, and then subtract.
- */
- switch (3 - (n >> 6)) {
- case 0x00:
- rte_mov64((uint8_t *)dst, (const uint8_t *)src);
- n -= 64;
- dst = (uint8_t *)dst + 64;
- src = (const uint8_t *)src + 64; /* fallthrough */
- case 0x01:
- rte_mov64((uint8_t *)dst, (const uint8_t *)src);
- n -= 64;
- dst = (uint8_t *)dst + 64;
- src = (const uint8_t *)src + 64; /* fallthrough */
- case 0x02:
- rte_mov64((uint8_t *)dst, (const uint8_t *)src);
- n -= 64;
- dst = (uint8_t *)dst + 64;
- src = (const uint8_t *)src + 64; /* fallthrough */
- default:
- ;
- }
-
- /*
- * We split the remaining bytes (which will be less than 64) into
-	 * 16-byte (2^4) chunks, using the same switch structure as above.
- */
- switch (3 - (n >> 4)) {
- case 0x00:
- rte_mov16((uint8_t *)dst, (const uint8_t *)src);
- n -= 16;
- dst = (uint8_t *)dst + 16;
- src = (const uint8_t *)src + 16; /* fallthrough */
- case 0x01:
- rte_mov16((uint8_t *)dst, (const uint8_t *)src);
- n -= 16;
- dst = (uint8_t *)dst + 16;
- src = (const uint8_t *)src + 16; /* fallthrough */
- case 0x02:
- rte_mov16((uint8_t *)dst, (const uint8_t *)src);
- n -= 16;
- dst = (uint8_t *)dst + 16;
- src = (const uint8_t *)src + 16; /* fallthrough */
- default:
- ;
- }
-
- /* Copy any remaining bytes, without going beyond end of buffers */
- if (n != 0)
- rte_mov16((uint8_t *)dst - 16 + n,
- (const uint8_t *)src - 16 + n);
- return ret;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MEMCPY_PPC_64_H_ */
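
A sketch of the overlapping head/tail trick used in the n <= 32 path of rte_memcpy_func() above: copy the first 16 bytes and the last 16 bytes, letting the two stores overlap in the middle, which is cheaper than a byte loop. memcpy() stands in for the vec_vsx_ld/vec_vsx_st pair, and 16 <= n <= 32 is assumed.

    #include <stdio.h>
    #include <string.h>

    static void copy_le32(unsigned char *dst, const unsigned char *src, size_t n)
    {
        /* requires 16 <= n <= 32; the second store overlaps the first */
        memcpy(dst, src, 16);                    /* rte_mov16(dst, src)  */
        memcpy(dst - 16 + n, src - 16 + n, 16);  /* overlapping tail copy */
    }

    int main(void)
    {
        unsigned char src[32], dst[32];
        size_t i, n = 25;

        for (i = 0; i < sizeof(src); i++)
            src[i] = (unsigned char)i;
        copy_le32(dst, src, n);
        printf("dst[24] = %u\n", dst[24]);       /* prints 24 */
        return 0;
    }
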
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_prefetch.h b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_prefetch.h
deleted file mode 100755
index 9df0d13c..00000000
--- a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_prefetch.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) IBM Corporation 2014.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of IBM Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef _RTE_PREFETCH_PPC_64_H_
-#define _RTE_PREFETCH_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_prefetch.h"
-
-static inline void rte_prefetch0(volatile void *p)
-{
- asm volatile ("dcbt 0,%[p],1" : : [p] "r" (p));
-}
-
-static inline void rte_prefetch1(volatile void *p)
-{
- asm volatile ("dcbt 0,%[p],1" : : [p] "r" (p));
-}
-
-static inline void rte_prefetch2(volatile void *p)
-{
- asm volatile ("dcbt 0,%[p],1" : : [p] "r" (p));
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_PREFETCH_PPC_64_H_ */
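All three hint levels in the deleted header map to the same dcbt instruction on this port; what matters in practice is where the hints are placed. A minimal sketch of the usual pattern, prefetching a few elements ahead of the one being processed, assuming a non-DPDK build so that __builtin_prefetch() stands in for rte_prefetch0(); the look-ahead distance of 8 is an illustrative guess, not a tuned value:

    #include <stddef.h>

    /* Sum an array while prefetching ahead, the pattern rte_prefetch0() is
     * typically used for in rx/tx burst loops. */
    static long sum_with_prefetch(const long *v, size_t n)
    {
        long acc = 0;
        for (size_t i = 0; i < n; i++) {
            if (i + 8 < n)
                __builtin_prefetch(&v[i + 8]); /* rte_prefetch0() in DPDK */
            acc += v[i];
        }
        return acc;
    }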
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_spinlock.h b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_spinlock.h
deleted file mode 100755
index cf8b81ad..00000000
--- a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_spinlock.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) IBM Corporation 2014.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of IBM Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef _RTE_SPINLOCK_PPC_64_H_
-#define _RTE_SPINLOCK_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <rte_common.h>
-#include "generic/rte_spinlock.h"
-
-/* Fixme: Use intrinsics to implement the spinlock on Power architecture */
-
-#ifndef RTE_FORCE_INTRINSICS
-
-static inline void
-rte_spinlock_lock(rte_spinlock_t *sl)
-{
- while (__sync_lock_test_and_set(&sl->locked, 1))
- while (sl->locked)
- rte_pause();
-}
-
-static inline void
-rte_spinlock_unlock(rte_spinlock_t *sl)
-{
- __sync_lock_release(&sl->locked);
-}
-
-static inline int
-rte_spinlock_trylock(rte_spinlock_t *sl)
-{
- return (__sync_lock_test_and_set(&sl->locked, 1) == 0);
-}
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_SPINLOCK_PPC_64_H_ */
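The deleted lock is a plain test-and-set spinlock that spins on a read while the lock is held. A minimal usage sketch, assuming a DPDK build where <rte_spinlock.h> and RTE_SPINLOCK_INITIALIZER are available; the shared counter is illustrative:

    #include <rte_spinlock.h>

    static rte_spinlock_t counter_lock = RTE_SPINLOCK_INITIALIZER;
    static unsigned long counter;

    /* Typical critical section: take the lock, touch the shared state,
     * release. rte_spinlock_trylock() is the non-blocking variant and
     * returns non-zero when the lock was acquired. */
    static void bump_counter(void)
    {
        rte_spinlock_lock(&counter_lock);
        counter++;
        rte_spinlock_unlock(&counter_lock);
    }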
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_memcpy.h b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_memcpy.h
deleted file mode 100755
index fb9eba87..00000000
--- a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_memcpy.h
+++ /dev/null
@@ -1,297 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_MEMCPY_X86_64_H_
-#define _RTE_MEMCPY_X86_64_H_
-
-#include <stdint.h>
-#include <string.h>
-#include <emmintrin.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_memcpy.h"
-
-#ifdef __INTEL_COMPILER
-#pragma warning(disable:593) /* Stop unused variable warning (reg_a etc). */
-#endif
-
-static inline void
-rte_mov16(uint8_t *dst, const uint8_t *src)
-{
- __m128i reg_a;
- asm volatile (
- "movdqu (%[src]), %[reg_a]\n\t"
- "movdqu %[reg_a], (%[dst])\n\t"
- : [reg_a] "=x" (reg_a)
- : [src] "r" (src),
- [dst] "r"(dst)
- : "memory"
- );
-}
-
-static inline void
-rte_mov32(uint8_t *dst, const uint8_t *src)
-{
- __m128i reg_a, reg_b;
- asm volatile (
- "movdqu (%[src]), %[reg_a]\n\t"
- "movdqu 16(%[src]), %[reg_b]\n\t"
- "movdqu %[reg_a], (%[dst])\n\t"
- "movdqu %[reg_b], 16(%[dst])\n\t"
- : [reg_a] "=x" (reg_a),
- [reg_b] "=x" (reg_b)
- : [src] "r" (src),
- [dst] "r"(dst)
- : "memory"
- );
-}
-
-static inline void
-rte_mov48(uint8_t *dst, const uint8_t *src)
-{
- __m128i reg_a, reg_b, reg_c;
- asm volatile (
- "movdqu (%[src]), %[reg_a]\n\t"
- "movdqu 16(%[src]), %[reg_b]\n\t"
- "movdqu 32(%[src]), %[reg_c]\n\t"
- "movdqu %[reg_a], (%[dst])\n\t"
- "movdqu %[reg_b], 16(%[dst])\n\t"
- "movdqu %[reg_c], 32(%[dst])\n\t"
- : [reg_a] "=x" (reg_a),
- [reg_b] "=x" (reg_b),
- [reg_c] "=x" (reg_c)
- : [src] "r" (src),
- [dst] "r"(dst)
- : "memory"
- );
-}
-
-static inline void
-rte_mov64(uint8_t *dst, const uint8_t *src)
-{
- __m128i reg_a, reg_b, reg_c, reg_d;
- asm volatile (
- "movdqu (%[src]), %[reg_a]\n\t"
- "movdqu 16(%[src]), %[reg_b]\n\t"
- "movdqu 32(%[src]), %[reg_c]\n\t"
- "movdqu 48(%[src]), %[reg_d]\n\t"
- "movdqu %[reg_a], (%[dst])\n\t"
- "movdqu %[reg_b], 16(%[dst])\n\t"
- "movdqu %[reg_c], 32(%[dst])\n\t"
- "movdqu %[reg_d], 48(%[dst])\n\t"
- : [reg_a] "=x" (reg_a),
- [reg_b] "=x" (reg_b),
- [reg_c] "=x" (reg_c),
- [reg_d] "=x" (reg_d)
- : [src] "r" (src),
- [dst] "r"(dst)
- : "memory"
- );
-}
-
-static inline void
-rte_mov128(uint8_t *dst, const uint8_t *src)
-{
- __m128i reg_a, reg_b, reg_c, reg_d, reg_e, reg_f, reg_g, reg_h;
- asm volatile (
- "movdqu (%[src]), %[reg_a]\n\t"
- "movdqu 16(%[src]), %[reg_b]\n\t"
- "movdqu 32(%[src]), %[reg_c]\n\t"
- "movdqu 48(%[src]), %[reg_d]\n\t"
- "movdqu 64(%[src]), %[reg_e]\n\t"
- "movdqu 80(%[src]), %[reg_f]\n\t"
- "movdqu 96(%[src]), %[reg_g]\n\t"
- "movdqu 112(%[src]), %[reg_h]\n\t"
- "movdqu %[reg_a], (%[dst])\n\t"
- "movdqu %[reg_b], 16(%[dst])\n\t"
- "movdqu %[reg_c], 32(%[dst])\n\t"
- "movdqu %[reg_d], 48(%[dst])\n\t"
- "movdqu %[reg_e], 64(%[dst])\n\t"
- "movdqu %[reg_f], 80(%[dst])\n\t"
- "movdqu %[reg_g], 96(%[dst])\n\t"
- "movdqu %[reg_h], 112(%[dst])\n\t"
- : [reg_a] "=x" (reg_a),
- [reg_b] "=x" (reg_b),
- [reg_c] "=x" (reg_c),
- [reg_d] "=x" (reg_d),
- [reg_e] "=x" (reg_e),
- [reg_f] "=x" (reg_f),
- [reg_g] "=x" (reg_g),
- [reg_h] "=x" (reg_h)
- : [src] "r" (src),
- [dst] "r"(dst)
- : "memory"
- );
-}
-
-#ifdef __INTEL_COMPILER
-#pragma warning(enable:593)
-#endif
-
-static inline void
-rte_mov256(uint8_t *dst, const uint8_t *src)
-{
- rte_mov128(dst, src);
- rte_mov128(dst + 128, src + 128);
-}
-
-#define rte_memcpy(dst, src, n) \
- ({ (__builtin_constant_p(n)) ? \
- memcpy((dst), (src), (n)) : \
- rte_memcpy_func((dst), (src), (n)); })
-
-static inline void *
-rte_memcpy_func(void *dst, const void *src, size_t n)
-{
- void *ret = dst;
-
- /* We can't copy < 16 bytes using XMM registers so do it manually. */
- if (n < 16) {
- if (n & 0x01) {
- *(uint8_t *)dst = *(const uint8_t *)src;
- dst = (uint8_t *)dst + 1;
- src = (const uint8_t *)src + 1;
- }
- if (n & 0x02) {
- *(uint16_t *)dst = *(const uint16_t *)src;
- dst = (uint16_t *)dst + 1;
- src = (const uint16_t *)src + 1;
- }
- if (n & 0x04) {
- *(uint32_t *)dst = *(const uint32_t *)src;
- dst = (uint32_t *)dst + 1;
- src = (const uint32_t *)src + 1;
- }
- if (n & 0x08) {
- *(uint64_t *)dst = *(const uint64_t *)src;
- }
- return ret;
- }
-
- /* Special fast cases for <= 128 bytes */
- if (n <= 32) {
- rte_mov16((uint8_t *)dst, (const uint8_t *)src);
- rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
- return ret;
- }
-
- if (n <= 64) {
- rte_mov32((uint8_t *)dst, (const uint8_t *)src);
- rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
- return ret;
- }
-
- if (n <= 128) {
- rte_mov64((uint8_t *)dst, (const uint8_t *)src);
- rte_mov64((uint8_t *)dst - 64 + n, (const uint8_t *)src - 64 + n);
- return ret;
- }
-
- /*
- * For large copies > 128 bytes. This combination of 256, 64 and 16 byte
- * copies was found to be faster than doing 128 and 32 byte copies as
- * well.
- */
- for ( ; n >= 256; n -= 256) {
- rte_mov256((uint8_t *)dst, (const uint8_t *)src);
- dst = (uint8_t *)dst + 256;
- src = (const uint8_t *)src + 256;
- }
-
- /*
- * We split the remaining bytes (which will be less than 256) into
- * 64-byte (2^6) chunks.
- * Using incrementing integers in the case labels of a switch statement
- * encourages the compiler to use a jump table. To get incrementing
- * integers, we shift the 2 relevant bits to the LSB position to first
- * get decrementing integers, and then subtract.
- */
- switch (3 - (n >> 6)) {
- case 0x00:
- rte_mov64((uint8_t *)dst, (const uint8_t *)src);
- n -= 64;
- dst = (uint8_t *)dst + 64;
- src = (const uint8_t *)src + 64; /* fallthrough */
- case 0x01:
- rte_mov64((uint8_t *)dst, (const uint8_t *)src);
- n -= 64;
- dst = (uint8_t *)dst + 64;
- src = (const uint8_t *)src + 64; /* fallthrough */
- case 0x02:
- rte_mov64((uint8_t *)dst, (const uint8_t *)src);
- n -= 64;
- dst = (uint8_t *)dst + 64;
- src = (const uint8_t *)src + 64; /* fallthrough */
- default:
- ;
- }
-
- /*
- * We split the remaining bytes (which will be less than 64) into
- * 16-byte (2^4) chunks, using the same switch structure as above.
- */
- switch (3 - (n >> 4)) {
- case 0x00:
- rte_mov16((uint8_t *)dst, (const uint8_t *)src);
- n -= 16;
- dst = (uint8_t *)dst + 16;
- src = (const uint8_t *)src + 16; /* fallthrough */
- case 0x01:
- rte_mov16((uint8_t *)dst, (const uint8_t *)src);
- n -= 16;
- dst = (uint8_t *)dst + 16;
- src = (const uint8_t *)src + 16; /* fallthrough */
- case 0x02:
- rte_mov16((uint8_t *)dst, (const uint8_t *)src);
- n -= 16;
- dst = (uint8_t *)dst + 16;
- src = (const uint8_t *)src + 16; /* fallthrough */
- default:
- ;
- }
-
- /* Copy any remaining bytes, without going beyond end of buffers */
- if (n != 0) {
- rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
- }
- return ret;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MEMCPY_X86_64_H_ */
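Each rte_movNN() in this deleted header is a run of unaligned movdqu loads followed by the matching stores; with <emmintrin.h> already included, the same moves can be written with SSE2 intrinsics, which is roughly how later DPDK releases express them. A sketch of the 16-byte move and the overlapping two-move fast path used for n <= 32 (the function names here are illustrative, not DPDK API):

    #include <emmintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Intrinsic equivalent of the inline-asm rte_mov16() above: one
     * unaligned 16-byte SSE2 load, one unaligned store. */
    static inline void
    mov16(uint8_t *dst, const uint8_t *src)
    {
        __m128i xmm0 = _mm_loadu_si128((const __m128i *)src);
        _mm_storeu_si128((__m128i *)dst, xmm0);
    }

    /* The n <= 32 fast path is two possibly overlapping moves: one from the
     * start of the buffer and one ending exactly at its last byte. */
    static inline void
    mov_up_to_32(uint8_t *dst, const uint8_t *src, size_t n) /* 16 <= n <= 32 */
    {
        mov16(dst, src);
        mov16(dst - 16 + n, src - 16 + n);
    }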
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_tailq.h b/src/dpdk_lib18/librte_eal/common/include/rte_tailq.h
deleted file mode 100755
index b34e5ede..00000000
--- a/src/dpdk_lib18/librte_eal/common/include/rte_tailq.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_TAILQ_H_
-#define _RTE_TAILQ_H_
-
-/**
- * @file
- * Defines the rte_tailq APIs, for internal use only.
- *
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <sys/queue.h>
-#include <stdio.h>
-
-/** dummy structure type used by the rte_tailq APIs */
-struct rte_tailq_entry {
- TAILQ_ENTRY(rte_tailq_entry) next; /**< Pointer entries for a tailq list */
- void *data; /**< Pointer to the data referenced by this tailq entry */
-};
-/** dummy */
-TAILQ_HEAD(rte_tailq_entry_head, rte_tailq_entry);
-
-#define RTE_TAILQ_NAMESIZE 32
-
-/**
- * The structure defining a tailq header entry for storing
- * in the rte_config structure in shared memory. Each tailq
- * is identified by name.
- * Any library storing a set of objects e.g. rings, mempools, hash-tables,
- * is recommended to use an entry here, so as to make it easy for
- * a multi-process app to find already-created elements in shared memory.
- */
-struct rte_tailq_head {
- struct rte_tailq_entry_head tailq_head; /**< NOTE: must be first element */
-};
-
-/**
- * Utility macro to make reserving a tailqueue for a particular struct easier.
- *
- * @param name
- * The name to be given to the tailq - used by lookup to find it later
- *
- * @param struct_name
- * The name of the list type we are using. (Generally this is the same as the
- * first parameter passed to TAILQ_HEAD macro)
- *
- * @return
- * The return value from rte_eal_tailq_reserve, typecast to the appropriate
- * structure pointer type.
- * NULL on error, since the tailq_head is the first
- * element in the rte_tailq_head structure.
- */
-#define RTE_TAILQ_RESERVE(name, struct_name) \
- (struct struct_name *)(&rte_eal_tailq_reserve(name)->tailq_head)
-
-/**
- * Utility macro to make reserving a tailqueue for a particular struct easier.
- *
- * @param idx
- *   The tailq idx, as defined in enum rte_tailq_t, to be given to the tail queue.
- * - used by lookup to find it later
- *
- * @param struct_name
- * The name of the list type we are using. (Generally this is the same as the
- * first parameter passed to TAILQ_HEAD macro)
- *
- * @return
- * The return value from rte_eal_tailq_reserve, typecast to the appropriate
- * structure pointer type.
- * NULL on error, since the tailq_head is the first
- * element in the rte_tailq_head structure.
- */
-#define RTE_TAILQ_RESERVE_BY_IDX(idx, struct_name) \
- (struct struct_name *)(&rte_eal_tailq_reserve_by_idx(idx)->tailq_head)
-
-/**
- * Utility macro to make looking up a tailqueue for a particular struct easier.
- *
- * @param name
- * The name of tailq
- *
- * @param struct_name
- * The name of the list type we are using. (Generally this is the same as the
- * first parameter passed to TAILQ_HEAD macro)
- *
- * @return
- * The return value from rte_eal_tailq_lookup, typecast to the appropriate
- * structure pointer type.
- * NULL on error, since the tailq_head is the first
- * element in the rte_tailq_head structure.
- */
-#define RTE_TAILQ_LOOKUP(name, struct_name) \
- (struct struct_name *)(&rte_eal_tailq_lookup(name)->tailq_head)
-
-/**
- * Utility macro to make looking up a tailqueue for a particular struct easier.
- *
- * @param idx
- *   The tailq idx, as defined in enum rte_tailq_t, to be given to the tail queue.
- *
- * @param struct_name
- * The name of the list type we are using. (Generally this is the same as the
- * first parameter passed to TAILQ_HEAD macro)
- *
- * @return
- * The return value from rte_eal_tailq_lookup, typecast to the appropriate
- * structure pointer type.
- * NULL on error, since the tailq_head is the first
- * element in the rte_tailq_head structure.
- */
-#define RTE_TAILQ_LOOKUP_BY_IDX(idx, struct_name) \
- (struct struct_name *)(&rte_eal_tailq_lookup_by_idx(idx)->tailq_head)
-
-/**
- * Reserve a slot in the tailq list for a particular tailq header
- * Note: this function, along with rte_tailq_lookup, is not multi-thread safe,
- * and both these functions should only be called from a single thread at a time
- *
- * @param name
- * The name to be given to the tail queue.
- * @return
- * A pointer to the newly reserved tailq entry
- */
-struct rte_tailq_head *rte_eal_tailq_reserve(const char *name);
-
-/**
- * Reserve a slot in the tailq list for a particular tailq header
- * Note: this function, along with rte_tailq_lookup, is not multi-thread safe,
- * and both these functions should only be called from a single thread at a time
- *
- * @param idx
- *   The tailq idx, as defined in enum rte_tailq_t, to be given to the tail queue.
- * @return
- * A pointer to the newly reserved tailq entry
- */
-struct rte_tailq_head *rte_eal_tailq_reserve_by_idx(const unsigned idx);
-
-/**
- * Dump tail queues to the console.
- *
- * @param f
- * A pointer to a file for output
- */
-void rte_dump_tailq(FILE *f);
-
-/**
- * Lookup for a tail queue.
- *
- * Get a pointer to a tail queue header of an already reserved tail
- * queue identified by the name given as an argument.
- * Note: this function, along with rte_tailq_reserve, is not multi-thread safe,
- * and both these functions should only be called from a single thread at a time
- *
- * @param name
- * The name of the queue.
- * @return
- * A pointer to the tail queue head structure.
- */
-struct rte_tailq_head *rte_eal_tailq_lookup(const char *name);
-
-/**
- * Lookup for a tail queue.
- *
- * Get a pointer to a tail queue header of an already reserved tail
- * queue identified by the name given as an argument.
- * Note: this function, along with rte_tailq_reserve, is not multi-thread safe,
- * and both these functions should only be called from a single thread at a time
- *
- * @param idx
- *   The tailq idx, as defined in enum rte_tailq_t, to be given to the tail queue.
- * @return
- * A pointer to the tail queue head structure.
- */
-struct rte_tailq_head *rte_eal_tailq_lookup_by_idx(const unsigned idx);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_TAILQ_H_ */
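Taken together, the reserve/lookup pair in this deleted header gives multi-process apps a named registry of list heads in shared memory. A minimal usage sketch, assuming a DPDK build; struct my_obj and the "my_obj_list" name are illustrative, not DPDK API, and the NULL check works because, as the macro comments note, a failed lookup yields NULL:

    #include <rte_tailq.h>

    /* Hypothetical object type kept on a named DPDK tailq. */
    struct my_obj {
        TAILQ_ENTRY(my_obj) next;
        int id;
    };
    TAILQ_HEAD(my_obj_list, my_obj);

    static struct my_obj_list *
    get_obj_list(void)
    {
        /* Look the tailq up first (a secondary process finds the list the
         * primary reserved); reserve it on first use. Both calls must come
         * from a single-threaded init path, per the notes above. */
        struct my_obj_list *list = RTE_TAILQ_LOOKUP("my_obj_list", my_obj_list);
        if (list == NULL)
            list = RTE_TAILQ_RESERVE("my_obj_list", my_obj_list);
        return list;
    }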
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_tailq_elem.h b/src/dpdk_lib18/librte_eal/common/include/rte_tailq_elem.h
deleted file mode 100755
index f74fc7cb..00000000
--- a/src/dpdk_lib18/librte_eal/common/include/rte_tailq_elem.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @file
- *
- * This file contains the type of the tailq elem recognised by DPDK, which
- * can be used to fill out an array of structures describing the tailq.
- *
- * In order to populate an array, the user of this file must define this macro:
- * rte_tailq_elem(idx, name). For example:
- *
- * @code
- * enum rte_tailq_t {
- * #define rte_tailq_elem(idx, name) idx,
- * #define rte_tailq_end(idx) idx
- * #include <rte_tailq_elem.h>
- * };
- *
- * const char* rte_tailq_names[RTE_MAX_TAILQ] = {
- * #define rte_tailq_elem(idx, name) name,
- * #include <rte_tailq_elem.h>
- * };
- * @endcode
- *
- * Note that this file can be included multiple times within the same file.
- */
-
-#ifndef rte_tailq_elem
-#define rte_tailq_elem(idx, name)
-#endif /* rte_tailq_elem */
-
-#ifndef rte_tailq_end
-#define rte_tailq_end(idx)
-#endif /* rte_tailq_end */
-
-rte_tailq_elem(RTE_TAILQ_PCI, "PCI_RESOURCE_LIST")
-
-rte_tailq_elem(RTE_TAILQ_MEMPOOL, "RTE_MEMPOOL")
-
-rte_tailq_elem(RTE_TAILQ_RING, "RTE_RING")
-
-rte_tailq_elem(RTE_TAILQ_HASH, "RTE_HASH")
-
-rte_tailq_elem(RTE_TAILQ_FBK_HASH, "RTE_FBK_HASH")
-
-rte_tailq_elem(RTE_TAILQ_LPM, "RTE_LPM")
-
-rte_tailq_elem(RTE_TAILQ_LPM6, "RTE_LPM6")
-
-rte_tailq_elem(RTE_TAILQ_PM, "RTE_PM")
-
-rte_tailq_elem(RTE_TAILQ_ACL, "RTE_ACL")
-
-rte_tailq_elem(RTE_TAILQ_DISTRIBUTOR, "RTE_DISTRIBUTOR")
-
-rte_tailq_end(RTE_TAILQ_NUM)
-
-#undef rte_tailq_elem
-#undef rte_tailq_end
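Following the recipe in this header's own comment, including the file twice with different definitions of rte_tailq_elem() expands to an enum of indexes and a parallel table of names; trimmed to the first entries, the result looks like:

    enum rte_tailq_t {
        RTE_TAILQ_PCI,
        RTE_TAILQ_MEMPOOL,
        RTE_TAILQ_RING,
        /* ... */
        RTE_TAILQ_NUM
    };

    const char *rte_tailq_names[RTE_MAX_TAILQ] = {
        "PCI_RESOURCE_LIST",
        "RTE_MEMPOOL",
        "RTE_RING",
        /* ... */
    };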
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/Makefile b/src/dpdk_lib18/librte_eal/linuxapp/Makefile
deleted file mode 100755
index 8fcfdf67..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/Makefile
+++ /dev/null
@@ -1,45 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-ifeq ($(CONFIG_RTE_EAL_IGB_UIO),y)
-DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += igb_uio
-endif
-DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal
-ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
-DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += kni
-endif
-ifeq ($(CONFIG_RTE_LIBRTE_XEN_DOM0),y)
-DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += xen_dom0
-endif
-
-include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/Makefile b/src/dpdk_lib18/librte_eal/linuxapp/eal/Makefile
deleted file mode 100755
index 72ecf3aa..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/Makefile
+++ /dev/null
@@ -1,112 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-LIB = librte_eal.a
-
-VPATH += $(RTE_SDK)/lib/librte_eal/common
-
-CFLAGS += -I$(SRCDIR)/include
-CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
-CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
-CFLAGS += -I$(RTE_SDK)/lib/librte_ring
-CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
-CFLAGS += -I$(RTE_SDK)/lib/librte_malloc
-CFLAGS += -I$(RTE_SDK)/lib/librte_ether
-CFLAGS += -I$(RTE_SDK)/lib/librte_ivshmem
-CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_ring
-CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_pcap
-CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_af_packet
-CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_xenvirt
-CFLAGS += $(WERROR_FLAGS) -O3
-
-# specific to linuxapp exec-env
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) := eal.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_hugepage_info.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_memory.c
-ifeq ($(CONFIG_RTE_LIBRTE_XEN_DOM0),y)
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_xen_memory.c
-endif
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_thread.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_log.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_pci.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_pci_uio.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_pci_vfio.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_pci_vfio_mp_sync.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_debug.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_lcore.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_timer.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_interrupts.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_alarm.c
-ifeq ($(CONFIG_RTE_LIBRTE_IVSHMEM),y)
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_ivshmem.c
-endif
-
-# from common dir
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_memzone.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_log.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_launch.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_pci.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_memory.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_tailqs.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_errno.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_cpuflags.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_string_fns.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_hexdump.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_devargs.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_dev.c
-SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_options.c
-
-CFLAGS_eal.o := -D_GNU_SOURCE
-CFLAGS_eal_thread.o := -D_GNU_SOURCE
-CFLAGS_eal_log.o := -D_GNU_SOURCE
-CFLAGS_eal_common_log.o := -D_GNU_SOURCE
-CFLAGS_eal_hugepage_info.o := -D_GNU_SOURCE
-CFLAGS_eal_pci.o := -D_GNU_SOURCE
-CFLAGS_eal_pci_vfio.o := -D_GNU_SOURCE
-CFLAGS_eal_common_whitelist.o := -D_GNU_SOURCE
-
-# workaround for a gcc bug with noreturn attribute
-# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
-ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
-CFLAGS_eal_thread.o += -Wno-return-type
-endif
-
-INC := rte_interrupts.h rte_kni_common.h rte_dom0_common.h
-
-SYMLINK-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP)-include/exec-env := \
- $(addprefix include/exec-env/,$(INC))
-
-DEPDIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += lib/librte_eal/common
-
-include $(RTE_SDK)/mk/rte.lib.mk
-
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_lcore.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_lcore.c
deleted file mode 100755
index c67e0e68..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_lcore.c
+++ /dev/null
@@ -1,191 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <unistd.h>
-#include <limits.h>
-#include <string.h>
-#include <dirent.h>
-
-#include <rte_log.h>
-#include <rte_eal.h>
-#include <rte_lcore.h>
-#include <rte_common.h>
-#include <rte_string_fns.h>
-#include <rte_debug.h>
-
-#include "eal_private.h"
-#include "eal_filesystem.h"
-
-#define SYS_CPU_DIR "/sys/devices/system/cpu/cpu%u"
-#define CORE_ID_FILE "topology/core_id"
-#define PHYS_PKG_FILE "topology/physical_package_id"
-
-/* Check if a cpu is present by checking whether its cpu information exists in sysfs */
-static int
-cpu_detected(unsigned lcore_id)
-{
- char path[PATH_MAX];
- int len = snprintf(path, sizeof(path), SYS_CPU_DIR
- "/"CORE_ID_FILE, lcore_id);
- if (len <= 0 || (unsigned)len >= sizeof(path))
- return 0;
- if (access(path, F_OK) != 0)
- return 0;
-
- return 1;
-}
-
-/* Get CPU socket id (NUMA node) by reading directory
- * /sys/devices/system/cpu/cpuX looking for symlink "nodeY"
- * which gives the NUMA topology information.
- * Note: physical package id != NUMA node, but we use it as a
- * fallback for kernels which don't create a nodeY link
- */
-static unsigned
-cpu_socket_id(unsigned lcore_id)
-{
- const char node_prefix[] = "node";
- const size_t prefix_len = sizeof(node_prefix) - 1;
- char path[PATH_MAX];
- DIR *d = NULL;
- unsigned long id = 0;
- struct dirent *e;
- char *endptr = NULL;
-
- int len = snprintf(path, sizeof(path),
- SYS_CPU_DIR, lcore_id);
- if (len <= 0 || (unsigned)len >= sizeof(path))
- goto err;
-
- d = opendir(path);
- if (!d)
- goto err;
-
- while ((e = readdir(d)) != NULL) {
- if (strncmp(e->d_name, node_prefix, prefix_len) == 0) {
- id = strtoul(e->d_name+prefix_len, &endptr, 0);
- break;
- }
- }
- if (endptr == NULL || *endptr!='\0' || endptr == e->d_name+prefix_len) {
- RTE_LOG(WARNING, EAL, "Cannot read numa node link "
- "for lcore %u - using physical package id instead\n",
- lcore_id);
-
- len = snprintf(path, sizeof(path), SYS_CPU_DIR "/%s",
- lcore_id, PHYS_PKG_FILE);
- if (len <= 0 || (unsigned)len >= sizeof(path))
- goto err;
- if (eal_parse_sysfs_value(path, &id) != 0)
- goto err;
- }
- closedir(d);
- return (unsigned)id;
-
-err:
- if (d)
- closedir(d);
- RTE_LOG(ERR, EAL, "Error getting NUMA socket information from %s "
- "for lcore %u - assuming NUMA socket 0\n", SYS_CPU_DIR, lcore_id);
- return 0;
-}
-
-/* Get the cpu core id value from the /sys/.../cpuX core_id value */
-static unsigned
-cpu_core_id(unsigned lcore_id)
-{
- char path[PATH_MAX];
- unsigned long id;
-
- int len = snprintf(path, sizeof(path), SYS_CPU_DIR "/%s", lcore_id, CORE_ID_FILE);
- if (len <= 0 || (unsigned)len >= sizeof(path))
- goto err;
- if (eal_parse_sysfs_value(path, &id) != 0)
- goto err;
- return (unsigned)id;
-
-err:
- RTE_LOG(ERR, EAL, "Error reading core id value from %s "
- "for lcore %u - assuming core 0\n", SYS_CPU_DIR, lcore_id);
- return 0;
-}
-
-/*
- * Parse /sys/devices/system/cpu to get the number of physical and logical
- * processors on the machine. The function will fill the cpu_info
- * structure.
- */
-int
-rte_eal_cpu_init(void)
-{
- /* pointer to global configuration */
- struct rte_config *config = rte_eal_get_configuration();
- unsigned lcore_id;
- unsigned count = 0;
-
- /*
- * Parse the maximum set of logical cores, detect the subset of running
- * ones and enable them by default.
- */
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- lcore_config[lcore_id].detected = cpu_detected(lcore_id);
- if (lcore_config[lcore_id].detected == 0) {
- config->lcore_role[lcore_id] = ROLE_OFF;
- continue;
- }
- /* By default, each detected core is enabled */
- config->lcore_role[lcore_id] = ROLE_RTE;
- lcore_config[lcore_id].core_id = cpu_core_id(lcore_id);
- lcore_config[lcore_id].socket_id = cpu_socket_id(lcore_id);
- if (lcore_config[lcore_id].socket_id >= RTE_MAX_NUMA_NODES)
-#ifdef RTE_EAL_ALLOW_INV_SOCKET_ID
- lcore_config[lcore_id].socket_id = 0;
-#else
- rte_panic("Socket ID (%u) is greater than "
- "RTE_MAX_NUMA_NODES (%d)\n",
- lcore_config[lcore_id].socket_id, RTE_MAX_NUMA_NODES);
-#endif
- RTE_LOG(DEBUG, EAL, "Detected lcore %u as core %u on socket %u\n",
- lcore_id,
- lcore_config[lcore_id].core_id,
- lcore_config[lcore_id].socket_id);
- count ++;
- }
- /* Set the count of enabled logical cores of the EAL configuration */
- config->lcore_count = count;
- RTE_LOG(DEBUG, EAL, "Support maximum %u logical core(s) by configuration.\n",
- RTE_MAX_LCORE);
- RTE_LOG(DEBUG, EAL, "Detected %u lcore(s)\n", config->lcore_count);
-
- return 0;
-}
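The detection in the deleted eal_lcore.c is all plain sysfs reads. A minimal standalone sketch of the core_id read outside the EAL (error handling is reduced to returning -1, where the EAL version logs and assumes core 0):

    #include <stdio.h>

    /* Read the core id of logical cpu N from
     * /sys/devices/system/cpu/cpuN/topology/core_id, as cpu_core_id() above
     * does via eal_parse_sysfs_value(). */
    static long read_core_id(unsigned lcore_id)
    {
        char path[256];
        long id = -1;
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%u/topology/core_id", lcore_id);
        f = fopen(path, "r");
        if (f == NULL)
            return -1;
        if (fscanf(f, "%ld", &id) != 1)
            id = -1;
        fclose(f);
        return id;
    }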
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_xen_memory.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_xen_memory.c
deleted file mode 100755
index ee5cc2da..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_xen_memory.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <errno.h>
-#include <stdarg.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <inttypes.h>
-#include <string.h>
-#include <stdarg.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/queue.h>
-#include <sys/file.h>
-#include <unistd.h>
-#include <limits.h>
-#include <errno.h>
-#include <sys/ioctl.h>
-#include <sys/time.h>
-
-#include <rte_log.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_launch.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_eal_memconfig.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_common.h>
-#include <rte_string_fns.h>
-
-#include "eal_private.h"
-#include "eal_internal_cfg.h"
-#include "eal_filesystem.h"
-#include <exec-env/rte_dom0_common.h>
-
-#define PAGE_SIZE RTE_PGSIZE_4K
-#define DEFAUL_DOM0_NAME "dom0-mem"
-
-static int xen_fd = -1;
-static const char sys_dir_path[] = "/sys/kernel/mm/dom0-mm/memsize-mB";
-
-/*
- * Try to mmap *size bytes in /dev/zero. If it is successful, return the
- * pointer to the mmap'd area and keep *size unmodified. Else, retry
- * with a smaller zone: decrease *size by mem_size until it reaches
- * 0. In this case, return NULL. Note: this function returns an address
- * which is a multiple of mem_size size.
- */
-static void *
-xen_get_virtual_area(size_t *size, size_t mem_size)
-{
- void *addr;
- int fd;
- long aligned_addr;
-
-	RTE_LOG(INFO, EAL, "Asking for a virtual area of 0x%zx bytes\n", *size);
-
- fd = open("/dev/zero", O_RDONLY);
- if (fd < 0){
- RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");
- return NULL;
- }
- do {
- addr = mmap(NULL, (*size) + mem_size, PROT_READ,
- MAP_PRIVATE, fd, 0);
- if (addr == MAP_FAILED)
- *size -= mem_size;
- } while (addr == MAP_FAILED && *size > 0);
-
- if (addr == MAP_FAILED) {
- close(fd);
- RTE_LOG(INFO, EAL, "Cannot get a virtual area\n");
- return NULL;
- }
-
- munmap(addr, (*size) + mem_size);
- close(fd);
-
- /* align addr to a mem_size boundary */
- aligned_addr = (uintptr_t)addr;
- aligned_addr = RTE_ALIGN_CEIL(aligned_addr, mem_size);
- addr = (void *)(aligned_addr);
-
- RTE_LOG(INFO, EAL, "Virtual area found at %p (size = 0x%zx)\n",
- addr, *size);
-
- return addr;
-}
-
-/**
- * Get the memory size configuration from the memsize file under
- * sys_dir_path (/sys/kernel/mm/dom0-mm/memsize-mB); the size unit is mB.
- */
-static int
-get_xen_memory_size(void)
-{
- char path[PATH_MAX];
- unsigned long mem_size = 0;
- static const char *file_name;
-
- file_name = "memsize";
- snprintf(path, sizeof(path), "%s/%s",
- sys_dir_path, file_name);
-
- if (eal_parse_sysfs_value(path, &mem_size) < 0)
- return -1;
-
- if (mem_size == 0)
- rte_exit(EXIT_FAILURE,"XEN-DOM0:the %s/%s was not"
- " configured.\n",sys_dir_path, file_name);
- if (mem_size % 2)
- rte_exit(EXIT_FAILURE,"XEN-DOM0:the %s/%s must be"
- " even number.\n",sys_dir_path, file_name);
-
- if (mem_size > DOM0_CONFIG_MEMSIZE)
- rte_exit(EXIT_FAILURE,"XEN-DOM0:the %s/%s should not be larger"
- " than %d mB\n",sys_dir_path, file_name, DOM0_CONFIG_MEMSIZE);
-
- return mem_size;
-}
-
-/**
- * Calculate the MFN (machine frame number) from a physical address in Xen Dom0.
- */
-phys_addr_t
-rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
-{
- int mfn_id;
- uint64_t mfn, mfn_offset;
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- struct rte_memseg *memseg = mcfg->memseg;
-
- mfn_id = (phy_addr - memseg[memseg_id].phys_addr) / RTE_PGSIZE_2M;
-
- /*the MFN is contiguous in 2M */
- mfn_offset = (phy_addr - memseg[memseg_id].phys_addr) %
- RTE_PGSIZE_2M / PAGE_SIZE;
- mfn = mfn_offset + memseg[memseg_id].mfn[mfn_id];
-
-	/** return machine address */
- return (mfn * PAGE_SIZE + phy_addr % PAGE_SIZE);
-}
-
-int
-rte_xen_dom0_memory_init(void)
-{
- void *vir_addr, *vma_addr = NULL;
- int err, ret = 0;
- uint32_t i, requested, mem_size, memseg_idx, num_memseg = 0;
- size_t vma_len = 0;
- struct memory_info meminfo;
- struct memseg_info seginfo[RTE_MAX_MEMSEG];
- int flags, page_size = getpagesize();
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- struct rte_memseg *memseg = mcfg->memseg;
- uint64_t total_mem = internal_config.memory;
-
- memset(seginfo, 0, sizeof(seginfo));
- memset(&meminfo, 0, sizeof(struct memory_info));
-
- mem_size = get_xen_memory_size();
- requested = (unsigned) (total_mem / 0x100000);
- if (requested > mem_size)
- /* if we didn't satisfy total memory requirements */
- rte_exit(EXIT_FAILURE,"Not enough memory available! Requested: %uMB,"
- " available: %uMB\n", requested, mem_size);
- else if (total_mem != 0)
- mem_size = requested;
-
- /* Check FD and open once */
- if (xen_fd < 0) {
- xen_fd = open(DOM0_MM_DEV, O_RDWR);
- if (xen_fd < 0) {
- RTE_LOG(ERR, EAL, "Can not open %s\n",DOM0_MM_DEV);
- return -1;
- }
- }
-
- meminfo.size = mem_size;
-
-	/* construct memory management name for Dom0 */
- snprintf(meminfo.name, DOM0_NAME_MAX, "%s-%s",
- internal_config.hugefile_prefix, DEFAUL_DOM0_NAME);
-
- /* Notify kernel driver to allocate memory */
- ret = ioctl(xen_fd, RTE_DOM0_IOCTL_PREPARE_MEMSEG, &meminfo);
- if (ret < 0) {
- RTE_LOG(ERR, EAL, "XEN DOM0:failed to get memory\n");
- err = -EIO;
- goto fail;
- }
-
- /* Get number of memory segment from driver */
- ret = ioctl(xen_fd, RTE_DOM0_IOCTL_GET_NUM_MEMSEG, &num_memseg);
- if (ret < 0) {
- RTE_LOG(ERR, EAL, "XEN DOM0:failed to get memseg count.\n");
- err = -EIO;
- goto fail;
- }
-
- if(num_memseg > RTE_MAX_MEMSEG){
- RTE_LOG(ERR, EAL, "XEN DOM0: the memseg count %d is greater"
- " than max memseg %d.\n",num_memseg, RTE_MAX_MEMSEG);
- err = -EIO;
- goto fail;
- }
-
-	/* get information on all memory segments */
- ret = ioctl(xen_fd, RTE_DOM0_IOCTL_GET_MEMSEG_INFO, seginfo);
- if (ret < 0) {
- RTE_LOG(ERR, EAL, "XEN DOM0:failed to get memseg info.\n");
- err = -EIO;
- goto fail;
- }
-
- /* map all memory segments to contiguous user space */
- for (memseg_idx = 0; memseg_idx < num_memseg; memseg_idx++)
- {
- vma_len = seginfo[memseg_idx].size;
-
- /**
- * get the biggest virtual memory area up to vma_len. If it fails,
- * vma_addr is NULL, so let the kernel provide the address.
- */
- vma_addr = xen_get_virtual_area(&vma_len, RTE_PGSIZE_2M);
- if (vma_addr == NULL) {
- flags = MAP_SHARED;
- vma_len = RTE_PGSIZE_2M;
- } else
- flags = MAP_SHARED | MAP_FIXED;
-
- seginfo[memseg_idx].size = vma_len;
- vir_addr = mmap(vma_addr, seginfo[memseg_idx].size,
- PROT_READ|PROT_WRITE, flags, xen_fd,
- memseg_idx * page_size);
- if (vir_addr == MAP_FAILED) {
- RTE_LOG(ERR, EAL, "XEN DOM0:Could not mmap %s\n",
- DOM0_MM_DEV);
- err = -EIO;
- goto fail;
- }
-
- memseg[memseg_idx].addr = vir_addr;
- memseg[memseg_idx].phys_addr = page_size *
- seginfo[memseg_idx].pfn ;
- memseg[memseg_idx].len = seginfo[memseg_idx].size;
- for ( i = 0; i < seginfo[memseg_idx].size / RTE_PGSIZE_2M; i++)
- memseg[memseg_idx].mfn[i] = seginfo[memseg_idx].mfn[i];
-
-		/* MFNs are contiguous within 2M, so assume that the page size is 2M */
- memseg[memseg_idx].hugepage_sz = RTE_PGSIZE_2M;
-
- memseg[memseg_idx].nchannel = mcfg->nchannel;
- memseg[memseg_idx].nrank = mcfg->nrank;
-
-		/* NUMA is not supported in Xen Dom0, so only set socket 0 */
- memseg[memseg_idx].socket_id = 0;
- }
-
- return 0;
-fail:
- if (xen_fd > 0) {
- close(xen_fd);
- xen_fd = -1;
- }
- return err;
-}
-
-/*
- * This creates the memory mappings in the secondary process to match that of
- * the server process. It goes through each memory segment in the DPDK runtime
- * configuration, mapping them in order to form a contiguous block in the
- * virtual memory space
- */
-int
-rte_xen_dom0_memory_attach(void)
-{
- const struct rte_mem_config *mcfg;
- unsigned s = 0; /* s used to track the segment number */
- int xen_fd = -1;
- int ret = -1;
- void *vir_addr;
- char name[DOM0_NAME_MAX] = {0};
- int page_size = getpagesize();
-
- mcfg = rte_eal_get_configuration()->mem_config;
-
- /* Check FD and open once */
- if (xen_fd < 0) {
- xen_fd = open(DOM0_MM_DEV, O_RDWR);
- if (xen_fd < 0) {
- RTE_LOG(ERR, EAL, "Can not open %s\n",DOM0_MM_DEV);
- goto error;
- }
- }
-
-	/* construct memory management name for Dom0 */
- snprintf(name, DOM0_NAME_MAX, "%s-%s",
- internal_config.hugefile_prefix, DEFAUL_DOM0_NAME);
- /* attach to memory segments of primary process */
- ret = ioctl(xen_fd, RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG, name);
- if (ret) {
- RTE_LOG(ERR, EAL,"attach memory segments fail.\n");
- goto error;
- }
-
- /* map all segments into memory to make sure we get the addrs */
- for (s = 0; s < RTE_MAX_MEMSEG; ++s) {
-
- /*
- * the first memory segment with len==0 is the one that
- * follows the last valid segment.
- */
- if (mcfg->memseg[s].len == 0)
- break;
-
- vir_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
- PROT_READ|PROT_WRITE, MAP_SHARED|MAP_FIXED, xen_fd,
- s * page_size);
- if (vir_addr == MAP_FAILED) {
- RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
- "in %s to requested address [%p]\n",
- (unsigned long long)mcfg->memseg[s].len, DOM0_MM_DEV,
- mcfg->memseg[s].addr);
- goto error;
- }
- }
- return 0;
-
-error:
- if (xen_fd >= 0) {
- close(xen_fd);
- xen_fd = -1;
- }
- return -1;
-}
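Most of the machinery in the deleted eal_xen_memory.c is ioctl plumbing, but xen_get_virtual_area() uses a generally useful trick: over-map a probe region by the alignment, note where the kernel placed it, unmap, and round the address up to get an aligned hint for a later MAP_FIXED mapping. A minimal sketch of just that trick, using MAP_ANONYMOUS instead of opening /dev/zero (the effect for address-space probing is the same):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    /* Probe for a size-byte virtual area aligned to align (a power of two).
     * Returns an address hint for a later MAP_FIXED mapping, or NULL. */
    static void *
    probe_aligned_area(size_t size, size_t align)
    {
        void *addr = mmap(NULL, size + align, PROT_READ,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (addr == MAP_FAILED)
            return NULL;
        munmap(addr, size + align); /* only probing: release it again */
        return (void *)(((uintptr_t)addr + align - 1) &
                        ~(uintptr_t)(align - 1));
    }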
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h b/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
deleted file mode 100755
index 23eafd96..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_INTERRUPTS_H_
-#error "don't include this file directly, please include generic <rte_interrupts.h>"
-#endif
-
-#ifndef _RTE_LINUXAPP_INTERRUPTS_H_
-#define _RTE_LINUXAPP_INTERRUPTS_H_
-
-enum rte_intr_handle_type {
- RTE_INTR_HANDLE_UNKNOWN = 0,
- RTE_INTR_HANDLE_UIO, /**< uio device handle */
- RTE_INTR_HANDLE_VFIO_LEGACY, /**< vfio device handle (legacy) */
- RTE_INTR_HANDLE_VFIO_MSI, /**< vfio device handle (MSI) */
- RTE_INTR_HANDLE_VFIO_MSIX, /**< vfio device handle (MSIX) */
- RTE_INTR_HANDLE_ALARM, /**< alarm handle */
- RTE_INTR_HANDLE_MAX
-};
-
-/** Handle for interrupts. */
-struct rte_intr_handle {
- int vfio_dev_fd; /**< VFIO device file descriptor */
- int fd; /**< file descriptor */
- enum rte_intr_handle_type type; /**< handle type */
-};
-
-#endif /* _RTE_LINUXAPP_INTERRUPTS_H_ */
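A sketch of how a driver might fill this handle in for a UIO-bound device; the device node and the function name are illustrative, not the EAL's actual code path:

    #include <fcntl.h>
    #include <rte_interrupts.h>

    static int setup_uio_handle(struct rte_intr_handle *h)
    {
        h->fd = open("/dev/uio0", O_RDWR); /* illustrative device node */
        if (h->fd < 0)
            return -1;
        h->type = RTE_INTR_HANDLE_UIO;
        h->vfio_dev_fd = -1; /* not used for UIO handles */
        return 0;
    }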
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/Makefile b/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/Makefile
deleted file mode 100755
index ec6e702f..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/Makefile
+++ /dev/null
@@ -1,53 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# module name and path
-#
-MODULE = igb_uio
-MODULE_PATH = drivers/net/igb_uio
-
-#
-# CFLAGS
-#
-MODULE_CFLAGS += -I$(SRCDIR) --param max-inline-insns-single=100
-MODULE_CFLAGS += -I$(RTE_OUTPUT)/include
-MODULE_CFLAGS += -Winline -Wall -Werror
-MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
-
-#
-# all source are stored in SRCS-y
-#
-SRCS-y := igb_uio.c
-
-include $(RTE_SDK)/mk/rte.module.mk
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/igb_uio.c b/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/igb_uio.c
deleted file mode 100755
index ba1364b7..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/igb_uio.c
+++ /dev/null
@@ -1,643 +0,0 @@
-/*-
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/uio_driver.h>
-#include <linux/io.h>
-#include <linux/msi.h>
-#include <linux/version.h>
-
-#ifdef CONFIG_XEN_DOM0
-#include <xen/xen.h>
-#endif
-#include <rte_pci_dev_features.h>
-
-#include "compat.h"
-
-#ifdef RTE_PCI_CONFIG
-#define PCI_SYS_FILE_BUF_SIZE 10
-#define PCI_DEV_CAP_REG 0xA4
-#define PCI_DEV_CTRL_REG 0xA8
-#define PCI_DEV_CAP_EXT_TAG_MASK 0x20
-#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
-#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
-#endif
-
-/**
- * A structure describing the private information for a uio device.
- */
-struct rte_uio_pci_dev {
- struct uio_info info;
- struct pci_dev *pdev;
- enum rte_intr_mode mode;
-};
-
-static char *intr_mode = NULL;
-static enum rte_intr_mode igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
-
-static inline struct rte_uio_pci_dev *
-igbuio_get_uio_pci_dev(struct uio_info *info)
-{
- return container_of(info, struct rte_uio_pci_dev, info);
-}
-
-/* sriov sysfs */
-static ssize_t
-show_max_vfs(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return snprintf(buf, 10, "%u\n",
- pci_num_vf(container_of(dev, struct pci_dev, dev)));
-}
-
-static ssize_t
-store_max_vfs(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int err = 0;
- unsigned long max_vfs;
- struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
-
- if (0 != kstrtoul(buf, 0, &max_vfs))
- return -EINVAL;
-
- if (0 == max_vfs)
- pci_disable_sriov(pdev);
- else if (0 == pci_num_vf(pdev))
- err = pci_enable_sriov(pdev, max_vfs);
-	else /* changing max_vfs while VFs are enabled is not allowed */
- err = -EINVAL;
-
- return err ? err : count;
-}
-
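The max_vfs attribute above is exposed through sysfs and is normally driven
from user space. A minimal sketch of that usage, assuming a device bound to
igb_uio at the placeholder PCI address 0000:03:00.0:

    #include <stdio.h>

    int main(void)
    {
        /* Path and PCI address are placeholders for illustration. */
        FILE *f = fopen("/sys/bus/pci/devices/0000:03:00.0/max_vfs", "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        /* Enabling 2 VFs succeeds only while no VFs are active;
         * writing 0 disables SR-IOV on the device. */
        fprintf(f, "2\n");
        fclose(f);
        return 0;
    }

Writing a new non-zero count while VFs are already enabled returns -EINVAL,
matching the else branch in store_max_vfs() above.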
-#ifdef RTE_PCI_CONFIG
-static ssize_t
-show_extended_tag(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
- uint32_t val = 0;
-
- pci_read_config_dword(pci_dev, PCI_DEV_CAP_REG, &val);
- if (!(val & PCI_DEV_CAP_EXT_TAG_MASK)) /* Not supported */
- return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%s\n", "invalid");
-
- val = 0;
- pci_bus_read_config_dword(pci_dev->bus, pci_dev->devfn,
- PCI_DEV_CTRL_REG, &val);
-
- return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%s\n",
- (val & PCI_DEV_CTRL_EXT_TAG_MASK) ? "on" : "off");
-}
-
-static ssize_t
-store_extended_tag(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
- uint32_t val = 0, enable;
-
- if (strncmp(buf, "on", 2) == 0)
- enable = 1;
- else if (strncmp(buf, "off", 3) == 0)
- enable = 0;
- else
- return -EINVAL;
-
- pci_cfg_access_lock(pci_dev);
- pci_bus_read_config_dword(pci_dev->bus, pci_dev->devfn,
- PCI_DEV_CAP_REG, &val);
- if (!(val & PCI_DEV_CAP_EXT_TAG_MASK)) { /* Not supported */
- pci_cfg_access_unlock(pci_dev);
- return -EPERM;
- }
-
- val = 0;
- pci_bus_read_config_dword(pci_dev->bus, pci_dev->devfn,
- PCI_DEV_CTRL_REG, &val);
- if (enable)
- val |= PCI_DEV_CTRL_EXT_TAG_MASK;
- else
- val &= ~PCI_DEV_CTRL_EXT_TAG_MASK;
- pci_bus_write_config_dword(pci_dev->bus, pci_dev->devfn,
- PCI_DEV_CTRL_REG, val);
- pci_cfg_access_unlock(pci_dev);
-
- return count;
-}
-
-static ssize_t
-show_max_read_request_size(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
- int val = pcie_get_readrq(pci_dev);
-
- return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%d\n", val);
-}
-
-static ssize_t
-store_max_read_request_size(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
- unsigned long size = 0;
- int ret;
-
- if (0 != kstrtoul(buf, 0, &size))
- return -EINVAL;
-
- ret = pcie_set_readrq(pci_dev, (int)size);
- if (ret < 0)
- return ret;
-
- return count;
-}
-#endif
-
-static DEVICE_ATTR(max_vfs, S_IRUGO | S_IWUSR, show_max_vfs, store_max_vfs);
-#ifdef RTE_PCI_CONFIG
-static DEVICE_ATTR(extended_tag, S_IRUGO | S_IWUSR, show_extended_tag,
- store_extended_tag);
-static DEVICE_ATTR(max_read_request_size, S_IRUGO | S_IWUSR,
- show_max_read_request_size, store_max_read_request_size);
-#endif
-
-static struct attribute *dev_attrs[] = {
- &dev_attr_max_vfs.attr,
-#ifdef RTE_PCI_CONFIG
- &dev_attr_extended_tag.attr,
- &dev_attr_max_read_request_size.attr,
-#endif
- NULL,
-};
-
-static const struct attribute_group dev_attr_grp = {
- .attrs = dev_attrs,
-};
-/*
- * Mask or unmask generation of MSI-X messages for the given vector.
- */
-static void
-igbuio_msix_mask_irq(struct msi_desc *desc, int32_t state)
-{
- u32 mask_bits = desc->masked;
- unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL;
-
- if (state != 0)
- mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
- else
- mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
-
- if (mask_bits != desc->masked) {
- writel(mask_bits, desc->mask_base + offset);
- readl(desc->mask_base);
- desc->masked = mask_bits;
- }
-}
-
-/**
- * The irqcontrol callback registered with uio_info.
- * It lets user-space processes enable or disable the device interrupt.
- *
- * @param info
- * pointer to uio_info.
- * @param irq_state
- * state value. 1 to enable interrupt, 0 to disable interrupt.
- *
- * @return
- * - On success, 0.
- * - On failure, a negative value.
- */
-static int
-igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
-{
- struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
- struct pci_dev *pdev = udev->pdev;
-
- pci_cfg_access_lock(pdev);
- if (udev->mode == RTE_INTR_MODE_LEGACY)
- pci_intx(pdev, !!irq_state);
-
- else if (udev->mode == RTE_INTR_MODE_MSIX) {
- struct msi_desc *desc;
-
- list_for_each_entry(desc, &pdev->msi_list, list)
- igbuio_msix_mask_irq(desc, irq_state);
- }
- pci_cfg_access_unlock(pdev);
-
- return 0;
-}
-
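From user space, this callback is reached through the standard UIO file
interface: a 4-byte write() to the device node invokes irqcontrol with the
written value, and a blocking read() returns the interrupt event count. A
minimal sketch, assuming the device node /dev/uio0:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/dev/uio0", O_RDWR); /* device node is an assumption */
        uint32_t enable = 1, count;

        if (fd < 0)
            return 1;
        /* A 4-byte write lands in igbuio_pci_irqcontrol() via the UIO core. */
        if (write(fd, &enable, sizeof(enable)) != sizeof(enable))
            return 1;
        /* read() blocks until the next interrupt and yields the event count. */
        if (read(fd, &count, sizeof(count)) == sizeof(count))
            printf("interrupt #%u\n", (unsigned)count);
        close(fd);
        return 0;
    }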
-/**
- * Interrupt handler that checks whether the interrupt belongs to this device.
- * If it does, it is masked here and re-enabled later from user space.
- */
-static irqreturn_t
-igbuio_pci_irqhandler(int irq, struct uio_info *info)
-{
- struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
-
-	/* Legacy mode: the interrupt must be masked in hardware */
- if (udev->mode == RTE_INTR_MODE_LEGACY &&
- !pci_check_and_mask_intx(udev->pdev))
- return IRQ_NONE;
-
-	/* Message-signalled mode: the IRQ is not shared and is automasked */
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_XEN_DOM0
-static int
-igbuio_dom0_mmap_phys(struct uio_info *info, struct vm_area_struct *vma)
-{
- int idx;
-
- idx = (int)vma->vm_pgoff;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#ifdef HAVE_PTE_MASK_PAGE_IOMAP
- vma->vm_page_prot.pgprot |= _PAGE_IOMAP;
-#endif
-
- return remap_pfn_range(vma,
- vma->vm_start,
- info->mem[idx].addr >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-}
-
-/**
- * The uio device mmap method, which uses the igbuio mmap helper in a
- * Xen Dom0 environment.
- */
-static int
-igbuio_dom0_pci_mmap(struct uio_info *info, struct vm_area_struct *vma)
-{
- int idx;
-
- if (vma->vm_pgoff >= MAX_UIO_MAPS)
- return -EINVAL;
-
- if (info->mem[vma->vm_pgoff].size == 0)
- return -EINVAL;
-
- idx = (int)vma->vm_pgoff;
- switch (info->mem[idx].memtype) {
- case UIO_MEM_PHYS:
- return igbuio_dom0_mmap_phys(info, vma);
- case UIO_MEM_LOGICAL:
- case UIO_MEM_VIRTUAL:
- default:
- return -EINVAL;
- }
-}
-#endif
-
-/* Remap PCI resources described by BAR #pci_bar in uio resource n. */
-static int
-igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info,
- int n, int pci_bar, const char *name)
-{
- unsigned long addr, len;
- void *internal_addr;
-
- if (sizeof(info->mem) / sizeof(info->mem[0]) <= n)
- return -EINVAL;
-
- addr = pci_resource_start(dev, pci_bar);
- len = pci_resource_len(dev, pci_bar);
- if (addr == 0 || len == 0)
- return -1;
- internal_addr = ioremap(addr, len);
- if (internal_addr == NULL)
- return -1;
- info->mem[n].name = name;
- info->mem[n].addr = addr;
- info->mem[n].internal_addr = internal_addr;
- info->mem[n].size = len;
- info->mem[n].memtype = UIO_MEM_PHYS;
- return 0;
-}
-
-/* Get PCI port I/O resources described by BAR #pci_bar in uio resource n. */
-static int
-igbuio_pci_setup_ioport(struct pci_dev *dev, struct uio_info *info,
- int n, int pci_bar, const char *name)
-{
- unsigned long addr, len;
-
- if (sizeof(info->port) / sizeof(info->port[0]) <= n)
- return -EINVAL;
-
- addr = pci_resource_start(dev, pci_bar);
- len = pci_resource_len(dev, pci_bar);
- if (addr == 0 || len == 0)
- return -EINVAL;
-
- info->port[n].name = name;
- info->port[n].start = addr;
- info->port[n].size = len;
- info->port[n].porttype = UIO_PORT_X86;
-
- return 0;
-}
-
-/* Unmap previously ioremap'd resources */
-static void
-igbuio_pci_release_iomem(struct uio_info *info)
-{
- int i;
-
- for (i = 0; i < MAX_UIO_MAPS; i++) {
- if (info->mem[i].internal_addr)
- iounmap(info->mem[i].internal_addr);
- }
-}
-
-static int
-igbuio_setup_bars(struct pci_dev *dev, struct uio_info *info)
-{
- int i, iom, iop, ret;
- unsigned long flags;
- static const char *bar_names[PCI_STD_RESOURCE_END + 1] = {
- "BAR0",
- "BAR1",
- "BAR2",
- "BAR3",
- "BAR4",
- "BAR5",
- };
-
- iom = 0;
- iop = 0;
-
- for (i = 0; i != sizeof(bar_names) / sizeof(bar_names[0]); i++) {
- if (pci_resource_len(dev, i) != 0 &&
- pci_resource_start(dev, i) != 0) {
- flags = pci_resource_flags(dev, i);
- if (flags & IORESOURCE_MEM) {
- ret = igbuio_pci_setup_iomem(dev, info, iom,
- i, bar_names[i]);
- if (ret != 0)
- return ret;
- iom++;
- } else if (flags & IORESOURCE_IO) {
- ret = igbuio_pci_setup_ioport(dev, info, iop,
- i, bar_names[i]);
- if (ret != 0)
- return ret;
- iop++;
- }
- }
- }
-
- return (iom != 0) ? ret : -ENOENT;
-}
-
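Each memory BAR registered here becomes a UIO mapping that user space can
mmap(); the UIO convention selects mapping N with a file offset of
N * page_size. A minimal sketch, assuming /dev/uio0 and mapping one page of
the first memory BAR (a real application would take the length from
/sys/class/uio/uio0/maps/map0/size):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/dev/uio0", O_RDWR);
        long pg = sysconf(_SC_PAGESIZE);
        void *bar0;

        if (fd < 0)
            return 1;
        /* offset 0 * pg selects map 0, the first IORESOURCE_MEM BAR above */
        bar0 = mmap(NULL, (size_t)pg, PROT_READ | PROT_WRITE,
                    MAP_SHARED, fd, 0 * pg);
        if (bar0 == MAP_FAILED)
            return 1;
        /* device registers are now accessible through bar0 */
        munmap(bar0, (size_t)pg);
        close(fd);
        return 0;
    }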
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
-static int __devinit
-#else
-static int
-#endif
-igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct rte_uio_pci_dev *udev;
- struct msix_entry msix_entry;
- int err;
-
- udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
- if (!udev)
- return -ENOMEM;
-
- /*
- * enable device: ask low-level code to enable I/O and
- * memory
- */
- err = pci_enable_device(dev);
- if (err != 0) {
- dev_err(&dev->dev, "Cannot enable PCI device\n");
- goto fail_free;
- }
-
- /*
- * reserve device's PCI memory regions for use by this
- * module
- */
- err = pci_request_regions(dev, "igb_uio");
- if (err != 0) {
- dev_err(&dev->dev, "Cannot request regions\n");
- goto fail_disable;
- }
-
- /* enable bus mastering on the device */
- pci_set_master(dev);
-
- /* remap IO memory */
- err = igbuio_setup_bars(dev, &udev->info);
- if (err != 0)
- goto fail_release_iomem;
-
- /* set 64-bit DMA mask */
- err = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
- if (err != 0) {
- dev_err(&dev->dev, "Cannot set DMA mask\n");
- goto fail_release_iomem;
- }
-
- err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
- if (err != 0) {
- dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
- goto fail_release_iomem;
- }
-
- /* fill uio infos */
- udev->info.name = "igb_uio";
- udev->info.version = "0.1";
- udev->info.handler = igbuio_pci_irqhandler;
- udev->info.irqcontrol = igbuio_pci_irqcontrol;
-#ifdef CONFIG_XEN_DOM0
-	/* check whether the driver runs on Xen Dom0 */
- if (xen_initial_domain())
- udev->info.mmap = igbuio_dom0_pci_mmap;
-#endif
- udev->info.priv = udev;
- udev->pdev = dev;
-
- switch (igbuio_intr_mode_preferred) {
- case RTE_INTR_MODE_MSIX:
- /* Only 1 msi-x vector needed */
- msix_entry.entry = 0;
- if (pci_enable_msix(dev, &msix_entry, 1) == 0) {
- dev_dbg(&dev->dev, "using MSI-X");
- udev->info.irq = msix_entry.vector;
- udev->mode = RTE_INTR_MODE_MSIX;
- break;
- }
- /* fall back to INTX */
- case RTE_INTR_MODE_LEGACY:
- if (pci_intx_mask_supported(dev)) {
- dev_dbg(&dev->dev, "using INTX");
- udev->info.irq_flags = IRQF_SHARED;
- udev->info.irq = dev->irq;
- udev->mode = RTE_INTR_MODE_LEGACY;
- break;
- }
- dev_notice(&dev->dev, "PCI INTX mask not supported\n");
- /* fall back to no IRQ */
- case RTE_INTR_MODE_NONE:
- udev->mode = RTE_INTR_MODE_NONE;
- udev->info.irq = 0;
- break;
-
- default:
- dev_err(&dev->dev, "invalid IRQ mode %u",
- igbuio_intr_mode_preferred);
- err = -EINVAL;
- goto fail_release_iomem;
- }
-
- err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
- if (err != 0)
- goto fail_release_iomem;
-
- /* register uio driver */
- err = uio_register_device(&dev->dev, &udev->info);
- if (err != 0)
- goto fail_remove_group;
-
- pci_set_drvdata(dev, udev);
-
- dev_info(&dev->dev, "uio device registered with irq %lx\n",
- udev->info.irq);
-
- return 0;
-
-fail_remove_group:
- sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
-fail_release_iomem:
- igbuio_pci_release_iomem(&udev->info);
- if (udev->mode == RTE_INTR_MODE_MSIX)
- pci_disable_msix(udev->pdev);
- pci_release_regions(dev);
-fail_disable:
- pci_disable_device(dev);
-fail_free:
- kfree(udev);
-
- return err;
-}
-
-static void
-igbuio_pci_remove(struct pci_dev *dev)
-{
- struct uio_info *info = pci_get_drvdata(dev);
- struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
-
- if (info->priv == NULL) {
- pr_notice("Not igbuio device\n");
- return;
- }
-
- sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
- uio_unregister_device(info);
- igbuio_pci_release_iomem(info);
- if (udev->mode == RTE_INTR_MODE_MSIX)
- pci_disable_msix(dev);
- pci_release_regions(dev);
- pci_disable_device(dev);
- pci_set_drvdata(dev, NULL);
- kfree(info);
-}
-
-static int
-igbuio_config_intr_mode(char *intr_str)
-{
- if (!intr_str) {
- pr_info("Use MSIX interrupt by default\n");
- return 0;
- }
-
- if (!strcmp(intr_str, RTE_INTR_MODE_MSIX_NAME)) {
- igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
- pr_info("Use MSIX interrupt\n");
- } else if (!strcmp(intr_str, RTE_INTR_MODE_LEGACY_NAME)) {
- igbuio_intr_mode_preferred = RTE_INTR_MODE_LEGACY;
- pr_info("Use legacy interrupt\n");
- } else {
- pr_info("Error: bad parameter - %s\n", intr_str);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static struct pci_driver igbuio_pci_driver = {
- .name = "igb_uio",
- .id_table = NULL,
- .probe = igbuio_pci_probe,
- .remove = igbuio_pci_remove,
-};
-
-static int __init
-igbuio_pci_init_module(void)
-{
- int ret;
-
- ret = igbuio_config_intr_mode(intr_mode);
- if (ret < 0)
- return ret;
-
- return pci_register_driver(&igbuio_pci_driver);
-}
-
-static void __exit
-igbuio_pci_exit_module(void)
-{
- pci_unregister_driver(&igbuio_pci_driver);
-}
-
-module_init(igbuio_pci_init_module);
-module_exit(igbuio_pci_exit_module);
-
-module_param(intr_mode, charp, S_IRUGO);
-MODULE_PARM_DESC(intr_mode,
-"igb_uio interrupt mode (default=msix):\n"
-" " RTE_INTR_MODE_MSIX_NAME " Use MSIX interrupt\n"
-" " RTE_INTR_MODE_LEGACY_NAME " Use Legacy interrupt\n"
-"\n");
-
-MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Intel Corporation");
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/Makefile b/src/dpdk_lib18/librte_eal/linuxapp/kni/Makefile
deleted file mode 100755
index fb673d93..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/Makefile
+++ /dev/null
@@ -1,93 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# module name and path
-#
-MODULE = rte_kni
-
-#
-# CFLAGS
-#
-MODULE_CFLAGS += -I$(SRCDIR) --param max-inline-insns-single=50
-MODULE_CFLAGS += -I$(RTE_OUTPUT)/include -I$(SRCDIR)/ethtool/ixgbe -I$(SRCDIR)/ethtool/igb
-MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
-MODULE_CFLAGS += -Wall -Werror
-
-ifeq ($(shell test -f /proc/version_signature && lsb_release -si 2>/dev/null),Ubuntu)
-MODULE_CFLAGS += -DUBUNTU_RELEASE_CODE=$(shell lsb_release -sr | tr -d .)
-UBUNTU_KERNEL_CODE := $(shell cut -d' ' -f2 /proc/version_signature | \
- cut -d'~' -f1 | cut -d- -f1,2 | tr .- $(comma))
-MODULE_CFLAGS += -D"UBUNTU_KERNEL_CODE=UBUNTU_KERNEL_VERSION($(UBUNTU_KERNEL_CODE))"
-endif
-
-# this lib needs main eal
-DEPDIRS-y += lib/librte_eal/linuxapp/eal
-
-#
-# all sources are stored in SRCS-y
-#
-SRCS-y := ethtool/ixgbe/ixgbe_main.c
-SRCS-y += ethtool/ixgbe/ixgbe_api.c
-SRCS-y += ethtool/ixgbe/ixgbe_common.c
-SRCS-y += ethtool/ixgbe/ixgbe_ethtool.c
-SRCS-y += ethtool/ixgbe/ixgbe_82599.c
-SRCS-y += ethtool/ixgbe/ixgbe_82598.c
-SRCS-y += ethtool/ixgbe/ixgbe_x540.c
-SRCS-y += ethtool/ixgbe/ixgbe_phy.c
-SRCS-y += ethtool/ixgbe/kcompat.c
-
-SRCS-y += ethtool/igb/e1000_82575.c
-SRCS-y += ethtool/igb/e1000_i210.c
-SRCS-y += ethtool/igb/e1000_api.c
-SRCS-y += ethtool/igb/e1000_mac.c
-SRCS-y += ethtool/igb/e1000_manage.c
-SRCS-y += ethtool/igb/e1000_mbx.c
-SRCS-y += ethtool/igb/e1000_nvm.c
-SRCS-y += ethtool/igb/e1000_phy.c
-SRCS-y += ethtool/igb/igb_ethtool.c
-SRCS-y += ethtool/igb/igb_hwmon.c
-SRCS-y += ethtool/igb/igb_main.c
-SRCS-y += ethtool/igb/igb_debugfs.c
-SRCS-y += ethtool/igb/igb_param.c
-SRCS-y += ethtool/igb/igb_procfs.c
-SRCS-y += ethtool/igb/igb_vmdq.c
-#SRCS-y += ethtool/igb/igb_ptp.c
-#SRCS-y += ethtool/igb/kcompat.c
-
-SRCS-y += kni_misc.c
-SRCS-y += kni_net.c
-SRCS-y += kni_ethtool.c
-SRCS-$(CONFIG_RTE_KNI_VHOST) += kni_vhost.c
-
-include $(RTE_SDK)/mk/rte.module.mk
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/compat.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/compat.h
deleted file mode 100755
index 13135236..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/compat.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Minimal wrappers to allow compiling kni on older kernels.
- */
-
-#ifndef RHEL_RELEASE_VERSION
-#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b))
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && \
- (!(defined(RHEL_RELEASE_CODE) && \
- RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)))
-
-#define kstrtoul strict_strtoul
-
-#endif /* < 2.6.39 */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
-
-#define sk_sleep(s) (s)->sk_sleep
-
-#endif /* < 2.6.35 */
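With this shim in place, callers such as store_max_vfs() in igb_uio.c can use
the modern kstrtoul() name unconditionally; on kernels older than 2.6.39 the
preprocessor substitutes the older strict_strtoul(), which has the same
signature. A kernel-side fragment illustrating the pattern (parse_count is a
hypothetical helper, not part of this diff):

    #include <linux/kernel.h>
    #include <linux/errno.h>

    static int parse_count(const char *buf, unsigned long *out)
    {
        /* base 0 accepts decimal, 0x-prefixed hex, and 0-prefixed octal */
        if (kstrtoul(buf, 0, out))
            return -EINVAL;
        return 0;
    }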
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/README b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/README
deleted file mode 100755
index 2cfefe72..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/README
+++ /dev/null
@@ -1,100 +0,0 @@
-..
- BSD LICENSE
-
- Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Description
-
-To support ethtool in the Kernel NIC Interface, the standard Linux kernel
-ixgbe/igb drivers are reused here. ixgbe-3.9.17 is the version from which the
-KNI kernel module's ixgbe support was adapted, and igb-3.4.8 is the version
-from which its igb support was adapted.
-
-The ixgbe source code package can be downloaded from sourceforge.net:
-http://sourceforge.net/projects/e1000/files/ixgbe%20stable/
-The following source files are copied or modified from ixgbe.
-
-ixgbe_82598.h
-ixgbe_82599.c
-ixgbe_82599.h
-ixgbe_api.c
-ixgbe_api.h
-ixgbe_common.c
-ixgbe_common.h
-ixgbe_dcb.h
-ixgbe_ethtool.c
-ixgbe_fcoe.h
-ixgbe.h
-ixgbe_main.c
-ixgbe_mbx.h
-ixgbe_osdep.h
-ixgbe_phy.c
-ixgbe_phy.h
-ixgbe_sriov.h
-ixgbe_type.h
-kcompat.c
-kcompat.h
-
-The igb source code package can be downloaded from sourceforge.net:
-http://sourceforge.net/projects/e1000/files/igb%20stable/
-The following source files are copied or modified from igb.
-
-e1000_82575.c
-e1000_82575.h
-e1000_api.c
-e1000_api.h
-e1000_defines.h
-e1000_hw.h
-e1000_mac.c
-e1000_mac.h
-e1000_manage.c
-e1000_manage.h
-e1000_mbx.c
-e1000_mbx.h
-e1000_nvm.c
-e1000_nvm.h
-e1000_osdep.h
-e1000_phy.c
-e1000_phy.h
-e1000_regs.h
-igb_ethtool.c
-igb.h
-igb_main.c
-igb_param.c
-igb_procfs.c
-igb_regtest.h
-igb_sysfs.c
-igb_vmdq.c
-igb_vmdq.h
-kcompat.c
-kcompat_ethtool.c
-kcompat.h
-
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/COPYING b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/COPYING
deleted file mode 100755
index 5f297e5b..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/COPYING
+++ /dev/null
@@ -1,339 +0,0 @@
-
-"This software program is licensed subject to the GNU General Public License
-(GPL). Version 2, June 1991, available at
-<http://www.fsf.org/copyleft/gpl.html>"
-
-GNU General Public License
-
-Version 2, June 1991
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
-
-Everyone is permitted to copy and distribute verbatim copies of this license
-document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to
-share and change it. By contrast, the GNU General Public License is intended
-to guarantee your freedom to share and change free software--to make sure
-the software is free for all its users. This General Public License applies
-to most of the Free Software Foundation's software and to any other program
-whose authors commit to using it. (Some other Free Software Foundation
-software is covered by the GNU Library General Public License instead.) You
-can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price. Our
-General Public Licenses are designed to make sure that you have the freedom
-to distribute copies of free software (and charge for this service if you
-wish), that you receive source code or can get it if you want it, that you
-can change the software or use pieces of it in new free programs; and that
-you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone to
-deny you these rights or to ask you to surrender the rights. These
-restrictions translate to certain responsibilities for you if you distribute
-copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis or
-for a fee, you must give the recipients all the rights that you have. You
-must make sure that they, too, receive or can get the source code. And you
-must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and (2)
-offer you this license which gives you legal permission to copy, distribute
-and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain that
-everyone understands that there is no warranty for this free software. If
-the software is modified by someone else and passed on, we want its
-recipients to know that what they have is not the original, so that any
-problems introduced by others will not reflect on the original authors'
-reputations.
-
-Finally, any free program is threatened constantly by software patents. We
-wish to avoid the danger that redistributors of a free program will
-individually obtain patent licenses, in effect making the program
-proprietary. To prevent this, we have made it clear that any patent must be
-licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and modification
-follow.
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a notice
- placed by the copyright holder saying it may be distributed under the
- terms of this General Public License. The "Program", below, refers to any
- such program or work, and a "work based on the Program" means either the
- Program or any derivative work under copyright law: that is to say, a
- work containing the Program or a portion of it, either verbatim or with
- modifications and/or translated into another language. (Hereinafter,
- translation is included without limitation in the term "modification".)
- Each licensee is addressed as "you".
-
- Activities other than copying, distribution and modification are not
- covered by this License; they are outside its scope. The act of running
- the Program is not restricted, and the output from the Program is covered
- only if its contents constitute a work based on the Program (independent
- of having been made by running the Program). Whether that is true depends
- on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source code
- as you receive it, in any medium, provided that you conspicuously and
- appropriately publish on each copy an appropriate copyright notice and
- disclaimer of warranty; keep intact all the notices that refer to this
- License and to the absence of any warranty; and give any other recipients
- of the Program a copy of this License along with the Program.
-
- You may charge a fee for the physical act of transferring a copy, and you
- may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of it,
- thus forming a work based on the Program, and copy and distribute such
- modifications or work under the terms of Section 1 above, provided that
- you also meet all of these conditions:
-
- * a) You must cause the modified files to carry prominent notices stating
- that you changed the files and the date of any change.
-
- * b) You must cause any work that you distribute or publish, that in
- whole or in part contains or is derived from the Program or any part
- thereof, to be licensed as a whole at no charge to all third parties
- under the terms of this License.
-
- * c) If the modified program normally reads commands interactively when
- run, you must cause it, when started running for such interactive
- use in the most ordinary way, to print or display an announcement
- including an appropriate copyright notice and a notice that there is
- no warranty (or else, saying that you provide a warranty) and that
- users may redistribute the program under these conditions, and
- telling the user how to view a copy of this License. (Exception: if
- the Program itself is interactive but does not normally print such
- an announcement, your work based on the Program is not required to
- print an announcement.)
-
- These requirements apply to the modified work as a whole. If identifiable
- sections of that work are not derived from the Program, and can be
- reasonably considered independent and separate works in themselves, then
- this License, and its terms, do not apply to those sections when you
- distribute them as separate works. But when you distribute the same
- sections as part of a whole which is a work based on the Program, the
- distribution of the whole must be on the terms of this License, whose
- permissions for other licensees extend to the entire whole, and thus to
- each and every part regardless of who wrote it.
-
- Thus, it is not the intent of this section to claim rights or contest
- your rights to work written entirely by you; rather, the intent is to
- exercise the right to control the distribution of derivative or
- collective works based on the Program.
-
- In addition, mere aggregation of another work not based on the Program
- with the Program (or with a work based on the Program) on a volume of a
- storage or distribution medium does not bring the other work under the
- scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it, under
- Section 2) in object code or executable form under the terms of Sections
- 1 and 2 above provided that you also do one of the following:
-
- * a) Accompany it with the complete corresponding machine-readable source
- code, which must be distributed under the terms of Sections 1 and 2
- above on a medium customarily used for software interchange; or,
-
- * b) Accompany it with a written offer, valid for at least three years,
- to give any third party, for a charge no more than your cost of
- physically performing source distribution, a complete machine-
- readable copy of the corresponding source code, to be distributed
- under the terms of Sections 1 and 2 above on a medium customarily
- used for software interchange; or,
-
- * c) Accompany it with the information you received as to the offer to
- distribute corresponding source code. (This alternative is allowed
- only for noncommercial distribution and only if you received the
- program in object code or executable form with such an offer, in
- accord with Subsection b above.)
-
- The source code for a work means the preferred form of the work for
- making modifications to it. For an executable work, complete source code
- means all the source code for all modules it contains, plus any
- associated interface definition files, plus the scripts used to control
- compilation and installation of the executable. However, as a special
- exception, the source code distributed need not include anything that is
- normally distributed (in either source or binary form) with the major
- components (compiler, kernel, and so on) of the operating system on which
- the executable runs, unless that component itself accompanies the
- executable.
-
- If distribution of executable or object code is made by offering access
- to copy from a designated place, then offering equivalent access to copy
- the source code from the same place counts as distribution of the source
- code, even though third parties are not compelled to copy the source
- along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program except as
- expressly provided under this License. Any attempt otherwise to copy,
- modify, sublicense or distribute the Program is void, and will
- automatically terminate your rights under this License. However, parties
- who have received copies, or rights, from you under this License will not
- have their licenses terminated so long as such parties remain in full
- compliance.
-
-5. You are not required to accept this License, since you have not signed
- it. However, nothing else grants you permission to modify or distribute
- the Program or its derivative works. These actions are prohibited by law
- if you do not accept this License. Therefore, by modifying or
- distributing the Program (or any work based on the Program), you
- indicate your acceptance of this License to do so, and all its terms and
- conditions for copying, distributing or modifying the Program or works
- based on it.
-
-6. Each time you redistribute the Program (or any work based on the
- Program), the recipient automatically receives a license from the
- original licensor to copy, distribute or modify the Program subject to
- these terms and conditions. You may not impose any further restrictions
- on the recipients' exercise of the rights granted herein. You are not
- responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent
- infringement or for any other reason (not limited to patent issues),
- conditions are imposed on you (whether by court order, agreement or
- otherwise) that contradict the conditions of this License, they do not
- excuse you from the conditions of this License. If you cannot distribute
- so as to satisfy simultaneously your obligations under this License and
- any other pertinent obligations, then as a consequence you may not
- distribute the Program at all. For example, if a patent license would
- not permit royalty-free redistribution of the Program by all those who
- receive copies directly or indirectly through you, then the only way you
- could satisfy both it and this License would be to refrain entirely from
- distribution of the Program.
-
- If any portion of this section is held invalid or unenforceable under any
- particular circumstance, the balance of the section is intended to apply
- and the section as a whole is intended to apply in other circumstances.
-
- It is not the purpose of this section to induce you to infringe any
- patents or other property right claims or to contest validity of any
- such claims; this section has the sole purpose of protecting the
- integrity of the free software distribution system, which is implemented
- by public license practices. Many people have made generous contributions
- to the wide range of software distributed through that system in
- reliance on consistent application of that system; it is up to the
- author/donor to decide if he or she is willing to distribute software
- through any other system and a licensee cannot impose that choice.
-
- This section is intended to make thoroughly clear what is believed to be
- a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in certain
- countries either by patents or by copyrighted interfaces, the original
- copyright holder who places the Program under this License may add an
- explicit geographical distribution limitation excluding those countries,
- so that distribution is permitted only in or among countries not thus
- excluded. In such case, this License incorporates the limitation as if
- written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new versions of
- the General Public License from time to time. Such new versions will be
- similar in spirit to the present version, but may differ in detail to
- address new problems or concerns.
-
- Each version is given a distinguishing version number. If the Program
- specifies a version number of this License which applies to it and "any
- later version", you have the option of following the terms and
- conditions either of that version or of any later version published by
- the Free Software Foundation. If the Program does not specify a version
- number of this License, you may choose any version ever published by the
- Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free programs
- whose distribution conditions are different, write to the author to ask
- for permission. For software which is copyrighted by the Free Software
- Foundation, write to the Free Software Foundation; we sometimes make
- exceptions for this. Our decision will be guided by the two goals of
- preserving the free status of all derivatives of our free software and
- of promoting the sharing and reuse of software generally.
-
- NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
- FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
- OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
- PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER
- EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
- ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
- YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
- NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
- WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
- REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
- DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
- DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
- (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
- INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
- THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
- OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it free
-software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to
-attach them to the start of each source file to most effectively convey the
-exclusion of warranty; and each file should have at least the "copyright"
-line and a pointer to where the full notice is found.
-
-one line to give the program's name and an idea of what it does.
-Copyright (C) yyyy name of author
-
-This program is free software; you can redistribute it and/or modify it
-under the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2 of the License, or (at your option)
-any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-more details.
-
-You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc., 59
-Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this when
-it starts in an interactive mode:
-
-Gnomovision version 69, Copyright (C) year name of author Gnomovision comes
-with ABSOLUTELY NO WARRANTY; for details type 'show w'. This is free
-software, and you are welcome to redistribute it under certain conditions;
-type 'show c' for details.
-
-The hypothetical commands 'show w' and 'show c' should show the appropriate
-parts of the General Public License. Of course, the commands you use may be
-called something other than 'show w' and 'show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
-Yoyodyne, Inc., hereby disclaims all copyright interest in the program
-'Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-signature of Ty Coon, 1 April 1989
-Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs. If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library. If this is what you want to do, use the GNU Library General Public
-License instead of this License.
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c
deleted file mode 100755
index b8c9a13f..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c
+++ /dev/null
@@ -1,3665 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-/*
- * 82575EB Gigabit Network Connection
- * 82575EB Gigabit Backplane Connection
- * 82575GB Gigabit Network Connection
- * 82576 Gigabit Network Connection
- * 82576 Quad Port Gigabit Mezzanine Adapter
- * 82580 Gigabit Network Connection
- * I350 Gigabit Network Connection
- */
-
-#include "e1000_api.h"
-#include "e1000_i210.h"
-
-static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
-static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
-static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
-static void e1000_release_phy_82575(struct e1000_hw *hw);
-static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
-static void e1000_release_nvm_82575(struct e1000_hw *hw);
-static s32 e1000_check_for_link_82575(struct e1000_hw *hw);
-static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw);
-static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
-static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
- u16 *duplex);
-static s32 e1000_init_hw_82575(struct e1000_hw *hw);
-static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
-static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
- u16 *data);
-static s32 e1000_reset_hw_82575(struct e1000_hw *hw);
-static s32 e1000_reset_hw_82580(struct e1000_hw *hw);
-static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw,
- u32 offset, u16 *data);
-static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw,
- u32 offset, u16 data);
-static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw,
- bool active);
-static s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw,
- bool active);
-static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
- bool active);
-static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
-static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
-static s32 e1000_get_media_type_82575(struct e1000_hw *hw);
-static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw);
-static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
-static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
- u32 offset, u16 data);
-static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
-static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
-static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
- u16 *speed, u16 *duplex);
-static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
-static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
-static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
-static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
-static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
-static void e1000_config_collision_dist_82575(struct e1000_hw *hw);
-static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
-static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
-static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
-static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
-static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
-static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw);
-static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw);
-static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
- u16 offset);
-static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
- u16 offset);
-static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
-static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
-static void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
-static void e1000_clear_vfta_i350(struct e1000_hw *hw);
-
-static void e1000_i2c_start(struct e1000_hw *hw);
-static void e1000_i2c_stop(struct e1000_hw *hw);
-static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data);
-static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data);
-static s32 e1000_get_i2c_ack(struct e1000_hw *hw);
-static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data);
-static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data);
-static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
-static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
-static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data);
-static bool e1000_get_i2c_data(u32 *i2cctl);
-
-static const u16 e1000_82580_rxpbs_table[] = {
- 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
-#define E1000_82580_RXPBS_TABLE_SIZE \
- (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
-
-
-/**
- * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
- * @hw: pointer to the HW structure
- *
- * Called to determine if the I2C pins are being used for I2C or as an
- * external MDIO interface since the two options are mutually exclusive.
- **/
-static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw)
-{
- u32 reg = 0;
- bool ext_mdio = false;
-
- DEBUGFUNC("e1000_sgmii_uses_mdio_82575");
-
- switch (hw->mac.type) {
- case e1000_82575:
- case e1000_82576:
- reg = E1000_READ_REG(hw, E1000_MDIC);
- ext_mdio = !!(reg & E1000_MDIC_DEST);
- break;
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- case e1000_i210:
- case e1000_i211:
- reg = E1000_READ_REG(hw, E1000_MDICNFG);
- ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
- break;
- default:
- break;
- }
- return ext_mdio;
-}
-
-/**
- * e1000_init_phy_params_82575 - Init PHY func ptrs.
- * @hw: pointer to the HW structure
- **/
-static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = E1000_SUCCESS;
- u32 ctrl_ext;
-
- DEBUGFUNC("e1000_init_phy_params_82575");
-
- phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic;
- phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic;
-
- if (hw->phy.media_type != e1000_media_type_copper) {
- phy->type = e1000_phy_none;
- goto out;
- }
-
- phy->ops.power_up = e1000_power_up_phy_copper;
- phy->ops.power_down = e1000_power_down_phy_copper_82575;
-
- phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
- phy->reset_delay_us = 100;
-
- phy->ops.acquire = e1000_acquire_phy_82575;
- phy->ops.check_reset_block = e1000_check_reset_block_generic;
- phy->ops.commit = e1000_phy_sw_reset_generic;
- phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
- phy->ops.release = e1000_release_phy_82575;
-
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
-
- if (e1000_sgmii_active_82575(hw)) {
- phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
- ctrl_ext |= E1000_CTRL_I2C_ENA;
- } else {
- phy->ops.reset = e1000_phy_hw_reset_generic;
- ctrl_ext &= ~E1000_CTRL_I2C_ENA;
- }
-
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
- e1000_reset_mdicnfg_82580(hw);
-
- if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) {
- phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
- phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
- } else {
- switch (hw->mac.type) {
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- phy->ops.read_reg = e1000_read_phy_reg_82580;
- phy->ops.write_reg = e1000_write_phy_reg_82580;
- break;
- case e1000_i210:
- case e1000_i211:
- phy->ops.read_reg = e1000_read_phy_reg_gs40g;
- phy->ops.write_reg = e1000_write_phy_reg_gs40g;
- break;
- default:
- phy->ops.read_reg = e1000_read_phy_reg_igp;
- phy->ops.write_reg = e1000_write_phy_reg_igp;
- }
- }
-
- /* Set phy->phy_addr and phy->id. */
- ret_val = e1000_get_phy_id_82575(hw);
-
- /* Verify phy id and set remaining function pointers */
- switch (phy->id) {
- case M88E1543_E_PHY_ID:
- case I347AT4_E_PHY_ID:
- case M88E1112_E_PHY_ID:
- case M88E1340M_E_PHY_ID:
- case M88E1111_I_PHY_ID:
- phy->type = e1000_phy_m88;
- phy->ops.check_polarity = e1000_check_polarity_m88;
- phy->ops.get_info = e1000_get_phy_info_m88;
- if (phy->id == I347AT4_E_PHY_ID ||
- phy->id == M88E1112_E_PHY_ID ||
- phy->id == M88E1340M_E_PHY_ID)
- phy->ops.get_cable_length =
- e1000_get_cable_length_m88_gen2;
- else if (phy->id == M88E1543_E_PHY_ID)
- phy->ops.get_cable_length =
- e1000_get_cable_length_m88_gen2;
- else
- phy->ops.get_cable_length = e1000_get_cable_length_m88;
- phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
-		/* Check if this PHY is configured for media swap. */
- if (phy->id == M88E1112_E_PHY_ID) {
- u16 data;
-
- ret_val = phy->ops.write_reg(hw,
- E1000_M88E1112_PAGE_ADDR,
- 2);
- if (ret_val)
- goto out;
-
- ret_val = phy->ops.read_reg(hw,
- E1000_M88E1112_MAC_CTRL_1,
- &data);
- if (ret_val)
- goto out;
-
- data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
- E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
- if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
- data == E1000_M88E1112_AUTO_COPPER_BASEX)
- hw->mac.ops.check_for_link =
- e1000_check_for_link_media_swap;
- }
- break;
- case IGP03E1000_E_PHY_ID:
- case IGP04E1000_E_PHY_ID:
- phy->type = e1000_phy_igp_3;
- phy->ops.check_polarity = e1000_check_polarity_igp;
- phy->ops.get_info = e1000_get_phy_info_igp;
- phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
- phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
- phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575;
- phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
- break;
- case I82580_I_PHY_ID:
- case I350_I_PHY_ID:
- phy->type = e1000_phy_82580;
- phy->ops.check_polarity = e1000_check_polarity_82577;
- phy->ops.force_speed_duplex =
- e1000_phy_force_speed_duplex_82577;
- phy->ops.get_cable_length = e1000_get_cable_length_82577;
- phy->ops.get_info = e1000_get_phy_info_82577;
- phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
- break;
- case I210_I_PHY_ID:
- phy->type = e1000_phy_i210;
- phy->ops.check_polarity = e1000_check_polarity_m88;
- phy->ops.get_info = e1000_get_phy_info_m88;
- phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2;
- phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
- phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
- break;
- default:
- ret_val = -E1000_ERR_PHY;
- goto out;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_init_nvm_params_82575 - Init NVM func ptrs.
- * @hw: pointer to the HW structure
- **/
-s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
-{
- struct e1000_nvm_info *nvm = &hw->nvm;
- u32 eecd = E1000_READ_REG(hw, E1000_EECD);
- u16 size;
-
- DEBUGFUNC("e1000_init_nvm_params_82575");
-
- size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
- /*
- * Added to a constant, "size" becomes the left-shift value
- * for setting word_size.
- */
- size += NVM_WORD_SIZE_BASE_SHIFT;
-
- /* Just in case size is out of range, cap it to the largest
- * EEPROM size supported
- */
- if (size > 15)
- size = 15;
-
- nvm->word_size = 1 << size;
- if (hw->mac.type < e1000_i210) {
- nvm->opcode_bits = 8;
- nvm->delay_usec = 1;
-
- switch (nvm->override) {
- case e1000_nvm_override_spi_large:
- nvm->page_size = 32;
- nvm->address_bits = 16;
- break;
- case e1000_nvm_override_spi_small:
- nvm->page_size = 8;
- nvm->address_bits = 8;
- break;
- default:
- nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
- nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
- 16 : 8;
- break;
- }
- if (nvm->word_size == (1 << 15))
- nvm->page_size = 128;
-
- nvm->type = e1000_nvm_eeprom_spi;
- } else {
- nvm->type = e1000_nvm_flash_hw;
- }
-
- /* Function Pointers */
- nvm->ops.acquire = e1000_acquire_nvm_82575;
- nvm->ops.release = e1000_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = e1000_read_nvm_eerd;
- else
- nvm->ops.read = e1000_read_nvm_spi;
-
- nvm->ops.write = e1000_write_nvm_spi;
- nvm->ops.validate = e1000_validate_nvm_checksum_generic;
- nvm->ops.update = e1000_update_nvm_checksum_generic;
- nvm->ops.valid_led_default = e1000_valid_led_default_82575;
-
- /* override generic family function pointers for specific descendants */
- switch (hw->mac.type) {
- case e1000_82580:
- nvm->ops.validate = e1000_validate_nvm_checksum_82580;
- nvm->ops.update = e1000_update_nvm_checksum_82580;
- break;
- case e1000_i350:
- //case e1000_i354:
- nvm->ops.validate = e1000_validate_nvm_checksum_i350;
- nvm->ops.update = e1000_update_nvm_checksum_i350;
- break;
- default:
- break;
- }
-
- return E1000_SUCCESS;
-}
-
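The word_size computation above can be checked by hand: the EECD size field
is added to NVM_WORD_SIZE_BASE_SHIFT (6 in the e1000 headers) and used as a
shift count, capped at 15. A small self-contained sketch of the same math:

    #include <stdio.h>

    int main(void)
    {
        unsigned int field = 4;  /* (eecd & E1000_EECD_SIZE_EX_MASK) >> shift */
        unsigned int size = field + 6;   /* NVM_WORD_SIZE_BASE_SHIFT */

        if (size > 15)           /* cap at the largest supported EEPROM */
            size = 15;
        /* field 4 -> word_size = 1 << 10 = 1024 sixteen-bit words */
        printf("word_size = %u\n", 1u << size);
        return 0;
    }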
-/**
- * e1000_init_mac_params_82575 - Init MAC func ptrs.
- * @hw: pointer to the HW structure
- **/
-static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
-
- DEBUGFUNC("e1000_init_mac_params_82575");
-
- /* Derives media type */
- e1000_get_media_type_82575(hw);
- /* Set mta register count */
- mac->mta_reg_count = 128;
- /* Set uta register count */
- mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
- /* Set rar entry count */
- mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
- if (mac->type == e1000_82576)
- mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
- if (mac->type == e1000_82580)
- mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
- if (mac->type == e1000_i350 || mac->type == e1000_i354)
- mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
-
- /* Enable EEE default settings for EEE supported devices */
- if (mac->type >= e1000_i350)
- dev_spec->eee_disable = false;
-
- /* Allow a single clear of the SW semaphore on I210 and newer */
- if (mac->type >= e1000_i210)
- dev_spec->clear_semaphore_once = true;
-
- /* Set if part includes ASF firmware */
- mac->asf_firmware_present = true;
- /* FWSM register */
- mac->has_fwsm = true;
- /* ARC supported; valid only if manageability features are enabled. */
- mac->arc_subsystem_valid =
- !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK);
-
- /* Function pointers */
-
- /* bus type/speed/width */
- mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
- /* reset */
- if (mac->type >= e1000_82580)
- mac->ops.reset_hw = e1000_reset_hw_82580;
- else
- mac->ops.reset_hw = e1000_reset_hw_82575;
- /* hw initialization */
- mac->ops.init_hw = e1000_init_hw_82575;
- /* link setup */
- mac->ops.setup_link = e1000_setup_link_generic;
- /* physical interface link setup */
- mac->ops.setup_physical_interface =
- (hw->phy.media_type == e1000_media_type_copper)
- ? e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575;
- /* physical interface shutdown */
- mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
- /* physical interface power up */
- mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
- /* check for link */
- mac->ops.check_for_link = e1000_check_for_link_82575;
- /* read mac address */
- mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
- /* configure collision distance */
- mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
- /* multicast address update */
- mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
- if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) {
- /* writing VFTA */
- mac->ops.write_vfta = e1000_write_vfta_i350;
- /* clearing VFTA */
- mac->ops.clear_vfta = e1000_clear_vfta_i350;
- } else {
- /* writing VFTA */
- mac->ops.write_vfta = e1000_write_vfta_generic;
- /* clearing VFTA */
- mac->ops.clear_vfta = e1000_clear_vfta_generic;
- }
- if (hw->mac.type >= e1000_82580)
- mac->ops.validate_mdi_setting =
- e1000_validate_mdi_setting_crossover_generic;
- /* ID LED init */
- mac->ops.id_led_init = e1000_id_led_init_generic;
- /* blink LED */
- mac->ops.blink_led = e1000_blink_led_generic;
- /* setup LED */
- mac->ops.setup_led = e1000_setup_led_generic;
- /* cleanup LED */
- mac->ops.cleanup_led = e1000_cleanup_led_generic;
- /* turn on/off LED */
- mac->ops.led_on = e1000_led_on_generic;
- mac->ops.led_off = e1000_led_off_generic;
- /* clear hardware counters */
- mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
- /* link info */
- mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
- /* get thermal sensor data */
- mac->ops.get_thermal_sensor_data =
- e1000_get_thermal_sensor_data_generic;
- mac->ops.init_thermal_sensor_thresh =
- e1000_init_thermal_sensor_thresh_generic;
- /* acquire SW_FW sync */
- mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575;
- mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575;
- if (mac->type >= e1000_i210) {
- mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210;
- mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210;
- }
-
- /* set lan id for port to determine which phy lock to use */
- hw->mac.ops.set_lan_id(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_init_function_pointers_82575 - Init func ptrs.
- * @hw: pointer to the HW structure
- *
- * Called to initialize all function pointers and parameters.
- **/
-void e1000_init_function_pointers_82575(struct e1000_hw *hw)
-{
- DEBUGFUNC("e1000_init_function_pointers_82575");
-
- hw->mac.ops.init_params = e1000_init_mac_params_82575;
- hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
- hw->phy.ops.init_params = e1000_init_phy_params_82575;
- hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
-}
-
-/**
- * e1000_acquire_phy_82575 - Acquire rights to access PHY
- * @hw: pointer to the HW structure
- *
- * Acquire access rights to the correct PHY.
- **/
-static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
-{
- u16 mask = E1000_SWFW_PHY0_SM;
-
- DEBUGFUNC("e1000_acquire_phy_82575");
-
- if (hw->bus.func == E1000_FUNC_1)
- mask = E1000_SWFW_PHY1_SM;
- else if (hw->bus.func == E1000_FUNC_2)
- mask = E1000_SWFW_PHY2_SM;
- else if (hw->bus.func == E1000_FUNC_3)
- mask = E1000_SWFW_PHY3_SM;
-
- return hw->mac.ops.acquire_swfw_sync(hw, mask);
-}
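
The acquire/release pair above reduces to a small mapping from the PCI function number to a per-port semaphore bit. A minimal standalone sketch of that mapping follows; the mask values are assumed stand-ins for the real E1000_SWFW_PHY*_SM constants from e1000_defines.h.

    #include <stdint.h>

    /* Assumed mask values, for illustration only. */
    #define SWFW_PHY0_SM 0x0002u
    #define SWFW_PHY1_SM 0x0004u
    #define SWFW_PHY2_SM 0x0020u
    #define SWFW_PHY3_SM 0x0040u

    uint16_t phy_semaphore_mask(unsigned int pci_func)
    {
        switch (pci_func) {
        case 1:  return SWFW_PHY1_SM;
        case 2:  return SWFW_PHY2_SM;
        case 3:  return SWFW_PHY3_SM;
        default: return SWFW_PHY0_SM; /* function 0 and fallback */
        }
    }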
-
-/**
- * e1000_release_phy_82575 - Release rights to access PHY
- * @hw: pointer to the HW structure
- *
- * A wrapper to release access rights to the correct PHY.
- **/
-static void e1000_release_phy_82575(struct e1000_hw *hw)
-{
- u16 mask = E1000_SWFW_PHY0_SM;
-
- DEBUGFUNC("e1000_release_phy_82575");
-
- if (hw->bus.func == E1000_FUNC_1)
- mask = E1000_SWFW_PHY1_SM;
- else if (hw->bus.func == E1000_FUNC_2)
- mask = E1000_SWFW_PHY2_SM;
- else if (hw->bus.func == E1000_FUNC_3)
- mask = E1000_SWFW_PHY3_SM;
-
- hw->mac.ops.release_swfw_sync(hw, mask);
-}
-
-/**
- * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
- * @hw: pointer to the HW structure
- * @offset: register offset to be read
- * @data: pointer to the read data
- *
- * Reads the PHY register at offset using the serial gigabit media independent
- * interface and stores the retrieved information in data.
- **/
-static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
- u16 *data)
-{
- s32 ret_val = -E1000_ERR_PARAM;
-
- DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
-
- if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
- DEBUGOUT1("PHY Address %u is out of range\n", offset);
- goto out;
- }
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- goto out;
-
- ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
-
- hw->phy.ops.release(hw);
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
- * @hw: pointer to the HW structure
- * @offset: register offset to write to
- * @data: data to write at register offset
- *
- * Writes the data to PHY register at the offset using the serial gigabit
- * media independent interface.
- **/
-static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
- u16 data)
-{
- s32 ret_val = -E1000_ERR_PARAM;
-
- DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
-
- if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
- DEBUGOUT1("PHY Address %d is out of range\n", offset);
- goto out;
- }
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- goto out;
-
- ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
-
- hw->phy.ops.release(hw);
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_get_phy_id_82575 - Retrieve PHY addr and id
- * @hw: pointer to the HW structure
- *
- * Retrieves the PHY address and ID for both PHYs which do and do not use
- * the sgmii interface.
- **/
-static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = E1000_SUCCESS;
- u16 phy_id;
- u32 ctrl_ext;
- u32 mdic;
-
- DEBUGFUNC("e1000_get_phy_id_82575");
-
- /* i354 devices can have a PHY that needs an extra read for id */
- if (hw->mac.type == e1000_i354)
- e1000_get_phy_id(hw);
-
- /*
- * For SGMII PHYs, we try the list of possible addresses until
- * we find one that works. For non-SGMII PHYs
- * (e.g. integrated copper PHYs), an address of 1 should
- * work. The result of this function should mean phy->phy_addr
- * and phy->id are set correctly.
- */
- if (!e1000_sgmii_active_82575(hw)) {
- phy->addr = 1;
- ret_val = e1000_get_phy_id(hw);
- goto out;
- }
-
- if (e1000_sgmii_uses_mdio_82575(hw)) {
- switch (hw->mac.type) {
- case e1000_82575:
- case e1000_82576:
- mdic = E1000_READ_REG(hw, E1000_MDIC);
- mdic &= E1000_MDIC_PHY_MASK;
- phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
- break;
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- case e1000_i210:
- case e1000_i211:
- mdic = E1000_READ_REG(hw, E1000_MDICNFG);
- mdic &= E1000_MDICNFG_PHY_MASK;
- phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
- break;
- default:
- ret_val = -E1000_ERR_PHY;
- goto out;
- }
- ret_val = e1000_get_phy_id(hw);
- goto out;
- }
-
- /* Power on sgmii phy if it is disabled */
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- E1000_WRITE_REG(hw, E1000_CTRL_EXT,
- ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
- E1000_WRITE_FLUSH(hw);
- msec_delay(300);
-
- /*
- * The address field in the I2CCMD register is 3 bits and 0 is invalid.
- * Therefore, we need to test 1-7
- */
- for (phy->addr = 1; phy->addr < 8; phy->addr++) {
- ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
- if (ret_val == E1000_SUCCESS) {
- DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
- phy_id, phy->addr);
- /*
- * At the time of this writing, the M88 part is
- * the only supported SGMII PHY product.
- */
- if (phy_id == M88_VENDOR)
- break;
- } else {
- DEBUGOUT1("PHY address %u was unreadable\n",
- phy->addr);
- }
- }
-
- /* A valid PHY type couldn't be found. */
- if (phy->addr == 8) {
- phy->addr = 0;
- ret_val = -E1000_ERR_PHY;
- } else {
- ret_val = e1000_get_phy_id(hw);
- }
-
- /* restore previous sfp cage power state */
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
-
-out:
- return ret_val;
-}
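
On 82575/82576 the SGMII PHY address is taken from the MDIC register, while 82580 and later parts hold it in MDICNFG. A minimal sketch of that extraction, assuming illustrative mask and shift values in place of the E1000_MDIC_PHY_* and E1000_MDICNFG_PHY_* constants:

    #include <stdint.h>

    /* Assumed field layout, for illustration; the real masks and
     * shifts come from e1000_defines.h. */
    #define MDIC_PHY_MASK     0x03E00000u
    #define MDIC_PHY_SHIFT    21
    #define MDICNFG_PHY_MASK  0x03E00000u
    #define MDICNFG_PHY_SHIFT 21

    /* Extract the PHY address from whichever register holds it. */
    uint32_t phy_addr_from_mdic(uint32_t reg, int is_82580_or_later)
    {
        if (is_82580_or_later)
            return (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
        return (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
    }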
-
-/**
- * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
- * @hw: pointer to the HW structure
- *
- * Resets the PHY using the serial gigabit media independent interface.
- **/
-static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
-
- DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
-
- /*
- * This isn't a true "hard" reset, but is the only reset
- * available to us at this time.
- */
-
- DEBUGOUT("Soft resetting SGMII attached PHY...\n");
-
- if (!(hw->phy.ops.write_reg))
- goto out;
-
- /*
- * SFP documentation requires the following to configure the SFP module
- * to work on SGMII. No further documentation is given.
- */
- ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
- if (ret_val)
- goto out;
-
- ret_val = hw->phy.ops.commit(hw);
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
- * @hw: pointer to the HW structure
- * @active: true to enable LPLU, false to disable
- *
- * Sets the LPLU D0 state according to the active flag. When
- * activating LPLU this function also disables smart speed
- * and vice versa. LPLU will not be activated unless the
- * device autonegotiation advertisement meets standards of
- * 10, 10/100, or 10/100/1000 at all duplexes.
- * This is a function pointer entry point only called by
- * PHY setup routines.
- **/
-static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = E1000_SUCCESS;
- u16 data;
-
- DEBUGFUNC("e1000_set_d0_lplu_state_82575");
-
- if (!(hw->phy.ops.read_reg))
- goto out;
-
- ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
- if (ret_val)
- goto out;
-
- if (active) {
- data |= IGP02E1000_PM_D0_LPLU;
- ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
- data);
- if (ret_val)
- goto out;
-
- /* When LPLU is enabled, we should disable SmartSpeed */
- ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- &data);
- data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- data);
- if (ret_val)
- goto out;
- } else {
- data &= ~IGP02E1000_PM_D0_LPLU;
- ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
- data);
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
- * during Dx states where the power conservation is most
- * important. During driver activity we should enable
- * SmartSpeed, so performance is maintained.
- */
- if (phy->smart_speed == e1000_smart_speed_on) {
- ret_val = phy->ops.read_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- &data);
- if (ret_val)
- goto out;
-
- data |= IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = phy->ops.write_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- data);
- if (ret_val)
- goto out;
- } else if (phy->smart_speed == e1000_smart_speed_off) {
- ret_val = phy->ops.read_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- &data);
- if (ret_val)
- goto out;
-
- data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = phy->ops.write_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- data);
- if (ret_val)
- goto out;
- }
- }
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
- * @hw: pointer to the HW structure
- * @active: true to enable LPLU, false to disable
- *
- * Sets the LPLU D0 state according to the active flag. When
- * activating LPLU this function also disables smart speed
- * and vice versa. LPLU will not be activated unless the
- * device autonegotiation advertisement meets standards of
- * 10, 10/100, or 10/100/1000 at all duplexes.
- * This is a function pointer entry point only called by
- * PHY setup routines.
- **/
-static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = E1000_SUCCESS;
- u32 data;
-
- DEBUGFUNC("e1000_set_d0_lplu_state_82580");
-
- data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
-
- if (active) {
- data |= E1000_82580_PM_D0_LPLU;
-
- /* When LPLU is enabled, we should disable SmartSpeed */
- data &= ~E1000_82580_PM_SPD;
- } else {
- data &= ~E1000_82580_PM_D0_LPLU;
-
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
- * during Dx states where the power conservation is most
- * important. During driver activity we should enable
- * SmartSpeed, so performance is maintained.
- */
- if (phy->smart_speed == e1000_smart_speed_on)
- data |= E1000_82580_PM_SPD;
- else if (phy->smart_speed == e1000_smart_speed_off)
- data &= ~E1000_82580_PM_SPD;
- }
-
- E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
- return ret_val;
-}
-
-/**
- * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3
- * @hw: pointer to the HW structure
- * @active: boolean used to enable/disable lplu
- *
- * Success returns 0, Failure returns 1
- *
- * The low power link up (lplu) state is set to the power management level D3
- * and SmartSpeed is disabled when active is true, else clear lplu for D3
- * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
- * is used during Dx states where the power conservation is most important.
- * During driver activity, SmartSpeed should be enabled so performance is
- * maintained.
- **/
-s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = E1000_SUCCESS;
- u32 data;
-
- DEBUGFUNC("e1000_set_d3_lplu_state_82580");
-
- data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
-
- if (!active) {
- data &= ~E1000_82580_PM_D3_LPLU;
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
- * during Dx states where the power conservation is most
- * important. During driver activity we should enable
- * SmartSpeed, so performance is maintained.
- */
- if (phy->smart_speed == e1000_smart_speed_on)
- data |= E1000_82580_PM_SPD;
- else if (phy->smart_speed == e1000_smart_speed_off)
- data &= ~E1000_82580_PM_SPD;
- } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
- (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
- (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
- data |= E1000_82580_PM_D3_LPLU;
- /* When LPLU is enabled, we should disable SmartSpeed */
- data &= ~E1000_82580_PM_SPD;
- }
-
- E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
- return ret_val;
-}
-
-/**
- * e1000_acquire_nvm_82575 - Request for access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Acquire the necessary semaphores for exclusive access to the EEPROM.
- * Set the EEPROM access request bit and wait for EEPROM access grant bit.
- * Return successful if access grant bit set, else clear the request for
- * EEPROM access and return -E1000_ERR_NVM (-1).
- **/
-static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
-{
- s32 ret_val;
-
- DEBUGFUNC("e1000_acquire_nvm_82575");
-
- ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
- if (ret_val)
- goto out;
-
- /*
- * Check for a pending access error that
- * could block this access, and clear it
- */
- if (hw->mac.type == e1000_i350) {
- u32 eecd = E1000_READ_REG(hw, E1000_EECD);
- if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
- E1000_EECD_TIMEOUT)) {
- /* Clear all access error flags */
- E1000_WRITE_REG(hw, E1000_EECD, eecd |
- E1000_EECD_ERROR_CLR);
- DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
- }
- }
- if (hw->mac.type == e1000_82580) {
- u32 eecd = E1000_READ_REG(hw, E1000_EECD);
- if (eecd & E1000_EECD_BLOCKED) {
- /* Clear access error flag */
- E1000_WRITE_REG(hw, E1000_EECD, eecd |
- E1000_EECD_BLOCKED);
- DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
- }
- }
-
- ret_val = e1000_acquire_nvm_generic(hw);
- if (ret_val)
- e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_release_nvm_82575 - Release exclusive access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Stop any current commands to the EEPROM and clear the EEPROM request bit,
- * then release the semaphores acquired.
- **/
-static void e1000_release_nvm_82575(struct e1000_hw *hw)
-{
- DEBUGFUNC("e1000_release_nvm_82575");
-
- e1000_release_nvm_generic(hw);
-
- e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
-}
-
-/**
- * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
- * @hw: pointer to the HW structure
- * @mask: specifies which semaphore to acquire
- *
- * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
- * will also specify which port we're acquiring the lock for.
- **/
-static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
-{
- u32 swfw_sync;
- u32 swmask = mask;
- u32 fwmask = mask << 16;
- s32 ret_val = E1000_SUCCESS;
- s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
-
- DEBUGFUNC("e1000_acquire_swfw_sync_82575");
-
- while (i < timeout) {
- if (e1000_get_hw_semaphore_generic(hw)) {
- ret_val = -E1000_ERR_SWFW_SYNC;
- goto out;
- }
-
- swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
- if (!(swfw_sync & (fwmask | swmask)))
- break;
-
- /*
- * Firmware currently using resource (fwmask)
- * or other software thread using resource (swmask)
- */
- e1000_put_hw_semaphore_generic(hw);
- msec_delay_irq(5);
- i++;
- }
-
- if (i == timeout) {
- DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
- ret_val = -E1000_ERR_SWFW_SYNC;
- goto out;
- }
-
- swfw_sync |= swmask;
- E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
-
- e1000_put_hw_semaphore_generic(hw);
-
-out:
- return ret_val;
-}
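
The function above is a two-level lock: a hardware semaphore serializes access to the SW_FW_SYNC register itself, and the register's bits (software claims in the low 16 bits, firmware claims in the high 16) gate the individual resources. A compact sketch of the same protocol against stubbed primitives; hw_semaphore_get/put, read_sw_fw_sync/write_sw_fw_sync, and sleep_ms are hypothetical stand-ins:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical primitives standing in for the real register and
     * hardware-semaphore accessors. */
    bool hw_semaphore_get(void);
    void hw_semaphore_put(void);
    uint32_t read_sw_fw_sync(void);
    void write_sw_fw_sync(uint32_t v);
    void sleep_ms(unsigned int ms);

    /* Try to claim the resource bits in swmask; the matching firmware
     * bits sit 16 positions higher. Returns true on success. */
    bool swfw_sync_acquire(uint16_t swmask, int retries)
    {
        uint32_t fwmask = (uint32_t)swmask << 16;

        for (int i = 0; i < retries; i++) {
            if (!hw_semaphore_get())
                return false;            /* cannot reach the register */

            uint32_t sync = read_sw_fw_sync();
            if (!(sync & (fwmask | swmask))) {
                write_sw_fw_sync(sync | swmask); /* claim the resource */
                hw_semaphore_put();
                return true;
            }

            hw_semaphore_put();          /* busy: back off and retry */
            sleep_ms(5);
        }
        return false;                    /* timed out */
    }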
-
-/**
- * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
- * @hw: pointer to the HW structure
- * @mask: specifies which semaphore to acquire
- *
- * Release the SW/FW semaphore used to access the PHY or NVM. The mask
- * will also specify which port we're releasing the lock for.
- **/
-static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
-{
- u32 swfw_sync;
-
- DEBUGFUNC("e1000_release_swfw_sync_82575");
-
- while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
- ; /* Empty */
-
- swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
- swfw_sync &= ~mask;
- E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
-
- e1000_put_hw_semaphore_generic(hw);
-}
-
-/**
- * e1000_get_cfg_done_82575 - Read config done bit
- * @hw: pointer to the HW structure
- *
- * Read the management control register for the config done bit for
- * completion status. NOTE: EEPROM-less silicon will fail trying
- * to read the config done bit, so the error is *ONLY* logged and
- * E1000_SUCCESS is returned. If we returned an error, EEPROM-less
- * silicon could not be reset or change link.
- **/
-static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
-{
- s32 timeout = PHY_CFG_TIMEOUT;
- s32 ret_val = E1000_SUCCESS;
- u32 mask = E1000_NVM_CFG_DONE_PORT_0;
-
- DEBUGFUNC("e1000_get_cfg_done_82575");
-
- if (hw->bus.func == E1000_FUNC_1)
- mask = E1000_NVM_CFG_DONE_PORT_1;
- else if (hw->bus.func == E1000_FUNC_2)
- mask = E1000_NVM_CFG_DONE_PORT_2;
- else if (hw->bus.func == E1000_FUNC_3)
- mask = E1000_NVM_CFG_DONE_PORT_3;
- while (timeout) {
- if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
- break;
- msec_delay(1);
- timeout--;
- }
- if (!timeout)
- DEBUGOUT("MNG configuration cycle has not completed.\n");
-
- /* If EEPROM is not marked present, init the PHY manually */
- if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
- (hw->phy.type == e1000_phy_igp_3))
- e1000_phy_init_script_igp3(hw);
-
- return ret_val;
-}
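
The config-done wait is the standard poll-with-timeout idiom: read a status register once per millisecond until the per-port done bit appears or the budget runs out. A generic sketch, with read_reg() and sleep_ms() as assumed helpers:

    #include <stdbool.h>
    #include <stdint.h>

    uint32_t read_reg(void);        /* assumed register-read helper */
    void sleep_ms(unsigned int ms); /* assumed delay helper */

    /* Poll until (read_reg() & mask) is set or timeout_ms expires. */
    bool poll_for_bit(uint32_t mask, int timeout_ms)
    {
        while (timeout_ms-- > 0) {
            if (read_reg() & mask)
                return true;
            sleep_ms(1);
        }
        return false; /* caller logs the timeout but does not fail hard */
    }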
-
-/**
- * e1000_get_link_up_info_82575 - Get link speed/duplex info
- * @hw: pointer to the HW structure
- * @speed: stores the current speed
- * @duplex: stores the current duplex
- *
- * This is a wrapper function. If using the serial gigabit media independent
- * interface, PCS is used to retrieve the link speed and duplex information.
- * Otherwise, the generic function is used to get the link speed and duplex.
- **/
-static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
- u16 *duplex)
-{
- s32 ret_val;
-
- DEBUGFUNC("e1000_get_link_up_info_82575");
-
- if (hw->phy.media_type != e1000_media_type_copper)
- ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
- duplex);
- else
- ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
- duplex);
-
- return ret_val;
-}
-
-/**
- * e1000_check_for_link_82575 - Check for link
- * @hw: pointer to the HW structure
- *
- * If sgmii is enabled, then use the pcs register to determine link, otherwise
- * use the generic interface for determining link.
- **/
-static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
-{
- s32 ret_val;
- u16 speed, duplex;
-
- DEBUGFUNC("e1000_check_for_link_82575");
-
- if (hw->phy.media_type != e1000_media_type_copper) {
- ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
- &duplex);
- /*
- * Use this flag to determine if link needs to be checked or
- * not. If we have link, clear the flag so that we do not
- * continue to check for link.
- */
- hw->mac.get_link_status = !hw->mac.serdes_has_link;
-
- /*
- * Configure Flow Control now that Auto-Neg has completed.
- * First, we need to restore the desired flow control
- * settings because we may have had to re-autoneg with a
- * different link partner.
- */
- ret_val = e1000_config_fc_after_link_up_generic(hw);
- if (ret_val)
- DEBUGOUT("Error configuring flow control\n");
- } else {
- ret_val = e1000_check_for_copper_link_generic(hw);
- }
-
- return ret_val;
-}
-
-/**
- * e1000_check_for_link_media_swap - Check which M88E1112 interface linked
- * @hw: pointer to the HW structure
- *
- * Poll the M88E1112 interfaces to see which interface achieved link.
- */
-static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 data;
- u8 port = 0;
-
- DEBUGFUNC("e1000_check_for_link_media_swap");
-
- /* Check the copper medium. */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
- if (ret_val)
- return ret_val;
-
- ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
- if (ret_val)
- return ret_val;
-
- if (data & E1000_M88E1112_STATUS_LINK)
- port = E1000_MEDIA_PORT_COPPER;
-
- /* Check the other medium. */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
- if (ret_val)
- return ret_val;
-
- ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
- if (ret_val)
- return ret_val;
-
- if (data & E1000_M88E1112_STATUS_LINK)
- port = E1000_MEDIA_PORT_OTHER;
-
- /* Determine if a swap needs to happen. */
- if (port && (hw->dev_spec._82575.media_port != port)) {
- hw->dev_spec._82575.media_port = port;
- hw->dev_spec._82575.media_changed = true;
- } else {
- ret_val = e1000_check_for_link_82575(hw);
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
- * @hw: pointer to the HW structure
- **/
-static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
-{
- u32 reg;
-
- DEBUGFUNC("e1000_power_up_serdes_link_82575");
-
- if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
- !e1000_sgmii_active_82575(hw))
- return;
-
- /* Enable PCS to turn on link */
- reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
- reg |= E1000_PCS_CFG_PCS_EN;
- E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
-
- /* Power up the laser */
- reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
- reg &= ~E1000_CTRL_EXT_SDP3_DATA;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
-
- /* flush the write to verify completion */
- E1000_WRITE_FLUSH(hw);
- msec_delay(1);
-}
-
-/**
- * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
- * @hw: pointer to the HW structure
- * @speed: stores the current speed
- * @duplex: stores the current duplex
- *
- * Using the physical coding sub-layer (PCS), retrieve the current speed and
- * duplex, then store the values in the pointers provided.
- **/
-static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
- u16 *speed, u16 *duplex)
-{
- struct e1000_mac_info *mac = &hw->mac;
- u32 pcs;
- u32 status;
-
- DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
-
- /*
- * Read the PCS Status register for link state. For non-copper mode,
- * the status register is not accurate. The PCS status register is
- * used instead.
- */
- pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
-
- /*
- * The link up bit determines when link is up on autoneg.
- */
- if (pcs & E1000_PCS_LSTS_LINK_OK) {
- mac->serdes_has_link = true;
-
- /* Detect and store PCS speed */
- if (pcs & E1000_PCS_LSTS_SPEED_1000)
- *speed = SPEED_1000;
- else if (pcs & E1000_PCS_LSTS_SPEED_100)
- *speed = SPEED_100;
- else
- *speed = SPEED_10;
-
- /* Detect and store PCS duplex */
- if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
- *duplex = FULL_DUPLEX;
- else
- *duplex = HALF_DUPLEX;
-
- /* Check if it is an I354 2.5Gb backplane connection. */
- if (mac->type == e1000_i354) {
- status = E1000_READ_REG(hw, E1000_STATUS);
- if ((status & E1000_STATUS_2P5_SKU) &&
- !(status & E1000_STATUS_2P5_SKU_OVER)) {
- *speed = SPEED_2500;
- *duplex = FULL_DUPLEX;
- DEBUGOUT("2500 Mbs, ");
- DEBUGOUT("Full Duplex\n");
- }
- }
-
- } else {
- mac->serdes_has_link = false;
- *speed = 0;
- *duplex = 0;
- }
-
- return E1000_SUCCESS;
-}
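
The PCS decode above is a handful of bit tests on a single register read. The sketch below reproduces the decode with assumed bit positions in place of the E1000_PCS_LSTS_* flags:

    #include <stdint.h>

    /* Assumed bit positions, for illustration only. */
    #define PCS_LINK_OK     (1u << 0)
    #define PCS_SPEED_100   (1u << 1)
    #define PCS_SPEED_1000  (1u << 2)
    #define PCS_DUPLEX_FULL (1u << 3)

    struct link_info { int up; int speed_mbps; int full_duplex; };

    struct link_info decode_pcs_lstat(uint32_t pcs)
    {
        struct link_info li = { 0, 0, 0 };

        if (!(pcs & PCS_LINK_OK))
            return li;          /* link down: report zero speed/duplex */

        li.up = 1;
        li.speed_mbps = (pcs & PCS_SPEED_1000) ? 1000 :
                        (pcs & PCS_SPEED_100)  ? 100 : 10;
        li.full_duplex = !!(pcs & PCS_DUPLEX_FULL);
        return li;
    }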
-
-/**
- * e1000_shutdown_serdes_link_82575 - Remove link during power down
- * @hw: pointer to the HW structure
- *
- * In the case of serdes, shut down the SFP cage and PCS on driver
- * unload when management pass-through is not enabled.
- **/
-void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
-{
- u32 reg;
-
- DEBUGFUNC("e1000_shutdown_serdes_link_82575");
-
- if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
- !e1000_sgmii_active_82575(hw))
- return;
-
- if (!e1000_enable_mng_pass_thru(hw)) {
- /* Disable PCS to turn off link */
- reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
- reg &= ~E1000_PCS_CFG_PCS_EN;
- E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
-
- /* shutdown the laser */
- reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
- reg |= E1000_CTRL_EXT_SDP3_DATA;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
-
- /* flush the write to verify completion */
- E1000_WRITE_FLUSH(hw);
- msec_delay(1);
- }
-}
-
-/**
- * e1000_reset_hw_82575 - Reset hardware
- * @hw: pointer to the HW structure
- *
- * This resets the hardware into a known state.
- **/
-static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
-{
- u32 ctrl;
- s32 ret_val;
-
- DEBUGFUNC("e1000_reset_hw_82575");
-
- /*
- * Prevent the PCI-E bus from sticking if there is no TLP connection
- * on the last TLP read/write transaction when MAC is reset.
- */
- ret_val = e1000_disable_pcie_master_generic(hw);
- if (ret_val)
- DEBUGOUT("PCI-E Master disable polling has failed.\n");
-
- /* set the completion timeout for interface */
- ret_val = e1000_set_pcie_completion_timeout(hw);
- if (ret_val)
- DEBUGOUT("PCI-E Set completion timeout has failed.\n");
-
- DEBUGOUT("Masking off all interrupts\n");
- E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
-
- E1000_WRITE_REG(hw, E1000_RCTL, 0);
- E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
- E1000_WRITE_FLUSH(hw);
-
- msec_delay(10);
-
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
-
- DEBUGOUT("Issuing a global reset to MAC\n");
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
-
- ret_val = e1000_get_auto_rd_done_generic(hw);
- if (ret_val) {
- /*
- * When auto config read does not complete, do not
- * return with an error. This can happen in situations
- * where there is no eeprom and prevents getting link.
- */
- DEBUGOUT("Auto Read Done did not complete\n");
- }
-
- /* If EEPROM is not present, run manual init scripts */
- if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES))
- e1000_reset_init_script_82575(hw);
-
- /* Clear any pending interrupt events. */
- E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
- E1000_READ_REG(hw, E1000_ICR);
-
- /* Install any alternate MAC address into RAR0 */
- ret_val = e1000_check_alt_mac_addr_generic(hw);
-
- return ret_val;
-}
-
-/**
- * e1000_init_hw_82575 - Initialize hardware
- * @hw: pointer to the HW structure
- *
- * This inits the hardware readying it for operation.
- **/
-static s32 e1000_init_hw_82575(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
- s32 ret_val;
- u16 i, rar_count = mac->rar_entry_count;
-
- DEBUGFUNC("e1000_init_hw_82575");
-
- /* Initialize identification LED */
- ret_val = mac->ops.id_led_init(hw);
- if (ret_val) {
- DEBUGOUT("Error initializing identification LED\n");
- /* This is not fatal and we should not stop init due to this */
- }
-
- /* Disabling VLAN filtering */
- DEBUGOUT("Initializing the IEEE VLAN\n");
- mac->ops.clear_vfta(hw);
-
- /* Setup the receive address */
- e1000_init_rx_addrs_generic(hw, rar_count);
-
- /* Zero out the Multicast HASH table */
- DEBUGOUT("Zeroing the MTA\n");
- for (i = 0; i < mac->mta_reg_count; i++)
- E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
-
- /* Zero out the Unicast HASH table */
- DEBUGOUT("Zeroing the UTA\n");
- for (i = 0; i < mac->uta_reg_count; i++)
- E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
-
- /* Setup link and flow control */
- ret_val = mac->ops.setup_link(hw);
-
- /* Set the default MTU size */
- hw->dev_spec._82575.mtu = 1500;
-
- /*
- * Clear all of the statistics registers (clear on read). It is
- * important that we do this after we have tried to establish link
- * because the symbol error count will increment wildly if there
- * is no link.
- */
- e1000_clear_hw_cntrs_82575(hw);
-
- return ret_val;
-}
-
-/**
- * e1000_setup_copper_link_82575 - Configure copper link settings
- * @hw: pointer to the HW structure
- *
- * Configures the link for auto-neg or forced speed and duplex. Then we check
- * for link, once link is established calls to configure collision distance
- * and flow control are called.
- **/
-static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
-{
- u32 ctrl;
- s32 ret_val;
- u32 phpm_reg;
-
- DEBUGFUNC("e1000_setup_copper_link_82575");
-
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
- ctrl |= E1000_CTRL_SLU;
- ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-
- /* Clear Go Link Disconnect bit on supported devices */
- switch (hw->mac.type) {
- case e1000_82580:
- case e1000_i350:
- case e1000_i210:
- case e1000_i211:
- phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
- phpm_reg &= ~E1000_82580_PM_GO_LINKD;
- E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
- break;
- default:
- break;
- }
-
- ret_val = e1000_setup_serdes_link_82575(hw);
- if (ret_val)
- goto out;
-
- if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
- /* allow time for SFP cage to power up the phy */
- msec_delay(300);
-
- ret_val = hw->phy.ops.reset(hw);
- if (ret_val) {
- DEBUGOUT("Error resetting the PHY.\n");
- goto out;
- }
- }
- switch (hw->phy.type) {
- case e1000_phy_i210:
- case e1000_phy_m88:
- switch (hw->phy.id) {
- case I347AT4_E_PHY_ID:
- case M88E1112_E_PHY_ID:
- case M88E1340M_E_PHY_ID:
- case M88E1543_E_PHY_ID:
- case I210_I_PHY_ID:
- ret_val = e1000_copper_link_setup_m88_gen2(hw);
- break;
- default:
- ret_val = e1000_copper_link_setup_m88(hw);
- break;
- }
- break;
- case e1000_phy_igp_3:
- ret_val = e1000_copper_link_setup_igp(hw);
- break;
- case e1000_phy_82580:
- ret_val = e1000_copper_link_setup_82577(hw);
- break;
- default:
- ret_val = -E1000_ERR_PHY;
- break;
- }
-
- if (ret_val)
- goto out;
-
- ret_val = e1000_setup_copper_link_generic(hw);
-out:
- return ret_val;
-}
-
-/**
- * e1000_setup_serdes_link_82575 - Setup link for serdes
- * @hw: pointer to the HW structure
- *
- * Configure the physical coding sub-layer (PCS) link. The PCS link is
- * used on copper connections that use the serialized gigabit media
- * independent interface (sgmii), or on serdes fiber connections.
- * Configures the link for auto-negotiation or forces speed/duplex.
- **/
-static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
-{
- u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
- bool pcs_autoneg;
- s32 ret_val = E1000_SUCCESS;
- u16 data;
-
- DEBUGFUNC("e1000_setup_serdes_link_82575");
-
- if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
- !e1000_sgmii_active_82575(hw))
- return ret_val;
-
- /*
- * On the 82575, SerDes loopback mode persists until it is
- * explicitly turned off or a power cycle is performed. A read to
- * the register does not indicate its status. Therefore, we ensure
- * loopback mode is disabled during initialization.
- */
- E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
-
- /* power on the sfp cage if present */
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
-
- ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
- ctrl_reg |= E1000_CTRL_SLU;
-
- /* set both sw defined pins on 82575/82576 */
- if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
- ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
-
- reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
-
- /* default pcs_autoneg to the same setting as mac autoneg */
- pcs_autoneg = hw->mac.autoneg;
-
- switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
- case E1000_CTRL_EXT_LINK_MODE_SGMII:
- /* sgmii mode lets the phy handle forcing speed/duplex */
- pcs_autoneg = true;
- /* autoneg timeout should be disabled for SGMII mode */
- reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
- break;
- case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
- /* disable PCS autoneg and support parallel detect only */
- pcs_autoneg = false;
- /* fall through to default case */
- default:
- if (hw->mac.type == e1000_82575 ||
- hw->mac.type == e1000_82576) {
- ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
- pcs_autoneg = false;
- }
-
- /*
- * non-SGMII modes only support a speed of 1000/Full for the
- * link so it is best to just force the MAC and let the pcs
- * link either autoneg or be forced to 1000/Full
- */
- ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
- E1000_CTRL_FD | E1000_CTRL_FRCDPX;
-
- /* set speed of 1000/Full if speed/duplex is forced */
- reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
- break;
- }
-
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
-
- /*
- * New SerDes mode allows for forcing speed or autonegotiating speed
- * at 1gb. Autoneg is the default set by most drivers. This is the
- * mode that will be compatible with older link partners and switches.
- * However, both are supported by the hardware and some drivers/tools.
- */
- reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
- E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
-
- if (pcs_autoneg) {
- /* Set PCS register for autoneg */
- reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
- E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
-
- /* Disable force flow control for autoneg */
- reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
-
- /* Configure flow control advertisement for autoneg */
- anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
- anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
-
- switch (hw->fc.requested_mode) {
- case e1000_fc_full:
- case e1000_fc_rx_pause:
- anadv_reg |= E1000_TXCW_ASM_DIR;
- anadv_reg |= E1000_TXCW_PAUSE;
- break;
- case e1000_fc_tx_pause:
- anadv_reg |= E1000_TXCW_ASM_DIR;
- break;
- default:
- break;
- }
-
- E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg);
-
- DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
- } else {
- /* Set PCS register for forced link */
- reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
-
- /* Force flow control for forced link */
- reg |= E1000_PCS_LCTL_FORCE_FCTRL;
-
- DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
- }
-
- E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
-
- if (!pcs_autoneg && !e1000_sgmii_active_82575(hw))
- e1000_force_mac_fc_generic(hw);
-
- return ret_val;
-}
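
The autoneg branch above advertises flow control with the two-bit PAUSE/ASM_DIR encoding from 802.3x: full and rx-pause set both bits, tx-pause sets only ASM_DIR, and none sets neither. Isolated as a sketch (bit positions assumed):

    #include <stdint.h>

    #define TXCW_PAUSE   (1u << 7)  /* assumed positions */
    #define TXCW_ASM_DIR (1u << 8)

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    uint32_t fc_advertisement(enum fc_mode mode)
    {
        switch (mode) {
        case FC_FULL:
        case FC_RX_PAUSE:
            return TXCW_ASM_DIR | TXCW_PAUSE;
        case FC_TX_PAUSE:
            return TXCW_ASM_DIR;
        default:
            return 0;
        }
    }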
-
-/**
- * e1000_get_media_type_82575 - derives current media type.
- * @hw: pointer to the HW structure
- *
- * The media type is chosen based on a few settings.
- * The following are taken into account:
- * - link mode set in the current port Init Control Word #3
- * - current link mode settings in CSR register
- * - MDIO vs. I2C PHY control interface chosen
- * - SFP module media type
- **/
-static s32 e1000_get_media_type_82575(struct e1000_hw *hw)
-{
- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
- s32 ret_val = E1000_SUCCESS;
- u32 ctrl_ext = 0;
- u32 link_mode = 0;
-
- /* Set internal phy as default */
- dev_spec->sgmii_active = false;
- dev_spec->module_plugged = false;
-
- /* Get CSR setting */
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
-
- /* extract link mode setting */
- link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
-
- switch (link_mode) {
- case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
- hw->phy.media_type = e1000_media_type_internal_serdes;
- break;
- case E1000_CTRL_EXT_LINK_MODE_GMII:
- hw->phy.media_type = e1000_media_type_copper;
- break;
- case E1000_CTRL_EXT_LINK_MODE_SGMII:
- /* Get phy control interface type set (MDIO vs. I2C) */
- if (e1000_sgmii_uses_mdio_82575(hw)) {
- hw->phy.media_type = e1000_media_type_copper;
- dev_spec->sgmii_active = true;
- break;
- }
- /* fall through for I2C based SGMII */
- case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
- /* read media type from SFP EEPROM */
- ret_val = e1000_set_sfp_media_type_82575(hw);
- if ((ret_val != E1000_SUCCESS) ||
- (hw->phy.media_type == e1000_media_type_unknown)) {
- /*
- * If media type was not identified then return media
- * type defined by the CTRL_EXT settings.
- */
- hw->phy.media_type = e1000_media_type_internal_serdes;
-
- if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
- hw->phy.media_type = e1000_media_type_copper;
- dev_spec->sgmii_active = true;
- }
-
- break;
- }
-
- /* do not change link mode for 100BaseFX */
- if (dev_spec->eth_flags.e100_base_fx)
- break;
-
- /* change current link mode setting */
- ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
-
- if (hw->phy.media_type == e1000_media_type_copper)
- ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
- else
- ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
-
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
-
- break;
- }
-
- return ret_val;
-}
-
-/**
- * e1000_set_sfp_media_type_82575 - derives SFP module media type.
- * @hw: pointer to the HW structure
- *
- * The media type is chosen based on the SFP module
- * compatibility flags retrieved from the SFP ID EEPROM.
- **/
-static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_ERR_CONFIG;
- u32 ctrl_ext = 0;
- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
- struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags;
- u8 tranceiver_type = 0;
- s32 timeout = 3;
-
- /* Turn I2C interface ON and power on sfp cage */
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
-
- E1000_WRITE_FLUSH(hw);
-
- /* Read SFP module data */
- while (timeout) {
- ret_val = e1000_read_sfp_data_byte(hw,
- E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
- &tranceiver_type);
- if (ret_val == E1000_SUCCESS)
- break;
- msec_delay(100);
- timeout--;
- }
- if (ret_val != E1000_SUCCESS)
- goto out;
-
- ret_val = e1000_read_sfp_data_byte(hw,
- E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
- (u8 *)eth_flags);
- if (ret_val != E1000_SUCCESS)
- goto out;
-
- /* Check if an SFP module is plugged in and powered */
- if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
- (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
- dev_spec->module_plugged = true;
- if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
- hw->phy.media_type = e1000_media_type_internal_serdes;
- } else if (eth_flags->e100_base_fx) {
- dev_spec->sgmii_active = true;
- hw->phy.media_type = e1000_media_type_internal_serdes;
- } else if (eth_flags->e1000_base_t) {
- dev_spec->sgmii_active = true;
- hw->phy.media_type = e1000_media_type_copper;
- } else {
- hw->phy.media_type = e1000_media_type_unknown;
- DEBUGOUT("PHY module has not been recognized\n");
- goto out;
- }
- } else {
- hw->phy.media_type = e1000_media_type_unknown;
- }
- ret_val = E1000_SUCCESS;
-out:
- /* Restore I2C interface setting */
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
- return ret_val;
-}
-
-/**
- * e1000_valid_led_default_82575 - Verify a valid default LED config
- * @hw: pointer to the HW structure
- * @data: pointer to the NVM (EEPROM)
- *
- * Read the EEPROM for the current default LED configuration. If the
- * LED configuration is not valid, set to a valid LED configuration.
- **/
-static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
-{
- s32 ret_val;
-
- DEBUGFUNC("e1000_valid_led_default_82575");
-
- ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- goto out;
- }
-
- if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
- switch (hw->phy.media_type) {
- case e1000_media_type_internal_serdes:
- *data = ID_LED_DEFAULT_82575_SERDES;
- break;
- case e1000_media_type_copper:
- default:
- *data = ID_LED_DEFAULT;
- break;
- }
- }
-out:
- return ret_val;
-}
-
-/**
- * e1000_sgmii_active_82575 - Return sgmii state
- * @hw: pointer to the HW structure
- *
- * 82575 silicon has a serialized gigabit media independent interface (sgmii)
- * which can be enabled for use in the embedded applications. Simply
- * return the current state of the sgmii interface.
- **/
-static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
-{
- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
- return dev_spec->sgmii_active;
-}
-
-/**
- * e1000_reset_init_script_82575 - Inits HW defaults after reset
- * @hw: pointer to the HW structure
- *
- * Inits recommended HW defaults after a reset when there is no EEPROM
- * detected. This is only for the 82575.
- **/
-static s32 e1000_reset_init_script_82575(struct e1000_hw *hw)
-{
- DEBUGFUNC("e1000_reset_init_script_82575");
-
- if (hw->mac.type == e1000_82575) {
- DEBUGOUT("Running reset init script for 82575\n");
- /* SerDes configuration via SERDESCTRL */
- e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
- e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
- e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
- e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);
-
- /* CCM configuration via CCMCTL register */
- e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
- e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);
-
- /* PCIe lanes configuration */
- e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
- e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
- e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
- e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);
-
- /* PCIe PLL Configuration */
- e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
- e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
- e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_read_mac_addr_82575 - Read device MAC address
- * @hw: pointer to the HW structure
- **/
-static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
-
- DEBUGFUNC("e1000_read_mac_addr_82575");
-
- /*
- * If there's an alternate MAC address place it in RAR0
- * so that it will override the Si installed default perm
- * address.
- */
- ret_val = e1000_check_alt_mac_addr_generic(hw);
- if (ret_val)
- goto out;
-
- ret_val = e1000_read_mac_addr_generic(hw);
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_config_collision_dist_82575 - Configure collision distance
- * @hw: pointer to the HW structure
- *
- * Configures the collision distance to the default value and is used
- * during link setup.
- **/
-static void e1000_config_collision_dist_82575(struct e1000_hw *hw)
-{
- u32 tctl_ext;
-
- DEBUGFUNC("e1000_config_collision_dist_82575");
-
- tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT);
-
- tctl_ext &= ~E1000_TCTL_EXT_COLD;
- tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT;
-
- E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext);
- E1000_WRITE_FLUSH(hw);
-}
-
-/**
- * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
- * @hw: pointer to the HW structure
- *
- * Remove the link in the case of a PHY power down to save power, or to turn
- * off link during a driver unload, when wake on LAN is not enabled.
- **/
-static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
-
- if (!(phy->ops.check_reset_block))
- return;
-
- /* If the management interface is not enabled, then power down */
- if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
- e1000_power_down_phy_copper(hw);
-}
-
-/**
- * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
- * @hw: pointer to the HW structure
- *
- * Clears the hardware counters by reading the counter registers.
- **/
-static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
-{
- DEBUGFUNC("e1000_clear_hw_cntrs_82575");
-
- e1000_clear_hw_cntrs_base_generic(hw);
-
- E1000_READ_REG(hw, E1000_PRC64);
- E1000_READ_REG(hw, E1000_PRC127);
- E1000_READ_REG(hw, E1000_PRC255);
- E1000_READ_REG(hw, E1000_PRC511);
- E1000_READ_REG(hw, E1000_PRC1023);
- E1000_READ_REG(hw, E1000_PRC1522);
- E1000_READ_REG(hw, E1000_PTC64);
- E1000_READ_REG(hw, E1000_PTC127);
- E1000_READ_REG(hw, E1000_PTC255);
- E1000_READ_REG(hw, E1000_PTC511);
- E1000_READ_REG(hw, E1000_PTC1023);
- E1000_READ_REG(hw, E1000_PTC1522);
-
- E1000_READ_REG(hw, E1000_ALGNERRC);
- E1000_READ_REG(hw, E1000_RXERRC);
- E1000_READ_REG(hw, E1000_TNCRS);
- E1000_READ_REG(hw, E1000_CEXTERR);
- E1000_READ_REG(hw, E1000_TSCTC);
- E1000_READ_REG(hw, E1000_TSCTFC);
-
- E1000_READ_REG(hw, E1000_MGTPRC);
- E1000_READ_REG(hw, E1000_MGTPDC);
- E1000_READ_REG(hw, E1000_MGTPTC);
-
- E1000_READ_REG(hw, E1000_IAC);
- E1000_READ_REG(hw, E1000_ICRXOC);
-
- E1000_READ_REG(hw, E1000_ICRXPTC);
- E1000_READ_REG(hw, E1000_ICRXATC);
- E1000_READ_REG(hw, E1000_ICTXPTC);
- E1000_READ_REG(hw, E1000_ICTXATC);
- E1000_READ_REG(hw, E1000_ICTXQEC);
- E1000_READ_REG(hw, E1000_ICTXQMTC);
- E1000_READ_REG(hw, E1000_ICRXDMTC);
-
- E1000_READ_REG(hw, E1000_CBTMPC);
- E1000_READ_REG(hw, E1000_HTDPMC);
- E1000_READ_REG(hw, E1000_CBRMPC);
- E1000_READ_REG(hw, E1000_RPTHC);
- E1000_READ_REG(hw, E1000_HGPTC);
- E1000_READ_REG(hw, E1000_HTCBDPC);
- E1000_READ_REG(hw, E1000_HGORCL);
- E1000_READ_REG(hw, E1000_HGORCH);
- E1000_READ_REG(hw, E1000_HGOTCL);
- E1000_READ_REG(hw, E1000_HGOTCH);
- E1000_READ_REG(hw, E1000_LENERRS);
-
- /* This register should not be read in copper configurations */
- if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
- e1000_sgmii_active_82575(hw))
- E1000_READ_REG(hw, E1000_SCVPC);
-}
-
-/**
- * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable
- * @hw: pointer to the HW structure
- *
- * After Rx enable, if manageability is enabled, there is likely some
- * bad data at the start of the FIFO and possibly in the DMA FIFO. This
- * function clears the FIFOs and flushes any packets that came in while
- * Rx was being enabled.
- **/
-void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
-{
- u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
- int i, ms_wait;
-
- DEBUGFUNC("e1000_rx_fifo_workaround_82575");
- if (hw->mac.type != e1000_82575 ||
- !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
- return;
-
- /* Disable all Rx queues */
- for (i = 0; i < 4; i++) {
- rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
- E1000_WRITE_REG(hw, E1000_RXDCTL(i),
- rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
- }
- /* Poll all queues to verify they have shut down */
- for (ms_wait = 0; ms_wait < 10; ms_wait++) {
- msec_delay(1);
- rx_enabled = 0;
- for (i = 0; i < 4; i++)
- rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
- if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
- break;
- }
-
- if (ms_wait == 10)
- DEBUGOUT("Queue disable timed out after 10ms\n");
-
- /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
- * incoming packets are rejected. Then set RCTL.EN and wait 2 ms so
- * that any packet that was arriving while RCTL.EN was being set is
- * flushed.
- */
- rfctl = E1000_READ_REG(hw, E1000_RFCTL);
- E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
-
- rlpml = E1000_READ_REG(hw, E1000_RLPML);
- E1000_WRITE_REG(hw, E1000_RLPML, 0);
-
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
- temp_rctl |= E1000_RCTL_LPE;
-
- E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
- E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
- E1000_WRITE_FLUSH(hw);
- msec_delay(2);
-
- /* Enable Rx queues that were previously enabled and restore our
- * previous state
- */
- for (i = 0; i < 4; i++)
- E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- E1000_WRITE_FLUSH(hw);
-
- E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
- E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
-
- /* Flush receive errors generated by workaround */
- E1000_READ_REG(hw, E1000_ROC);
- E1000_READ_REG(hw, E1000_RNBC);
- E1000_READ_REG(hw, E1000_MPC);
-}
-
-/**
- * e1000_set_pcie_completion_timeout - set pci-e completion timeout
- * @hw: pointer to the HW structure
- *
- * The defaults for 82575 and 82576 should be in the range of 50us to 50ms;
- * however, the hardware default for these parts is 500us to 1ms, which is
- * less than the 10ms recommended by the pci-e spec. To address this we need to
- * increase the value to either 10ms to 200ms for capability version 1 config,
- * or 16ms to 55ms for version 2.
- **/
-static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
-{
- u32 gcr = E1000_READ_REG(hw, E1000_GCR);
- s32 ret_val = E1000_SUCCESS;
- u16 pcie_devctl2;
-
- /* only take action if timeout value is defaulted to 0 */
- if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
- goto out;
-
- /*
- * if the capabilities version is type 1, we can write a
- * timeout of 10ms to 200ms through the GCR register
- */
- if (!(gcr & E1000_GCR_CAP_VER2)) {
- gcr |= E1000_GCR_CMPL_TMOUT_10ms;
- goto out;
- }
-
- /*
- * for version 2 capabilities we need to write the config space
- * directly in order to set the completion timeout value to the
- * 16ms to 55ms range
- */
- ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
- if (ret_val)
- goto out;
-
- pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
-
- ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
-out:
- /* disable completion timeout resend */
- gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
-
- E1000_WRITE_REG(hw, E1000_GCR, gcr);
- return ret_val;
-}
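
Which knob carries the completion timeout depends on the PCIe capability version bit in GCR: version 1 parts take the value through GCR itself, version 2 parts need a config-space write to Device Control 2. A minimal dispatch sketch with assumed encodings:

    #include <stdint.h>

    #define GCR_CAP_VER2       (1u << 26)  /* assumed bit */
    #define GCR_TMOUT_10MS     0x00001000u /* assumed encoding */
    #define DEVCTL2_TMOUT_16MS 0x0005u     /* assumed encoding */

    void write_gcr(uint32_t v);     /* assumed register write */
    void write_devctl2(uint16_t v); /* assumed config-space write */

    void set_completion_timeout(uint32_t gcr)
    {
        if (!(gcr & GCR_CAP_VER2))
            write_gcr(gcr | GCR_TMOUT_10MS);   /* v1: field lives in GCR */
        else
            write_devctl2(DEVCTL2_TMOUT_16MS); /* v2: config space */
    }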
-
-/**
- * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
- * @hw: pointer to the hardware struct
- * @enable: state to enter, either enabled or disabled
- * @pf: Physical Function pool - do not set anti-spoofing for the PF
- *
- * enables/disables L2 switch anti-spoofing functionality.
- **/
-void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
-{
- u32 reg_val, reg_offset;
-
- switch (hw->mac.type) {
- case e1000_82576:
- reg_offset = E1000_DTXSWC;
- break;
- case e1000_i350:
- case e1000_i354:
- reg_offset = E1000_TXSWC;
- break;
- default:
- return;
- }
-
- reg_val = E1000_READ_REG(hw, reg_offset);
- if (enable) {
- reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
- E1000_DTXSWC_VLAN_SPOOF_MASK);
- /* The PF can spoof - it has to in order to
- * support emulation mode NICs
- */
- reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
- } else {
- reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
- E1000_DTXSWC_VLAN_SPOOF_MASK);
- }
- E1000_WRITE_REG(hw, reg_offset, reg_val);
-}
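
Note the XOR in the enable path above: after setting the MAC and VLAN spoof-check masks for every pool, it flips the PF's own bit back off in both halves of the register (the VLAN half sits MAX_NUM_VFS positions above the MAC half), because the PF must remain able to spoof for emulation-mode NICs. Isolated as a sketch, with an assumed pool count:

    #include <stdint.h>

    #define NUM_VF_POOLS 8  /* assumed stand-in for MAX_NUM_VFS */

    /* Enable spoof checking for all pools except the PF's own. */
    uint32_t spoof_mask_except_pf(uint32_t mac_mask, uint32_t vlan_mask,
                                  unsigned int pf)
    {
        uint32_t v = mac_mask | vlan_mask;

        /* Clear the PF's bit in both the MAC and the VLAN half. */
        v ^= (1u << pf) | (1u << (pf + NUM_VF_POOLS));
        return v;
    }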
-
-/**
- * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
- * @hw: pointer to the hardware struct
- * @enable: state to enter, either enabled or disabled
- *
- * enables/disables L2 switch loopback functionality.
- **/
-void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
-{
- u32 dtxswc;
-
- switch (hw->mac.type) {
- case e1000_82576:
- dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
- if (enable)
- dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
- else
- dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
- E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
- break;
- case e1000_i350:
- case e1000_i354:
- dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
- if (enable)
- dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
- else
- dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
- E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
- break;
- default:
- /* Currently no other hardware supports loopback */
- break;
- }
-}
-
-/**
- * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
- * @hw: pointer to the hardware struct
- * @enable: state to enter, either enabled or disabled
- *
- * enables/disables replication of packets across multiple pools.
- **/
-void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
-{
- u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
-
- if (enable)
- vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
- else
- vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
-
- E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
-}
-
-/**
- * e1000_read_phy_reg_82580 - Read 82580 MDI control register
- * @hw: pointer to the HW structure
- * @offset: register offset to be read
- * @data: pointer to the read data
- *
- * Reads the MDI control register in the PHY at offset and stores the
- * information read to data.
- **/
-static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
-{
- s32 ret_val;
-
- DEBUGFUNC("e1000_read_phy_reg_82580");
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- goto out;
-
- ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
-
- hw->phy.ops.release(hw);
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_write_phy_reg_82580 - Write 82580 MDI control register
- * @hw: pointer to the HW structure
- * @offset: register offset to write to
- * @data: data to write to register at offset
- *
- * Writes data to MDI control register in the PHY at offset.
- **/
-static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
-{
- s32 ret_val;
-
- DEBUGFUNC("e1000_write_phy_reg_82580");
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- goto out;
-
- ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
-
- hw->phy.ops.release(hw);
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
- * @hw: pointer to the HW structure
- *
- * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
- * the values found in the EEPROM. This addresses an issue in which these
- * bits are not restored from EEPROM after reset.
- **/
-static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
- u32 mdicnfg;
- u16 nvm_data = 0;
-
- DEBUGFUNC("e1000_reset_mdicnfg_82580");
-
- if (hw->mac.type != e1000_82580)
- goto out;
- if (!e1000_sgmii_active_82575(hw))
- goto out;
-
- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
- NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
- &nvm_data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- goto out;
- }
-
- mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
- if (nvm_data & NVM_WORD24_EXT_MDIO)
- mdicnfg |= E1000_MDICNFG_EXT_MDIO;
- if (nvm_data & NVM_WORD24_COM_MDIO)
- mdicnfg |= E1000_MDICNFG_COM_MDIO;
- E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
-out:
- return ret_val;
-}
-
-/**
- * e1000_reset_hw_82580 - Reset hardware
- * @hw: pointer to the HW structure
- *
- * This resets the function or the entire device (all ports, etc.)
- * to a known state.
- **/
-static s32 e1000_reset_hw_82580(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
- /* BH SW mailbox bit in SW_FW_SYNC */
- u16 swmbsw_mask = E1000_SW_SYNCH_MB;
- u32 ctrl;
- bool global_device_reset = hw->dev_spec._82575.global_device_reset;
-
- DEBUGFUNC("e1000_reset_hw_82580");
-
- hw->dev_spec._82575.global_device_reset = false;
-
- /* 82580 does not reliably do global_device_reset due to hw errata */
- if (hw->mac.type == e1000_82580)
- global_device_reset = false;
-
- /* Get current control state. */
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
-
- /*
- * Prevent the PCI-E bus from sticking if there is no TLP connection
- * on the last TLP read/write transaction when MAC is reset.
- */
- ret_val = e1000_disable_pcie_master_generic(hw);
- if (ret_val)
- DEBUGOUT("PCI-E Master disable polling has failed.\n");
-
- DEBUGOUT("Masking off all interrupts\n");
- E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
- E1000_WRITE_REG(hw, E1000_RCTL, 0);
- E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
- E1000_WRITE_FLUSH(hw);
-
- msec_delay(10);
-
- /* Determine whether or not a global dev reset is requested */
- if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw,
- swmbsw_mask))
- global_device_reset = false;
-
- if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) &
- E1000_STAT_DEV_RST_SET))
- ctrl |= E1000_CTRL_DEV_RST;
- else
- ctrl |= E1000_CTRL_RST;
-
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
- E1000_WRITE_FLUSH(hw);
-
- /* Add delay to ensure DEV_RST has time to complete */
- if (global_device_reset)
- msec_delay(5);
-
- ret_val = e1000_get_auto_rd_done_generic(hw);
- if (ret_val) {
- /*
- * When auto config read does not complete, do not
- * return with an error. This can happen in situations
- * where there is no eeprom and prevents getting link.
- */
- DEBUGOUT("Auto Read Done did not complete\n");
- }
-
- /* clear global device reset status bit */
- E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
-
- /* Clear any pending interrupt events. */
- E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
- E1000_READ_REG(hw, E1000_ICR);
-
- ret_val = e1000_reset_mdicnfg_82580(hw);
- if (ret_val)
- DEBUGOUT("Could not reset MDICNFG based on EEPROM\n");
-
- /* Install any alternate MAC address into RAR0 */
- ret_val = e1000_check_alt_mac_addr_generic(hw);
-
- /* Release semaphore */
- if (global_device_reset)
- hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
-
- return ret_val;
-}
-
-/**
- * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size
- * @data: data received by reading RXPBS register
- *
- * The 82580 uses a table-based approach for packet buffer allocation sizes.
- * This function converts the retrieved value into the correct table value
- * (packet buffer size in KB):
- *        0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
- *  0x0    36  72 144   1   2   4   8  16
- *  0x8    35  70 140 rsv rsv rsv rsv rsv
- */
-u16 e1000_rxpbs_adjust_82580(u32 data)
-{
- u16 ret_val = 0;
-
- if (data < E1000_82580_RXPBS_TABLE_SIZE)
- ret_val = e1000_82580_rxpbs_table[data];
-
- return ret_val;
-}
-
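-/* Illustrative sketch (not part of the original driver): converting a raw
- * RXPBS register read into the Rx packet buffer size via the table above.
- * The helper name is hypothetical; E1000_RXPBS is assumed to be the
- * register offset define used elsewhere in this driver family.
- */
-static u16 example_rx_pba_82580(struct e1000_hw *hw)
-{
- u32 rxpbs = E1000_READ_REG(hw, E1000_RXPBS);
-
- /* e.g. an encoding of 0x2 maps to 144; encodings at or beyond
- * E1000_82580_RXPBS_TABLE_SIZE fall back to 0
- */
- return e1000_rxpbs_adjust_82580(rxpbs);
-}
-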
-/**
- * e1000_validate_nvm_checksum_with_offset - Validate EEPROM
- * checksum
- * @hw: pointer to the HW structure
- * @offset: offset in words of the checksum protected region
- *
- * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
- * and then verifies that the sum of the EEPROM is equal to 0xBABA.
- **/
-s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
-{
- s32 ret_val = E1000_SUCCESS;
- u16 checksum = 0;
- u16 i, nvm_data;
-
- DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
-
- for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
- ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- goto out;
- }
- checksum += nvm_data;
- }
-
- if (checksum != (u16) NVM_SUM) {
- DEBUGOUT("NVM Checksum Invalid\n");
- ret_val = -E1000_ERR_NVM;
- goto out;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_update_nvm_checksum_with_offset - Update EEPROM
- * checksum
- * @hw: pointer to the HW structure
- * @offset: offset in words of the checksum protected region
- *
- * Updates the EEPROM checksum by reading/adding each word of the EEPROM
- * up to the checksum. Then calculates the EEPROM checksum and writes the
- * value to the EEPROM.
- **/
-s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
-{
- s32 ret_val;
- u16 checksum = 0;
- u16 i, nvm_data;
-
- DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
-
- for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
- ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error while updating checksum.\n");
- goto out;
- }
- checksum += nvm_data;
- }
- checksum = (u16) NVM_SUM - checksum;
- ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
- &checksum);
- if (ret_val)
- DEBUGOUT("NVM Write Error while updating checksum.\n");
-
-out:
- return ret_val;
-}
-
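-/* Illustrative sketch (not part of the original driver): the arithmetic
- * behind the scheme above. The stored checksum word is chosen so that the
- * 16-bit sum of every word in the protected region, checksum included,
- * wraps to NVM_SUM (0xBABA). The helper name is hypothetical.
- */
-static u16 example_nvm_checksum(const u16 *words, u16 nwords)
-{
- u16 sum = 0;
- u16 i;
-
- /* sum the words that precede the checksum location */
- for (i = 0; i < nwords; i++)
- sum += words[i];
-
- /* e.g. if the words sum to 0x1234, store 0xBABA - 0x1234 = 0xA886 */
- return (u16)(NVM_SUM - sum);
-}
-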
-/**
- * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum
- * @hw: pointer to the HW structure
- *
- * Calculates the EEPROM section checksum by reading/adding each word of
- * the EEPROM and then verifies that the sum of the EEPROM is
- * equal to 0xBABA.
- **/
-static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
- u16 eeprom_regions_count = 1;
- u16 j, nvm_data;
- u16 nvm_offset;
-
- DEBUGFUNC("e1000_validate_nvm_checksum_82580");
-
- ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- goto out;
- }
-
- if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
- /* If the checksums compatibility bit is set, validate the
- * checksums for all 4 ports. */
- eeprom_regions_count = 4;
- }
-
- for (j = 0; j < eeprom_regions_count; j++) {
- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
- ret_val = e1000_validate_nvm_checksum_with_offset(hw,
- nvm_offset);
- if (ret_val != E1000_SUCCESS)
- goto out;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_update_nvm_checksum_82580 - Update EEPROM checksum
- * @hw: pointer to the HW structure
- *
- * Updates the EEPROM section checksums for all 4 ports by reading/adding
- * each word of the EEPROM up to the checksum. Then calculates the EEPROM
- * checksum and writes the value to the EEPROM.
- **/
-static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw)
-{
- s32 ret_val;
- u16 j, nvm_data;
- u16 nvm_offset;
-
- DEBUGFUNC("e1000_update_nvm_checksum_82580");
-
- ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n");
- goto out;
- }
-
- if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) {
- /* set compatibility bit to validate checksums appropriately */
- nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
- ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
- &nvm_data);
- if (ret_val) {
- DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n");
- goto out;
- }
- }
-
- for (j = 0; j < 4; j++) {
- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
- ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
- if (ret_val)
- goto out;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
- * @hw: pointer to the HW structure
- *
- * Calculates the EEPROM section checksum by reading/adding each word of
- * the EEPROM and then verifies that the sum of the EEPROM is
- * equal to 0xBABA.
- **/
-static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
- u16 j;
- u16 nvm_offset;
-
- DEBUGFUNC("e1000_validate_nvm_checksum_i350");
-
- for (j = 0; j < 4; j++) {
- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
- ret_val = e1000_validate_nvm_checksum_with_offset(hw,
- nvm_offset);
- if (ret_val != E1000_SUCCESS)
- goto out;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_update_nvm_checksum_i350 - Update EEPROM checksum
- * @hw: pointer to the HW structure
- *
- * Updates the EEPROM section checksums for all 4 ports by reading/adding
- * each word of the EEPROM up to the checksum. Then calculates the EEPROM
- * checksum and writes the value to the EEPROM.
- **/
-static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
- u16 j;
- u16 nvm_offset;
-
- DEBUGFUNC("e1000_update_nvm_checksum_i350");
-
- for (j = 0; j < 4; j++) {
- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
- ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
- if (ret_val != E1000_SUCCESS)
- goto out;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * __e1000_access_emi_reg - Read/write EMI register
- * @hw: pointer to the HW structure
- * @address: EMI address to program
- * @data: pointer to value to read/write from/to the EMI address
- * @read: boolean flag to indicate read or write
- **/
-static s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address,
- u16 *data, bool read)
-{
- s32 ret_val = E1000_SUCCESS;
-
- DEBUGFUNC("__e1000_access_emi_reg");
-
- ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
- if (ret_val)
- return ret_val;
-
- if (read)
- ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
- else
- ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
-
- return ret_val;
-}
-
-/**
- * e1000_read_emi_reg - Read Extended Management Interface register
- * @hw: pointer to the HW structure
- * @addr: EMI address to program
- * @data: pointer that receives the value read from the EMI address
- **/
-s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
-{
- DEBUGFUNC("e1000_read_emi_reg");
-
- return __e1000_access_emi_reg(hw, addr, data, true);
-}
-
-/**
- * e1000_set_eee_i350 - Enable/disable EEE support
- * @hw: pointer to the HW structure
- *
- * Enable/disable EEE based on setting in dev_spec structure.
- *
- **/
-s32 e1000_set_eee_i350(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
- u32 ipcnfg, eeer;
-
- DEBUGFUNC("e1000_set_eee_i350");
-
- if ((hw->mac.type < e1000_i350) ||
- (hw->phy.media_type != e1000_media_type_copper))
- goto out;
- ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
- eeer = E1000_READ_REG(hw, E1000_EEER);
-
- /* enable or disable per user setting */
- if (!(hw->dev_spec._82575.eee_disable)) {
- u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU);
-
- ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
- eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
- E1000_EEER_LPI_FC);
-
- /* This bit should not be set in normal operation. */
- if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
- DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
- } else {
- ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
- eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
- E1000_EEER_LPI_FC);
- }
- E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
- E1000_WRITE_REG(hw, E1000_EEER, eeer);
- E1000_READ_REG(hw, E1000_IPCNFG);
- E1000_READ_REG(hw, E1000_EEER);
-out:
-
- return ret_val;
-}
-
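-/* Illustrative sketch (not part of the original driver): a caller toggles
- * the dev_spec flag and re-applies it. The helper name is hypothetical.
- */
-static s32 example_disable_eee_i350(struct e1000_hw *hw)
-{
- /* request EEE off; the next apply clears the 1G/100M AN bits in
- * IPCNFG and the LPI enable bits in EEER
- */
- hw->dev_spec._82575.eee_disable = true;
-
- return e1000_set_eee_i350(hw);
-}
-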
-/**
- * e1000_set_eee_i354 - Enable/disable EEE support
- * @hw: pointer to the HW structure
- *
- * Enable/disable EEE legacy mode based on setting in dev_spec structure.
- *
- **/
-s32 e1000_set_eee_i354(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = E1000_SUCCESS;
- u16 phy_data;
-
- DEBUGFUNC("e1000_set_eee_i354");
-
- if ((hw->phy.media_type != e1000_media_type_copper) ||
- ((phy->id != M88E1543_E_PHY_ID)))
- goto out;
-
- if (!hw->dev_spec._82575.eee_disable) {
- /* Switch to PHY page 18. */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
- if (ret_val)
- goto out;
-
- ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
- &phy_data);
- if (ret_val)
- goto out;
-
- phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
- ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
- phy_data);
- if (ret_val)
- goto out;
-
- /* Return the PHY to page 0. */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
- if (ret_val)
- goto out;
-
- /* Turn on EEE advertisement. */
- ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
- E1000_EEE_ADV_DEV_I354,
- &phy_data);
- if (ret_val)
- goto out;
-
- phy_data |= E1000_EEE_ADV_100_SUPPORTED |
- E1000_EEE_ADV_1000_SUPPORTED;
- ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
- E1000_EEE_ADV_DEV_I354,
- phy_data);
- } else {
- /* Turn off EEE advertisement. */
- ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
- E1000_EEE_ADV_DEV_I354,
- &phy_data);
- if (ret_val)
- goto out;
-
- phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
- E1000_EEE_ADV_1000_SUPPORTED);
- ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
- E1000_EEE_ADV_DEV_I354,
- phy_data);
- }
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_get_eee_status_i354 - Get EEE status
- * @hw: pointer to the HW structure
- * @status: EEE status
- *
- * Gets the EEE status by checking whether Tx or Rx LPI indications have
- * been received.
- **/
-s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = E1000_SUCCESS;
- u16 phy_data;
-
- DEBUGFUNC("e1000_get_eee_status_i354");
-
- /* Check if EEE is supported on this device. */
- if ((hw->phy.media_type != e1000_media_type_copper) ||
- ((phy->id != M88E1543_E_PHY_ID)))
- goto out;
-
- ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
- E1000_PCS_STATUS_DEV_I354,
- &phy_data);
- if (ret_val)
- goto out;
-
- *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
- E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
-
-out:
- return ret_val;
-}
-
-/* Due to a hw errata, if the host tries to configure the VFTA register
- * while performing queries from the BMC or DMA, then the VFTA in some
- * cases won't be written.
- */
-
-/**
- * e1000_clear_vfta_i350 - Clear VLAN filter table
- * @hw: pointer to the HW structure
- *
- * Clears the register array which contains the VLAN filter table by
- * setting all the values to 0.
- **/
-void e1000_clear_vfta_i350(struct e1000_hw *hw)
-{
- u32 offset;
- int i;
-
- DEBUGFUNC("e1000_clear_vfta_350");
-
- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
- for (i = 0; i < 10; i++)
- E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
-
- E1000_WRITE_FLUSH(hw);
- }
-}
-
-/**
- * e1000_write_vfta_i350 - Write value to VLAN filter table
- * @hw: pointer to the HW structure
- * @offset: register offset in VLAN filter table
- * @value: register value written to VLAN filter table
- *
- * Writes value at the given offset in the register array which stores
- * the VLAN filter table.
- **/
-void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
-{
- int i;
-
- DEBUGFUNC("e1000_write_vfta_350");
-
- for (i = 0; i < 10; i++)
- E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
-
- E1000_WRITE_FLUSH(hw);
-}
-
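-/* Illustrative sketch (not part of the original driver): how a VLAN ID
- * maps onto the VFTA array written above. Each 32-bit VFTA register
- * covers 32 VLAN IDs. The helper name is hypothetical.
- */
-static void example_vfta_set_i350(struct e1000_hw *hw, u16 vid)
-{
- u32 offset = (vid >> 5) & (E1000_VLAN_FILTER_TBL_SIZE - 1);
- u32 value = E1000_READ_REG_ARRAY(hw, E1000_VFTA, offset);
-
- value |= 1u << (vid & 0x1F);
- e1000_write_vfta_i350(hw, offset, value); /* repeated write, per errata */
-}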
-
-/**
- * e1000_set_i2c_bb - Enable I2C bit-bang
- * @hw: pointer to the HW structure
- *
- * Enable I2C bit-bang interface
- *
- **/
-s32 e1000_set_i2c_bb(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
- u32 ctrl_ext, i2cparams;
-
- DEBUGFUNC("e1000_set_i2c_bb");
-
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- ctrl_ext |= E1000_CTRL_I2C_ENA;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
- E1000_WRITE_FLUSH(hw);
-
- i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS);
- i2cparams |= E1000_I2CBB_EN;
- i2cparams |= E1000_I2C_DATA_OE_N;
- i2cparams |= E1000_I2C_CLK_OE_N;
- E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams);
- E1000_WRITE_FLUSH(hw);
-
- return ret_val;
-}
-
-/**
- * e1000_read_i2c_byte_generic - Reads one byte over I2C
- * @hw: pointer to hardware structure
- * @byte_offset: byte offset to read
- * @dev_addr: device address
- * @data: value read
- *
- * Performs byte read operation over I2C interface at
- * a specified device address.
- **/
-s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data)
-{
- s32 status = E1000_SUCCESS;
- u32 max_retry = 10;
- u32 retry = 1;
- u16 swfw_mask = 0;
-
- bool nack = true;
-
- DEBUGFUNC("e1000_read_i2c_byte_generic");
-
- swfw_mask = E1000_SWFW_PHY0_SM;
-
- do {
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
- != E1000_SUCCESS) {
- status = E1000_ERR_SWFW_SYNC;
- goto read_byte_out;
- }
-
- e1000_i2c_start(hw);
-
- /* Device Address and write indication */
- status = e1000_clock_out_i2c_byte(hw, dev_addr);
- if (status != E1000_SUCCESS)
- goto fail;
-
- status = e1000_get_i2c_ack(hw);
- if (status != E1000_SUCCESS)
- goto fail;
-
- status = e1000_clock_out_i2c_byte(hw, byte_offset);
- if (status != E1000_SUCCESS)
- goto fail;
-
- status = e1000_get_i2c_ack(hw);
- if (status != E1000_SUCCESS)
- goto fail;
-
- e1000_i2c_start(hw);
-
- /* Device Address and read indication */
- status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1));
- if (status != E1000_SUCCESS)
- goto fail;
-
- status = e1000_get_i2c_ack(hw);
- if (status != E1000_SUCCESS)
- goto fail;
-
- status = e1000_clock_in_i2c_byte(hw, data);
- if (status != E1000_SUCCESS)
- goto fail;
-
- status = e1000_clock_out_i2c_bit(hw, nack);
- if (status != E1000_SUCCESS)
- goto fail;
-
- e1000_i2c_stop(hw);
- break;
-
-fail:
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
- msec_delay(100);
- e1000_i2c_bus_clear(hw);
- retry++;
- if (retry < max_retry)
- DEBUGOUT("I2C byte read error - Retrying.\n");
- else
- DEBUGOUT("I2C byte read error.\n");
-
- } while (retry < max_retry);
-
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-
-read_byte_out:
-
- return status;
-}
-
-/**
- * e1000_write_i2c_byte_generic - Writes one byte over I2C
- * @hw: pointer to hardware structure
- * @byte_offset: byte offset to write
- * @dev_addr: device address
- * @data: value to write
- *
- * Performs byte write operation over I2C interface at
- * a specified device address.
- **/
-s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data)
-{
- s32 status = E1000_SUCCESS;
- u32 max_retry = 1;
- u32 retry = 0;
- u16 swfw_mask = 0;
-
- DEBUGFUNC("e1000_write_i2c_byte_generic");
-
- swfw_mask = E1000_SWFW_PHY0_SM;
-
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) {
- status = E1000_ERR_SWFW_SYNC;
- goto write_byte_out;
- }
-
- do {
- e1000_i2c_start(hw);
-
- status = e1000_clock_out_i2c_byte(hw, dev_addr);
- if (status != E1000_SUCCESS)
- goto fail;
-
- status = e1000_get_i2c_ack(hw);
- if (status != E1000_SUCCESS)
- goto fail;
-
- status = e1000_clock_out_i2c_byte(hw, byte_offset);
- if (status != E1000_SUCCESS)
- goto fail;
-
- status = e1000_get_i2c_ack(hw);
- if (status != E1000_SUCCESS)
- goto fail;
-
- status = e1000_clock_out_i2c_byte(hw, data);
- if (status != E1000_SUCCESS)
- goto fail;
-
- status = e1000_get_i2c_ack(hw);
- if (status != E1000_SUCCESS)
- goto fail;
-
- e1000_i2c_stop(hw);
- break;
-
-fail:
- e1000_i2c_bus_clear(hw);
- retry++;
- if (retry < max_retry)
- DEBUGOUT("I2C byte write error - Retrying.\n");
- else
- DEBUGOUT("I2C byte write error.\n");
- } while (retry < max_retry);
-
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-
-write_byte_out:
-
- return status;
-}
-
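-/* Illustrative sketch (not part of the original driver): reading one byte
- * from the EMC thermal sensor over the bit-banged bus, as the thermal
- * sensor code further below does through hw->phy.ops.read_i2c_byte. The
- * helper name is hypothetical.
- */
-static s32 example_read_diode1_temp(struct e1000_hw *hw, u8 *temp)
-{
- s32 ret_val = e1000_set_i2c_bb(hw); /* enable bit-bang I2C */
-
- if (ret_val != E1000_SUCCESS)
- return ret_val;
-
- return e1000_read_i2c_byte_generic(hw, E1000_EMC_DIODE1_DATA,
- E1000_I2C_THERMAL_SENSOR_ADDR, temp);
-}
-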
-/**
- * e1000_i2c_start - Sets I2C start condition
- * @hw: pointer to hardware structure
- *
- * Sets I2C start condition (High -> Low on SDA while SCL is High)
- **/
-static void e1000_i2c_start(struct e1000_hw *hw)
-{
- u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
-
- DEBUGFUNC("e1000_i2c_start");
-
- /* Start condition must begin with data and clock high */
- e1000_set_i2c_data(hw, &i2cctl, 1);
- e1000_raise_i2c_clk(hw, &i2cctl);
-
- /* Setup time for start condition (4.7us) */
- usec_delay(E1000_I2C_T_SU_STA);
-
- e1000_set_i2c_data(hw, &i2cctl, 0);
-
- /* Hold time for start condition (4us) */
- usec_delay(E1000_I2C_T_HD_STA);
-
- e1000_lower_i2c_clk(hw, &i2cctl);
-
- /* Minimum low period of clock is 4.7 us */
- usec_delay(E1000_I2C_T_LOW);
-
-}
-
-/**
- * e1000_i2c_stop - Sets I2C stop condition
- * @hw: pointer to hardware structure
- *
- * Sets I2C stop condition (Low -> High on SDA while SCL is High)
- **/
-static void e1000_i2c_stop(struct e1000_hw *hw)
-{
- u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
-
- DEBUGFUNC("e1000_i2c_stop");
-
- /* Stop condition must begin with data low and clock high */
- e1000_set_i2c_data(hw, &i2cctl, 0);
- e1000_raise_i2c_clk(hw, &i2cctl);
-
- /* Setup time for stop condition (4us) */
- usec_delay(E1000_I2C_T_SU_STO);
-
- e1000_set_i2c_data(hw, &i2cctl, 1);
-
- /* Bus free time between stop and start (4.7us) */
- usec_delay(E1000_I2C_T_BUF);
-}
-
-/**
- * e1000_clock_in_i2c_byte - Clocks in one byte via I2C
- * @hw: pointer to hardware structure
- * @data: pointer to store the clocked-in byte
- *
- * Clocks in one byte data via I2C data/clock
- **/
-static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data)
-{
- s32 i;
- bool bit = 0;
-
- DEBUGFUNC("e1000_clock_in_i2c_byte");
-
- *data = 0;
- for (i = 7; i >= 0; i--) {
- e1000_clock_in_i2c_bit(hw, &bit);
- *data |= bit << i;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_clock_out_i2c_byte - Clocks out one byte via I2C
- * @hw: pointer to hardware structure
- * @data: data byte to clock out
- *
- * Clocks out one byte data via I2C data/clock
- **/
-static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data)
-{
- s32 status = E1000_SUCCESS;
- s32 i;
- u32 i2cctl;
- bool bit = 0;
-
- DEBUGFUNC("e1000_clock_out_i2c_byte");
-
- for (i = 7; i >= 0; i--) {
- bit = (data >> i) & 0x1;
- status = e1000_clock_out_i2c_bit(hw, bit);
-
- if (status != E1000_SUCCESS)
- break;
- }
-
- /* Release SDA line (set high) */
- i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
-
- i2cctl |= E1000_I2C_DATA_OE_N;
- E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
- E1000_WRITE_FLUSH(hw);
-
- return status;
-}
-
-/**
- * e1000_get_i2c_ack - Polls for I2C ACK
- * @hw: pointer to hardware structure
- *
- * Clocks one bit period and checks the data line for the slave's ACK (low)
- **/
-static s32 e1000_get_i2c_ack(struct e1000_hw *hw)
-{
- s32 status = E1000_SUCCESS;
- u32 i = 0;
- u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
- u32 timeout = 10;
- bool ack = true;
-
- DEBUGFUNC("e1000_get_i2c_ack");
-
- e1000_raise_i2c_clk(hw, &i2cctl);
-
- /* Minimum high period of clock is 4us */
- usec_delay(E1000_I2C_T_HIGH);
-
- /* Wait until SCL returns high */
- for (i = 0; i < timeout; i++) {
- usec_delay(1);
- i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
- if (i2cctl & E1000_I2C_CLK_IN)
- break;
- }
- if (!(i2cctl & E1000_I2C_CLK_IN))
- return E1000_ERR_I2C;
-
- ack = e1000_get_i2c_data(&i2cctl);
- if (ack) {
- DEBUGOUT("I2C ack was not received.\n");
- status = E1000_ERR_I2C;
- }
-
- e1000_lower_i2c_clk(hw, &i2cctl);
-
- /* Minimum low period of clock is 4.7 us */
- usec_delay(E1000_I2C_T_LOW);
-
- return status;
-}
-
-/**
- * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
- * @hw: pointer to hardware structure
- * @data: read data value
- *
- * Clocks in one bit via I2C data/clock
- **/
-static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data)
-{
- u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
-
- DEBUGFUNC("e1000_clock_in_i2c_bit");
-
- e1000_raise_i2c_clk(hw, &i2cctl);
-
- /* Minimum high period of clock is 4us */
- usec_delay(E1000_I2C_T_HIGH);
-
- i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
- *data = e1000_get_i2c_data(&i2cctl);
-
- e1000_lower_i2c_clk(hw, &i2cctl);
-
- /* Minimum low period of clock is 4.7 us */
- usec_delay(E1000_I2C_T_LOW);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
- * @hw: pointer to hardware structure
- * @data: data value to write
- *
- * Clocks out one bit via I2C data/clock
- **/
-static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data)
-{
- s32 status;
- u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
-
- DEBUGFUNC("e1000_clock_out_i2c_bit");
-
- status = e1000_set_i2c_data(hw, &i2cctl, data);
- if (status == E1000_SUCCESS) {
- e1000_raise_i2c_clk(hw, &i2cctl);
-
- /* Minimum high period of clock is 4us */
- usec_delay(E1000_I2C_T_HIGH);
-
- e1000_lower_i2c_clk(hw, &i2cctl);
-
- /* Minimum low period of clock is 4.7 us.
- * This also takes care of the data hold time.
- */
- usec_delay(E1000_I2C_T_LOW);
- } else {
- status = E1000_ERR_I2C;
- DEBUGOUT1("I2C data was not set to %X\n", data);
- }
-
- return status;
-}
-/**
- * e1000_raise_i2c_clk - Raises the I2C SCL clock
- * @hw: pointer to hardware structure
- * @i2cctl: Current value of I2CCTL register
- *
- * Raises the I2C clock line '0'->'1'
- **/
-static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
-{
- DEBUGFUNC("e1000_raise_i2c_clk");
-
- *i2cctl |= E1000_I2C_CLK_OUT;
- *i2cctl &= ~E1000_I2C_CLK_OE_N;
- E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
- E1000_WRITE_FLUSH(hw);
-
- /* SCL rise time (1000ns) */
- usec_delay(E1000_I2C_T_RISE);
-}
-
-/**
- * e1000_lower_i2c_clk - Lowers the I2C SCL clock
- * @hw: pointer to hardware structure
- * @i2cctl: Current value of I2CCTL register
- *
- * Lowers the I2C clock line '1'->'0'
- **/
-static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
-{
-
- DEBUGFUNC("e1000_lower_i2c_clk");
-
- *i2cctl &= ~E1000_I2C_CLK_OUT;
- *i2cctl &= ~E1000_I2C_CLK_OE_N;
- E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
- E1000_WRITE_FLUSH(hw);
-
- /* SCL fall time (300ns) */
- usec_delay(E1000_I2C_T_FALL);
-}
-
-/**
- * e1000_set_i2c_data - Sets the I2C data bit
- * @hw: pointer to hardware structure
- * @i2cctl: Current value of I2CCTL register
- * @data: I2C data value (0 or 1) to set
- *
- * Sets the I2C data bit
- **/
-static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data)
-{
- s32 status = E1000_SUCCESS;
-
- DEBUGFUNC("e1000_set_i2c_data");
-
- if (data)
- *i2cctl |= E1000_I2C_DATA_OUT;
- else
- *i2cctl &= ~E1000_I2C_DATA_OUT;
-
- *i2cctl &= ~E1000_I2C_DATA_OE_N;
- *i2cctl |= E1000_I2C_CLK_OE_N;
- E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
- E1000_WRITE_FLUSH(hw);
-
- /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
- usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA);
-
- *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
- if (data != e1000_get_i2c_data(i2cctl)) {
- status = E1000_ERR_I2C;
- DEBUGOUT1("Error - I2C data was not set to %X.\n", data);
- }
-
- return status;
-}
-
-/**
- * e1000_get_i2c_data - Reads the I2C SDA data bit
- * @i2cctl: Current value of I2CCTL register
- *
- * Returns the I2C data bit value
- **/
-static bool e1000_get_i2c_data(u32 *i2cctl)
-{
- bool data;
-
- DEBUGFUNC("e1000_get_i2c_data");
-
- if (*i2cctl & E1000_I2C_DATA_IN)
- data = 1;
- else
- data = 0;
-
- return data;
-}
-
-/**
- * e1000_i2c_bus_clear - Clears the I2C bus
- * @hw: pointer to hardware structure
- *
- * Clears the I2C bus by sending nine clock pulses.
- * Used when data line is stuck low.
- **/
-void e1000_i2c_bus_clear(struct e1000_hw *hw)
-{
- u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
- u32 i;
-
- DEBUGFUNC("e1000_i2c_bus_clear");
-
- e1000_i2c_start(hw);
-
- e1000_set_i2c_data(hw, &i2cctl, 1);
-
- for (i = 0; i < 9; i++) {
- e1000_raise_i2c_clk(hw, &i2cctl);
-
- /* Min high period of clock is 4us */
- usec_delay(E1000_I2C_T_HIGH);
-
- e1000_lower_i2c_clk(hw, &i2cctl);
-
- /* Min low period of clock is 4.7us */
- usec_delay(E1000_I2C_T_LOW);
- }
-
- e1000_i2c_start(hw);
-
- /* Put the i2c bus back to default state */
- e1000_i2c_stop(hw);
-}
-
-static const u8 e1000_emc_temp_data[4] = {
- E1000_EMC_INTERNAL_DATA,
- E1000_EMC_DIODE1_DATA,
- E1000_EMC_DIODE2_DATA,
- E1000_EMC_DIODE3_DATA
-};
-static const u8 e1000_emc_therm_limit[4] = {
- E1000_EMC_INTERNAL_THERM_LIMIT,
- E1000_EMC_DIODE1_THERM_LIMIT,
- E1000_EMC_DIODE2_THERM_LIMIT,
- E1000_EMC_DIODE3_THERM_LIMIT
-};
-
-/**
- * e1000_get_thermal_sensor_data_generic - Gathers thermal sensor data
- * @hw: pointer to hardware structure
- *
- * Updates the temperatures in mac.thermal_sensor_data
- **/
-s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw)
-{
- s32 status = E1000_SUCCESS;
- u16 ets_offset;
- u16 ets_cfg;
- u16 ets_sensor;
- u8 num_sensors;
- u8 sensor_index;
- u8 sensor_location;
- u8 i;
- struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
-
- DEBUGFUNC("e1000_get_thermal_sensor_data_generic");
-
- if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
- return E1000_NOT_IMPLEMENTED;
-
- data->sensor[0].temp = (E1000_READ_REG(hw, E1000_THMJT) & 0xFF);
-
- /* Return the internal sensor only if ETS is unsupported */
- e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset);
- if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
- return status;
-
- e1000_read_nvm(hw, ets_offset, 1, &ets_cfg);
- if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
- != NVM_ETS_TYPE_EMC)
- return E1000_NOT_IMPLEMENTED;
-
- num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
- if (num_sensors > E1000_MAX_SENSORS)
- num_sensors = E1000_MAX_SENSORS;
-
- for (i = 1; i < num_sensors; i++) {
- e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor);
- sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
- NVM_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
- NVM_ETS_DATA_LOC_SHIFT);
-
- if (sensor_location != 0)
- hw->phy.ops.read_i2c_byte(hw,
- e1000_emc_temp_data[sensor_index],
- E1000_I2C_THERMAL_SENSOR_ADDR,
- &data->sensor[i].temp);
- }
- return status;
-}
-
-/**
- * e1000_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
- * @hw: pointer to hardware structure
- *
- * Sets the thermal sensor thresholds according to the NVM map
- * and saves the threshold and location values into mac.thermal_sensor_data.
- **/
-s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
-{
- s32 status = E1000_SUCCESS;
- u16 ets_offset;
- u16 ets_cfg;
- u16 ets_sensor;
- u8 low_thresh_delta;
- u8 num_sensors;
- u8 sensor_index;
- u8 sensor_location;
- u8 therm_limit;
- u8 i;
- struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
-
- DEBUGFUNC("e1000_init_thermal_sensor_thresh_generic");
-
- if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
- return E1000_NOT_IMPLEMENTED;
-
- memset(data, 0, sizeof(struct e1000_thermal_sensor_data));
-
- data->sensor[0].location = 0x1;
- data->sensor[0].caution_thresh =
- (E1000_READ_REG(hw, E1000_THHIGHTC) & 0xFF);
- data->sensor[0].max_op_thresh =
- (E1000_READ_REG(hw, E1000_THLOWTC) & 0xFF);
-
- /* Return the internal sensor only if ETS is unsupported */
- e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset);
- if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
- return status;
-
- e1000_read_nvm(hw, ets_offset, 1, &ets_cfg);
- if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
- != NVM_ETS_TYPE_EMC)
- return E1000_NOT_IMPLEMENTED;
-
- low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
- NVM_ETS_LTHRES_DELTA_SHIFT);
- num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
-
- for (i = 1; i <= num_sensors; i++) {
- e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor);
- sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
- NVM_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
- NVM_ETS_DATA_LOC_SHIFT);
- therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
-
- hw->phy.ops.write_i2c_byte(hw,
- e1000_emc_therm_limit[sensor_index],
- E1000_I2C_THERMAL_SENSOR_ADDR,
- therm_limit);
-
- if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
- data->sensor[i].location = sensor_location;
- data->sensor[i].caution_thresh = therm_limit;
- data->sensor[i].max_op_thresh = therm_limit -
- low_thresh_delta;
- }
- }
- return status;
-}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.h
deleted file mode 100755
index 1aec75ab..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.h
+++ /dev/null
@@ -1,509 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _E1000_82575_H_
-#define _E1000_82575_H_
-
-#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_DEF1_DEF2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_OFF1_ON2))
-/*
- * Receive Address Register Count
- * Number of high/low register pairs in the RAR. The RAR (Receive Address
- * Registers) holds the directed and multicast addresses that we monitor.
- * These entries are also used for MAC-based filtering.
- */
-/*
- * For 82576, there is an additional set of RARs that begins at an offset
- * separate from the first set of RARs.
- */
-#define E1000_RAR_ENTRIES_82575 16
-#define E1000_RAR_ENTRIES_82576 24
-#define E1000_RAR_ENTRIES_82580 24
-#define E1000_RAR_ENTRIES_I350 32
-#define E1000_SW_SYNCH_MB 0x00000100
-#define E1000_STAT_DEV_RST_SET 0x00100000
-#define E1000_CTRL_DEV_RST 0x20000000
-
-struct e1000_adv_data_desc {
- __le64 buffer_addr; /* Address of the descriptor's data buffer */
- union {
- u32 data;
- struct {
- u32 datalen:16; /* Data buffer length */
- u32 rsvd:4;
- u32 dtyp:4; /* Descriptor type */
- u32 dcmd:8; /* Descriptor command */
- } config;
- } lower;
- union {
- u32 data;
- struct {
- u32 status:4; /* Descriptor status */
- u32 idx:4;
- u32 popts:6; /* Packet Options */
- u32 paylen:18; /* Payload length */
- } options;
- } upper;
-};
-
-#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */
-#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */
-#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */
-#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */
-#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */
-#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */
-#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */
-#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */
-#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */
-#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */
-#define E1000_ADV_DCMD_RS 0x8 /* Report Status */
-#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */
-#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */
-/* Extended Device Control */
-#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */
-
-struct e1000_adv_context_desc {
- union {
- u32 ip_config;
- struct {
- u32 iplen:9;
- u32 maclen:7;
- u32 vlan_tag:16;
- } fields;
- } ip_setup;
- u32 seq_num;
- union {
- u64 l4_config;
- struct {
- u32 mkrloc:9;
- u32 tucmd:11;
- u32 dtyp:4;
- u32 adv:8;
- u32 rsvd:4;
- u32 idx:4;
- u32 l4len:8;
- u32 mss:16;
- } fields;
- } l4_setup;
-};
-
-/* SRRCTL bit definitions */
-#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
-#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
-#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
-#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000
-#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
-#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
-#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
-#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000
-#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
-#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
-#define E1000_SRRCTL_TIMESTAMP 0x40000000
-#define E1000_SRRCTL_DROP_EN 0x80000000
-
-#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
-#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
-
-#define E1000_TX_HEAD_WB_ENABLE 0x1
-#define E1000_TX_SEQNUM_WB_ENABLE 0x2
-
-#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
-#define E1000_MRQC_ENABLE_VMDQ 0x00000003
-#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
-#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
-#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
-#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
-#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002
-
-#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8
-#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \
- E1000_VMRCTL_MIRROR_PORT_SHIFT)
-#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0)
-#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1)
-#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2)
-
-#define E1000_EICR_TX_QUEUE ( \
- E1000_EICR_TX_QUEUE0 | \
- E1000_EICR_TX_QUEUE1 | \
- E1000_EICR_TX_QUEUE2 | \
- E1000_EICR_TX_QUEUE3)
-
-#define E1000_EICR_RX_QUEUE ( \
- E1000_EICR_RX_QUEUE0 | \
- E1000_EICR_RX_QUEUE1 | \
- E1000_EICR_RX_QUEUE2 | \
- E1000_EICR_RX_QUEUE3)
-
-#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
-#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
-
-#define EIMS_ENABLE_MASK ( \
- E1000_EIMS_RX_QUEUE | \
- E1000_EIMS_TX_QUEUE | \
- E1000_EIMS_TCP_TIMER | \
- E1000_EIMS_OTHER)
-
-/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
-#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
-#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
-#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
-#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
-#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
-#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
-#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
-#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
-#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
-#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
-
-/* Receive Descriptor - Advanced */
-union e1000_adv_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- } read;
- struct {
- struct {
- union {
- __le32 data;
- struct {
- __le16 pkt_info; /*RSS type, Pkt type*/
- /* Split Header, header buffer len */
- __le16 hdr_info;
- } hs_rss;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- struct {
- __le16 ip_id; /* IP id */
- __le16 csum; /* Packet Checksum */
- } csum_ip;
- } hi_dword;
- } lower;
- struct {
- __le32 status_error; /* ext status/error */
- __le16 length; /* Packet length */
- __le16 vlan; /* VLAN tag */
- } upper;
- } wb; /* writeback */
-};
-
-#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F
-#define E1000_RXDADV_RSSTYPE_SHIFT 12
-#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
-#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
-#define E1000_RXDADV_SPLITHEADER_EN 0x00001000
-#define E1000_RXDADV_SPH 0x8000
-#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
-#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
-#define E1000_RXDADV_ERR_HBO 0x00800000
-
-/* RSS Hash results */
-#define E1000_RXDADV_RSSTYPE_NONE 0x00000000
-#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
-#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002
-#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
-#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004
-#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005
-#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
-#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
-#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
-#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
-
-/* RSS Packet Types as indicated in the receive descriptor */
-#define E1000_RXDADV_PKTTYPE_NONE 0x00000000
-#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
-#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */
-#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */
-#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */
-#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
-#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
-#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
-#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
-
-#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
-#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
-#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
-#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
-#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
-#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
-
-/* LinkSec results */
-/* Security Processing bit Indication */
-#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000
-#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
-#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
-#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
-#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
-
-#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000
-#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
-#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
-#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
-#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
-
-/* Transmit Descriptor - Advanced */
-union e1000_adv_tx_desc {
- struct {
- __le64 buffer_addr; /* Address of descriptor's data buf */
- __le32 cmd_type_len;
- __le32 olinfo_status;
- } read;
- struct {
- __le64 rsvd; /* Reserved */
- __le32 nxtseq_seed;
- __le32 status;
- } wb;
-};
-
-/* Adv Transmit Descriptor Config Masks */
-#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
-#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
-#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
-#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
-#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
-#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
-#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
-#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
-#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */
-#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */
-#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */
-#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
-#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
-#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
-#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
-/* 1st & Last TSO-full iSCSI PDU*/
-#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800
-#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
-#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
-
-/* Context descriptors */
-struct e1000_adv_tx_context_desc {
- __le32 vlan_macip_lens;
- __le32 seqnum_seed;
- __le32 type_tucmd_mlhl;
- __le32 mss_l4len_idx;
-};
-
-#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
-#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
-#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
-#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
-#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
-#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
-#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
-#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
-/* IPSec Encrypt Enable for ESP */
-#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
-/* Req requires Markers and CRC */
-#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000
-#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
-#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
-/* Adv ctxt IPSec SA IDX mask */
-#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF
-/* Adv ctxt IPSec ESP len mask */
-#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF
-
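-/* Illustrative sketch (not part of the original header): packing an
- * advanced Tx context descriptor for an IPv4/TCP TSO frame with the
- * shifts and masks above. The helper name is hypothetical.
- */
-static inline void example_fill_tso_ctxt(struct e1000_adv_tx_context_desc *d,
- u8 maclen, u16 iplen, u8 l4len, u16 mss)
-{
- d->vlan_macip_lens = cpu_to_le32(((u32)maclen << E1000_ADVTXD_MACLEN_SHIFT) |
- iplen);
- d->seqnum_seed = 0;
- d->type_tucmd_mlhl = cpu_to_le32(E1000_ADVTXD_DCMD_DEXT |
- E1000_ADVTXD_DTYP_CTXT |
- E1000_ADVTXD_TUCMD_IPV4 |
- E1000_ADVTXD_TUCMD_L4T_TCP);
- d->mss_l4len_idx = cpu_to_le32(((u32)mss << E1000_ADVTXD_MSS_SHIFT) |
- ((u32)l4len << E1000_ADVTXD_L4LEN_SHIFT));
-}
-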
-/* Additional Transmit Descriptor Control definitions */
-#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
-#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */
-/* Tx Queue Arbitration Priority 0=low, 1=high */
-#define E1000_TXDCTL_PRIORITY 0x08000000
-
-/* Additional Receive Descriptor Control definitions */
-#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
-#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */
-
-/* Direct Cache Access (DCA) definitions */
-#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
-#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
-
-#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
-#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
-
-#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
-#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */
-#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */
-#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */
-
-#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
-#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
-
-#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
-#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
-#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
-#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
-
-/* Additional interrupt register bit definitions */
-#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */
-#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
-#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
-
-/* ETQF register bit definitions */
-#define E1000_ETQF_FILTER_ENABLE (1 << 26)
-#define E1000_ETQF_IMM_INT (1 << 29)
-#define E1000_ETQF_1588 (1 << 30)
-#define E1000_ETQF_QUEUE_ENABLE (1 << 31)
-/*
- * ETQF filter list: one static filter per filter consumer. This is
- * to avoid filter collisions later. Add new filters
- * here!!
- *
- * Current filters:
- * EAPOL 802.1x (0x888e): Filter 0
- */
-#define E1000_ETQF_FILTER_EAPOL 0
-
-#define E1000_FTQF_VF_BP 0x00008000
-#define E1000_FTQF_1588_TIME_STAMP 0x08000000
-#define E1000_FTQF_MASK 0xF0000000
-#define E1000_FTQF_MASK_PROTO_BP 0x10000000
-#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
-#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000
-#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
-
-#define E1000_NVM_APME_82575 0x0400
-#define MAX_NUM_VFS 7
-
-#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */
-#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */
-#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
-#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
-#define E1000_DTXSWC_LLE_SHIFT 16
-#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
-
-/* Easy defines for setting default pool, would normally be left a zero */
-#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
-#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
-
-/* Other useful VMD_CTL register defines */
-#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
-#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
-#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
-
-/* Per VM Offload register setup */
-#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
-#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
-#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
-#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
-#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
-#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
-#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
-#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
-#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
-#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
-
-#define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */
-#define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous enable */
-#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */
-#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
-#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */
-
-#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */
-#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */
-
-#define E1000_VLVF_ARRAY_SIZE 32
-#define E1000_VLVF_VLANID_MASK 0x00000FFF
-#define E1000_VLVF_POOLSEL_SHIFT 12
-#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
-#define E1000_VLVF_LVLAN 0x00100000
-#define E1000_VLVF_VLANID_ENABLE 0x80000000
-
-#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
-#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
-
-#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
-
-#define E1000_IOVCTL 0x05BBC
-#define E1000_IOVCTL_REUSE_VFQ 0x00000001
-
-#define E1000_RPLOLR_STRVLAN 0x40000000
-#define E1000_RPLOLR_STRCRC 0x80000000
-
-#define E1000_TCTL_EXT_COLD 0x000FFC00
-#define E1000_TCTL_EXT_COLD_SHIFT 10
-
-#define E1000_DTXCTL_8023LL 0x0004
-#define E1000_DTXCTL_VLAN_ADDED 0x0008
-#define E1000_DTXCTL_OOS_ENABLE 0x0010
-#define E1000_DTXCTL_MDP_EN 0x0020
-#define E1000_DTXCTL_SPOOF_INT 0x0040
-
-#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
-
-#define ALL_QUEUES 0xFFFF
-
-/* Rx packet buffer size defines */
-#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
-void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
-void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
-void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
-s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
-
-u16 e1000_rxpbs_adjust_82580(u32 data);
-s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
-s32 e1000_set_eee_i350(struct e1000_hw *);
-s32 e1000_set_eee_i354(struct e1000_hw *);
-s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *);
-#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
-#define E1000_EMC_INTERNAL_DATA 0x00
-#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20
-#define E1000_EMC_DIODE1_DATA 0x01
-#define E1000_EMC_DIODE1_THERM_LIMIT 0x19
-#define E1000_EMC_DIODE2_DATA 0x23
-#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A
-#define E1000_EMC_DIODE3_DATA 0x2A
-#define E1000_EMC_DIODE3_THERM_LIMIT 0x30
-
-s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw);
-s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw);
-
-/* I2C SDA and SCL timing parameters for standard mode */
-#define E1000_I2C_T_HD_STA 4
-#define E1000_I2C_T_LOW 5
-#define E1000_I2C_T_HIGH 4
-#define E1000_I2C_T_SU_STA 5
-#define E1000_I2C_T_HD_DATA 5
-#define E1000_I2C_T_SU_DATA 1
-#define E1000_I2C_T_RISE 1
-#define E1000_I2C_T_FALL 1
-#define E1000_I2C_T_SU_STO 4
-#define E1000_I2C_T_BUF 5
-
-s32 e1000_set_i2c_bb(struct e1000_hw *hw);
-s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data);
-s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data);
-void e1000_i2c_bus_clear(struct e1000_hw *hw);
-#endif /* _E1000_82575_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.c
deleted file mode 100755
index b1d748fe..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.c
+++ /dev/null
@@ -1,1160 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "e1000_api.h"
-
-/**
- * e1000_init_mac_params - Initialize MAC function pointers
- * @hw: pointer to the HW structure
- *
- * This function initializes the function pointers for the MAC
- * set of functions. Called by drivers or by e1000_setup_init_funcs.
- **/
-s32 e1000_init_mac_params(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
-
- if (hw->mac.ops.init_params) {
- ret_val = hw->mac.ops.init_params(hw);
- if (ret_val) {
- DEBUGOUT("MAC Initialization Error\n");
- goto out;
- }
- } else {
- DEBUGOUT("mac.init_mac_params was NULL\n");
- ret_val = -E1000_ERR_CONFIG;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_init_nvm_params - Initialize NVM function pointers
- * @hw: pointer to the HW structure
- *
- * This function initializes the function pointers for the NVM
- * set of functions. Called by drivers or by e1000_setup_init_funcs.
- **/
-s32 e1000_init_nvm_params(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
-
- if (hw->nvm.ops.init_params) {
- ret_val = hw->nvm.ops.init_params(hw);
- if (ret_val) {
- DEBUGOUT("NVM Initialization Error\n");
- goto out;
- }
- } else {
- DEBUGOUT("nvm.init_nvm_params was NULL\n");
- ret_val = -E1000_ERR_CONFIG;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_init_phy_params - Initialize PHY function pointers
- * @hw: pointer to the HW structure
- *
- * This function initializes the function pointers for the PHY
- * set of functions. Called by drivers or by e1000_setup_init_funcs.
- **/
-s32 e1000_init_phy_params(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
-
- if (hw->phy.ops.init_params) {
- ret_val = hw->phy.ops.init_params(hw);
- if (ret_val) {
- DEBUGOUT("PHY Initialization Error\n");
- goto out;
- }
- } else {
- DEBUGOUT("phy.init_phy_params was NULL\n");
- ret_val = -E1000_ERR_CONFIG;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_init_mbx_params - Initialize mailbox function pointers
- * @hw: pointer to the HW structure
- *
- * This function initializes the function pointers for the mailbox
- * set of functions. Called by drivers or by e1000_setup_init_funcs.
- **/
-s32 e1000_init_mbx_params(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
-
- if (hw->mbx.ops.init_params) {
- ret_val = hw->mbx.ops.init_params(hw);
- if (ret_val) {
- DEBUGOUT("Mailbox Initialization Error\n");
- goto out;
- }
- } else {
- DEBUGOUT("mbx.init_mbx_params was NULL\n");
- ret_val = -E1000_ERR_CONFIG;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_set_mac_type - Sets MAC type
- * @hw: pointer to the HW structure
- *
- * This function sets the MAC type of the adapter based on the
- * device ID stored in the hw structure.
- * MUST BE FIRST FUNCTION CALLED (explicitly or through
- * e1000_setup_init_funcs()).
- **/
-s32 e1000_set_mac_type(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
- s32 ret_val = E1000_SUCCESS;
-
- DEBUGFUNC("e1000_set_mac_type");
-
- switch (hw->device_id) {
- case E1000_DEV_ID_82575EB_COPPER:
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- mac->type = e1000_82575;
- break;
- case E1000_DEV_ID_82576:
- case E1000_DEV_ID_82576_FIBER:
- case E1000_DEV_ID_82576_SERDES:
- case E1000_DEV_ID_82576_QUAD_COPPER:
- case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
- case E1000_DEV_ID_82576_NS:
- case E1000_DEV_ID_82576_NS_SERDES:
- case E1000_DEV_ID_82576_SERDES_QUAD:
- mac->type = e1000_82576;
- break;
- case E1000_DEV_ID_82580_COPPER:
- case E1000_DEV_ID_82580_FIBER:
- case E1000_DEV_ID_82580_SERDES:
- case E1000_DEV_ID_82580_SGMII:
- case E1000_DEV_ID_82580_COPPER_DUAL:
- case E1000_DEV_ID_82580_QUAD_FIBER:
- case E1000_DEV_ID_DH89XXCC_SGMII:
- case E1000_DEV_ID_DH89XXCC_SERDES:
- case E1000_DEV_ID_DH89XXCC_BACKPLANE:
- case E1000_DEV_ID_DH89XXCC_SFP:
- mac->type = e1000_82580;
- break;
- case E1000_DEV_ID_I350_COPPER:
- case E1000_DEV_ID_I350_FIBER:
- case E1000_DEV_ID_I350_SERDES:
- case E1000_DEV_ID_I350_SGMII:
- case E1000_DEV_ID_I350_DA4:
- mac->type = e1000_i350;
- break;
- case E1000_DEV_ID_I210_COPPER_FLASHLESS:
- case E1000_DEV_ID_I210_SERDES_FLASHLESS:
- case E1000_DEV_ID_I210_COPPER:
- case E1000_DEV_ID_I210_COPPER_OEM1:
- case E1000_DEV_ID_I210_COPPER_IT:
- case E1000_DEV_ID_I210_FIBER:
- case E1000_DEV_ID_I210_SERDES:
- case E1000_DEV_ID_I210_SGMII:
- mac->type = e1000_i210;
- break;
- case E1000_DEV_ID_I211_COPPER:
- mac->type = e1000_i211;
- break;
-
- case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
- case E1000_DEV_ID_I354_SGMII:
- case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
- mac->type = e1000_i354;
- break;
- default:
- /* Should never have loaded on this device */
- ret_val = -E1000_ERR_MAC_INIT;
- break;
- }
-
- return ret_val;
-}
-
-/**
- * e1000_setup_init_funcs - Initializes function pointers
- * @hw: pointer to the HW structure
- * @init_device: true will initialize the rest of the function pointers
- * getting the device ready for use. false will only set
- * MAC type and the function pointers for the other init
- * functions. Passing false will not generate any hardware
- * reads or writes.
- *
- * This function must be called by a driver in order to use the rest
- * of the 'shared' code files. Called by drivers only.
- **/
-s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
-{
- s32 ret_val;
-
- /* Can't do much good without knowing the MAC type. */
- ret_val = e1000_set_mac_type(hw);
- if (ret_val) {
- DEBUGOUT("ERROR: MAC type could not be set properly.\n");
- goto out;
- }
-
- if (!hw->hw_addr) {
- DEBUGOUT("ERROR: Registers not mapped\n");
- ret_val = -E1000_ERR_CONFIG;
- goto out;
- }
-
- /*
- * Init function pointers to generic implementations. We do this first
- * allowing a driver module to override it afterward.
- */
- e1000_init_mac_ops_generic(hw);
- e1000_init_phy_ops_generic(hw);
- e1000_init_nvm_ops_generic(hw);
- e1000_init_mbx_ops_generic(hw);
-
- /*
- * Set up the init function pointers. These are functions within the
- * adapter family file that set up function pointers for the rest of
- * the functions in that family.
- */
- switch (hw->mac.type) {
- case e1000_82575:
- case e1000_82576:
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- e1000_init_function_pointers_82575(hw);
- break;
- case e1000_i210:
- case e1000_i211:
- e1000_init_function_pointers_i210(hw);
- break;
- default:
- DEBUGOUT("Hardware not supported\n");
- ret_val = -E1000_ERR_CONFIG;
- break;
- }
-
- /*
- * Initialize the rest of the function pointers. These require some
- * register reads/writes in some cases.
- */
- if (!(ret_val) && init_device) {
- ret_val = e1000_init_mac_params(hw);
- if (ret_val)
- goto out;
-
- ret_val = e1000_init_nvm_params(hw);
- if (ret_val)
- goto out;
-
- ret_val = e1000_init_phy_params(hw);
- if (ret_val)
- goto out;
-
- ret_val = e1000_init_mbx_params(hw);
- if (ret_val)
- goto out;
- }
-
-out:
- return ret_val;
-}
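-
-/*
- * Illustrative sketch of the bring-up ordering described above; the wrapper
- * is hypothetical, the calls are this file's API. Passing init_device = true
- * also runs the MAC/NVM/PHY/mailbox parameter init, after which the device
- * can be reset and initialized.
- */
-static s32 example_bring_up(struct e1000_hw *hw)
-{
- s32 ret_val = e1000_setup_init_funcs(hw, true);
-
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_reset_hw(hw);
- if (ret_val)
- return ret_val;
-
- return e1000_init_hw(hw);
-}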
-
-/**
- * e1000_get_bus_info - Obtain bus information for adapter
- * @hw: pointer to the HW structure
- *
- * This will obtain information about the HW bus for which the
- * adapter is attached and stores it in the hw structure. This is a
- * function pointer entry point called by drivers.
- **/
-s32 e1000_get_bus_info(struct e1000_hw *hw)
-{
- if (hw->mac.ops.get_bus_info)
- return hw->mac.ops.get_bus_info(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_clear_vfta - Clear VLAN filter table
- * @hw: pointer to the HW structure
- *
- * This clears the VLAN filter table on the adapter. This is a function
- * pointer entry point called by drivers.
- **/
-void e1000_clear_vfta(struct e1000_hw *hw)
-{
- if (hw->mac.ops.clear_vfta)
- hw->mac.ops.clear_vfta(hw);
-}
-
-/**
- * e1000_write_vfta - Write value to VLAN filter table
- * @hw: pointer to the HW structure
- * @offset: the 32-bit offset in which to write the value to.
- * @value: the 32-bit value to write at location offset.
- *
- * This writes a 32-bit value to a 32-bit offset in the VLAN filter
- * table. This is a function pointer entry point called by drivers.
- **/
-void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
-{
- if (hw->mac.ops.write_vfta)
- hw->mac.ops.write_vfta(hw, offset, value);
-}
-
-/**
- * e1000_update_mc_addr_list - Update Multicast addresses
- * @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
- *
- * Updates the Multicast Table Array.
- * The caller must have a packed mc_addr_list of multicast addresses.
- **/
-void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count)
-{
- if (hw->mac.ops.update_mc_addr_list)
- hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
- mc_addr_count);
-}
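-
-/*
- * Illustrative sketch: the "packed" list required above is simply
- * mc_addr_count 6-byte addresses laid out back to back. The wrapper and the
- * two example multicast addresses are hypothetical.
- */
-static void example_program_mc_filters(struct e1000_hw *hw)
-{
- static u8 mc_list[2 * 6] = {
- 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01, /* 224.0.0.1 */
- 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb, /* 224.0.0.251 */
- };
-
- e1000_update_mc_addr_list(hw, mc_list, 2);
-}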
-
-/**
- * e1000_force_mac_fc - Force MAC flow control
- * @hw: pointer to the HW structure
- *
- * Force the MAC's flow control settings. Currently no func pointer exists
- * and all implementations are handled in the generic version of this
- * function.
- **/
-s32 e1000_force_mac_fc(struct e1000_hw *hw)
-{
- return e1000_force_mac_fc_generic(hw);
-}
-
-/**
- * e1000_check_for_link - Check/Store link connection
- * @hw: pointer to the HW structure
- *
- * This checks the link condition of the adapter and stores the
- * results in the hw->mac structure. This is a function pointer entry
- * point called by drivers.
- **/
-s32 e1000_check_for_link(struct e1000_hw *hw)
-{
- if (hw->mac.ops.check_for_link)
- return hw->mac.ops.check_for_link(hw);
-
- return -E1000_ERR_CONFIG;
-}
-
-/**
- * e1000_check_mng_mode - Check management mode
- * @hw: pointer to the HW structure
- *
- * This checks if the adapter has manageability enabled.
- * This is a function pointer entry point called by drivers.
- **/
-bool e1000_check_mng_mode(struct e1000_hw *hw)
-{
- if (hw->mac.ops.check_mng_mode)
- return hw->mac.ops.check_mng_mode(hw);
-
- return false;
-}
-
-/**
- * e1000_mng_write_dhcp_info - Writes DHCP info to host interface
- * @hw: pointer to the HW structure
- * @buffer: pointer to the host interface
- * @length: size of the buffer
- *
- * Writes the DHCP information to the host interface.
- **/
-s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
-{
- return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
-}
-
-/**
- * e1000_reset_hw - Reset hardware
- * @hw: pointer to the HW structure
- *
- * This resets the hardware into a known state. This is a function pointer
- * entry point called by drivers.
- **/
-s32 e1000_reset_hw(struct e1000_hw *hw)
-{
- if (hw->mac.ops.reset_hw)
- return hw->mac.ops.reset_hw(hw);
-
- return -E1000_ERR_CONFIG;
-}
-
-/**
- * e1000_init_hw - Initialize hardware
- * @hw: pointer to the HW structure
- *
- * This inits the hardware readying it for operation. This is a function
- * pointer entry point called by drivers.
- **/
-s32 e1000_init_hw(struct e1000_hw *hw)
-{
- if (hw->mac.ops.init_hw)
- return hw->mac.ops.init_hw(hw);
-
- return -E1000_ERR_CONFIG;
-}
-
-/**
- * e1000_setup_link - Configures link and flow control
- * @hw: pointer to the HW structure
- *
- * This configures link and flow control settings for the adapter. This
- * is a function pointer entry point called by drivers. While modules can
- * also call this, they probably call their own version of this function.
- **/
-s32 e1000_setup_link(struct e1000_hw *hw)
-{
- if (hw->mac.ops.setup_link)
- return hw->mac.ops.setup_link(hw);
-
- return -E1000_ERR_CONFIG;
-}
-
-/**
- * e1000_get_speed_and_duplex - Returns current speed and duplex
- * @hw: pointer to the HW structure
- * @speed: pointer to a 16-bit value to store the speed
- * @duplex: pointer to a 16-bit value to store the duplex.
- *
- * This returns the speed and duplex of the adapter in the two 'out'
- * variables passed in. This is a function pointer entry point called
- * by drivers.
- **/
-s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
-{
- if (hw->mac.ops.get_link_up_info)
- return hw->mac.ops.get_link_up_info(hw, speed, duplex);
-
- return -E1000_ERR_CONFIG;
-}
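-
-/*
- * Illustrative sketch: both 'out' arguments must point at valid u16
- * storage. SPEED_1000 and FULL_DUPLEX come from e1000_defines.h; the
- * wrapper itself is hypothetical.
- */
-static bool example_link_is_gig_full(struct e1000_hw *hw)
-{
- u16 speed, duplex;
-
- if (e1000_get_speed_and_duplex(hw, &speed, &duplex) != E1000_SUCCESS)
- return false;
-
- return (speed == SPEED_1000) && (duplex == FULL_DUPLEX);
-}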
-
-/**
- * e1000_setup_led - Configures SW controllable LED
- * @hw: pointer to the HW structure
- *
- * This prepares the SW controllable LED for use and saves the current state
- * of the LED so it can be later restored. This is a function pointer entry
- * point called by drivers.
- **/
-s32 e1000_setup_led(struct e1000_hw *hw)
-{
- if (hw->mac.ops.setup_led)
- return hw->mac.ops.setup_led(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_cleanup_led - Restores SW controllable LED
- * @hw: pointer to the HW structure
- *
- * This restores the SW controllable LED to the value saved off by
- * e1000_setup_led. This is a function pointer entry point called by drivers.
- **/
-s32 e1000_cleanup_led(struct e1000_hw *hw)
-{
- if (hw->mac.ops.cleanup_led)
- return hw->mac.ops.cleanup_led(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_blink_led - Blink SW controllable LED
- * @hw: pointer to the HW structure
- *
- * This starts the adapter LED blinking. Request the LED to be setup first
- * and cleaned up after. This is a function pointer entry point called by
- * drivers.
- **/
-s32 e1000_blink_led(struct e1000_hw *hw)
-{
- if (hw->mac.ops.blink_led)
- return hw->mac.ops.blink_led(hw);
-
- return E1000_SUCCESS;
-}
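-
-/*
- * Illustrative sketch of the ordering the comment above asks for: set up
- * the LED, blink it for an identify window, then restore the saved state.
- * The wrapper and the 5 second window are hypothetical; msec_delay() is the
- * delay helper used by this shared code.
- */
-static void example_identify_adapter(struct e1000_hw *hw)
-{
- if (e1000_setup_led(hw) != E1000_SUCCESS)
- return;
-
- e1000_blink_led(hw);
- msec_delay(5000);
- e1000_cleanup_led(hw);
-}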
-
-/**
- * e1000_id_led_init - store LED configurations in SW
- * @hw: pointer to the HW structure
- *
- * Initializes the LED config in SW. This is a function pointer entry point
- * called by drivers.
- **/
-s32 e1000_id_led_init(struct e1000_hw *hw)
-{
- if (hw->mac.ops.id_led_init)
- return hw->mac.ops.id_led_init(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_led_on - Turn on SW controllable LED
- * @hw: pointer to the HW structure
- *
- * Turns the SW defined LED on. This is a function pointer entry point
- * called by drivers.
- **/
-s32 e1000_led_on(struct e1000_hw *hw)
-{
- if (hw->mac.ops.led_on)
- return hw->mac.ops.led_on(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_led_off - Turn off SW controllable LED
- * @hw: pointer to the HW structure
- *
- * Turns the SW defined LED off. This is a function pointer entry point
- * called by drivers.
- **/
-s32 e1000_led_off(struct e1000_hw *hw)
-{
- if (hw->mac.ops.led_off)
- return hw->mac.ops.led_off(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_reset_adaptive - Reset adaptive IFS
- * @hw: pointer to the HW structure
- *
- * Resets the adaptive IFS. Currently no func pointer exists and all
- * implementations are handled in the generic version of this function.
- **/
-void e1000_reset_adaptive(struct e1000_hw *hw)
-{
- e1000_reset_adaptive_generic(hw);
-}
-
-/**
- * e1000_update_adaptive - Update adaptive IFS
- * @hw: pointer to the HW structure
- *
- * Updates the adaptive IFS. Currently no func pointer exists and all
- * implementations are handled in the generic version of this function.
- **/
-void e1000_update_adaptive(struct e1000_hw *hw)
-{
- e1000_update_adaptive_generic(hw);
-}
-
-/**
- * e1000_disable_pcie_master - Disable PCI-Express master access
- * @hw: pointer to the HW structure
- *
- * Disables PCI-Express master access and verifies there are no pending
- * requests. Currently no func pointer exists and all implementations are
- * handled in the generic version of this function.
- **/
-s32 e1000_disable_pcie_master(struct e1000_hw *hw)
-{
- return e1000_disable_pcie_master_generic(hw);
-}
-
-/**
- * e1000_config_collision_dist - Configure collision distance
- * @hw: pointer to the HW structure
- *
- * Configures the collision distance to the default value and is used
- * during link setup.
- **/
-void e1000_config_collision_dist(struct e1000_hw *hw)
-{
- if (hw->mac.ops.config_collision_dist)
- hw->mac.ops.config_collision_dist(hw);
-}
-
-/**
- * e1000_rar_set - Sets a receive address register
- * @hw: pointer to the HW structure
- * @addr: address to set the RAR to
- * @index: the RAR to set
- *
- * Sets a Receive Address Register (RAR) to the specified address.
- **/
-void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
-{
- if (hw->mac.ops.rar_set)
- hw->mac.ops.rar_set(hw, addr, index);
-}
-
-/**
- * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
- * @hw: pointer to the HW structure
- *
- * Ensures that the MDI/MDIX SW state is valid.
- **/
-s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
-{
- if (hw->mac.ops.validate_mdi_setting)
- return hw->mac.ops.validate_mdi_setting(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_hash_mc_addr - Determines address location in multicast table
- * @hw: pointer to the HW structure
- * @mc_addr: Multicast address to hash.
- *
- * This hashes an address to determine its location in the multicast
- * table. Currently no func pointer exists and all implementations
- * are handled in the generic version of this function.
- **/
-u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
-{
- return e1000_hash_mc_addr_generic(hw, mc_addr);
-}
-
-/**
- * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
- * @hw: pointer to the HW structure
- *
- * Enables packet filtering on transmit packets if manageability is enabled
- * and host interface is enabled.
- * Currently no func pointer exists and all implementations are handled in the
- * generic version of this function.
- **/
-bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
-{
- return e1000_enable_tx_pkt_filtering_generic(hw);
-}
-
-/**
- * e1000_mng_host_if_write - Writes to the manageability host interface
- * @hw: pointer to the HW structure
- * @buffer: pointer to the host interface buffer
- * @length: size of the buffer
- * @offset: location in the buffer to write to
- * @sum: sum of the data (not checksum)
- *
- * This function writes the buffer content at the given offset on the host
- * interface. It also handles alignment so the writes are done in the most
- * efficient way, and it accumulates the sum of the buffer into the *sum
- * parameter.
- **/
-s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
- u16 offset, u8 *sum)
-{
- return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum);
-}
-
-/**
- * e1000_mng_write_cmd_header - Writes manageability command header
- * @hw: pointer to the HW structure
- * @hdr: pointer to the host interface command header
- *
- * Writes the command header after performing the checksum calculation.
- **/
-s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
- struct e1000_host_mng_command_header *hdr)
-{
- return e1000_mng_write_cmd_header_generic(hw, hdr);
-}
-
-/**
- * e1000_mng_enable_host_if - Checks host interface is enabled
- * @hw: pointer to the HW structure
- *
- * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
- *
- * This function checks whether the HOST IF is enabled for command operation
- * and also checks whether the previous command has completed. It busy-waits
- * if the previous command has not completed.
- **/
-s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
-{
- return e1000_mng_enable_host_if_generic(hw);
-}
-
-/**
- * e1000_check_reset_block - Verifies PHY can be reset
- * @hw: pointer to the HW structure
- *
- * Checks if the PHY is in a state that can be reset or if manageability
- * has it tied up. This is a function pointer entry point called by drivers.
- **/
-s32 e1000_check_reset_block(struct e1000_hw *hw)
-{
- if (hw->phy.ops.check_reset_block)
- return hw->phy.ops.check_reset_block(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_read_phy_reg - Reads PHY register
- * @hw: pointer to the HW structure
- * @offset: the register to read
- * @data: the buffer to store the 16-bit read.
- *
- * Reads the PHY register and returns the value in data.
- * This is a function pointer entry point called by drivers.
- **/
-s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
-{
- if (hw->phy.ops.read_reg)
- return hw->phy.ops.read_reg(hw, offset, data);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_write_phy_reg - Writes PHY register
- * @hw: pointer to the HW structure
- * @offset: the register to write
- * @data: the value to write.
- *
- * Writes the PHY register at offset with the value in data.
- * This is a function pointer entry point called by drivers.
- **/
-s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
-{
- if (hw->phy.ops.write_reg)
- return hw->phy.ops.write_reg(hw, offset, data);
-
- return E1000_SUCCESS;
-}
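-
-/*
- * Illustrative read-modify-write sketch using the two entry points above;
- * the wrapper is hypothetical. PHY_CONTROL and MII_CR_RESTART_AUTO_NEG are
- * the standard MII definitions from e1000_defines.h.
- */
-static s32 example_restart_autoneg(struct e1000_hw *hw)
-{
- u16 phy_ctrl;
- s32 ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
-
- if (ret_val)
- return ret_val;
-
- phy_ctrl |= MII_CR_RESTART_AUTO_NEG;
-
- return e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
-}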
-
-/**
- * e1000_release_phy - Generic release PHY
- * @hw: pointer to the HW structure
- *
- * Return if silicon family does not require a semaphore when accessing the
- * PHY.
- **/
-void e1000_release_phy(struct e1000_hw *hw)
-{
- if (hw->phy.ops.release)
- hw->phy.ops.release(hw);
-}
-
-/**
- * e1000_acquire_phy - Generic acquire PHY
- * @hw: pointer to the HW structure
- *
- * Return success if silicon family does not require a semaphore when
- * accessing the PHY.
- **/
-s32 e1000_acquire_phy(struct e1000_hw *hw)
-{
- if (hw->phy.ops.acquire)
- return hw->phy.ops.acquire(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_read_kmrn_reg - Reads register using Kumeran interface
- * @hw: pointer to the HW structure
- * @offset: the register to read
- * @data: the location to store the 16-bit value read.
- *
- * Reads a register out of the Kumeran interface. Currently no func pointer
- * exists and all implementations are handled in the generic version of
- * this function.
- **/
-s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
-{
- return e1000_read_kmrn_reg_generic(hw, offset, data);
-}
-
-/**
- * e1000_write_kmrn_reg - Writes register using Kumeran interface
- * @hw: pointer to the HW structure
- * @offset: the register to write
- * @data: the value to write.
- *
- * Writes a register to the Kumeran interface. Currently no func pointer
- * exists and all implementations are handled in the generic version of
- * this function.
- **/
-s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
-{
- return e1000_write_kmrn_reg_generic(hw, offset, data);
-}
-
-/**
- * e1000_get_cable_length - Retrieves cable length estimation
- * @hw: pointer to the HW structure
- *
- * This function estimates the cable length and stores the estimates in
- * hw->phy.min_length and hw->phy.max_length. This is a function pointer
- * entry point called by drivers.
- **/
-s32 e1000_get_cable_length(struct e1000_hw *hw)
-{
- if (hw->phy.ops.get_cable_length)
- return hw->phy.ops.get_cable_length(hw);
-
- return E1000_SUCCESS;
-}
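-
-/*
- * Illustrative sketch: on success the estimate lands in
- * hw->phy.min_length and hw->phy.max_length (meters); taking the midpoint
- * is one way to report a single figure. The wrapper is hypothetical.
- */
-static u16 example_cable_length_estimate(struct e1000_hw *hw)
-{
- if (e1000_get_cable_length(hw) != E1000_SUCCESS)
- return 0;
-
- return (hw->phy.min_length + hw->phy.max_length) / 2;
-}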
-
-/**
- * e1000_get_phy_info - Retrieves PHY information from registers
- * @hw: pointer to the HW structure
- *
- * This function gets some information from various PHY registers and
- * populates hw->phy values with it. This is a function pointer entry
- * point called by drivers.
- **/
-s32 e1000_get_phy_info(struct e1000_hw *hw)
-{
- if (hw->phy.ops.get_info)
- return hw->phy.ops.get_info(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_phy_hw_reset - Hard PHY reset
- * @hw: pointer to the HW structure
- *
- * Performs a hard PHY reset. This is a function pointer entry point called
- * by drivers.
- **/
-s32 e1000_phy_hw_reset(struct e1000_hw *hw)
-{
- if (hw->phy.ops.reset)
- return hw->phy.ops.reset(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_phy_commit - Soft PHY reset
- * @hw: pointer to the HW structure
- *
- * Performs a soft PHY reset on those that apply. This is a function pointer
- * entry point called by drivers.
- **/
-s32 e1000_phy_commit(struct e1000_hw *hw)
-{
- if (hw->phy.ops.commit)
- return hw->phy.ops.commit(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_set_d0_lplu_state - Sets low power link up state for D0
- * @hw: pointer to the HW structure
- * @active: boolean used to enable/disable lplu
- *
- * Success returns 0, Failure returns 1
- *
- * The low power link up (lplu) state is set to the power management level D0
- * and SmartSpeed is disabled when active is true, else clear lplu for D0
- * and enable SmartSpeed. LPLU and SmartSpeed are mutually exclusive. LPLU
- * is used during Dx states where the power conservation is most important.
- * During driver activity, SmartSpeed should be enabled so performance is
- * maintained. This is a function pointer entry point called by drivers.
- **/
-s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
-{
- if (hw->phy.ops.set_d0_lplu_state)
- return hw->phy.ops.set_d0_lplu_state(hw, active);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_set_d3_lplu_state - Sets low power link up state for D3
- * @hw: pointer to the HW structure
- * @active: boolean used to enable/disable lplu
- *
- * Success returns 0, Failure returns 1
- *
- * The low power link up (lplu) state is set to the power management level D3
- * and SmartSpeed is disabled when active is true, else clear lplu for D3
- * and enable SmartSpeed. LPLU and SmartSpeed are mutually exclusive. LPLU
- * is used during Dx states where the power conservation is most important.
- * During driver activity, SmartSpeed should be enabled so performance is
- * maintained. This is a function pointer entry point called by drivers.
- **/
-s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
-{
- if (hw->phy.ops.set_d3_lplu_state)
- return hw->phy.ops.set_d3_lplu_state(hw, active);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_read_mac_addr - Reads MAC address
- * @hw: pointer to the HW structure
- *
- * Reads the MAC address out of the adapter and stores it in the HW structure.
- * Currently no func pointer exists and all implementations are handled in the
- * generic version of this function.
- **/
-s32 e1000_read_mac_addr(struct e1000_hw *hw)
-{
- if (hw->mac.ops.read_mac_addr)
- return hw->mac.ops.read_mac_addr(hw);
-
- return e1000_read_mac_addr_generic(hw);
-}
-
-/**
- * e1000_read_pba_string - Read device part number string
- * @hw: pointer to the HW structure
- * @pba_num: pointer to device part number
- * @pba_num_size: size of part number buffer
- *
- * Reads the product board assembly (PBA) number from the EEPROM and stores
- * the value in pba_num.
- * Currently no func pointer exists and all implementations are handled in the
- * generic version of this function.
- **/
-s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size)
-{
- return e1000_read_pba_string_generic(hw, pba_num, pba_num_size);
-}
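-
-/*
- * Illustrative sketch: the caller owns the buffer and passes its size; the
- * 24-byte bound here is an assumption chosen to comfortably hold the
- * NUL-terminated PBA string. The wrapper is hypothetical.
- */
-static void example_print_pba(struct e1000_hw *hw)
-{
- u8 pba_str[24];
-
- if (e1000_read_pba_string(hw, pba_str, sizeof(pba_str)) == E1000_SUCCESS)
- DEBUGOUT1("PBA string: %s\n", pba_str);
-}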
-
-/**
- * e1000_read_pba_length - Read device part number string length
- * @hw: pointer to the HW structure
- * @pba_num_size: size of part number buffer
- *
- * Reads the product board assembly (PBA) number length from the EEPROM and
- * stores the value in pba_num_size.
- * Currently no func pointer exists and all implementations are handled in the
- * generic version of this function.
- **/
-s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size)
-{
- return e1000_read_pba_length_generic(hw, pba_num_size);
-}
-
-/**
- * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
- * @hw: pointer to the HW structure
- *
- * Validates the NVM checksum is correct. This is a function pointer entry
- * point called by drivers.
- **/
-s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
-{
- if (hw->nvm.ops.validate)
- return hw->nvm.ops.validate(hw);
-
- return -E1000_ERR_CONFIG;
-}
-
-/**
- * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
- * @hw: pointer to the HW structure
- *
- * Updates the NVM checksum. Currently no func pointer exists and all
- * implementations are handled in the generic version of this function.
- **/
-s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
-{
- if (hw->nvm.ops.update)
- return hw->nvm.ops.update(hw);
-
- return -E1000_ERR_CONFIG;
-}
-
-/**
- * e1000_reload_nvm - Reloads EEPROM
- * @hw: pointer to the HW structure
- *
- * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
- * extended control register.
- **/
-void e1000_reload_nvm(struct e1000_hw *hw)
-{
- if (hw->nvm.ops.reload)
- hw->nvm.ops.reload(hw);
-}
-
-/**
- * e1000_read_nvm - Reads NVM (EEPROM)
- * @hw: pointer to the HW structure
- * @offset: the word offset to read
- * @words: number of 16-bit words to read
- * @data: pointer to the properly sized buffer for the data.
- *
- * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
- * pointer entry point called by drivers.
- **/
-s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
- if (hw->nvm.ops.read)
- return hw->nvm.ops.read(hw, offset, words, data);
-
- return -E1000_ERR_CONFIG;
-}
-
-/**
- * e1000_write_nvm - Writes to NVM (EEPROM)
- * @hw: pointer to the HW structure
- * @offset: the word offset to write
- * @words: number of 16-bit words to write
- * @data: pointer to the properly sized buffer for the data.
- *
- * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
- * pointer entry point called by drivers.
- **/
-s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
- if (hw->nvm.ops.write)
- return hw->nvm.ops.write(hw, offset, words, data);
-
- return E1000_SUCCESS;
-}
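-
-/*
- * Illustrative sketch: an NVM write should be followed by
- * e1000_update_nvm_checksum(), otherwise a later
- * e1000_validate_nvm_checksum() will fail. The wrapper and the word offset
- * 0x10 are hypothetical.
- */
-static s32 example_write_nvm_word(struct e1000_hw *hw, u16 value)
-{
- s32 ret_val = e1000_write_nvm(hw, 0x10, 1, &value);
-
- if (ret_val)
- return ret_val;
-
- return e1000_update_nvm_checksum(hw);
-}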
-
-/**
- * e1000_write_8bit_ctrl_reg - Writes 8bit Control register
- * @hw: pointer to the HW structure
- * @reg: 32bit register offset
- * @offset: the register to write
- * @data: the value to write.
- *
- * Writes the PHY register at offset with the value in data.
- * This is a function pointer entry point called by drivers.
- **/
-s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
- u8 data)
-{
- return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
-}
-
-/**
- * e1000_power_up_phy - Restores link in case of PHY power down
- * @hw: pointer to the HW structure
- *
- * The PHY may be powered down to save power, to turn off link when the
- * driver is unloaded, or when wake-on-LAN is not enabled (among others).
- **/
-void e1000_power_up_phy(struct e1000_hw *hw)
-{
- if (hw->phy.ops.power_up)
- hw->phy.ops.power_up(hw);
-
- e1000_setup_link(hw);
-}
-
-/**
- * e1000_power_down_phy - Power down PHY
- * @hw: pointer to the HW structure
- *
- * The PHY may be powered down to save power, to turn off link when the
- * driver is unloaded, or when wake-on-LAN is not enabled (among others).
- **/
-void e1000_power_down_phy(struct e1000_hw *hw)
-{
- if (hw->phy.ops.power_down)
- hw->phy.ops.power_down(hw);
-}
-
-/**
- * e1000_power_up_fiber_serdes_link - Power up serdes link
- * @hw: pointer to the HW structure
- *
- * Power on the optics and PCS.
- **/
-void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw)
-{
- if (hw->mac.ops.power_up_serdes)
- hw->mac.ops.power_up_serdes(hw);
-}
-
-/**
- * e1000_shutdown_fiber_serdes_link - Remove link during power down
- * @hw: pointer to the HW structure
- *
- * Shutdown the optics and PCS on driver unload.
- **/
-void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw)
-{
- if (hw->mac.ops.shutdown_serdes)
- hw->mac.ops.shutdown_serdes(hw);
-}
-
-/**
- * e1000_get_thermal_sensor_data - Gathers thermal sensor data
- * @hw: pointer to hardware structure
- *
- * Updates the temperatures in mac.thermal_sensor_data
- **/
-s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw)
-{
- if (hw->mac.ops.get_thermal_sensor_data)
- return hw->mac.ops.get_thermal_sensor_data(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_init_thermal_sensor_thresh - Sets thermal sensor thresholds
- * @hw: pointer to hardware structure
- *
- * Sets the thermal sensor thresholds according to the NVM map
- **/
-s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw)
-{
- if (hw->mac.ops.init_thermal_sensor_thresh)
- return hw->mac.ops.init_thermal_sensor_thresh(hw);
-
- return E1000_SUCCESS;
-}
-
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.h
deleted file mode 100755
index b21294ec..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _E1000_API_H_
-#define _E1000_API_H_
-
-#include "e1000_hw.h"
-
-extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
-extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
-extern void e1000_init_function_pointers_vf(struct e1000_hw *hw);
-extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw);
-extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
-extern void e1000_init_function_pointers_i210(struct e1000_hw *hw);
-
-s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr);
-s32 e1000_set_mac_type(struct e1000_hw *hw);
-s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
-s32 e1000_init_mac_params(struct e1000_hw *hw);
-s32 e1000_init_nvm_params(struct e1000_hw *hw);
-s32 e1000_init_phy_params(struct e1000_hw *hw);
-s32 e1000_init_mbx_params(struct e1000_hw *hw);
-s32 e1000_get_bus_info(struct e1000_hw *hw);
-void e1000_clear_vfta(struct e1000_hw *hw);
-void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
-s32 e1000_force_mac_fc(struct e1000_hw *hw);
-s32 e1000_check_for_link(struct e1000_hw *hw);
-s32 e1000_reset_hw(struct e1000_hw *hw);
-s32 e1000_init_hw(struct e1000_hw *hw);
-s32 e1000_setup_link(struct e1000_hw *hw);
-s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
-s32 e1000_disable_pcie_master(struct e1000_hw *hw);
-void e1000_config_collision_dist(struct e1000_hw *hw);
-void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
-u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
-void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count);
-s32 e1000_setup_led(struct e1000_hw *hw);
-s32 e1000_cleanup_led(struct e1000_hw *hw);
-s32 e1000_check_reset_block(struct e1000_hw *hw);
-s32 e1000_blink_led(struct e1000_hw *hw);
-s32 e1000_led_on(struct e1000_hw *hw);
-s32 e1000_led_off(struct e1000_hw *hw);
-s32 e1000_id_led_init(struct e1000_hw *hw);
-void e1000_reset_adaptive(struct e1000_hw *hw);
-void e1000_update_adaptive(struct e1000_hw *hw);
-s32 e1000_get_cable_length(struct e1000_hw *hw);
-s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
-s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
- u8 data);
-s32 e1000_get_phy_info(struct e1000_hw *hw);
-void e1000_release_phy(struct e1000_hw *hw);
-s32 e1000_acquire_phy(struct e1000_hw *hw);
-s32 e1000_phy_hw_reset(struct e1000_hw *hw);
-s32 e1000_phy_commit(struct e1000_hw *hw);
-void e1000_power_up_phy(struct e1000_hw *hw);
-void e1000_power_down_phy(struct e1000_hw *hw);
-s32 e1000_read_mac_addr(struct e1000_hw *hw);
-s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size);
-s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
-void e1000_reload_nvm(struct e1000_hw *hw);
-s32 e1000_update_nvm_checksum(struct e1000_hw *hw);
-s32 e1000_validate_nvm_checksum(struct e1000_hw *hw);
-s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
-bool e1000_check_mng_mode(struct e1000_hw *hw);
-bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
-s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
-s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
- u16 offset, u8 *sum);
-s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
- struct e1000_host_mng_command_header *hdr);
-s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
-s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw);
-s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw);
-
-
-
-/*
- * TBI_ACCEPT macro definition:
- *
- * This macro requires:
- * adapter = a pointer to struct e1000_hw
- * status = the 8 bit status field of the Rx descriptor with EOP set
- * error = the 8 bit error field of the Rx descriptor with EOP set
- * length = the sum of all the length fields of the Rx descriptors that
- * make up the current frame
- * last_byte = the last byte of the frame DMAed by the hardware
- * max_frame_length = the maximum frame length we want to accept.
- * min_frame_length = the minimum frame length we want to accept.
- *
- * This macro is a conditional that should be used in the interrupt
- * handler's Rx processing routine when RxErrors have been detected.
- *
- * Typical use:
- * ...
- * if (TBI_ACCEPT) {
- * accept_frame = true;
- * e1000_tbi_adjust_stats(adapter, MacAddress);
- * frame_length--;
- * } else {
- * accept_frame = false;
- * }
- * ...
- */
-
-/* The carrier extension symbol, as received by the NIC. */
-#define CARRIER_EXTENSION 0x0F
-
-#define TBI_ACCEPT(a, status, errors, length, last_byte, \
- min_frame_size, max_frame_size) \
- (e1000_tbi_sbp_enabled_82543(a) && \
- (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
- ((last_byte) == CARRIER_EXTENSION) && \
- (((status) & E1000_RXD_STAT_VP) ? \
- (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
- ((length) <= (max_frame_size + 1))) : \
- (((length) > min_frame_size) && \
- ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
-
-#ifndef E1000_MAX
-#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b))
-#endif
-#ifndef E1000_DIVIDE_ROUND_UP
-#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */
-#endif
-#endif /* _E1000_API_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_defines.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_defines.h
deleted file mode 100755
index 63b228c5..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_defines.h
+++ /dev/null
@@ -1,1380 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _E1000_DEFINES_H_
-#define _E1000_DEFINES_H_
-
-/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define REQ_TX_DESCRIPTOR_MULTIPLE 8
-#define REQ_RX_DESCRIPTOR_MULTIPLE 8
-
-/* Definitions for power management and wakeup registers */
-/* Wake Up Control */
-#define E1000_WUC_APME 0x00000001 /* APM Enable */
-#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
-#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
-#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
-#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
-
-/* Wake Up Filter Control */
-#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
-#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
-#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
-#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
-#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
-#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
-#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
-#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
-
-/* Wake Up Status */
-#define E1000_WUS_LNKC E1000_WUFC_LNKC
-#define E1000_WUS_MAG E1000_WUFC_MAG
-#define E1000_WUS_EX E1000_WUFC_EX
-#define E1000_WUS_MC E1000_WUFC_MC
-#define E1000_WUS_BC E1000_WUFC_BC
-
-/* Extended Device Control */
-#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */
-#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */
-#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */
-#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
-#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */
-#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
-/* Physical Func Reset Done Indication */
-#define E1000_CTRL_EXT_PFRSTD 0x00004000
-#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
-#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
-#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */
-#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-/* Offset of the link mode field in Ctrl Ext register */
-#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22
-#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
-#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
-#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
-#define E1000_CTRL_EXT_EIAME 0x01000000
-#define E1000_CTRL_EXT_IRCA 0x00000001
-#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
-#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
-#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
-#define E1000_I2CCMD_REG_ADDR_SHIFT 16
-#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
-#define E1000_I2CCMD_OPCODE_READ 0x08000000
-#define E1000_I2CCMD_OPCODE_WRITE 0x00000000
-#define E1000_I2CCMD_READY 0x20000000
-#define E1000_I2CCMD_ERROR 0x80000000
-#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a))
-#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a))
-#define E1000_MAX_SGMII_PHY_REG_ADDR 255
-#define E1000_I2CCMD_PHY_TIMEOUT 200
-#define E1000_IVAR_VALID 0x80
-#define E1000_GPIE_NSICR 0x00000001
-#define E1000_GPIE_MSIX_MODE 0x00000010
-#define E1000_GPIE_EIAME 0x40000000
-#define E1000_GPIE_PBA 0x80000000
-
-/* Receive Descriptor bit definitions */
-#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
-#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
-#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
-#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
-#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
-#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
-#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
-#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
-#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
-#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
-#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
-#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
-#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
-#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
-#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
-#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
-#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
-#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
-#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
-
-#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
-#define E1000_RXDEXT_STATERR_LB 0x00040000
-#define E1000_RXDEXT_STATERR_CE 0x01000000
-#define E1000_RXDEXT_STATERR_SE 0x02000000
-#define E1000_RXDEXT_STATERR_SEQ 0x04000000
-#define E1000_RXDEXT_STATERR_CXE 0x10000000
-#define E1000_RXDEXT_STATERR_TCPE 0x20000000
-#define E1000_RXDEXT_STATERR_IPE 0x40000000
-#define E1000_RXDEXT_STATERR_RXE 0x80000000
-
-/* mask to determine if packets should be dropped due to frame errors */
-#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
- E1000_RXD_ERR_CE | \
- E1000_RXD_ERR_SE | \
- E1000_RXD_ERR_SEQ | \
- E1000_RXD_ERR_CXE | \
- E1000_RXD_ERR_RXE)
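-
-/* Usage sketch (illustrative): with the legacy struct e1000_rx_desc, a
- * completed frame whose error byte intersects this mask is dropped:
- * if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)
- * drop the frame;
- */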
-
-/* Same mask, but for extended and packet split descriptors */
-#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
- E1000_RXDEXT_STATERR_CE | \
- E1000_RXDEXT_STATERR_SE | \
- E1000_RXDEXT_STATERR_SEQ | \
- E1000_RXDEXT_STATERR_CXE | \
- E1000_RXDEXT_STATERR_RXE)
-
-#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
-#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
-#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
-#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
-#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
-#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
-
-#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
-
-/* Management Control */
-#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
-#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
-#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
-#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
-#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
-/* Enable MAC address filtering */
-#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
-/* Enable MNG packets to host memory */
-#define E1000_MANC_EN_MNG2HOST 0x00200000
-
-#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
-#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
-#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
-#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
-
-/* Receive Control */
-#define E1000_RCTL_RST 0x00000001 /* Software reset */
-#define E1000_RCTL_EN 0x00000002 /* enable */
-#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
-#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */
-#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */
-#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
-#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
-#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
-#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
-#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
-#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
-#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
-#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
-#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
-/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
-#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
-#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */
-#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */
-#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
-/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
-#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */
-#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */
-#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
-#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
-#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
-#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
-#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
-#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
-#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
-#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
-
-/* Use byte values for the following shift parameters
- * Usage:
- * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
- * E1000_PSRCTL_BSIZE0_MASK) |
- * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
- * E1000_PSRCTL_BSIZE1_MASK) |
- * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
- * E1000_PSRCTL_BSIZE2_MASK) |
- * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
- * E1000_PSRCTL_BSIZE3_MASK))
- * where value0 = [128..16256], default=256
- * value1 = [1024..64512], default=4096
- * value2 = [0..64512], default=4096
- * value3 = [0..64512], default=0
- */
-
-#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
-#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
-#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
-#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
-
-#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
-#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
-#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
-#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
-
-/* SWFW_SYNC Definitions */
-#define E1000_SWFW_EEP_SM 0x01
-#define E1000_SWFW_PHY0_SM 0x02
-#define E1000_SWFW_PHY1_SM 0x04
-#define E1000_SWFW_CSR_SM 0x08
-#define E1000_SWFW_PHY2_SM 0x20
-#define E1000_SWFW_PHY3_SM 0x40
-#define E1000_SWFW_SW_MNG_SM 0x400
-
-/* Device Control */
-#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
-#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
-#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master reqs */
-#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
-#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
-#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
-#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
-#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
-#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
-#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
-#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
-#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
-#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
-#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
-#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
-#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
-#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
-#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
-#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
-#define E1000_CTRL_RST 0x04000000 /* Global reset */
-#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
-#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
-#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
-#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
-#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
-
-
-#define E1000_CONNSW_ENRGSRC 0x4
-#define E1000_CONNSW_PHYSD 0x400
-#define E1000_CONNSW_PHY_PDN 0x800
-#define E1000_CONNSW_SERDESD 0x200
-#define E1000_CONNSW_AUTOSENSE_CONF 0x2
-#define E1000_CONNSW_AUTOSENSE_EN 0x1
-#define E1000_PCS_CFG_PCS_EN 8
-#define E1000_PCS_LCTL_FLV_LINK_UP 1
-#define E1000_PCS_LCTL_FSV_10 0
-#define E1000_PCS_LCTL_FSV_100 2
-#define E1000_PCS_LCTL_FSV_1000 4
-#define E1000_PCS_LCTL_FDV_FULL 8
-#define E1000_PCS_LCTL_FSD 0x10
-#define E1000_PCS_LCTL_FORCE_LINK 0x20
-#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
-#define E1000_PCS_LCTL_AN_ENABLE 0x10000
-#define E1000_PCS_LCTL_AN_RESTART 0x20000
-#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
-#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
-
-#define E1000_PCS_LSTS_LINK_OK 1
-#define E1000_PCS_LSTS_SPEED_100 2
-#define E1000_PCS_LSTS_SPEED_1000 4
-#define E1000_PCS_LSTS_DUPLEX_FULL 8
-#define E1000_PCS_LSTS_SYNK_OK 0x10
-#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
-
-/* Device Status */
-#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */
-#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
-#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
-#define E1000_STATUS_FUNC_SHIFT 2
-#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
-#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
-#define E1000_STATUS_SPEED_MASK 0x000000C0
-#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
-#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
-#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
-#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */
-#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
-#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */
-#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */
-#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */
-
-#define SPEED_10 10
-#define SPEED_100 100
-#define SPEED_1000 1000
-#define SPEED_2500 2500
-#define HALF_DUPLEX 1
-#define FULL_DUPLEX 2
-
-
-#define ADVERTISE_10_HALF 0x0001
-#define ADVERTISE_10_FULL 0x0002
-#define ADVERTISE_100_HALF 0x0004
-#define ADVERTISE_100_FULL 0x0008
-#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
-#define ADVERTISE_1000_FULL 0x0020
-
-/* 1000/H is not supported, nor spec-compliant. */
-#define E1000_ALL_SPEED_DUPLEX ( \
- ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
- ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
-#define E1000_ALL_NOT_GIG ( \
- ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
- ADVERTISE_100_FULL)
-#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
-#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
-#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
-
-#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
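-
-/* Usage sketch (illustrative): the ADVERTISE_* bits are OR'd into
- * hw->phy.autoneg_advertised before link setup, e.g. to advertise only
- * full-duplex rates:
- * hw->phy.autoneg_advertised = ADVERTISE_100_FULL | ADVERTISE_1000_FULL;
- */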
-
-/* LED Control */
-#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
-#define E1000_LEDCTL_LED0_MODE_SHIFT 0
-#define E1000_LEDCTL_LED0_IVRT 0x00000040
-#define E1000_LEDCTL_LED0_BLINK 0x00000080
-
-#define E1000_LEDCTL_MODE_LED_ON 0xE
-#define E1000_LEDCTL_MODE_LED_OFF 0xF
-
-/* Transmit Descriptor bit definitions */
-#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
-#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
-#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
-#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
-#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
-#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
-#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
-#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
-#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
-#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
-#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
-#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
-#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
-#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
-#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
-#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
-#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
-#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
-#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
-
-/* Transmit Control */
-#define E1000_TCTL_EN 0x00000002 /* enable Tx */
-#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
-#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
-#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
-#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
-#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
-
-/* Transmit Arbitration Count */
-#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */
-
-/* SerDes Control */
-#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
-#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
-
-/* Receive Checksum Control */
-#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
-#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
-#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
-#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
-#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
-
-/* Header split receive */
-#define E1000_RFCTL_NFSW_DIS 0x00000040
-#define E1000_RFCTL_NFSR_DIS 0x00000080
-#define E1000_RFCTL_ACK_DIS 0x00001000
-#define E1000_RFCTL_EXTEN 0x00008000
-#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
-#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
-#define E1000_RFCTL_LEF 0x00040000
-
-/* Collision related configuration parameters */
-#define E1000_COLLISION_THRESHOLD 15
-#define E1000_CT_SHIFT 4
-#define E1000_COLLISION_DISTANCE 63
-#define E1000_COLD_SHIFT 12
-
-/* Default values for the transmit IPG register */
-#define DEFAULT_82543_TIPG_IPGT_FIBER 9
-#define DEFAULT_82543_TIPG_IPGT_COPPER 8
-
-#define E1000_TIPG_IPGT_MASK 0x000003FF
-
-#define DEFAULT_82543_TIPG_IPGR1 8
-#define E1000_TIPG_IPGR1_SHIFT 10
-
-#define DEFAULT_82543_TIPG_IPGR2 6
-#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
-#define E1000_TIPG_IPGR2_SHIFT 20
-
-/* Ethertype field values */
-#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
-
-#define ETHERNET_FCS_SIZE 4
-#define MAX_JUMBO_FRAME_SIZE 0x3F00
-
-/* Extended Configuration Control and Size */
-#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
-#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
-#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
-#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
-#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
-#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
-#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
-#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
-#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
-
-#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
-#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
-#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
-#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
-
-#define E1000_KABGTXD_BGSQLBIAS 0x00050000
-
-/* PBA constants */
-#define E1000_PBA_8K 0x0008 /* 8KB */
-#define E1000_PBA_10K 0x000A /* 10KB */
-#define E1000_PBA_12K 0x000C /* 12KB */
-#define E1000_PBA_14K 0x000E /* 14KB */
-#define E1000_PBA_16K 0x0010 /* 16KB */
-#define E1000_PBA_18K 0x0012
-#define E1000_PBA_20K 0x0014
-#define E1000_PBA_22K 0x0016
-#define E1000_PBA_24K 0x0018
-#define E1000_PBA_26K 0x001A
-#define E1000_PBA_30K 0x001E
-#define E1000_PBA_32K 0x0020
-#define E1000_PBA_34K 0x0022
-#define E1000_PBA_35K 0x0023
-#define E1000_PBA_38K 0x0026
-#define E1000_PBA_40K 0x0028
-#define E1000_PBA_48K 0x0030 /* 48KB */
-#define E1000_PBA_64K 0x0040 /* 64KB */
-
-#define E1000_PBA_RXA_MASK 0xFFFF
-
-#define E1000_PBS_16K E1000_PBA_16K
-
-#define IFS_MAX 80
-#define IFS_MIN 40
-#define IFS_RATIO 4
-#define IFS_STEP 10
-#define MIN_NUM_XMITS 1000
-
-/* SW Semaphore Register */
-#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
-#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
-#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
-
-#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
-
-/* Interrupt Cause Read */
-#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
-#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
-#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
-#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
-#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
-#define E1000_ICR_RXO 0x00000040 /* Rx overrun */
-#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
-#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
-#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */
-#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
-#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
-#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
-#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
-#define E1000_ICR_TXD_LOW 0x00008000
-#define E1000_ICR_MNG 0x00040000 /* Manageability event */
-#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
-#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
-/* If this bit asserted, the driver should claim the interrupt */
-#define E1000_ICR_INT_ASSERTED 0x80000000
-#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
-#define E1000_ICR_FER 0x00400000 /* Fatal Error */
-
-#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/
-#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */
-
-
-/* Extended Interrupt Cause Read */
-#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
-#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
-#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
-#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
-#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
-#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
-#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
-#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
-#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
-#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
-/* TCP Timer */
-#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */
-#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */
-#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */
-#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */
-
-/* This defines the bits that are set in the Interrupt Mask
- * Set/Read Register. Each bit is documented below:
- * o RXT0 = Receiver Timer Interrupt (ring 0)
- * o TXDW = Transmit Descriptor Written Back
- * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
- * o RXSEQ = Receive Sequence Error
- * o LSC = Link Status Change
- */
-#define IMS_ENABLE_MASK ( \
- E1000_IMS_RXT0 | \
- E1000_IMS_TXDW | \
- E1000_IMS_RXDMT0 | \
- E1000_IMS_RXSEQ | \
- E1000_IMS_LSC)
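/* Editorial sketch, not part of the original Intel sources: the usual way
 * a driver arms the default cause set above. Assumes the E1000_WRITE_REG()
 * and E1000_READ_REG() accessors from e1000_osdep.h and the register
 * offsets from e1000_regs.h; the E1000_IMS_* bits are defined just below.
 */
static inline void e1000_sketch_irq_enable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
	E1000_READ_REG(hw, E1000_STATUS); /* flush the posted write */
}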
-
-/* Interrupt Mask Set */
-#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */
-#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
-#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
-#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
-#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
-#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
-#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */
-#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
-#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW /* Tx desc low threshold hit */
-#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */
-#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
-#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
-#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */
-
-#define E1000_IMS_THS E1000_ICR_THS /* ICR.THS: Thermal Sensor Event */
-#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */
-/* Extended Interrupt Mask Set */
-#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
-#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
-#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
-#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
-#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
-#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
-#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
-#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
-#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
-#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
-
-/* Interrupt Cause Set */
-#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
-#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
-#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
-
-/* Extended Interrupt Cause Set */
-#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
-#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
-#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
-#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
-#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
-#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
-#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
-#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
-#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
-#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
-
-#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
-/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
-#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
-#define E1000_EITR_INTERVAL 0x00007FFC
-
-/* Transmit Descriptor Control */
-#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
-#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
-#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
-#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
-#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
-#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
-/* Enable the counting of descriptors still to be processed. */
-#define E1000_TXDCTL_COUNT_DESC 0x00400000
-
-/* Flow Control Constants */
-#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
-#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
-#define FLOW_CONTROL_TYPE 0x8808
-
-/* 802.1q VLAN Packet Size */
-#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
-#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
-
-/* Receive Address
- * Number of high/low register pairs in the RAR. The RAR (Receive Address
- * Registers) holds the directed and multicast addresses that we monitor.
- * Technically, we have 16 spots. However, we reserve one of these spots
- * (RAR[15]) for our directed address used by controllers with
- * manageability enabled, allowing us room for 15 multicast addresses.
- */
-#define E1000_RAR_ENTRIES 15
-#define E1000_RAH_AV 0x80000000 /* Receive address valid */
-#define E1000_RAL_MAC_ADDR_LEN 4
-#define E1000_RAH_MAC_ADDR_LEN 2
-#define E1000_RAH_QUEUE_MASK_82575 0x000C0000
-#define E1000_RAH_POOL_1 0x00040000
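/* Editorial sketch, not part of the original Intel sources: programming
 * RAR[index] from a 6-byte MAC address. RAL takes the first four octets,
 * RAH the remaining two plus the address-valid bit. The E1000_RAL(i) and
 * E1000_RAH(i) offset macros are assumed from e1000_regs.h.
 */
static inline void e1000_sketch_rar_set(struct e1000_hw *hw,
					const u8 *addr, u32 index)
{
	u32 ral = (u32)addr[0] | ((u32)addr[1] << 8) |
		  ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
	u32 rah = (u32)addr[4] | ((u32)addr[5] << 8);

	E1000_WRITE_REG(hw, E1000_RAL(index), ral);
	E1000_WRITE_REG(hw, E1000_RAH(index), rah | E1000_RAH_AV);
}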
-
-/* Error Codes */
-#define E1000_SUCCESS 0
-#define E1000_ERR_NVM 1
-#define E1000_ERR_PHY 2
-#define E1000_ERR_CONFIG 3
-#define E1000_ERR_PARAM 4
-#define E1000_ERR_MAC_INIT 5
-#define E1000_ERR_PHY_TYPE 6
-#define E1000_ERR_RESET 9
-#define E1000_ERR_MASTER_REQUESTS_PENDING 10
-#define E1000_ERR_HOST_INTERFACE_COMMAND 11
-#define E1000_BLK_PHY_RESET 12
-#define E1000_ERR_SWFW_SYNC 13
-#define E1000_NOT_IMPLEMENTED 14
-#define E1000_ERR_MBX 15
-#define E1000_ERR_INVALID_ARGUMENT 16
-#define E1000_ERR_NO_SPACE 17
-#define E1000_ERR_NVM_PBA_SECTION 18
-#define E1000_ERR_I2C 19
-#define E1000_ERR_INVM_VALUE_NOT_FOUND 20
-
-/* Loop limit on how long we wait for auto-negotiation to complete */
-#define FIBER_LINK_UP_LIMIT 50
-#define COPPER_LINK_UP_LIMIT 10
-#define PHY_AUTO_NEG_LIMIT 45
-#define PHY_FORCE_LIMIT 20
-/* Number of 100-microsecond intervals we wait for PCI Express master disable */
-#define MASTER_DISABLE_TIMEOUT 800
-/* Number of milliseconds we wait for PHY configuration done after MAC reset */
-#define PHY_CFG_TIMEOUT 100
-/* Number of 2-millisecond intervals we wait when acquiring MDIO ownership. */
-#define MDIO_OWNERSHIP_TIMEOUT 10
-/* Number of milliseconds for NVM auto read done after MAC reset. */
-#define AUTO_READ_DONE_TIMEOUT 10
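/* Editorial sketch, not part of the original Intel sources: the polling
 * shape these loop limits bound. AUTO_READ_DONE_TIMEOUT caps a 1 ms poll
 * of EECD.AUTO_RD after a MAC reset; msec_delay() is assumed from
 * e1000_osdep.h, and E1000_EECD_AUTO_RD is defined in the NVM Control
 * block further below.
 */
static inline s32 e1000_sketch_get_auto_rd_done(struct e1000_hw *hw)
{
	s32 i;

	for (i = 0; i < AUTO_READ_DONE_TIMEOUT; i++) {
		if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
			return E1000_SUCCESS;
		msec_delay(1);
	}
	return -E1000_ERR_RESET;
}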
-
-/* Flow Control */
-#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
-#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
-#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
-
-/* Transmit Configuration Word */
-#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
-#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
-#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW asym pause direction */
-#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
-#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
-
-/* Receive Configuration Word */
-#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
-#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
-#define E1000_RXCW_C 0x20000000 /* Receive config */
-#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
-
-#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
-#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
-
-#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
-#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
-#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
-#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
-#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
-#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
-#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
-#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
-#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
-
-#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
-#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
-#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
-#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
-#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
-#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
-
-#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
-#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
-#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
-#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
-#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
-#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
-#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
-#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
-
-#define E1000_TIMINCA_16NS_SHIFT 24
-#define E1000_TIMINCA_INCPERIOD_SHIFT 24
-#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF
-
-#define E1000_TSICR_TXTS 0x00000002
-#define E1000_TSIM_TXTS 0x00000002
-/* TUPLE Filtering Configuration */
-#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */
-#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */
-#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */
-/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
-#define E1000_TTQF_PROTOCOL_TCP 0x0
-/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
-#define E1000_TTQF_PROTOCOL_UDP 0x1
-/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
-#define E1000_TTQF_PROTOCOL_SCTP 0x2
-#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */
-#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shift */
-#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */
-#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */
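/* Editorial sketch, not part of the original Intel sources: composing a
 * TTQF entry from the fields above, here steering TCP to a given Rx
 * queue. The exact field layout should be checked against the device
 * datasheet before use.
 */
static inline u32 e1000_sketch_ttqf_tcp_to_queue(u32 queue)
{
	u32 ttqf = E1000_TTQF_PROTOCOL_TCP << E1000_TTQF_PROTOCOL_SHIFT;

	ttqf |= (queue << E1000_TTQF_QUEUE_SHIFT) & E1000_TTQF_RX_QUEUE_MASK;
	ttqf |= E1000_TTQF_QUEUE_ENABLE;
	return ttqf;
}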
-#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */
-#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */
-#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */
-#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */
-
-#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
-#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
-#define E1000_MDICNFG_PHY_MASK 0x03E00000
-#define E1000_MDICNFG_PHY_SHIFT 21
-
-#define E1000_MEDIA_PORT_COPPER 1
-#define E1000_MEDIA_PORT_OTHER 2
-#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2
-#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3
-#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */
-#define E1000_M88E1112_MAC_CTRL_1 0x10
-#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
-#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
-#define E1000_M88E1112_PAGE_ADDR 0x16
-#define E1000_M88E1112_STATUS 0x01
-
-#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */
-#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */
-#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */
-#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */
-#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */
-
-/* I350 EEE defines */
-#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */
-#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */
-#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */
-#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */
-#define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */
-/* EEE status */
-#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
-#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */
-#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */
-#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */
-#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */
-#define E1000_M88E1543_EEE_CTRL_1 0x0
-#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
-#define E1000_EEE_ADV_DEV_I354 7
-#define E1000_EEE_ADV_ADDR_I354 60
-#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
-#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
-#define E1000_PCS_STATUS_DEV_I354 3
-#define E1000_PCS_STATUS_ADDR_I354 1
-#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400
-#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800
-#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */
-#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
-#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
-/* PCI Express Control */
-#define E1000_GCR_RXD_NO_SNOOP 0x00000001
-#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
-#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
-#define E1000_GCR_TXD_NO_SNOOP 0x00000008
-#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
-#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
-#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
-#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
-#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
-#define E1000_GCR_CAP_VER2 0x00040000
-
-#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
- E1000_GCR_RXDSCW_NO_SNOOP | \
- E1000_GCR_RXDSCR_NO_SNOOP | \
- E1000_GCR_TXD_NO_SNOOP | \
- E1000_GCR_TXDSCW_NO_SNOOP | \
- E1000_GCR_TXDSCR_NO_SNOOP)
-
-#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
-
-/* mPHY address control and data registers */
-#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */
-#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
-#define E1000_MPHY_DATA 0x0E10 /* Data Register */
-
-/* AFE CSR Offset for PCS CLK */
-#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004
-/* Override for near end digital loopback. */
-#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
-
-/* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN 0x0800 /* Power down */
-#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_1000 0x0040
-#define MII_CR_SPEED_100 0x2000
-#define MII_CR_SPEED_10 0x0000
-
-/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
-#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
-#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
-
-/* Autoneg Advertisement Register */
-#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
-#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
-#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
-#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
-#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
-#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
-#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
-#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
-#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
-#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
-
-/* Link Partner Ability Register (Base Page) */
-#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
-#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */
-#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */
-#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */
-#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */
-#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
-#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
-#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */
-#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */
-#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */
-#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
-
-/* Autoneg Expansion Register */
-#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
-#define NWAY_ER_PAGE_RXD 0x0002 /* New Page received */
-#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device Next Page able */
-#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP Next Page able */
-#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel Detection Fault */
-
-/* 1000BASE-T Control Register */
-#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
-#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
-#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
-/* 1=Repeater/switch device port 0=DTE device */
-#define CR_1000T_REPEATER_DTE 0x0400
-/* 1=Configure PHY as Master 0=Configure PHY as Slave */
-#define CR_1000T_MS_VALUE 0x0800
-/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
-#define CR_1000T_MS_ENABLE 0x1000
-#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
-#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
-#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
-#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
-#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
-
-/* 1000BASE-T Status Register */
-#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */
-#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */
-#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
-#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
-#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
-#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
-#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */
-#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
-
-#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
-
-/* PHY 1000 MII Register/Bit Definitions */
-/* PHY Registers defined by IEEE */
-#define PHY_CONTROL 0x00 /* Control Register */
-#define PHY_STATUS 0x01 /* Status Register */
-#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
-#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
-#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
-#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
-#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
-#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */
-#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
-#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
-#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
-#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
-
-#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
-
-/* NVM Control */
-#define E1000_EECD_SK 0x00000001 /* NVM Clock */
-#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
-#define E1000_EECD_DI 0x00000004 /* NVM Data In */
-#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
-#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
-#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
-#define E1000_EECD_PRES 0x00000100 /* NVM Present */
-#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
-#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */
-#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */
-#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */
-#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */
-/* NVM Addressing bits based on type 0=small, 1=large */
-#define E1000_EECD_ADDR_BITS 0x00000400
-#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
-#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
-#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
-#define E1000_EECD_SIZE_EX_SHIFT 11
-#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
-#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */
-#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
-#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
-#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
-#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */
-#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */
-#define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */
-#define E1000_FLUDONE_ATTEMPTS 20000
-#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
-#define E1000_I210_FIFO_SEL_RX 0x00
-#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i))
-#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
-#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
-#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
-
-#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */
-/* Secure FLASH mode requires removing MSb */
-#define E1000_I210_FW_PTR_MASK 0x7FFF
-/* Firmware code revision field word offset */
-#define E1000_I210_FW_VER_OFFSET 328
-
-#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */
-#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
-#define E1000_NVM_RW_REG_START 1 /* Start operation */
-#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
-#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
-#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
-#define E1000_FLASH_UPDATES 2000
-
-/* NVM Word Offsets */
-#define NVM_COMPAT 0x0003
-#define NVM_ID_LED_SETTINGS 0x0004
-#define NVM_VERSION 0x0005
-#define E1000_I210_NVM_FW_MODULE_PTR 0x0010
-#define E1000_I350_NVM_FW_MODULE_PTR 0x0051
-#define NVM_FUTURE_INIT_WORD1 0x0019
-#define NVM_ETRACK_WORD 0x0042
-#define NVM_ETRACK_HIWORD 0x0043
-#define NVM_COMB_VER_OFF 0x0083
-#define NVM_COMB_VER_PTR 0x003d
-
-/* NVM version defines */
-#define NVM_MAJOR_MASK 0xF000
-#define NVM_MINOR_MASK 0x0FF0
-#define NVM_IMAGE_ID_MASK 0x000F
-#define NVM_COMB_VER_MASK 0x00FF
-#define NVM_MAJOR_SHIFT 12
-#define NVM_MINOR_SHIFT 4
-#define NVM_COMB_VER_SHFT 8
-#define NVM_VER_INVALID 0xFFFF
-#define NVM_ETRACK_SHIFT 16
-#define NVM_ETRACK_VALID 0x8000
-#define NVM_NEW_DEC_MASK 0x0F00
-#define NVM_HEX_CONV 16
-#define NVM_HEX_TENS 10
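/* Editorial sketch, not part of the original Intel sources: decoding the
 * NVM_VERSION word with the masks and shifts above, roughly as the
 * firmware-version helpers in the driver do.
 */
static inline void e1000_sketch_decode_nvm_version(u16 ver_word,
						   u16 *major, u16 *minor)
{
	*major = (ver_word & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
	*minor = (ver_word & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
}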
-
-/* FW version defines */
-/* Offset of "Loader patch ptr" in Firmware Header */
-#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01
-/* Patch generation hour & minutes */
-#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04
-/* Patch generation month & day */
-#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05
-/* Patch generation year */
-#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06
-/* Patch major & minor numbers */
-#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07
-
-#define NVM_MAC_ADDR 0x0000
-#define NVM_SUB_DEV_ID 0x000B
-#define NVM_SUB_VEN_ID 0x000C
-#define NVM_DEV_ID 0x000D
-#define NVM_VEN_ID 0x000E
-#define NVM_INIT_CTRL_2 0x000F
-#define NVM_INIT_CTRL_4 0x0013
-#define NVM_LED_1_CFG 0x001C
-#define NVM_LED_0_2_CFG 0x001F
-
-#define NVM_COMPAT_VALID_CSUM 0x0001
-#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040
-
-#define NVM_ETS_CFG 0x003E
-#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0
-#define NVM_ETS_LTHRES_DELTA_SHIFT 6
-#define NVM_ETS_TYPE_MASK 0x0038
-#define NVM_ETS_TYPE_SHIFT 3
-#define NVM_ETS_TYPE_EMC 0x000
-#define NVM_ETS_NUM_SENSORS_MASK 0x0007
-#define NVM_ETS_DATA_LOC_MASK 0x3C00
-#define NVM_ETS_DATA_LOC_SHIFT 10
-#define NVM_ETS_DATA_INDEX_MASK 0x0300
-#define NVM_ETS_DATA_INDEX_SHIFT 8
-#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF
-#define NVM_INIT_CONTROL2_REG 0x000F
-#define NVM_INIT_CONTROL3_PORT_B 0x0014
-#define NVM_INIT_3GIO_3 0x001A
-#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
-#define NVM_INIT_CONTROL3_PORT_A 0x0024
-#define NVM_CFG 0x0012
-#define NVM_ALT_MAC_ADDR_PTR 0x0037
-#define NVM_CHECKSUM_REG 0x003F
-#define NVM_COMPATIBILITY_REG_3 0x0003
-#define NVM_COMPATIBILITY_BIT_MASK 0x8000
-
-#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
-#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
-#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */
-#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */
-
-#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0)
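/* Editorial note: worked example for the macro above. Per-port NVM config
 * blocks on the 82580 sit at word offsets 0x00, 0x80, 0xC0 and 0x100 for
 * LAN functions 0-3, e.g. NVM_82580_LAN_FUNC_OFFSET(2) == 0x40 + 0x40 * 2
 * == 0xC0.
 */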
-
-/* Mask bits for fields in Word 0x24 of the NVM */
-#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */
-#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed externally */
-/* Offset of Link Mode bits for 82575/82576 */
-#define NVM_WORD24_LNK_MODE_OFFSET 8
-/* Offset of Link Mode bits for 82580 up */
-#define NVM_WORD24_82580_LNK_MODE_OFFSET 4
-
-
-/* Mask bits for fields in Word 0x0f of the NVM */
-#define NVM_WORD0F_PAUSE_MASK 0x3000
-#define NVM_WORD0F_PAUSE 0x1000
-#define NVM_WORD0F_ASM_DIR 0x2000
-
-/* Mask bits for fields in Word 0x1a of the NVM */
-#define NVM_WORD1A_ASPM_MASK 0x000C
-
-/* Mask bits for fields in Word 0x03 of the EEPROM */
-#define NVM_COMPAT_LOM 0x0800
-
-/* length of string needed to store PBA number */
-#define E1000_PBANUM_LENGTH 11
-
-/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
-#define NVM_SUM 0xBABA
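/* Editorial sketch, not part of the original Intel sources: the checksum
 * rule stated above. Words 0x00 through NVM_CHECKSUM_REG must sum, modulo
 * 2^16, to NVM_SUM; hw->nvm.ops.read() is assumed per e1000_hw.h.
 */
static inline s32 e1000_sketch_validate_nvm_checksum(struct e1000_hw *hw)
{
	u16 i, word, checksum = 0;

	for (i = 0; i <= NVM_CHECKSUM_REG; i++) {
		if (hw->nvm.ops.read(hw, i, 1, &word))
			return -E1000_ERR_NVM;
		checksum += word;
	}
	return (checksum == (u16)NVM_SUM) ? E1000_SUCCESS : -E1000_ERR_NVM;
}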
-
-/* PBA (printed board assembly) number words */
-#define NVM_PBA_OFFSET_0 8
-#define NVM_PBA_OFFSET_1 9
-#define NVM_PBA_PTR_GUARD 0xFAFA
-#define NVM_RESERVED_WORD 0xFFFF
-#define NVM_WORD_SIZE_BASE_SHIFT 6
-
-/* NVM Commands - SPI */
-#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
-#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
-#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
-#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
-#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
-#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
-
-/* SPI NVM Status Register */
-#define NVM_STATUS_RDY_SPI 0x01
-
-/* Word definitions for ID LED Settings */
-#define ID_LED_RESERVED_0000 0x0000
-#define ID_LED_RESERVED_FFFF 0xFFFF
-#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
- (ID_LED_OFF1_OFF2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_DEF1_DEF2))
-#define ID_LED_DEF1_DEF2 0x1
-#define ID_LED_DEF1_ON2 0x2
-#define ID_LED_DEF1_OFF2 0x3
-#define ID_LED_ON1_DEF2 0x4
-#define ID_LED_ON1_ON2 0x5
-#define ID_LED_ON1_OFF2 0x6
-#define ID_LED_OFF1_DEF2 0x7
-#define ID_LED_OFF1_ON2 0x8
-#define ID_LED_OFF1_OFF2 0x9
-
-#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
-#define IGP_ACTIVITY_LED_ENABLE 0x0300
-#define IGP_LED3_MODE 0x07000000
-
-/* PCI/PCI-X/PCI-EX Config space */
-#define PCI_HEADER_TYPE_REGISTER 0x0E
-#define PCIE_LINK_STATUS 0x12
-#define PCIE_DEVICE_CONTROL2 0x28
-
-#define PCI_HEADER_TYPE_MULTIFUNC 0x80
-#define PCIE_LINK_WIDTH_MASK 0x3F0
-#define PCIE_LINK_WIDTH_SHIFT 4
-#define PCIE_LINK_SPEED_MASK 0x0F
-#define PCIE_LINK_SPEED_2500 0x01
-#define PCIE_LINK_SPEED_5000 0x02
-#define PCIE_DEVICE_CONTROL2_16ms 0x0005
-
-#ifndef ETH_ADDR_LEN
-#define ETH_ADDR_LEN 6
-#endif
-
-#define PHY_REVISION_MASK 0xFFFFFFF0
-#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
-#define MAX_PHY_MULTI_PAGE_REG 0xF
-
-/* Bit definitions for valid PHY IDs.
- * I = Integrated
- * E = External
- */
-#define M88E1000_E_PHY_ID 0x01410C50
-#define M88E1000_I_PHY_ID 0x01410C30
-#define M88E1011_I_PHY_ID 0x01410C20
-#define IGP01E1000_I_PHY_ID 0x02A80380
-#define M88E1111_I_PHY_ID 0x01410CC0
-#define M88E1543_E_PHY_ID 0x01410EA0
-#define M88E1112_E_PHY_ID 0x01410C90
-#define I347AT4_E_PHY_ID 0x01410DC0
-#define M88E1340M_E_PHY_ID 0x01410DF0
-#define GG82563_E_PHY_ID 0x01410CA0
-#define IGP03E1000_E_PHY_ID 0x02A80390
-#define IFE_E_PHY_ID 0x02A80330
-#define IFE_PLUS_E_PHY_ID 0x02A80320
-#define IFE_C_E_PHY_ID 0x02A80310
-#define I82580_I_PHY_ID 0x015403A0
-#define I350_I_PHY_ID 0x015403B0
-#define I210_I_PHY_ID 0x01410C00
-#define IGP04E1000_E_PHY_ID 0x02A80391
-#define M88_VENDOR 0x0141
-
-/* M88E1000 Specific Registers */
-#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */
-#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */
-#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */
-#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
-
-#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
-#define M88E1000_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */
-
-/* M88E1000 PHY Specific Control Register */
-#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */
-/* MDI Crossover Mode bits 6:5 Manual MDI configuration */
-#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000
-#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
-/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
-#define M88E1000_PSCR_AUTO_X_1000T 0x0040
-/* Auto crossover enabled all speeds */
-#define M88E1000_PSCR_AUTO_X_MODE 0x0060
-#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */
-
-/* M88E1000 PHY Specific Status Register */
-#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
-#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
-#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
-/* 0 = <50M
- * 1 = 50-80M
- * 2 = 80-110M
- * 3 = 110-140M
- * 4 = >140M
- */
-#define M88E1000_PSSR_CABLE_LENGTH 0x0380
-#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
-#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
-#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 15:14 */
-#define M88E1000_PSSR_1000MBS 0x8000 /* bits 15:14 = 10 -> 1000 Mb/s */
-
-#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
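/* Editorial sketch, not part of the original Intel sources: turning the
 * 3-bit PSSR cable-length code into the ranges documented above. Codes
 * 5-7 are treated as the longest bucket here; that choice is an
 * assumption.
 */
static inline void e1000_sketch_cable_range(u16 phy_status,
					    u16 *min_m, u16 *max_m)
{
	static const u16 min_tbl[5] = { 0, 50, 80, 110, 140 };
	static const u16 max_tbl[5] = { 50, 80, 110, 140, 999 };
	u16 idx = (phy_status & M88E1000_PSSR_CABLE_LENGTH) >>
		  M88E1000_PSSR_CABLE_LENGTH_SHIFT;

	if (idx > 4)
		idx = 4;
	*min_m = min_tbl[idx];
	*max_m = max_tbl[idx];
}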
-
-/* Number of times we will attempt to autonegotiate before downshifting if we
- * are the master
- */
-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
-/* Number of times we will attempt to autonegotiate before downshifting if we
- * are the slave
- */
-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
-#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
-
-/* Intel I347AT4 Registers */
-#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */
-#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */
-#define I347AT4_PAGE_SELECT 0x16
-
-/* I347AT4 Extended PHY Specific Control Register */
-
-/* Number of times we will attempt to autonegotiate before downshifting */
-#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
-#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000
-#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000
-#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000
-#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000
-#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000
-#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000
-#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000
-#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000
-#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000
-
-/* I347AT4 PHY Cable Diagnostics Control */
-#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
-
-/* M88E1112 only registers */
-#define M88E1112_VCT_DSP_DISTANCE 0x001A
-
-/* M88EC018 Rev 2 specific DownShift settings */
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
-
-/* Bits...
- * 15-5: page
- * 4-0: register offset
- */
-#define GG82563_PAGE_SHIFT 5
-#define GG82563_REG(page, reg) \
- (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
-#define GG82563_MIN_ALT_REG 30
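/* Editorial note: worked example for GG82563_REG(). Page 193, register 16
 * packs to (193 << GG82563_PAGE_SHIFT) | 16 == (193 << 5) | 16 == 0x1830,
 * which is how GG82563_PHY_KMRN_MODE_CTRL below is formed.
 */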
-
-/* GG82563 Specific Registers */
-#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */
-#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */
-#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */
-#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */
-
-/* MAC Specific Control Register */
-#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21)
-
-#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */
-
-/* Page 193 - Port Control Registers */
-/* Kumeran Mode Control */
-#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16)
-#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */
-
-/* Page 194 - KMRN Registers */
-#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */
-
-/* MDI Control */
-#define E1000_MDIC_REG_MASK 0x001F0000
-#define E1000_MDIC_REG_SHIFT 16
-#define E1000_MDIC_PHY_MASK 0x03E00000
-#define E1000_MDIC_PHY_SHIFT 21
-#define E1000_MDIC_OP_WRITE 0x04000000
-#define E1000_MDIC_OP_READ 0x08000000
-#define E1000_MDIC_READY 0x10000000
-#define E1000_MDIC_ERROR 0x40000000
-#define E1000_MDIC_DEST 0x80000000
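/* Editorial sketch, not part of the original Intel sources: shaping an
 * MDIC read from the fields above and polling for READY/ERROR. The
 * E1000_MDIC register offset, usec_delay() and the poll count are
 * assumptions, not taken from this file.
 */
static inline s32 e1000_sketch_read_phy_mdic(struct e1000_hw *hw,
					     u32 offset, u16 *data)
{
	u32 i, mdic;

	mdic = (offset << E1000_MDIC_REG_SHIFT) |
	       (hw->phy.addr << E1000_MDIC_PHY_SHIFT) |
	       E1000_MDIC_OP_READ;
	E1000_WRITE_REG(hw, E1000_MDIC, mdic);

	for (i = 0; i < 1920; i++) {
		usec_delay(50);
		mdic = E1000_READ_REG(hw, E1000_MDIC);
		if (mdic & E1000_MDIC_READY)
			break;
	}
	if (!(mdic & E1000_MDIC_READY) || (mdic & E1000_MDIC_ERROR))
		return -E1000_ERR_PHY;
	*data = (u16)mdic;
	return E1000_SUCCESS;
}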
-
-/* SerDes Control */
-#define E1000_GEN_CTL_READY 0x80000000
-#define E1000_GEN_CTL_ADDRESS_SHIFT 8
-#define E1000_GEN_POLL_TIMEOUT 640
-
-/* LinkSec register fields */
-#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000
-#define E1000_LSECTXCAP_SUM_SHIFT 16
-#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000
-#define E1000_LSECRXCAP_SUM_SHIFT 16
-
-#define E1000_LSECTXCTRL_EN_MASK 0x00000003
-#define E1000_LSECTXCTRL_DISABLE 0x0
-#define E1000_LSECTXCTRL_AUTH 0x1
-#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2
-#define E1000_LSECTXCTRL_AISCI 0x00000020
-#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
-#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8
-
-#define E1000_LSECRXCTRL_EN_MASK 0x0000000C
-#define E1000_LSECRXCTRL_EN_SHIFT 2
-#define E1000_LSECRXCTRL_DISABLE 0x0
-#define E1000_LSECRXCTRL_CHECK 0x1
-#define E1000_LSECRXCTRL_STRICT 0x2
-#define E1000_LSECRXCTRL_DROP 0x3
-#define E1000_LSECRXCTRL_PLSH 0x00000040
-#define E1000_LSECRXCTRL_RP 0x00000080
-#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33
-
-/* Tx Rate-Scheduler Config fields */
-#define E1000_RTTBCNRC_RS_ENA 0x80000000
-#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF
-#define E1000_RTTBCNRC_RF_INT_SHIFT 14
-#define E1000_RTTBCNRC_RF_INT_MASK \
- (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
-
-/* DMA Coalescing register fields */
-/* DMA Coalescing Watchdog Timer */
-#define E1000_DMACR_DMACWT_MASK 0x00003FFF
-/* DMA Coalescing Rx Threshold */
-#define E1000_DMACR_DMACTHR_MASK 0x00FF0000
-#define E1000_DMACR_DMACTHR_SHIFT 16
-/* Lx when no PCIe transactions */
-#define E1000_DMACR_DMAC_LX_MASK 0x30000000
-#define E1000_DMACR_DMAC_LX_SHIFT 28
-#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
-/* DMA Coalescing BMC-to-OS Watchdog Enable */
-#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
-
-/* DMA Coalescing Transmit Threshold */
-#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF
-
-#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
-
-/* Rx Traffic Rate Threshold */
-#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF
-/* Rx packet rate in current window */
-#define E1000_DMCRTRH_LRPRCW 0x80000000
-
-/* DMA Coal Rx Traffic Current Count */
-#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF
-
-/* Flow ctrl Rx Threshold High val */
-#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0
-#define E1000_FCRTC_RTH_COAL_SHIFT 4
-/* Lx power decision based on DMA coal */
-#define E1000_PCIEMISC_LX_DECISION 0x00000080
-
-#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
-#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */
-#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */
-
-/* Proxy Filter Control */
-#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */
-#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */
-#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */
-#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */
-#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */
-#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */
-#define E1000_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */
-#define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
-#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */
-/* Proxy Status */
-#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */
-
-/* Firmware Status */
-#define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */
-/* VF Control */
-#define E1000_VTCTRL_RST 0x04000000 /* Reset VF */
-
-#define E1000_STATUS_LAN_ID_MASK 0x0000000C /* Mask for Lan ID field */
-/* Lan ID bit field offset in status register */
-#define E1000_STATUS_LAN_ID_OFFSET 2
-#define E1000_VFTA_ENTRIES 128
-#ifndef E1000_UNUSEDARG
-#define E1000_UNUSEDARG
-#endif /* E1000_UNUSEDARG */
-#endif /* _E1000_DEFINES_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_hw.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_hw.h
deleted file mode 100755
index 347cef71..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_hw.h
+++ /dev/null
@@ -1,793 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _E1000_HW_H_
-#define _E1000_HW_H_
-
-#include "e1000_osdep.h"
-#include "e1000_regs.h"
-#include "e1000_defines.h"
-
-struct e1000_hw;
-
-#define E1000_DEV_ID_82576 0x10C9
-#define E1000_DEV_ID_82576_FIBER 0x10E6
-#define E1000_DEV_ID_82576_SERDES 0x10E7
-#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
-#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
-#define E1000_DEV_ID_82576_NS 0x150A
-#define E1000_DEV_ID_82576_NS_SERDES 0x1518
-#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
-#define E1000_DEV_ID_82575EB_COPPER 0x10A7
-#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
-#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
-#define E1000_DEV_ID_82580_COPPER 0x150E
-#define E1000_DEV_ID_82580_FIBER 0x150F
-#define E1000_DEV_ID_82580_SERDES 0x1510
-#define E1000_DEV_ID_82580_SGMII 0x1511
-#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
-#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
-#define E1000_DEV_ID_I350_COPPER 0x1521
-#define E1000_DEV_ID_I350_FIBER 0x1522
-#define E1000_DEV_ID_I350_SERDES 0x1523
-#define E1000_DEV_ID_I350_SGMII 0x1524
-#define E1000_DEV_ID_I350_DA4 0x1546
-#define E1000_DEV_ID_I210_COPPER 0x1533
-#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
-#define E1000_DEV_ID_I210_COPPER_IT 0x1535
-#define E1000_DEV_ID_I210_FIBER 0x1536
-#define E1000_DEV_ID_I210_SERDES 0x1537
-#define E1000_DEV_ID_I210_SGMII 0x1538
-#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B
-#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C
-#define E1000_DEV_ID_I211_COPPER 0x1539
-#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
-#define E1000_DEV_ID_I354_SGMII 0x1F41
-#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45
-#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
-#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
-#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
-#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
-
-#define E1000_REVISION_0 0
-#define E1000_REVISION_1 1
-#define E1000_REVISION_2 2
-#define E1000_REVISION_3 3
-#define E1000_REVISION_4 4
-
-#define E1000_FUNC_0 0
-#define E1000_FUNC_1 1
-#define E1000_FUNC_2 2
-#define E1000_FUNC_3 3
-
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9
-
-enum e1000_mac_type {
- e1000_undefined = 0,
- e1000_82575,
- e1000_82576,
- e1000_82580,
- e1000_i350,
- e1000_i354,
- e1000_i210,
- e1000_i211,
- e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
-};
-
-enum e1000_media_type {
- e1000_media_type_unknown = 0,
- e1000_media_type_copper = 1,
- e1000_media_type_fiber = 2,
- e1000_media_type_internal_serdes = 3,
- e1000_num_media_types
-};
-
-enum e1000_nvm_type {
- e1000_nvm_unknown = 0,
- e1000_nvm_none,
- e1000_nvm_eeprom_spi,
- e1000_nvm_flash_hw,
- e1000_nvm_invm,
- e1000_nvm_flash_sw
-};
-
-enum e1000_nvm_override {
- e1000_nvm_override_none = 0,
- e1000_nvm_override_spi_small,
- e1000_nvm_override_spi_large,
-};
-
-enum e1000_phy_type {
- e1000_phy_unknown = 0,
- e1000_phy_none,
- e1000_phy_m88,
- e1000_phy_igp,
- e1000_phy_igp_2,
- e1000_phy_gg82563,
- e1000_phy_igp_3,
- e1000_phy_ife,
- e1000_phy_82580,
- e1000_phy_vf,
- e1000_phy_i210,
-};
-
-enum e1000_bus_type {
- e1000_bus_type_unknown = 0,
- e1000_bus_type_pci,
- e1000_bus_type_pcix,
- e1000_bus_type_pci_express,
- e1000_bus_type_reserved
-};
-
-enum e1000_bus_speed {
- e1000_bus_speed_unknown = 0,
- e1000_bus_speed_33,
- e1000_bus_speed_66,
- e1000_bus_speed_100,
- e1000_bus_speed_120,
- e1000_bus_speed_133,
- e1000_bus_speed_2500,
- e1000_bus_speed_5000,
- e1000_bus_speed_reserved
-};
-
-enum e1000_bus_width {
- e1000_bus_width_unknown = 0,
- e1000_bus_width_pcie_x1,
- e1000_bus_width_pcie_x2,
- e1000_bus_width_pcie_x4 = 4,
- e1000_bus_width_pcie_x8 = 8,
- e1000_bus_width_32,
- e1000_bus_width_64,
- e1000_bus_width_reserved
-};
-
-enum e1000_1000t_rx_status {
- e1000_1000t_rx_status_not_ok = 0,
- e1000_1000t_rx_status_ok,
- e1000_1000t_rx_status_undefined = 0xFF
-};
-
-enum e1000_rev_polarity {
- e1000_rev_polarity_normal = 0,
- e1000_rev_polarity_reversed,
- e1000_rev_polarity_undefined = 0xFF
-};
-
-enum e1000_fc_mode {
- e1000_fc_none = 0,
- e1000_fc_rx_pause,
- e1000_fc_tx_pause,
- e1000_fc_full,
- e1000_fc_default = 0xFF
-};
-
-enum e1000_ms_type {
- e1000_ms_hw_default = 0,
- e1000_ms_force_master,
- e1000_ms_force_slave,
- e1000_ms_auto
-};
-
-enum e1000_smart_speed {
- e1000_smart_speed_default = 0,
- e1000_smart_speed_on,
- e1000_smart_speed_off
-};
-
-enum e1000_serdes_link_state {
- e1000_serdes_link_down = 0,
- e1000_serdes_link_autoneg_progress,
- e1000_serdes_link_autoneg_complete,
- e1000_serdes_link_forced_up
-};
-
-#ifndef __le16
-#define __le16 u16
-#endif
-#ifndef __le32
-#define __le32 u32
-#endif
-#ifndef __le64
-#define __le64 u64
-#endif
-/* Receive Descriptor */
-struct e1000_rx_desc {
- __le64 buffer_addr; /* Address of the descriptor's data buffer */
- __le16 length; /* Length of data DMAed into data buffer */
- __le16 csum; /* Packet checksum */
- u8 status; /* Descriptor status */
- u8 errors; /* Descriptor Errors */
- __le16 special;
-};
-
-/* Receive Descriptor - Extended */
-union e1000_rx_desc_extended {
- struct {
- __le64 buffer_addr;
- __le64 reserved;
- } read;
- struct {
- struct {
- __le32 mrq; /* Multiple Rx Queues */
- union {
- __le32 rss; /* RSS Hash */
- struct {
- __le16 ip_id; /* IP id */
- __le16 csum; /* Packet Checksum */
- } csum_ip;
- } hi_dword;
- } lower;
- struct {
- __le32 status_error; /* ext status/error */
- __le16 length;
- __le16 vlan; /* VLAN tag */
- } upper;
- } wb; /* writeback */
-};
-
-#define MAX_PS_BUFFERS 4
-
-/* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
-
-/* Receive Descriptor - Packet Split */
-union e1000_rx_desc_packet_split {
- struct {
- /* one buffer for protocol header(s), three data buffers */
- __le64 buffer_addr[MAX_PS_BUFFERS];
- } read;
- struct {
- struct {
- __le32 mrq; /* Multiple Rx Queues */
- union {
- __le32 rss; /* RSS Hash */
- struct {
- __le16 ip_id; /* IP id */
- __le16 csum; /* Packet Checksum */
- } csum_ip;
- } hi_dword;
- } lower;
- struct {
- __le32 status_error; /* ext status/error */
- __le16 length0; /* length of buffer 0 */
- __le16 vlan; /* VLAN tag */
- } middle;
- struct {
- __le16 header_status;
- /* length of buffers 1-3 */
- __le16 length[PS_PAGE_BUFFERS];
- } upper;
- __le64 reserved;
- } wb; /* writeback */
-};
-
-/* Transmit Descriptor */
-struct e1000_tx_desc {
- __le64 buffer_addr; /* Address of the descriptor's data buffer */
- union {
- __le32 data;
- struct {
- __le16 length; /* Data buffer length */
- u8 cso; /* Checksum offset */
- u8 cmd; /* Descriptor control */
- } flags;
- } lower;
- union {
- __le32 data;
- struct {
- u8 status; /* Descriptor status */
- u8 css; /* Checksum start */
- __le16 special;
- } fields;
- } upper;
-};
-
-/* Offload Context Descriptor */
-struct e1000_context_desc {
- union {
- __le32 ip_config;
- struct {
- u8 ipcss; /* IP checksum start */
- u8 ipcso; /* IP checksum offset */
- __le16 ipcse; /* IP checksum end */
- } ip_fields;
- } lower_setup;
- union {
- __le32 tcp_config;
- struct {
- u8 tucss; /* TCP checksum start */
- u8 tucso; /* TCP checksum offset */
- __le16 tucse; /* TCP checksum end */
- } tcp_fields;
- } upper_setup;
- __le32 cmd_and_length;
- union {
- __le32 data;
- struct {
- u8 status; /* Descriptor status */
- u8 hdr_len; /* Header length */
- __le16 mss; /* Maximum segment size */
- } fields;
- } tcp_seg_setup;
-};
-
-/* Offload data descriptor */
-struct e1000_data_desc {
-	__le64 buffer_addr; /* Address of the descriptor's data buffer */
- union {
- __le32 data;
- struct {
- __le16 length; /* Data buffer length */
- u8 typ_len_ext;
- u8 cmd;
- } flags;
- } lower;
- union {
- __le32 data;
- struct {
- u8 status; /* Descriptor status */
- u8 popts; /* Packet Options */
- __le16 special;
- } fields;
- } upper;
-};
-
-/* Statistics counters collected by the MAC */
-struct e1000_hw_stats {
- u64 crcerrs;
- u64 algnerrc;
- u64 symerrs;
- u64 rxerrc;
- u64 mpc;
- u64 scc;
- u64 ecol;
- u64 mcc;
- u64 latecol;
- u64 colc;
- u64 dc;
- u64 tncrs;
- u64 sec;
- u64 cexterr;
- u64 rlec;
- u64 xonrxc;
- u64 xontxc;
- u64 xoffrxc;
- u64 xofftxc;
- u64 fcruc;
- u64 prc64;
- u64 prc127;
- u64 prc255;
- u64 prc511;
- u64 prc1023;
- u64 prc1522;
- u64 gprc;
- u64 bprc;
- u64 mprc;
- u64 gptc;
- u64 gorc;
- u64 gotc;
- u64 rnbc;
- u64 ruc;
- u64 rfc;
- u64 roc;
- u64 rjc;
- u64 mgprc;
- u64 mgpdc;
- u64 mgptc;
- u64 tor;
- u64 tot;
- u64 tpr;
- u64 tpt;
- u64 ptc64;
- u64 ptc127;
- u64 ptc255;
- u64 ptc511;
- u64 ptc1023;
- u64 ptc1522;
- u64 mptc;
- u64 bptc;
- u64 tsctc;
- u64 tsctfc;
- u64 iac;
- u64 icrxptc;
- u64 icrxatc;
- u64 ictxptc;
- u64 ictxatc;
- u64 ictxqec;
- u64 ictxqmtc;
- u64 icrxdmtc;
- u64 icrxoc;
- u64 cbtmpc;
- u64 htdpmc;
- u64 cbrdpc;
- u64 cbrmpc;
- u64 rpthc;
- u64 hgptc;
- u64 htcbdpc;
- u64 hgorc;
- u64 hgotc;
- u64 lenerrs;
- u64 scvpc;
- u64 hrmpc;
- u64 doosync;
- u64 o2bgptc;
- u64 o2bspc;
- u64 b2ospc;
- u64 b2ogprc;
-};
-
-
-struct e1000_phy_stats {
- u32 idle_errors;
- u32 receive_errors;
-};
-
-struct e1000_host_mng_dhcp_cookie {
- u32 signature;
- u8 status;
- u8 reserved0;
- u16 vlan_id;
- u32 reserved1;
- u16 reserved2;
- u8 reserved3;
- u8 checksum;
-};
-
-/* Host Interface "Rev 1" */
-struct e1000_host_command_header {
- u8 command_id;
- u8 command_length;
- u8 command_options;
- u8 checksum;
-};
-
-#define E1000_HI_MAX_DATA_LENGTH 252
-struct e1000_host_command_info {
- struct e1000_host_command_header command_header;
- u8 command_data[E1000_HI_MAX_DATA_LENGTH];
-};
-
-/* Host Interface "Rev 2" */
-struct e1000_host_mng_command_header {
- u8 command_id;
- u8 checksum;
- u16 reserved1;
- u16 reserved2;
- u16 command_length;
-};
-
-#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
-struct e1000_host_mng_command_info {
- struct e1000_host_mng_command_header command_header;
- u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
-};
-
-#include "e1000_mac.h"
-#include "e1000_phy.h"
-#include "e1000_nvm.h"
-#include "e1000_manage.h"
-#include "e1000_mbx.h"
-
-/* Function pointers for the MAC. */
-struct e1000_mac_operations {
- s32 (*init_params)(struct e1000_hw *);
- s32 (*id_led_init)(struct e1000_hw *);
- s32 (*blink_led)(struct e1000_hw *);
- bool (*check_mng_mode)(struct e1000_hw *);
- s32 (*check_for_link)(struct e1000_hw *);
- s32 (*cleanup_led)(struct e1000_hw *);
- void (*clear_hw_cntrs)(struct e1000_hw *);
- void (*clear_vfta)(struct e1000_hw *);
- s32 (*get_bus_info)(struct e1000_hw *);
- void (*set_lan_id)(struct e1000_hw *);
- s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
- s32 (*led_on)(struct e1000_hw *);
- s32 (*led_off)(struct e1000_hw *);
- void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
- s32 (*reset_hw)(struct e1000_hw *);
- s32 (*init_hw)(struct e1000_hw *);
- void (*shutdown_serdes)(struct e1000_hw *);
- void (*power_up_serdes)(struct e1000_hw *);
- s32 (*setup_link)(struct e1000_hw *);
- s32 (*setup_physical_interface)(struct e1000_hw *);
- s32 (*setup_led)(struct e1000_hw *);
- void (*write_vfta)(struct e1000_hw *, u32, u32);
- void (*config_collision_dist)(struct e1000_hw *);
- void (*rar_set)(struct e1000_hw *, u8*, u32);
- s32 (*read_mac_addr)(struct e1000_hw *);
- s32 (*validate_mdi_setting)(struct e1000_hw *);
- s32 (*get_thermal_sensor_data)(struct e1000_hw *);
- s32 (*init_thermal_sensor_thresh)(struct e1000_hw *);
- s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
- void (*release_swfw_sync)(struct e1000_hw *, u16);
-};
-
-/* When to use various PHY register access functions:
- *
- * Func Caller
- * Function Does Does When to use
- * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * X_reg L,P,A n/a for simple PHY reg accesses
- * X_reg_locked P,A L for multiple accesses of different regs
- * on different pages
- * X_reg_page A L,P for multiple accesses of different regs
- * on the same page
- *
- * Where X=[read|write], L=locking, P=sets page, A=register access
- *
- */
-struct e1000_phy_operations {
- s32 (*init_params)(struct e1000_hw *);
- s32 (*acquire)(struct e1000_hw *);
- s32 (*check_polarity)(struct e1000_hw *);
- s32 (*check_reset_block)(struct e1000_hw *);
- s32 (*commit)(struct e1000_hw *);
- s32 (*force_speed_duplex)(struct e1000_hw *);
- s32 (*get_cfg_done)(struct e1000_hw *hw);
- s32 (*get_cable_length)(struct e1000_hw *);
- s32 (*get_info)(struct e1000_hw *);
- s32 (*set_page)(struct e1000_hw *, u16);
- s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
- s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
- s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *);
- void (*release)(struct e1000_hw *);
- s32 (*reset)(struct e1000_hw *);
- s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
- s32 (*write_reg)(struct e1000_hw *, u32, u16);
- s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
- s32 (*write_reg_page)(struct e1000_hw *, u32, u16);
- void (*power_up)(struct e1000_hw *);
- void (*power_down)(struct e1000_hw *);
- s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
- s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
-};
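/* Editorial sketch, not part of the original Intel sources: the pattern
 * the table above recommends for several registers on one page -- take
 * the lock once, set the page once, then use the _page variants. The
 * register offsets 16 and 17 are placeholders.
 */
static inline s32 e1000_sketch_read_same_page(struct e1000_hw *hw, u16 page,
					      u16 *a, u16 *b)
{
	s32 ret = hw->phy.ops.acquire(hw);	/* L: lock */

	if (ret)
		return ret;
	ret = hw->phy.ops.set_page(hw, page);	/* P: select page */
	if (!ret)
		ret = hw->phy.ops.read_reg_page(hw, 16, a);	/* A */
	if (!ret)
		ret = hw->phy.ops.read_reg_page(hw, 17, b);	/* A */
	hw->phy.ops.release(hw);
	return ret;
}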
-
-/* Function pointers for the NVM. */
-struct e1000_nvm_operations {
- s32 (*init_params)(struct e1000_hw *);
- s32 (*acquire)(struct e1000_hw *);
- s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
- void (*release)(struct e1000_hw *);
- void (*reload)(struct e1000_hw *);
- s32 (*update)(struct e1000_hw *);
- s32 (*valid_led_default)(struct e1000_hw *, u16 *);
- s32 (*validate)(struct e1000_hw *);
- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
-};
-
-#define E1000_MAX_SENSORS 3
-
-struct e1000_thermal_diode_data {
- u8 location;
- u8 temp;
- u8 caution_thresh;
- u8 max_op_thresh;
-};
-
-struct e1000_thermal_sensor_data {
- struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS];
-};
-
-struct e1000_mac_info {
- struct e1000_mac_operations ops;
- u8 addr[ETH_ADDR_LEN];
- u8 perm_addr[ETH_ADDR_LEN];
-
- enum e1000_mac_type type;
-
- u32 collision_delta;
- u32 ledctl_default;
- u32 ledctl_mode1;
- u32 ledctl_mode2;
- u32 mc_filter_type;
- u32 tx_packet_delta;
- u32 txcw;
-
- u16 current_ifs_val;
- u16 ifs_max_val;
- u16 ifs_min_val;
- u16 ifs_ratio;
- u16 ifs_step_size;
- u16 mta_reg_count;
- u16 uta_reg_count;
-
- /* Maximum size of the MTA register table in all supported adapters */
- #define MAX_MTA_REG 128
- u32 mta_shadow[MAX_MTA_REG];
- u16 rar_entry_count;
-
- u8 forced_speed_duplex;
-
- bool adaptive_ifs;
- bool has_fwsm;
- bool arc_subsystem_valid;
- bool asf_firmware_present;
- bool autoneg;
- bool autoneg_failed;
- bool get_link_status;
- bool in_ifs_mode;
- enum e1000_serdes_link_state serdes_link_state;
- bool serdes_has_link;
- bool tx_pkt_filtering;
- struct e1000_thermal_sensor_data thermal_sensor_data;
-};
-
-struct e1000_phy_info {
- struct e1000_phy_operations ops;
- enum e1000_phy_type type;
-
- enum e1000_1000t_rx_status local_rx;
- enum e1000_1000t_rx_status remote_rx;
- enum e1000_ms_type ms_type;
- enum e1000_ms_type original_ms_type;
- enum e1000_rev_polarity cable_polarity;
- enum e1000_smart_speed smart_speed;
-
- u32 addr;
- u32 id;
- u32 reset_delay_us; /* in usec */
- u32 revision;
-
- enum e1000_media_type media_type;
-
- u16 autoneg_advertised;
- u16 autoneg_mask;
- u16 cable_length;
- u16 max_cable_length;
- u16 min_cable_length;
-
- u8 mdix;
-
- bool disable_polarity_correction;
- bool is_mdix;
- bool polarity_correction;
- bool reset_disable;
- bool speed_downgraded;
- bool autoneg_wait_to_complete;
-};
-
-struct e1000_nvm_info {
- struct e1000_nvm_operations ops;
- enum e1000_nvm_type type;
- enum e1000_nvm_override override;
-
- u32 flash_bank_size;
- u32 flash_base_addr;
-
- u16 word_size;
- u16 delay_usec;
- u16 address_bits;
- u16 opcode_bits;
- u16 page_size;
-};
-
-struct e1000_bus_info {
- enum e1000_bus_type type;
- enum e1000_bus_speed speed;
- enum e1000_bus_width width;
-
- u16 func;
- u16 pci_cmd_word;
-};
-
-struct e1000_fc_info {
- u32 high_water; /* Flow control high-water mark */
- u32 low_water; /* Flow control low-water mark */
- u16 pause_time; /* Flow control pause timer */
- u16 refresh_time; /* Flow control refresh timer */
- bool send_xon; /* Flow control send XON */
- bool strict_ieee; /* Strict IEEE mode */
- enum e1000_fc_mode current_mode; /* FC mode in effect */
- enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
-};
-
-struct e1000_mbx_operations {
- s32 (*init_params)(struct e1000_hw *hw);
- s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
- s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
- s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
- s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
- s32 (*check_for_msg)(struct e1000_hw *, u16);
- s32 (*check_for_ack)(struct e1000_hw *, u16);
- s32 (*check_for_rst)(struct e1000_hw *, u16);
-};
-
-struct e1000_mbx_stats {
- u32 msgs_tx;
- u32 msgs_rx;
-
- u32 acks;
- u32 reqs;
- u32 rsts;
-};
-
-struct e1000_mbx_info {
- struct e1000_mbx_operations ops;
- struct e1000_mbx_stats stats;
- u32 timeout;
- u32 usec_delay;
- u16 size;
-};
-
-struct e1000_dev_spec_82575 {
- bool sgmii_active;
- bool global_device_reset;
- bool eee_disable;
- bool module_plugged;
- bool clear_semaphore_once;
- u32 mtu;
- struct sfp_e1000_flags eth_flags;
- u8 media_port;
- bool media_changed;
-};
-
-struct e1000_dev_spec_vf {
- u32 vf_number;
- u32 v2p_mailbox;
-};
-
-struct e1000_hw {
- void *back;
-
- u8 __iomem *hw_addr;
- u8 __iomem *flash_address;
- unsigned long io_base;
-
- struct e1000_mac_info mac;
- struct e1000_fc_info fc;
- struct e1000_phy_info phy;
- struct e1000_nvm_info nvm;
- struct e1000_bus_info bus;
- struct e1000_mbx_info mbx;
- struct e1000_host_mng_dhcp_cookie mng_cookie;
-
- union {
- struct e1000_dev_spec_82575 _82575;
- struct e1000_dev_spec_vf vf;
- } dev_spec;
-
- u16 device_id;
- u16 subsystem_vendor_id;
- u16 subsystem_device_id;
- u16 vendor_id;
-
- u8 revision_id;
-};
-
-#include "e1000_82575.h"
-#include "e1000_i210.h"
-
-/* These functions must be implemented by drivers */
-s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-
-#endif
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.c
deleted file mode 100755
index 1e9f3e6e..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.c
+++ /dev/null
@@ -1,909 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "e1000_api.h"
-
-
-static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
-static void e1000_release_nvm_i210(struct e1000_hw *hw);
-static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
-static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
-static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
-static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
-
-/**
- * e1000_acquire_nvm_i210 - Request for access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Acquire the necessary semaphores for exclusive access to the EEPROM.
- * Set the EEPROM access request bit and wait for EEPROM access grant bit.
- * Return successful if access grant bit set, else clear the request for
- * EEPROM access and return -E1000_ERR_NVM (-1).
- **/
-static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
-{
- s32 ret_val;
-
- DEBUGFUNC("e1000_acquire_nvm_i210");
-
- ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
-
- return ret_val;
-}
-
-/**
- * e1000_release_nvm_i210 - Release exclusive access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Stop any current commands to the EEPROM and clear the EEPROM request bit,
- * then release the semaphores acquired.
- **/
-static void e1000_release_nvm_i210(struct e1000_hw *hw)
-{
- DEBUGFUNC("e1000_release_nvm_i210");
-
- e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
-}
-
-/**
- * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
- * @hw: pointer to the HW structure
- * @mask: specifies which semaphore to acquire
- *
- * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
- * will also specify which port we're acquiring the lock for.
- **/
-s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
-{
- u32 swfw_sync;
- u32 swmask = mask;
- u32 fwmask = mask << 16;
- s32 ret_val = E1000_SUCCESS;
- s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
-
- DEBUGFUNC("e1000_acquire_swfw_sync_i210");
-
- while (i < timeout) {
- if (e1000_get_hw_semaphore_i210(hw)) {
- ret_val = -E1000_ERR_SWFW_SYNC;
- goto out;
- }
-
- swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
- if (!(swfw_sync & (fwmask | swmask)))
- break;
-
- /*
- * Firmware currently using resource (fwmask)
- * or other software thread using resource (swmask)
- */
- e1000_put_hw_semaphore_generic(hw);
- msec_delay_irq(5);
- i++;
- }
-
- if (i == timeout) {
- DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
- ret_val = -E1000_ERR_SWFW_SYNC;
- goto out;
- }
-
- swfw_sync |= swmask;
- E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
-
- e1000_put_hw_semaphore_generic(hw);
-
-out:
- return ret_val;
-}
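-/*
- * A note on the layout used above: SW_FW_SYNC keeps software ownership
- * flags in its low 16 bits and the matching firmware flags in the high
- * 16 bits, which is why fwmask is simply (mask << 16). A minimal usage
- * sketch (illustrative only, not part of the original driver):
- */
-#if 0 /* example only */
-static s32 example_nvm_access(struct e1000_hw *hw)
-{
- s32 ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
-
- if (ret_val)
- return ret_val;
- /* ... exclusive EEPROM access here ... */
- e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
- return E1000_SUCCESS;
-}
-#endif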
-
-/**
- * e1000_release_swfw_sync_i210 - Release SW/FW semaphore
- * @hw: pointer to the HW structure
- * @mask: specifies which semaphore to release
- *
- * Release the SW/FW semaphore used to access the PHY or NVM. The mask
- * will also specify which port we're releasing the lock for.
- **/
-void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
-{
- u32 swfw_sync;
-
- DEBUGFUNC("e1000_release_swfw_sync_i210");
-
- while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
- ; /* Empty */
-
- swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
- swfw_sync &= ~mask;
- E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
-
- e1000_put_hw_semaphore_generic(hw);
-}
-
-/**
- * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
- * @hw: pointer to the HW structure
- *
- * Acquire the HW semaphore to access the PHY or NVM
- **/
-static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
-{
- u32 swsm;
- s32 timeout = hw->nvm.word_size + 1;
- s32 i = 0;
-
- DEBUGFUNC("e1000_get_hw_semaphore_i210");
-
- /* Get the SW semaphore */
- while (i < timeout) {
- swsm = E1000_READ_REG(hw, E1000_SWSM);
- if (!(swsm & E1000_SWSM_SMBI))
- break;
-
- usec_delay(50);
- i++;
- }
-
- if (i == timeout) {
- /* In rare circumstances, the SW semaphore may already be held
- * unintentionally. Clear the semaphore once before giving up.
- */
- if (hw->dev_spec._82575.clear_semaphore_once) {
- hw->dev_spec._82575.clear_semaphore_once = false;
- e1000_put_hw_semaphore_generic(hw);
- for (i = 0; i < timeout; i++) {
- swsm = E1000_READ_REG(hw, E1000_SWSM);
- if (!(swsm & E1000_SWSM_SMBI))
- break;
-
- usec_delay(50);
- }
- }
-
- /* If we do not have the semaphore here, we have to give up. */
- if (i == timeout) {
- DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
- return -E1000_ERR_NVM;
- }
- }
-
- /* Get the FW semaphore. */
- for (i = 0; i < timeout; i++) {
- swsm = E1000_READ_REG(hw, E1000_SWSM);
- E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
-
- /* Semaphore acquired if bit latched */
- if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
- break;
-
- usec_delay(50);
- }
-
- if (i == timeout) {
- /* Release semaphores */
- e1000_put_hw_semaphore_generic(hw);
- DEBUGOUT("Driver can't access the NVM\n");
- return -E1000_ERR_NVM;
- }
-
- return E1000_SUCCESS;
-}
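-/*
- * The acquisition above is two-staged: SWSM.SMBI arbitrates among software
- * agents, then SWSM.SWESMBI arbitrates between software and firmware.
- * SWESMBI is confirmed by reading it back after the write, since only one
- * agent can latch it at a time.
- */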
-
-/**
- * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
- * @hw: pointer to the HW structure
- * @offset: offset of word in the Shadow Ram to read
- * @words: number of words to read
- * @data: word read from the Shadow Ram
- *
- * Reads a 16 bit word from the Shadow Ram using the EERD register.
- * Uses necessary synchronization semaphores.
- **/
-s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
-{
- s32 status = E1000_SUCCESS;
- u16 i, count;
-
- DEBUGFUNC("e1000_read_nvm_srrd_i210");
-
- /* We cannot hold the synchronization semaphores for too long,
- * because of the forceful takeover procedure. However, it is more
- * efficient to read in bursts than to synchronize access for each word. */
- for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
- count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
- E1000_EERD_EEWR_MAX_COUNT : (words - i);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
- status = e1000_read_nvm_eerd(hw, offset + i, count,
- data + i);
- hw->nvm.ops.release(hw);
- } else {
- status = E1000_ERR_SWFW_SYNC;
- }
-
- if (status != E1000_SUCCESS)
- break;
- }
-
- return status;
-}
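-/*
- * A worked example of the burst partitioning above, assuming
- * E1000_EERD_EEWR_MAX_COUNT is 512 (its usual definition): a request for
- * 1030 words is issued as bursts of 512, 512 and 6 words, releasing and
- * re-acquiring the semaphore between bursts so firmware can take the lock
- * in between.
- */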
-
-/**
- * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
- * @hw: pointer to the HW structure
- * @offset: offset within the Shadow RAM to be written to
- * @words: number of words to write
- * @data: 16 bit word(s) to be written to the Shadow RAM
- *
- * Writes data to Shadow RAM at offset using EEWR register.
- *
- * If e1000_update_nvm_checksum is not called after this function, the
- * data will not be committed to FLASH and also Shadow RAM will most likely
- * contain an invalid checksum.
- *
- * If error code is returned, data and Shadow RAM may be inconsistent - buffer
- * partially written.
- **/
-s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
-{
- s32 status = E1000_SUCCESS;
- u16 i, count;
-
- DEBUGFUNC("e1000_write_nvm_srwr_i210");
-
- /* We cannot hold the synchronization semaphores for too long,
- * because of the forceful takeover procedure. However, it is more
- * efficient to write in bursts than to synchronize access for each word. */
- for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
- count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
- E1000_EERD_EEWR_MAX_COUNT : (words - i);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
- status = e1000_write_nvm_srwr(hw, offset + i, count,
- data + i);
- hw->nvm.ops.release(hw);
- } else {
- status = E1000_ERR_SWFW_SYNC;
- }
-
- if (status != E1000_SUCCESS)
- break;
- }
-
- return status;
-}
-
-/**
- * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
- * @hw: pointer to the HW structure
- * @offset: offset within the Shadow Ram to be written to
- * @words: number of words to write
- * @data: 16 bit word(s) to be written to the Shadow Ram
- *
- * Writes data to Shadow Ram at offset using EEWR register.
- *
- * If e1000_update_nvm_checksum is not called after this function, the
- * Shadow Ram will most likely contain an invalid checksum.
- **/
-static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
-{
- struct e1000_nvm_info *nvm = &hw->nvm;
- u32 i, k, eewr = 0;
- u32 attempts = 100000;
- s32 ret_val = E1000_SUCCESS;
-
- DEBUGFUNC("e1000_write_nvm_srwr");
-
- /*
- * A check for invalid values: offset too large, too many words,
- * too many words for the offset, and not enough words.
- */
- if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
- (words == 0)) {
- DEBUGOUT("nvm parameter(s) out of bounds\n");
- ret_val = -E1000_ERR_NVM;
- goto out;
- }
-
- for (i = 0; i < words; i++) {
- eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
- (data[i] << E1000_NVM_RW_REG_DATA) |
- E1000_NVM_RW_REG_START;
-
- E1000_WRITE_REG(hw, E1000_SRWR, eewr);
-
- for (k = 0; k < attempts; k++) {
- if (E1000_NVM_RW_REG_DONE &
- E1000_READ_REG(hw, E1000_SRWR)) {
- ret_val = E1000_SUCCESS;
- break;
- }
- usec_delay(5);
- }
-
- if (ret_val != E1000_SUCCESS) {
- DEBUGOUT("Shadow RAM write EEWR timed out\n");
- break;
- }
- }
-
-out:
- return ret_val;
-}
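-/*
- * The SRWR dword composed above, assuming the usual definitions
- * (E1000_NVM_RW_ADDR_SHIFT = 2, E1000_NVM_RW_REG_DATA = 16, start bit
- * 0x1, done bit 0x2):
- *
- *   31        16 15         2  1   0
- *   [   data    ][  address  ][D][S]
- *
- * One such dword is written per word, and the DONE bit is polled with a
- * 5 usec delay, i.e. a worst case of about 0.5 s per word before the
- * timeout message above is printed.
- */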
-
-/**
- * e1000_read_invm_word_i210 - Reads OTP
- * @hw: pointer to the HW structure
- * @address: the word address (aka eeprom offset) to read
- * @data: pointer to the data read
- *
- * Reads 16-bit words from the OTP. Return error when the word is not
- * stored in OTP.
- **/
-static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
-{
- s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
- u32 invm_dword;
- u16 i;
- u8 record_type, word_address;
-
- DEBUGFUNC("e1000_read_invm_word_i210");
-
- for (i = 0; i < E1000_INVM_SIZE; i++) {
- invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
- /* Get record type */
- record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
- if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
- break;
- if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
- i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
- if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
- i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
- if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
- word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
- if (word_address == address) {
- *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
- DEBUGOUT2("Read INVM Word 0x%02x = %x",
- address, *data);
- status = E1000_SUCCESS;
- break;
- }
- }
- }
- if (status != E1000_SUCCESS)
- DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
- return status;
-}
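-/*
- * iNVM (OTP) record layout as decoded above, per the INVM_DWORD_TO_*
- * macros in e1000_i210.h: bits 0-2 hold the record type, bits 9-15 the
- * word address and bits 16-31 the word data. For example, the dword
- * 0x12340001 decodes to record type 0x1 (word autoload), word address 0
- * and word data 0x1234.
- */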
-
-/**
- * e1000_read_invm_i210 - Read invm wrapper function for I210/I211
- * @hw: pointer to the HW structure
- * @offset: the word offset (aka eeprom offset) to read
- * @words: number of words to read (unused; kept for the ops signature)
- * @data: pointer to the data read
- *
- * Wrapper function to return data formerly found in the NVM.
- **/
-static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
- u16 E1000_UNUSEDARG words, u16 *data)
-{
- s32 ret_val = E1000_SUCCESS;
-
- DEBUGFUNC("e1000_read_invm_i210");
-
- /* Only the MAC addr is required to be present in the iNVM */
- switch (offset) {
- case NVM_MAC_ADDR:
- ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
- ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1,
- &data[1]);
- ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2,
- &data[2]);
- if (ret_val != E1000_SUCCESS)
- DEBUGOUT("MAC Addr not found in iNVM\n");
- break;
- case NVM_INIT_CTRL_2:
- ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
- *data = NVM_INIT_CTRL_2_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
- }
- break;
- case NVM_INIT_CTRL_4:
- ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
- *data = NVM_INIT_CTRL_4_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
- }
- break;
- case NVM_LED_1_CFG:
- ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
- *data = NVM_LED_1_CFG_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
- }
- break;
- case NVM_LED_0_2_CFG:
- ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
- *data = NVM_LED_0_2_CFG_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
- }
- break;
- case NVM_ID_LED_SETTINGS:
- ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
- *data = ID_LED_RESERVED_FFFF;
- ret_val = E1000_SUCCESS;
- }
- break;
- case NVM_SUB_DEV_ID:
- *data = hw->subsystem_device_id;
- break;
- case NVM_SUB_VEN_ID:
- *data = hw->subsystem_vendor_id;
- break;
- case NVM_DEV_ID:
- *data = hw->device_id;
- break;
- case NVM_VEN_ID:
- *data = hw->vendor_id;
- break;
- default:
- DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
- *data = NVM_RESERVED_WORD;
- break;
- }
- return ret_val;
-}
-
-/**
- * e1000_read_invm_version - Reads iNVM version and image type
- * @hw: pointer to the HW structure
- * @invm_ver: version structure for the version read
- *
- * Reads iNVM version and image type.
- **/
-s32 e1000_read_invm_version(struct e1000_hw *hw,
- struct e1000_fw_version *invm_ver)
-{
- u32 *record = NULL;
- u32 *next_record = NULL;
- u32 i = 0;
- u32 invm_dword = 0;
- u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
- E1000_INVM_RECORD_SIZE_IN_BYTES);
- u32 buffer[E1000_INVM_SIZE];
- s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
- u16 version = 0;
-
- DEBUGFUNC("e1000_read_invm_version");
-
- /* Read iNVM memory */
- for (i = 0; i < E1000_INVM_SIZE; i++) {
- invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
- buffer[i] = invm_dword;
- }
-
- /* Read version number */
- for (i = 1; i < invm_blocks; i++) {
- record = &buffer[invm_blocks - i];
- next_record = &buffer[invm_blocks - i + 1];
-
- /* Check if we have first version location used */
- if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
- version = 0;
- status = E1000_SUCCESS;
- break;
- }
- /* Check if we have second version location used */
- else if ((i == 1) &&
- ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
- version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
- status = E1000_SUCCESS;
- break;
- }
- /*
- * Check if we have odd version location
- * used and it is the last one used
- */
- else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
- ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
- (i != 1))) {
- version = (*next_record & E1000_INVM_VER_FIELD_TWO)
- >> 13;
- status = E1000_SUCCESS;
- break;
- }
- /*
- * Check if we have even version location
- * used and it is the last one used
- */
- else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
- ((*record & 0x3) == 0)) {
- version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
- status = E1000_SUCCESS;
- break;
- }
- }
-
- if (status == E1000_SUCCESS) {
- invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
- >> E1000_INVM_MAJOR_SHIFT;
- invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
- }
- /* Read Image Type */
- for (i = 1; i < invm_blocks; i++) {
- record = &buffer[invm_blocks - i];
- next_record = &buffer[invm_blocks - i + 1];
-
- /* Check if we have image type in first location used */
- if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
- invm_ver->invm_img_type = 0;
- status = E1000_SUCCESS;
- break;
- }
- /* Otherwise check whether the image type is in the last used location */
- else if ((((*record & 0x3) == 0) &&
- ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
- ((((*record & 0x3) != 0) && (i != 1)))) {
- invm_ver->invm_img_type =
- (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
- status = E1000_SUCCESS;
- break;
- }
- }
- return status;
-}
-
-/**
- * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
- * @hw: pointer to the HW structure
- *
- * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
- * and then verifies that the sum of the EEPROM is equal to 0xBABA.
- **/
-s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
-{
- s32 status = E1000_SUCCESS;
- s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
-
- DEBUGFUNC("e1000_validate_nvm_checksum_i210");
-
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
-
- /*
- * Temporarily replace the semaphore-grabbing read function
- * with one that skips semaphore handling, since we already
- * hold the semaphore here.
- */
- read_op_ptr = hw->nvm.ops.read;
- hw->nvm.ops.read = e1000_read_nvm_eerd;
-
- status = e1000_validate_nvm_checksum_generic(hw);
-
- /* Revert original read operation. */
- hw->nvm.ops.read = read_op_ptr;
-
- hw->nvm.ops.release(hw);
- } else {
- status = E1000_ERR_SWFW_SYNC;
- }
-
- return status;
-}
-
-
-/**
- * e1000_update_nvm_checksum_i210 - Update EEPROM checksum
- * @hw: pointer to the HW structure
- *
- * Updates the EEPROM checksum by reading/adding each word of the EEPROM
- * up to the checksum. Then calculates the EEPROM checksum and writes the
- * value to the EEPROM. Finally, the EEPROM data is committed to the flash.
- **/
-s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
- u16 checksum = 0;
- u16 i, nvm_data;
-
- DEBUGFUNC("e1000_update_nvm_checksum_i210");
-
- /*
- * Read the first word from the EEPROM. If this times out or fails, do
- * not continue or we could be in for a very long wait while every
- * EEPROM read fails
- */
- ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
- if (ret_val != E1000_SUCCESS) {
- DEBUGOUT("EEPROM read failed\n");
- goto out;
- }
-
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
- /*
- * Do not use hw->nvm.ops.write, hw->nvm.ops.read
- * because we do not want to take the synchronization
- * semaphores twice here.
- */
-
- for (i = 0; i < NVM_CHECKSUM_REG; i++) {
- ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
- if (ret_val) {
- hw->nvm.ops.release(hw);
- DEBUGOUT("NVM Read Error while updating checksum.\n");
- goto out;
- }
- checksum += nvm_data;
- }
- checksum = (u16) NVM_SUM - checksum;
- ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
- &checksum);
- if (ret_val != E1000_SUCCESS) {
- hw->nvm.ops.release(hw);
- DEBUGOUT("NVM Write Error while updating checksum.\n");
- goto out;
- }
-
- hw->nvm.ops.release(hw);
-
- ret_val = e1000_update_flash_i210(hw);
- } else {
- ret_val = E1000_ERR_SWFW_SYNC;
- }
-out:
- return ret_val;
-}
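-/*
- * Checksum arithmetic used above: the data words plus the checksum word
- * must sum to NVM_SUM (0xBABA, as the validate routine above notes). So
- * if the words up to NVM_CHECKSUM_REG sum to 0x1234, the value written to
- * the checksum word is 0xBABA - 0x1234 = 0xA886.
- */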
-
-/**
- * e1000_get_flash_presence_i210 - Check if flash device is detected.
- * @hw: pointer to the HW structure
- *
- **/
-bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
-{
- u32 eec = 0;
- bool ret_val = false;
-
- DEBUGFUNC("e1000_get_flash_presence_i210");
-
- eec = E1000_READ_REG(hw, E1000_EECD);
-
- if (eec & E1000_EECD_FLASH_DETECTED_I210)
- ret_val = true;
-
- return ret_val;
-}
-
-/**
- * e1000_update_flash_i210 - Commit EEPROM to the flash
- * @hw: pointer to the HW structure
- *
- **/
-s32 e1000_update_flash_i210(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
- u32 flup;
-
- DEBUGFUNC("e1000_update_flash_i210");
-
- ret_val = e1000_pool_flash_update_done_i210(hw);
- if (ret_val == -E1000_ERR_NVM) {
- DEBUGOUT("Flash update time out\n");
- goto out;
- }
-
- flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
- E1000_WRITE_REG(hw, E1000_EECD, flup);
-
- ret_val = e1000_pool_flash_update_done_i210(hw);
- if (ret_val == E1000_SUCCESS)
- DEBUGOUT("Flash update complete\n");
- else
- DEBUGOUT("Flash update time out\n");
-
-out:
- return ret_val;
-}
-
-/**
- * e1000_pool_flash_update_done_i210 - Poll FLUDONE status.
- * @hw: pointer to the HW structure
- *
- **/
-s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
-{
- s32 ret_val = -E1000_ERR_NVM;
- u32 i, reg;
-
- DEBUGFUNC("e1000_pool_flash_update_done_i210");
-
- for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
- reg = E1000_READ_REG(hw, E1000_EECD);
- if (reg & E1000_EECD_FLUDONE_I210) {
- ret_val = E1000_SUCCESS;
- break;
- }
- usec_delay(5);
- }
-
- return ret_val;
-}
-
-/**
- * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
- * @hw: pointer to the HW structure
- *
- * Initialize the i210/i211 NVM parameters and function pointers.
- **/
-static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
- struct e1000_nvm_info *nvm = &hw->nvm;
-
- DEBUGFUNC("e1000_init_nvm_params_i210");
-
- ret_val = e1000_init_nvm_params_82575(hw);
- nvm->ops.acquire = e1000_acquire_nvm_i210;
- nvm->ops.release = e1000_release_nvm_i210;
- nvm->ops.valid_led_default = e1000_valid_led_default_i210;
- if (e1000_get_flash_presence_i210(hw)) {
- hw->nvm.type = e1000_nvm_flash_hw;
- nvm->ops.read = e1000_read_nvm_srrd_i210;
- nvm->ops.write = e1000_write_nvm_srwr_i210;
- nvm->ops.validate = e1000_validate_nvm_checksum_i210;
- nvm->ops.update = e1000_update_nvm_checksum_i210;
- } else {
- hw->nvm.type = e1000_nvm_invm;
- nvm->ops.read = e1000_read_invm_i210;
- nvm->ops.write = e1000_null_write_nvm;
- nvm->ops.validate = e1000_null_ops_generic;
- nvm->ops.update = e1000_null_ops_generic;
- }
- return ret_val;
-}
-
-/**
- * e1000_init_function_pointers_i210 - Init func ptrs.
- * @hw: pointer to the HW structure
- *
- * Called to initialize all function pointers and parameters.
- **/
-void e1000_init_function_pointers_i210(struct e1000_hw *hw)
-{
- e1000_init_function_pointers_82575(hw);
- hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
-
- return;
-}
-
-/**
- * e1000_valid_led_default_i210 - Verify a valid default LED config
- * @hw: pointer to the HW structure
- * @data: pointer to the NVM (EEPROM)
- *
- * Read the EEPROM for the current default LED configuration. If the
- * LED configuration is not valid, set it to a valid LED configuration.
- **/
-static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
-{
- s32 ret_val;
-
- DEBUGFUNC("e1000_valid_led_default_i210");
-
- ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- goto out;
- }
-
- if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
- switch (hw->phy.media_type) {
- case e1000_media_type_internal_serdes:
- *data = ID_LED_DEFAULT_I210_SERDES;
- break;
- case e1000_media_type_copper:
- default:
- *data = ID_LED_DEFAULT_I210;
- break;
- }
- }
-out:
- return ret_val;
-}
-
-/**
- * __e1000_access_xmdio_reg - Read/write XMDIO register
- * @hw: pointer to the HW structure
- * @address: XMDIO address to program
- * @dev_addr: device address to program
- * @data: pointer to value to read/write from/to the XMDIO address
- * @read: boolean flag to indicate read or write
- **/
-static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address,
- u8 dev_addr, u16 *data, bool read)
-{
- s32 ret_val = E1000_SUCCESS;
-
- DEBUGFUNC("__e1000_access_xmdio_reg");
-
- ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
- if (ret_val)
- return ret_val;
-
- ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
- if (ret_val)
- return ret_val;
-
- ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
- dev_addr);
- if (ret_val)
- return ret_val;
-
- if (read)
- ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
- else
- ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
- if (ret_val)
- return ret_val;
-
- /* Recalibrate the device back to 0 */
- ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
- if (ret_val)
- return ret_val;
-
- return ret_val;
-}
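-/*
- * The sequence above is the standard indirect MMD access pattern: program
- * the device address into MMDAC, the register address into MMDAAD, switch
- * MMDAC to data mode, then move the data through MMDAAD. A hypothetical
- * caller (illustrative register/device numbers) might look like:
- */
-#if 0 /* example only */
-static s32 example_read_mmd(struct e1000_hw *hw, u16 *val)
-{
- /* MMD 7 (auto-negotiation), register 0x3C (EEE advertisement) */
- return e1000_read_xmdio_reg(hw, 0x3C, 7, val);
-}
-#endif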
-
-/**
- * e1000_read_xmdio_reg - Read XMDIO register
- * @hw: pointer to the HW structure
- * @addr: XMDIO address to program
- * @dev_addr: device address to program
- * @data: value to be read from the EMI address
- **/
-s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
-{
- DEBUGFUNC("e1000_read_xmdio_reg");
-
- return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true);
-}
-
-/**
- * e1000_write_xmdio_reg - Write XMDIO register
- * @hw: pointer to the HW structure
- * @addr: XMDIO address to program
- * @dev_addr: device address to program
- * @data: value to be written to the XMDIO address
- **/
-s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
-{
- DEBUGFUNC("e1000_read_xmdio_reg");
-
- return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false);
-}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.h
deleted file mode 100755
index 57b2eb56..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _E1000_I210_H_
-#define _E1000_I210_H_
-
-bool e1000_get_flash_presence_i210(struct e1000_hw *hw);
-s32 e1000_update_flash_i210(struct e1000_hw *hw);
-s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw);
-s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw);
-s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
- u16 words, u16 *data);
-s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
- u16 words, u16 *data);
-s32 e1000_read_invm_version(struct e1000_hw *hw,
- struct e1000_fw_version *invm_ver);
-s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
- u16 *data);
-s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
- u16 data);
-
-#define E1000_STM_OPCODE 0xDB00
-#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
-
-#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \
- (u8)((invm_dword) & 0x7)
-#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \
- (u8)(((invm_dword) & 0x0000FE00) >> 9)
-#define INVM_DWORD_TO_WORD_DATA(invm_dword) \
- (u16)(((invm_dword) & 0xFFFF0000) >> 16)
-
-enum E1000_INVM_STRUCTURE_TYPE {
- E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00,
- E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01,
- E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02,
- E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03,
- E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04,
- E1000_INVM_INVALIDATED_STRUCTURE = 0x0F,
-};
-
-#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
-#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
-#define E1000_INVM_ULT_BYTES_SIZE 8
-#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
-#define E1000_INVM_VER_FIELD_ONE 0x1FF8
-#define E1000_INVM_VER_FIELD_TWO 0x7FE000
-#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
-
-#define E1000_INVM_MAJOR_MASK 0x3F0
-#define E1000_INVM_MINOR_MASK 0xF
-#define E1000_INVM_MAJOR_SHIFT 4
-
-#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_OFF1_OFF2))
-#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_OFF1_ON2))
-
-/* NVM offset defaults for I211 devices */
-#define NVM_INIT_CTRL_2_DEFAULT_I211 0x7243
-#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1
-#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
-#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
-#endif
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c
deleted file mode 100755
index 4ee59ba9..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c
+++ /dev/null
@@ -1,2096 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "e1000_api.h"
-
-static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
-static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
-static void e1000_config_collision_dist_generic(struct e1000_hw *hw);
-static void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
-
-/**
- * e1000_init_mac_ops_generic - Initialize MAC function pointers
- * @hw: pointer to the HW structure
- *
- * Sets up the function pointers to no-op functions
- **/
-void e1000_init_mac_ops_generic(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
- DEBUGFUNC("e1000_init_mac_ops_generic");
-
- /* General Setup */
- mac->ops.init_params = e1000_null_ops_generic;
- mac->ops.init_hw = e1000_null_ops_generic;
- mac->ops.reset_hw = e1000_null_ops_generic;
- mac->ops.setup_physical_interface = e1000_null_ops_generic;
- mac->ops.get_bus_info = e1000_null_ops_generic;
- mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie;
- mac->ops.read_mac_addr = e1000_read_mac_addr_generic;
- mac->ops.config_collision_dist = e1000_config_collision_dist_generic;
- mac->ops.clear_hw_cntrs = e1000_null_mac_generic;
- /* LED */
- mac->ops.cleanup_led = e1000_null_ops_generic;
- mac->ops.setup_led = e1000_null_ops_generic;
- mac->ops.blink_led = e1000_null_ops_generic;
- mac->ops.led_on = e1000_null_ops_generic;
- mac->ops.led_off = e1000_null_ops_generic;
- /* LINK */
- mac->ops.setup_link = e1000_null_ops_generic;
- mac->ops.get_link_up_info = e1000_null_link_info;
- mac->ops.check_for_link = e1000_null_ops_generic;
- /* Management */
- mac->ops.check_mng_mode = e1000_null_mng_mode;
- /* VLAN, MC, etc. */
- mac->ops.update_mc_addr_list = e1000_null_update_mc;
- mac->ops.clear_vfta = e1000_null_mac_generic;
- mac->ops.write_vfta = e1000_null_write_vfta;
- mac->ops.rar_set = e1000_rar_set_generic;
- mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
-}
-
-/**
- * e1000_null_ops_generic - No-op function, returns 0
- * @hw: pointer to the HW structure
- **/
-s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw)
-{
- DEBUGFUNC("e1000_null_ops_generic");
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_null_mac_generic - No-op function, return void
- * @hw: pointer to the HW structure
- **/
-void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw)
-{
- DEBUGFUNC("e1000_null_mac_generic");
- return;
-}
-
-/**
- * e1000_null_link_info - No-op function, return 0
- * @hw: pointer to the HW structure
- **/
-s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw,
- u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d)
-{
- DEBUGFUNC("e1000_null_link_info");
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_null_mng_mode - No-op function, return false
- * @hw: pointer to the HW structure
- **/
-bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw)
-{
- DEBUGFUNC("e1000_null_mng_mode");
- return false;
-}
-
-/**
- * e1000_null_update_mc - No-op function, return void
- * @hw: pointer to the HW structure
- **/
-void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw,
- u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
-{
- DEBUGFUNC("e1000_null_update_mc");
- return;
-}
-
-/**
- * e1000_null_write_vfta - No-op function, return void
- * @hw: pointer to the HW structure
- **/
-void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw,
- u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b)
-{
- DEBUGFUNC("e1000_null_write_vfta");
- return;
-}
-
-/**
- * e1000_null_rar_set - No-op function, return void
- * @hw: pointer to the HW structure
- **/
-void e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw,
- u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
-{
- DEBUGFUNC("e1000_null_rar_set");
- return;
-}
-
-/**
- * e1000_get_bus_info_pcie_generic - Get PCIe bus information
- * @hw: pointer to the HW structure
- *
- * Determines and stores the system bus information for a particular
- * network interface. The following bus information is determined and stored:
- * bus speed, bus width, type (PCIe), and PCIe function.
- **/
-s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
- struct e1000_bus_info *bus = &hw->bus;
- s32 ret_val;
- u16 pcie_link_status;
-
- DEBUGFUNC("e1000_get_bus_info_pcie_generic");
-
- bus->type = e1000_bus_type_pci_express;
-
- ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS,
- &pcie_link_status);
- if (ret_val) {
- bus->width = e1000_bus_width_unknown;
- bus->speed = e1000_bus_speed_unknown;
- } else {
- switch (pcie_link_status & PCIE_LINK_SPEED_MASK) {
- case PCIE_LINK_SPEED_2500:
- bus->speed = e1000_bus_speed_2500;
- break;
- case PCIE_LINK_SPEED_5000:
- bus->speed = e1000_bus_speed_5000;
- break;
- default:
- bus->speed = e1000_bus_speed_unknown;
- break;
- }
-
- bus->width = (enum e1000_bus_width)((pcie_link_status &
- PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT);
- }
-
- mac->ops.set_lan_id(hw);
-
- return E1000_SUCCESS;
-}
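-/*
- * Decode example for the PCIe link status word above, assuming the
- * standard capability layout (speed in bits 3:0, width in bits 9:4): a
- * value of 0x1042 yields speed field 2, i.e. e1000_bus_speed_5000
- * (5 GT/s), and a negotiated width of x4.
- */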
-
-/**
- * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
- *
- * @hw: pointer to the HW structure
- *
- * Determines the LAN function id by reading memory-mapped registers
- * and swaps the port value if requested.
- **/
-static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
-{
- struct e1000_bus_info *bus = &hw->bus;
- u32 reg;
-
- /* The status register reports the correct function number
- * for the device regardless of function swap state.
- */
- reg = E1000_READ_REG(hw, E1000_STATUS);
- bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
-}
-
-/**
- * e1000_set_lan_id_single_port - Set LAN id for a single port device
- * @hw: pointer to the HW structure
- *
- * Sets the LAN function id to zero for a single port device.
- **/
-void e1000_set_lan_id_single_port(struct e1000_hw *hw)
-{
- struct e1000_bus_info *bus = &hw->bus;
-
- bus->func = 0;
-}
-
-/**
- * e1000_clear_vfta_generic - Clear VLAN filter table
- * @hw: pointer to the HW structure
- *
- * Clears the register array which contains the VLAN filter table by
- * setting all the values to 0.
- **/
-void e1000_clear_vfta_generic(struct e1000_hw *hw)
-{
- u32 offset;
-
- DEBUGFUNC("e1000_clear_vfta_generic");
-
- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
- E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
- E1000_WRITE_FLUSH(hw);
- }
-}
-
-/**
- * e1000_write_vfta_generic - Write value to VLAN filter table
- * @hw: pointer to the HW structure
- * @offset: register offset in VLAN filter table
- * @value: register value written to VLAN filter table
- *
- * Writes value at the given offset in the register array which stores
- * the VLAN filter table.
- **/
-void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
-{
- DEBUGFUNC("e1000_write_vfta_generic");
-
- E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
- E1000_WRITE_FLUSH(hw);
-}
-
-/**
- * e1000_init_rx_addrs_generic - Initialize receive addresses
- * @hw: pointer to the HW structure
- * @rar_count: number of receive address registers
- *
- * Sets up the receive address registers by setting the base receive address
- * register to the device's MAC address and clearing all the other receive
- * address registers to 0.
- **/
-void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
-{
- u32 i;
- u8 mac_addr[ETH_ADDR_LEN] = {0};
-
- DEBUGFUNC("e1000_init_rx_addrs_generic");
-
- /* Setup the receive address */
- DEBUGOUT("Programming MAC Address into RAR[0]\n");
-
- hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
-
- /* Zero out the other (rar_entry_count - 1) receive addresses */
- DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
- for (i = 1; i < rar_count; i++)
- hw->mac.ops.rar_set(hw, mac_addr, i);
-}
-
-/**
- * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
- * @hw: pointer to the HW structure
- *
- * Checks the NVM for an alternate MAC address. An alternate MAC address
- * can be set up by pre-boot software and must be treated like a permanent
- * address, overriding the actual permanent MAC address. If an
- * alternate MAC address is found it is programmed into RAR0, replacing
- * the permanent address that was installed into RAR0 by the Si on reset.
- * This function will return SUCCESS unless it encounters an error while
- * reading the EEPROM.
- **/
-s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
-{
- u32 i;
- s32 ret_val;
- u16 offset, nvm_alt_mac_addr_offset, nvm_data;
- u8 alt_mac_addr[ETH_ADDR_LEN];
-
- DEBUGFUNC("e1000_check_alt_mac_addr_generic");
-
- ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data);
- if (ret_val)
- return ret_val;
-
-
- /* Alternate MAC address is handled by the option ROM for 82580
- * and newer. SW support not required.
- */
- if (hw->mac.type >= e1000_82580)
- return E1000_SUCCESS;
-
- ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
- &nvm_alt_mac_addr_offset);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
- (nvm_alt_mac_addr_offset == 0x0000))
- /* There is no Alternate MAC Address */
- return E1000_SUCCESS;
-
- if (hw->bus.func == E1000_FUNC_1)
- nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
- if (hw->bus.func == E1000_FUNC_2)
- nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
-
- if (hw->bus.func == E1000_FUNC_3)
- nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
- for (i = 0; i < ETH_ADDR_LEN; i += 2) {
- offset = nvm_alt_mac_addr_offset + (i >> 1);
- ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
- alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
- }
-
- /* if multicast bit is set, the alternate address will not be used */
- if (alt_mac_addr[0] & 0x01) {
- DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
- return E1000_SUCCESS;
- }
-
- /* We have a valid alternate MAC address, and we want to treat it the
- * same as the normal permanent MAC address stored by the HW into the
- * RAR. Do this by mapping this address into RAR0.
- */
- hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_rar_set_generic - Set receive address register
- * @hw: pointer to the HW structure
- * @addr: pointer to the receive address
- * @index: receive address array register
- *
- * Sets the receive address array register at index to the address passed
- * in by addr.
- **/
-static void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
-{
- u32 rar_low, rar_high;
-
- DEBUGFUNC("e1000_rar_set_generic");
-
- /* HW expects these in little endian so we reverse the byte order
- * from network order (big endian) to little endian
- */
- rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
- ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
-
- rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
-
- /* If MAC address zero, no need to set the AV bit */
- if (rar_low || rar_high)
- rar_high |= E1000_RAH_AV;
-
- /* Some bridges will combine consecutive 32-bit writes into
- * a single burst write, which will malfunction on some parts.
- * The flushes avoid this.
- */
- E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
- E1000_WRITE_FLUSH(hw);
- E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
- E1000_WRITE_FLUSH(hw);
-}
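-/*
- * Byte-order example for the packing above: the MAC address
- * 00:1B:21:AA:BB:CC is written as RAL = 0xAA211B00 and
- * RAH = E1000_RAH_AV | 0x0000CCBB.
- */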
-
-/**
- * e1000_hash_mc_addr_generic - Generate a multicast hash value
- * @hw: pointer to the HW structure
- * @mc_addr: pointer to a multicast address
- *
- * Generates a multicast address hash value which is used to determine
- * the multicast filter table array address and new table value.
- **/
-u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
-{
- u32 hash_value, hash_mask;
- u8 bit_shift = 0;
-
- DEBUGFUNC("e1000_hash_mc_addr_generic");
-
- /* Register count multiplied by bits per register */
- hash_mask = (hw->mac.mta_reg_count * 32) - 1;
-
- /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
- * where 0xFF would still fall within the hash mask.
- */
- while (hash_mask >> bit_shift != 0xFF)
- bit_shift++;
-
- /* The portion of the address that is used for the hash table
- * is determined by the mc_filter_type setting.
- * The algorithm is such that there is a total of 8 bits of shifting.
- * The bit_shift for a mc_filter_type of 0 represents the number of
- * left-shifts where the MSB of mc_addr[5] would still fall within
- * the hash_mask. Case 0 does this exactly. Since there are a total
- * of 8 bits of shifting, then mc_addr[4] will shift right the
- * remaining number of bits. Thus 8 - bit_shift. The rest of the
- * cases are a variation of this algorithm...essentially raising the
- * number of bits to shift mc_addr[5] left, while still keeping the
- * 8-bit shifting total.
- *
- * For example, given the following Destination MAC Address and an
- * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
- * we can see that the bit_shift for case 0 is 4. These are the hash
- * values resulting from each mc_filter_type...
- * [0] [1] [2] [3] [4] [5]
- * 01 AA 00 12 34 56
- * LSB MSB
- *
- * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
- * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
- * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
- * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
- */
- switch (hw->mac.mc_filter_type) {
- default:
- case 0:
- break;
- case 1:
- bit_shift += 1;
- break;
- case 2:
- bit_shift += 2;
- break;
- case 3:
- bit_shift += 4;
- break;
- }
-
- hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
- (((u16) mc_addr[5]) << bit_shift)));
-
- return hash_value;
-}
-
-/**
- * e1000_update_mc_addr_list_generic - Update Multicast addresses
- * @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
- *
- * Updates entire Multicast Table Array.
- * The caller must have a packed mc_addr_list of multicast addresses.
- **/
-void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count)
-{
- u32 hash_value, hash_bit, hash_reg;
- int i;
-
- DEBUGFUNC("e1000_update_mc_addr_list_generic");
-
- /* clear mta_shadow */
- memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
-
- /* update mta_shadow from mc_addr_list */
- for (i = 0; (u32) i < mc_addr_count; i++) {
- hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list);
-
- hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
- hash_bit = hash_value & 0x1F;
-
- hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
- mc_addr_list += (ETH_ADDR_LEN);
- }
-
- /* replace the entire MTA table */
- for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
- E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
- E1000_WRITE_FLUSH(hw);
-}
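-/*
- * Mapping a hash value into the MTA: using the example value 0x563 from
- * e1000_hash_mc_addr_generic above with a 128-register MTA,
- * hash_reg = (0x563 >> 5) & 0x7F = 43 and hash_bit = 0x563 & 0x1F = 3,
- * so bit 3 of MTA[43] is set.
- */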
-
-/**
- * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
- * @hw: pointer to the HW structure
- *
- * Clears the base hardware counters by reading the counter registers.
- **/
-void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
-{
- DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
-
- E1000_READ_REG(hw, E1000_CRCERRS);
- E1000_READ_REG(hw, E1000_SYMERRS);
- E1000_READ_REG(hw, E1000_MPC);
- E1000_READ_REG(hw, E1000_SCC);
- E1000_READ_REG(hw, E1000_ECOL);
- E1000_READ_REG(hw, E1000_MCC);
- E1000_READ_REG(hw, E1000_LATECOL);
- E1000_READ_REG(hw, E1000_COLC);
- E1000_READ_REG(hw, E1000_DC);
- E1000_READ_REG(hw, E1000_SEC);
- E1000_READ_REG(hw, E1000_RLEC);
- E1000_READ_REG(hw, E1000_XONRXC);
- E1000_READ_REG(hw, E1000_XONTXC);
- E1000_READ_REG(hw, E1000_XOFFRXC);
- E1000_READ_REG(hw, E1000_XOFFTXC);
- E1000_READ_REG(hw, E1000_FCRUC);
- E1000_READ_REG(hw, E1000_GPRC);
- E1000_READ_REG(hw, E1000_BPRC);
- E1000_READ_REG(hw, E1000_MPRC);
- E1000_READ_REG(hw, E1000_GPTC);
- E1000_READ_REG(hw, E1000_GORCL);
- E1000_READ_REG(hw, E1000_GORCH);
- E1000_READ_REG(hw, E1000_GOTCL);
- E1000_READ_REG(hw, E1000_GOTCH);
- E1000_READ_REG(hw, E1000_RNBC);
- E1000_READ_REG(hw, E1000_RUC);
- E1000_READ_REG(hw, E1000_RFC);
- E1000_READ_REG(hw, E1000_ROC);
- E1000_READ_REG(hw, E1000_RJC);
- E1000_READ_REG(hw, E1000_TORL);
- E1000_READ_REG(hw, E1000_TORH);
- E1000_READ_REG(hw, E1000_TOTL);
- E1000_READ_REG(hw, E1000_TOTH);
- E1000_READ_REG(hw, E1000_TPR);
- E1000_READ_REG(hw, E1000_TPT);
- E1000_READ_REG(hw, E1000_MPTC);
- E1000_READ_REG(hw, E1000_BPTC);
-}
-
-/**
- * e1000_check_for_copper_link_generic - Check for link (Copper)
- * @hw: pointer to the HW structure
- *
- * Checks to see if the link status of the hardware has changed. If a
- * change in link status has been detected, then we read the PHY registers
- * to get the current speed/duplex if link exists.
- **/
-s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
- s32 ret_val;
- bool link;
-
- DEBUGFUNC("e1000_check_for_copper_link");
-
- /* We only want to go out to the PHY registers to see if Auto-Neg
- * has completed and/or if our link status has changed. The
- * get_link_status flag is set upon receiving a Link Status
- * Change or Rx Sequence Error interrupt.
- */
- if (!mac->get_link_status)
- return E1000_SUCCESS;
-
- /* First we want to see if the MII Status Register reports
- * link. If so, then we want to get the current speed/duplex
- * of the PHY.
- */
- ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
- if (ret_val)
- return ret_val;
-
- if (!link)
- return E1000_SUCCESS; /* No link detected */
-
- mac->get_link_status = false;
-
- /* Check if there was DownShift, must be checked
- * immediately after link-up
- */
- e1000_check_downshift_generic(hw);
-
- /* If we are forcing speed/duplex, then we simply return since
- * we have already determined whether we have link or not.
- */
- if (!mac->autoneg)
- return -E1000_ERR_CONFIG;
-
- /* Auto-Neg is enabled. Auto Speed Detection takes care
- * of MAC speed/duplex configuration. So we only need to
- * configure Collision Distance in the MAC.
- */
- mac->ops.config_collision_dist(hw);
-
- /* Configure Flow Control now that Auto-Neg has completed.
- * First, we need to restore the desired flow control
- * settings because we may have had to re-autoneg with a
- * different link partner.
- */
- ret_val = e1000_config_fc_after_link_up_generic(hw);
- if (ret_val)
- DEBUGOUT("Error configuring flow control\n");
-
- return ret_val;
-}
-
-/**
- * e1000_check_for_fiber_link_generic - Check for link (Fiber)
- * @hw: pointer to the HW structure
- *
- * Checks for link up on the hardware. If link is not up and we have
- * a signal, then we need to force link up.
- **/
-s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
- u32 rxcw;
- u32 ctrl;
- u32 status;
- s32 ret_val;
-
- DEBUGFUNC("e1000_check_for_fiber_link_generic");
-
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
- status = E1000_READ_REG(hw, E1000_STATUS);
- rxcw = E1000_READ_REG(hw, E1000_RXCW);
-
- /* If we don't have link (auto-negotiation failed or link partner
- * cannot auto-negotiate), the cable is plugged in (we have signal),
- * and our link partner is not trying to auto-negotiate with us (we
- * are receiving idles or data), we need to force link up. We also
- * need to give auto-negotiation time to complete, in case the cable
- * was just plugged in. The autoneg_failed flag does this.
- */
- /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
- if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) &&
- !(rxcw & E1000_RXCW_C)) {
- if (!mac->autoneg_failed) {
- mac->autoneg_failed = true;
- return E1000_SUCCESS;
- }
- DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
-
- /* Disable auto-negotiation in the TXCW register */
- E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
-
- /* Force link-up and also force full-duplex. */
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
- ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-
- /* Configure Flow Control after forcing link up. */
- ret_val = e1000_config_fc_after_link_up_generic(hw);
- if (ret_val) {
- DEBUGOUT("Error configuring flow control\n");
- return ret_val;
- }
- } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- /* If we are forcing link and we are receiving /C/ ordered
- * sets, re-enable auto-negotiation in the TXCW register
- * and disable forced link in the Device Control register
- * in an attempt to auto-negotiate with our link partner.
- */
- DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
- E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
- E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
-
- mac->serdes_has_link = true;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_check_for_serdes_link_generic - Check for link (Serdes)
- * @hw: pointer to the HW structure
- *
- * Checks for link up on the hardware. If link is not up and we have
- * a signal, then we need to force link up.
- **/
-s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
- u32 rxcw;
- u32 ctrl;
- u32 status;
- s32 ret_val;
-
- DEBUGFUNC("e1000_check_for_serdes_link_generic");
-
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
- status = E1000_READ_REG(hw, E1000_STATUS);
- rxcw = E1000_READ_REG(hw, E1000_RXCW);
-
- /* If we don't have link (auto-negotiation failed or link partner
- * cannot auto-negotiate), and our link partner is not trying to
- * auto-negotiate with us (we are receiving idles or data),
- * we need to force link up. We also need to give auto-negotiation
- * time to complete.
- */
- /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
- if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) {
- if (!mac->autoneg_failed) {
- mac->autoneg_failed = true;
- return E1000_SUCCESS;
- }
- DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
-
- /* Disable auto-negotiation in the TXCW register */
- E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
-
- /* Force link-up and also force full-duplex. */
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
- ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-
- /* Configure Flow Control after forcing link up. */
- ret_val = e1000_config_fc_after_link_up_generic(hw);
- if (ret_val) {
- DEBUGOUT("Error configuring flow control\n");
- return ret_val;
- }
- } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- /* If we are forcing link and we are receiving /C/ ordered
- * sets, re-enable auto-negotiation in the TXCW register
- * and disable forced link in the Device Control register
- * in an attempt to auto-negotiate with our link partner.
- */
- DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
- E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
- E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
-
- mac->serdes_has_link = true;
- } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
- /* If we force link for non-auto-negotiation switch, check
- * link status based on MAC synchronization for internal
- * serdes media type.
- */
- /* SYNCH bit and IV bit are sticky. */
- usec_delay(10);
- rxcw = E1000_READ_REG(hw, E1000_RXCW);
- if (rxcw & E1000_RXCW_SYNCH) {
- if (!(rxcw & E1000_RXCW_IV)) {
- mac->serdes_has_link = true;
- DEBUGOUT("SERDES: Link up - forced.\n");
- }
- } else {
- mac->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - force failed.\n");
- }
- }
-
- if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
- status = E1000_READ_REG(hw, E1000_STATUS);
- if (status & E1000_STATUS_LU) {
- /* SYNCH bit and IV bit are sticky, so reread rxcw. */
- usec_delay(10);
- rxcw = E1000_READ_REG(hw, E1000_RXCW);
- if (rxcw & E1000_RXCW_SYNCH) {
- if (!(rxcw & E1000_RXCW_IV)) {
- mac->serdes_has_link = true;
- DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n");
- } else {
- mac->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n");
- }
- } else {
- mac->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - no sync.\n");
- }
- } else {
- mac->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - autoneg failed\n");
- }
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_set_default_fc_generic - Set flow control default values
- * @hw: pointer to the HW structure
- *
- * Read the EEPROM for the default values for flow control and store the
- * values.
- **/
-static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
-{
- s32 ret_val;
- u16 nvm_data;
-
- DEBUGFUNC("e1000_set_default_fc_generic");
-
- /* Read and store word 0x0F of the EEPROM. This word contains bits
- * that determine the hardware's default PAUSE (flow control) mode,
- * a bit that determines whether the HW defaults to enabling or
- * disabling auto-negotiation, and the direction of the
- * SW defined pins. If there is no SW over-ride of the flow
- * control setting, then the variable hw->fc will
- * be initialized based on a value in the EEPROM.
- */
- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
-
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
- hw->fc.requested_mode = e1000_fc_none;
- else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
- NVM_WORD0F_ASM_DIR)
- hw->fc.requested_mode = e1000_fc_tx_pause;
- else
- hw->fc.requested_mode = e1000_fc_full;
-
- return E1000_SUCCESS;
-}
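-/*
- * Decoding of NVM word 0x0F as implemented above: with both PAUSE bits
- * clear the default is e1000_fc_none; with only the ASM_DIR bit set it is
- * e1000_fc_tx_pause (asymmetric, transmit only); any other combination
- * yields e1000_fc_full.
- */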
-
-/**
- * e1000_setup_link_generic - Setup flow control and link settings
- * @hw: pointer to the HW structure
- *
- * Determines which flow control settings to use, then configures flow
- * control. Calls the appropriate media-specific link configuration
- * function. Assuming the adapter has a valid link partner, a valid link
- * should be established. Assumes the hardware has previously been reset
- * and the transmitter and receiver are not enabled.
- **/
-s32 e1000_setup_link_generic(struct e1000_hw *hw)
-{
- s32 ret_val;
-
- DEBUGFUNC("e1000_setup_link_generic");
-
- /* In the case of the phy reset being blocked, we already have a link.
- * We do not need to set it up again.
- */
- if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
- return E1000_SUCCESS;
-
- /* If requested flow control is set to default, set flow control
- * based on the EEPROM flow control settings.
- */
- if (hw->fc.requested_mode == e1000_fc_default) {
- ret_val = e1000_set_default_fc_generic(hw);
- if (ret_val)
- return ret_val;
- }
-
- /* Save off the requested flow control mode for use later. Depending
- * on the link partner's capabilities, we may or may not use this mode.
- */
- hw->fc.current_mode = hw->fc.requested_mode;
-
- DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
- hw->fc.current_mode);
-
- /* Call the necessary media_type subroutine to configure the link. */
- ret_val = hw->mac.ops.setup_physical_interface(hw);
- if (ret_val)
- return ret_val;
-
- /* Initialize the flow control address, type, and PAUSE timer
- * registers to their default values. This is done even if flow
- * control is disabled, because it does not hurt anything to
- * initialize these registers.
- */
- DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
- E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
- E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
- E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
-
- E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
-
- return e1000_set_fc_watermarks_generic(hw);
-}
-
-/**
- * e1000_commit_fc_settings_generic - Configure flow control
- * @hw: pointer to the HW structure
- *
- * Write the flow control settings to the Transmit Config Word Register (TXCW)
- * based on the flow control settings in e1000_mac_info.
- **/
-static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
- u32 txcw;
-
- DEBUGFUNC("e1000_commit_fc_settings_generic");
-
- /* Check for a software override of the flow control settings, and
- * setup the device accordingly. If auto-negotiation is enabled, then
- * software will have to set the "PAUSE" bits to the correct value in
- * the Transmit Config Word Register (TXCW) and re-start auto-
- * negotiation. However, if auto-negotiation is disabled, then
- * software will have to manually configure the two flow control enable
- * bits in the CTRL register.
- *
- * The possible values of the "fc" parameter are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames,
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but we
- * do not support receiving pause frames).
- * 3: Both Rx and Tx flow control (symmetric) are enabled.
- */
- switch (hw->fc.current_mode) {
- case e1000_fc_none:
- /* Flow control completely disabled by a software over-ride. */
- txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
- break;
- case e1000_fc_rx_pause:
- /* Rx Flow control is enabled and Tx Flow control is disabled
- * by a software over-ride. Since there really isn't a way to
- * advertise that we are capable of Rx Pause ONLY, we will
- * advertise that we support both symmetric and asymmetric Rx
- * PAUSE. Later, we will disable the adapter's ability to send
- * PAUSE frames.
- */
- txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
- break;
- case e1000_fc_tx_pause:
- /* Tx Flow control is enabled, and Rx Flow control is disabled,
- * by a software over-ride.
- */
- txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
- break;
- case e1000_fc_full:
- /* Flow control (both Rx and Tx) is enabled by a software
- * over-ride.
- */
- txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
- break;
- default:
- DEBUGOUT("Flow control param set incorrectly\n");
-		return -E1000_ERR_CONFIG;
- }
-
- E1000_WRITE_REG(hw, E1000_TXCW, txcw);
- mac->txcw = txcw;
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_poll_fiber_serdes_link_generic - Poll for link up
- * @hw: pointer to the HW structure
- *
- * Polls for link up by reading the status register; if link fails to come
- * up via auto-negotiation, the link is forced, provided a signal is detected.
- **/
-static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
- u32 i, status;
- s32 ret_val;
-
- DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
-
- /* If we have a signal (the cable is plugged in, or assumed true for
- * serdes media) then poll for a "Link-Up" indication in the Device
-	 * Status Register. Time out if a link isn't seen in 500 milliseconds
-	 * (auto-negotiation should complete in less than 500 milliseconds
-	 * even if the other end is doing it in SW).
- */
- for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
- msec_delay(10);
- status = E1000_READ_REG(hw, E1000_STATUS);
- if (status & E1000_STATUS_LU)
- break;
- }
- if (i == FIBER_LINK_UP_LIMIT) {
- DEBUGOUT("Never got a valid link from auto-neg!!!\n");
- mac->autoneg_failed = true;
- /* AutoNeg failed to achieve a link, so we'll call
- * mac->check_for_link. This routine will force the
- * link up if we detect a signal. This will allow us to
- * communicate with non-autonegotiating link partners.
- */
- ret_val = mac->ops.check_for_link(hw);
- if (ret_val) {
- DEBUGOUT("Error while checking for link\n");
- return ret_val;
- }
- mac->autoneg_failed = false;
- } else {
- mac->autoneg_failed = false;
- DEBUGOUT("Valid Link Found\n");
- }
-
- return E1000_SUCCESS;
-}
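
The wait loop above is one instance of a poll-until-set-or-timeout idiom that recurs throughout this file (link-up, semaphores, auto-read-done, master disable). A generic sketch of the idiom, reusing the driver's E1000_READ_REG and msec_delay; the helper name is hypothetical:

/* Sketch only: poll a status bit with a bounded number of retries.
 * Returns true if the bit was observed before the timeout.
 */
static bool sk_poll_bit(struct e1000_hw *hw, u32 reg, u32 bit,
			u32 tries, u32 step_ms)
{
	u32 i;

	for (i = 0; i < tries; i++) {
		msec_delay(step_ms);
		if (E1000_READ_REG(hw, reg) & bit)
			return true;
	}
	return false;	/* timed out */
}

With reg = E1000_STATUS, bit = E1000_STATUS_LU, tries = FIBER_LINK_UP_LIMIT and step_ms = 10 this reproduces the wait above.
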
-
-/**
- * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
- * @hw: pointer to the HW structure
- *
- * Configures collision distance and flow control for fiber and serdes
- * links. Upon successful setup, poll for link.
- **/
-s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
-{
- u32 ctrl;
- s32 ret_val;
-
- DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
-
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
-
- /* Take the link out of reset */
- ctrl &= ~E1000_CTRL_LRST;
-
- hw->mac.ops.config_collision_dist(hw);
-
- ret_val = e1000_commit_fc_settings_generic(hw);
- if (ret_val)
- return ret_val;
-
- /* Since auto-negotiation is enabled, take the link out of reset (the
- * link will be in reset, because we previously reset the chip). This
- * will restart auto-negotiation. If auto-negotiation is successful
- * then the link-up status bit will be set and the flow control enable
- * bits (RFCE and TFCE) will be set according to their negotiated value.
- */
- DEBUGOUT("Auto-negotiation enabled\n");
-
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
- E1000_WRITE_FLUSH(hw);
- msec_delay(1);
-
- /* For these adapters, the SW definable pin 1 is set when the optics
- * detect a signal. If we have a signal, then poll for a "Link-Up"
- * indication.
- */
- if (hw->phy.media_type == e1000_media_type_internal_serdes ||
- (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
- ret_val = e1000_poll_fiber_serdes_link_generic(hw);
- } else {
- DEBUGOUT("No signal detected\n");
- }
-
- return ret_val;
-}
-
-/**
- * e1000_config_collision_dist_generic - Configure collision distance
- * @hw: pointer to the HW structure
- *
- * Configures the collision distance to the default value and is used
- * during link setup.
- **/
-static void e1000_config_collision_dist_generic(struct e1000_hw *hw)
-{
- u32 tctl;
-
- DEBUGFUNC("e1000_config_collision_dist_generic");
-
- tctl = E1000_READ_REG(hw, E1000_TCTL);
-
- tctl &= ~E1000_TCTL_COLD;
- tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
-
- E1000_WRITE_REG(hw, E1000_TCTL, tctl);
- E1000_WRITE_FLUSH(hw);
-}
-
-/**
- * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
- * @hw: pointer to the HW structure
- *
- * Sets the flow control high/low threshold (watermark) registers. If
- * flow control XON frame transmission is enabled, then set XON frame
- * transmission as well.
- **/
-s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
-{
- u32 fcrtl = 0, fcrth = 0;
-
- DEBUGFUNC("e1000_set_fc_watermarks_generic");
-
- /* Set the flow control receive threshold registers. Normally,
- * these registers will be set to a default threshold that may be
- * adjusted later by the driver's runtime code. However, if the
- * ability to transmit pause frames is not enabled, then these
- * registers will be set to 0.
- */
- if (hw->fc.current_mode & e1000_fc_tx_pause) {
- /* We need to set up the Receive Threshold high and low water
- * marks as well as (optionally) enabling the transmission of
- * XON frames.
- */
- fcrtl = hw->fc.low_water;
- if (hw->fc.send_xon)
- fcrtl |= E1000_FCRTL_XONE;
-
- fcrth = hw->fc.high_water;
- }
- E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
- E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_force_mac_fc_generic - Force the MAC's flow control settings
- * @hw: pointer to the HW structure
- *
- * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
- * device control register to reflect the adapter settings. TFCE and RFCE
- * need to be explicitly set by software when a copper PHY is used because
- * autonegotiation is managed by the PHY rather than the MAC. Software must
- * also configure these bits when link is forced on a fiber connection.
- **/
-s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
-{
- u32 ctrl;
-
- DEBUGFUNC("e1000_force_mac_fc_generic");
-
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
-
-	/* Because we didn't get link via the internal auto-negotiation
-	 * mechanism (we either forced link or we got link via PHY
-	 * auto-neg), we have to manually enable/disable transmit and
-	 * receive flow control.
-	 *
-	 * The "Case" statement below enables/disables flow control
-	 * according to the "hw->fc.current_mode" parameter.
-	 *
-	 * The possible values of the "fc" parameter are:
-	 *      0:  Flow control is completely disabled
-	 *      1:  Rx flow control is enabled (we can receive pause
-	 *          frames but not send pause frames).
-	 *      2:  Tx flow control is enabled (we can send pause frames
-	 *          but we do not receive pause frames).
-	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
- * other: No other values should be possible at this point.
- */
- DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
-
- switch (hw->fc.current_mode) {
- case e1000_fc_none:
- ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
- break;
- case e1000_fc_rx_pause:
- ctrl &= (~E1000_CTRL_TFCE);
- ctrl |= E1000_CTRL_RFCE;
- break;
- case e1000_fc_tx_pause:
- ctrl &= (~E1000_CTRL_RFCE);
- ctrl |= E1000_CTRL_TFCE;
- break;
- case e1000_fc_full:
- ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
- break;
- default:
- DEBUGOUT("Flow control param set incorrectly\n");
- return -E1000_ERR_CONFIG;
- }
-
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_config_fc_after_link_up_generic - Configures flow control after link
- * @hw: pointer to the HW structure
- *
- * Checks the status of auto-negotiation after link up to ensure that the
- * speed and duplex were not forced. If the link needed to be forced, then
- * flow control needs to be forced also. If auto-negotiation is enabled
- * and did not fail, then we configure flow control based on our link
- * partner.
- **/
-s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
- s32 ret_val = E1000_SUCCESS;
- u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
- u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
- u16 speed, duplex;
-
- DEBUGFUNC("e1000_config_fc_after_link_up_generic");
-
- /* Check for the case where we have fiber media and auto-neg failed
- * so we had to force link. In this case, we need to force the
- * configuration of the MAC to match the "fc" parameter.
- */
- if (mac->autoneg_failed) {
- if (hw->phy.media_type == e1000_media_type_fiber ||
- hw->phy.media_type == e1000_media_type_internal_serdes)
- ret_val = e1000_force_mac_fc_generic(hw);
- } else {
- if (hw->phy.media_type == e1000_media_type_copper)
- ret_val = e1000_force_mac_fc_generic(hw);
- }
-
- if (ret_val) {
- DEBUGOUT("Error forcing flow control settings\n");
- return ret_val;
- }
-
- /* Check for the case where we have copper media and auto-neg is
- * enabled. In this case, we need to check and see if Auto-Neg
-	 * has completed, and if so, how the PHY and link partner have
-	 * flow control configured.
- */
- if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
- /* Read the MII Status Register and check to see if AutoNeg
- * has completed. We read this twice because this reg has
- * some "sticky" (latched) bits.
- */
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
- if (ret_val)
- return ret_val;
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
- if (ret_val)
- return ret_val;
-
- if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
- DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
- return ret_val;
- }
-
- /* The AutoNeg process has completed, so we now need to
- * read both the Auto Negotiation Advertisement
- * Register (Address 4) and the Auto_Negotiation Base
- * Page Ability Register (Address 5) to determine how
- * flow control was negotiated.
- */
- ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
- &mii_nway_adv_reg);
- if (ret_val)
- return ret_val;
- ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
- &mii_nway_lp_ability_reg);
- if (ret_val)
- return ret_val;
-
- /* Two bits in the Auto Negotiation Advertisement Register
- * (Address 4) and two bits in the Auto Negotiation Base
- * Page Ability Register (Address 5) determine flow control
- * for both the PHY and the link partner. The following
- * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
- * 1999, describes these PAUSE resolution bits and how flow
- * control is determined based upon these settings.
- * NOTE: DC = Don't Care
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
- *-------|---------|-------|---------|--------------------
- * 0 | 0 | DC | DC | e1000_fc_none
- * 0 | 1 | 0 | DC | e1000_fc_none
- * 0 | 1 | 1 | 0 | e1000_fc_none
- * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
- * 1 | 0 | 0 | DC | e1000_fc_none
- * 1 | DC | 1 | DC | e1000_fc_full
- * 1 | 1 | 0 | 0 | e1000_fc_none
- * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
- *
- * Are both PAUSE bits set to 1? If so, this implies
- * Symmetric Flow Control is enabled at both ends. The
- * ASM_DIR bits are irrelevant per the spec.
- *
- * For Symmetric Flow Control:
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
-		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
- *
- */
- if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
-			/* Now we need to check if the user selected Rx-only
-			 * PAUSE frames. In this case, we had to advertise
- * FULL flow control because we could not advertise Rx
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
- */
- if (hw->fc.requested_mode == e1000_fc_full) {
- hw->fc.current_mode = e1000_fc_full;
- DEBUGOUT("Flow Control = FULL.\n");
- } else {
- hw->fc.current_mode = e1000_fc_rx_pause;
- DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
- }
- }
- /* For receiving PAUSE frames ONLY.
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
- */
- else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
- hw->fc.current_mode = e1000_fc_tx_pause;
- DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
- }
- /* For transmitting PAUSE frames ONLY.
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
- */
- else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
- hw->fc.current_mode = e1000_fc_rx_pause;
- DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
- } else {
- /* Per the IEEE spec, at this point flow control
- * should be disabled.
- */
- hw->fc.current_mode = e1000_fc_none;
- DEBUGOUT("Flow Control = NONE.\n");
- }
-
- /* Now we need to do one last check... If we auto-
- * negotiated to HALF DUPLEX, flow control should not be
- * enabled per IEEE 802.3 spec.
- */
- ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
- if (ret_val) {
- DEBUGOUT("Error getting link speed and duplex\n");
- return ret_val;
- }
-
- if (duplex == HALF_DUPLEX)
- hw->fc.current_mode = e1000_fc_none;
-
- /* Now we call a subroutine to actually force the MAC
- * controller to use the correct flow control settings.
- */
- ret_val = e1000_force_mac_fc_generic(hw);
- if (ret_val) {
- DEBUGOUT("Error forcing flow control settings\n");
- return ret_val;
- }
- }
-
- /* Check for the case where we have SerDes media and auto-neg is
- * enabled. In this case, we need to check and see if Auto-Neg
-	 * has completed, and if so, how the PHY and link partner have
-	 * flow control configured.
- */
- if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
- mac->autoneg) {
- /* Read the PCS_LSTS and check to see if AutoNeg
- * has completed.
- */
- pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT);
-
- if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
- DEBUGOUT("PCS Auto Neg has not completed.\n");
- return ret_val;
- }
-
- /* The AutoNeg process has completed, so we now need to
- * read both the Auto Negotiation Advertisement
- * Register (PCS_ANADV) and the Auto_Negotiation Base
- * Page Ability Register (PCS_LPAB) to determine how
- * flow control was negotiated.
- */
- pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
- pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB);
-
- /* Two bits in the Auto Negotiation Advertisement Register
- * (PCS_ANADV) and two bits in the Auto Negotiation Base
- * Page Ability Register (PCS_LPAB) determine flow control
- * for both the PHY and the link partner. The following
- * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
- * 1999, describes these PAUSE resolution bits and how flow
- * control is determined based upon these settings.
- * NOTE: DC = Don't Care
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
- *-------|---------|-------|---------|--------------------
- * 0 | 0 | DC | DC | e1000_fc_none
- * 0 | 1 | 0 | DC | e1000_fc_none
- * 0 | 1 | 1 | 0 | e1000_fc_none
- * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
- * 1 | 0 | 0 | DC | e1000_fc_none
- * 1 | DC | 1 | DC | e1000_fc_full
- * 1 | 1 | 0 | 0 | e1000_fc_none
- * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
- *
- * Are both PAUSE bits set to 1? If so, this implies
- * Symmetric Flow Control is enabled at both ends. The
- * ASM_DIR bits are irrelevant per the spec.
- *
- * For Symmetric Flow Control:
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 1 | DC | 1 | DC | e1000_fc_full
- *
- */
- if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
- (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
-			/* Now we need to check if the user selected Rx-only
-			 * PAUSE frames. In this case, we had to advertise
- * FULL flow control because we could not advertise Rx
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
- */
- if (hw->fc.requested_mode == e1000_fc_full) {
- hw->fc.current_mode = e1000_fc_full;
- DEBUGOUT("Flow Control = FULL.\n");
- } else {
- hw->fc.current_mode = e1000_fc_rx_pause;
- DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
- }
- }
- /* For receiving PAUSE frames ONLY.
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
- */
- else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
- (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
- (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
- (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
- hw->fc.current_mode = e1000_fc_tx_pause;
- DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
- }
- /* For transmitting PAUSE frames ONLY.
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
- */
- else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
- (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
- !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
- (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
- hw->fc.current_mode = e1000_fc_rx_pause;
- DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
- } else {
- /* Per the IEEE spec, at this point flow control
- * should be disabled.
- */
- hw->fc.current_mode = e1000_fc_none;
- DEBUGOUT("Flow Control = NONE.\n");
- }
-
- /* Now we call a subroutine to actually force the MAC
- * controller to use the correct flow control settings.
- */
- pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
- pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
- E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg);
-
- ret_val = e1000_force_mac_fc_generic(hw);
- if (ret_val) {
- DEBUGOUT("Error forcing flow control settings\n");
- return ret_val;
- }
- }
-
- return E1000_SUCCESS;
-}
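
The PAUSE resolution table is applied twice in this function, once to the MII registers and once to the PCS registers, yet the decision depends only on four advertised bits plus the mode the user requested. A sketch of the table as a pure function; the name is hypothetical and the e1000_fc_mode enum type is assumed from the values used above:

/* Sketch only: IEEE 802.3 PAUSE resolution from the local and link-partner
 * PAUSE/ASM_DIR bits. 'want_full' is true when the user requested
 * e1000_fc_full rather than Rx-only PAUSE.
 */
static enum e1000_fc_mode sk_resolve_fc(bool loc_pause, bool loc_asm,
					bool lp_pause, bool lp_asm,
					bool want_full)
{
	if (loc_pause && lp_pause)		/* symmetric at both ends */
		return want_full ? e1000_fc_full : e1000_fc_rx_pause;
	if (!loc_pause && loc_asm && lp_pause && lp_asm)
		return e1000_fc_tx_pause;	/* we may send PAUSE only */
	if (loc_pause && loc_asm && !lp_pause && lp_asm)
		return e1000_fc_rx_pause;	/* we may receive PAUSE only */
	return e1000_fc_none;			/* per spec: disabled */
}
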
-
-/**
- * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
- * @hw: pointer to the HW structure
- * @speed: stores the current speed
- * @duplex: stores the current duplex
- *
- * Read the status register for the current speed/duplex and store the current
- * speed and duplex for copper connections.
- **/
-s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
- u16 *duplex)
-{
- u32 status;
-
- DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
-
- status = E1000_READ_REG(hw, E1000_STATUS);
- if (status & E1000_STATUS_SPEED_1000) {
- *speed = SPEED_1000;
- DEBUGOUT("1000 Mbs, ");
- } else if (status & E1000_STATUS_SPEED_100) {
- *speed = SPEED_100;
- DEBUGOUT("100 Mbs, ");
- } else {
- *speed = SPEED_10;
- DEBUGOUT("10 Mbs, ");
- }
-
- if (status & E1000_STATUS_FD) {
- *duplex = FULL_DUPLEX;
- DEBUGOUT("Full Duplex\n");
- } else {
- *duplex = HALF_DUPLEX;
- DEBUGOUT("Half Duplex\n");
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_get_speed_and_duplex_fiber_serdes_generic - Retrieve current speed/duplex
- * @hw: pointer to the HW structure
- * @speed: stores the current speed
- * @duplex: stores the current duplex
- *
- * Sets the speed and duplex to gigabit full duplex (the only possible option)
- * for fiber/serdes links.
- **/
-s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw,
- u16 *speed, u16 *duplex)
-{
- DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
-
- *speed = SPEED_1000;
- *duplex = FULL_DUPLEX;
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_get_hw_semaphore_generic - Acquire hardware semaphore
- * @hw: pointer to the HW structure
- *
- * Acquire the HW semaphore to access the PHY or NVM
- **/
-s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
-{
- u32 swsm;
- s32 timeout = hw->nvm.word_size + 1;
- s32 i = 0;
-
- DEBUGFUNC("e1000_get_hw_semaphore_generic");
-
- /* Get the SW semaphore */
- while (i < timeout) {
- swsm = E1000_READ_REG(hw, E1000_SWSM);
- if (!(swsm & E1000_SWSM_SMBI))
- break;
-
- usec_delay(50);
- i++;
- }
-
- if (i == timeout) {
- DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
- return -E1000_ERR_NVM;
- }
-
- /* Get the FW semaphore. */
- for (i = 0; i < timeout; i++) {
- swsm = E1000_READ_REG(hw, E1000_SWSM);
- E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
-
- /* Semaphore acquired if bit latched */
- if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
- break;
-
- usec_delay(50);
- }
-
- if (i == timeout) {
- /* Release semaphores */
- e1000_put_hw_semaphore_generic(hw);
- DEBUGOUT("Driver can't access the NVM\n");
- return -E1000_ERR_NVM;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_put_hw_semaphore_generic - Release hardware semaphore
- * @hw: pointer to the HW structure
- *
- * Release hardware semaphore used to access the PHY or NVM
- **/
-void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
-{
- u32 swsm;
-
- DEBUGFUNC("e1000_put_hw_semaphore_generic");
-
- swsm = E1000_READ_REG(hw, E1000_SWSM);
-
- swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
-
- E1000_WRITE_REG(hw, E1000_SWSM, swsm);
-}
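
Callers are expected to bracket PHY/NVM accesses between the two helpers above. A minimal usage sketch; the wrapper name and the elided body are hypothetical:

/* Sketch only: acquire/use/release pattern for the SWSM semaphores. */
static s32 sk_nvm_op_locked(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = e1000_get_hw_semaphore_generic(hw);
	if (ret_val)
		return ret_val;		/* could not lock; do not touch NVM */

	/* ... access the PHY or NVM here ... */

	e1000_put_hw_semaphore_generic(hw);
	return E1000_SUCCESS;
}
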
-
-/**
- * e1000_get_auto_rd_done_generic - Check for auto read completion
- * @hw: pointer to the HW structure
- *
- * Check EEPROM for Auto Read done bit.
- **/
-s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
-{
- s32 i = 0;
-
- DEBUGFUNC("e1000_get_auto_rd_done_generic");
-
- while (i < AUTO_READ_DONE_TIMEOUT) {
- if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
- break;
- msec_delay(1);
- i++;
- }
-
- if (i == AUTO_READ_DONE_TIMEOUT) {
- DEBUGOUT("Auto read by HW from NVM has not completed.\n");
- return -E1000_ERR_RESET;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_valid_led_default_generic - Verify a valid default LED config
- * @hw: pointer to the HW structure
- * @data: pointer to the NVM (EEPROM)
- *
- * Read the EEPROM for the current default LED configuration. If the
- * LED configuration is not valid, set to a valid LED configuration.
- **/
-s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
-{
- s32 ret_val;
-
- DEBUGFUNC("e1000_valid_led_default_generic");
-
- ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
- *data = ID_LED_DEFAULT;
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_id_led_init_generic - Initialize LED behavior from the NVM
- * @hw: pointer to the HW structure
- *
- * Reads the default LED configuration from the NVM and builds the
- * ledctl_mode1 and ledctl_mode2 values used to drive the LEDs.
- **/
-s32 e1000_id_led_init_generic(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
- s32 ret_val;
- const u32 ledctl_mask = 0x000000FF;
- const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
- const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
- u16 data, i, temp;
- const u16 led_mask = 0x0F;
-
- DEBUGFUNC("e1000_id_led_init_generic");
-
- ret_val = hw->nvm.ops.valid_led_default(hw, &data);
- if (ret_val)
- return ret_val;
-
- mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
- mac->ledctl_mode1 = mac->ledctl_default;
- mac->ledctl_mode2 = mac->ledctl_default;
-
- for (i = 0; i < 4; i++) {
- temp = (data >> (i << 2)) & led_mask;
- switch (temp) {
- case ID_LED_ON1_DEF2:
- case ID_LED_ON1_ON2:
- case ID_LED_ON1_OFF2:
- mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
- mac->ledctl_mode1 |= ledctl_on << (i << 3);
- break;
- case ID_LED_OFF1_DEF2:
- case ID_LED_OFF1_ON2:
- case ID_LED_OFF1_OFF2:
- mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
- mac->ledctl_mode1 |= ledctl_off << (i << 3);
- break;
- default:
- /* Do nothing */
- break;
- }
- switch (temp) {
- case ID_LED_DEF1_ON2:
- case ID_LED_ON1_ON2:
- case ID_LED_OFF1_ON2:
- mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
- mac->ledctl_mode2 |= ledctl_on << (i << 3);
- break;
- case ID_LED_DEF1_OFF2:
- case ID_LED_ON1_OFF2:
- case ID_LED_OFF1_OFF2:
- mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
- mac->ledctl_mode2 |= ledctl_off << (i << 3);
- break;
- default:
- /* Do nothing */
- break;
- }
- }
-
- return E1000_SUCCESS;
-}
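
The loop above mixes two shift granularities: i << 2 selects the 4-bit field for LED i in the NVM word, while i << 3 selects the matching byte of LEDCTL. A sketch of the two helpers that indexing implies; the names are hypothetical:

/* Sketch only: per-LED field extraction and LEDCTL byte patching. */
static u16 sk_led_field(u16 data, int i)
{
	return (data >> (i << 2)) & 0x0F;	/* 4-bit field for LED i */
}

static u32 sk_patch_ledctl(u32 ledctl, int i, u32 mode)
{
	ledctl &= ~(0xFFu << (i << 3));		/* clear byte for LED i */
	ledctl |= mode << (i << 3);		/* install new mode */
	return ledctl;
}
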
-
-/**
- * e1000_setup_led_generic - Configures SW controllable LED
- * @hw: pointer to the HW structure
- *
- * This prepares the SW controllable LED for use and saves the current state
- * of the LED so it can be later restored.
- **/
-s32 e1000_setup_led_generic(struct e1000_hw *hw)
-{
- u32 ledctl;
-
- DEBUGFUNC("e1000_setup_led_generic");
-
- if (hw->mac.ops.setup_led != e1000_setup_led_generic)
- return -E1000_ERR_CONFIG;
-
- if (hw->phy.media_type == e1000_media_type_fiber) {
- ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
- hw->mac.ledctl_default = ledctl;
- /* Turn off LED0 */
- ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
- E1000_LEDCTL_LED0_MODE_MASK);
- ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
- E1000_LEDCTL_LED0_MODE_SHIFT);
- E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
- } else if (hw->phy.media_type == e1000_media_type_copper) {
- E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_cleanup_led_generic - Set LED config to default operation
- * @hw: pointer to the HW structure
- *
- * Remove the current LED configuration and set the LED configuration
- * to the default value, saved from the EEPROM.
- **/
-s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
-{
- DEBUGFUNC("e1000_cleanup_led_generic");
-
- E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_blink_led_generic - Blink LED
- * @hw: pointer to the HW structure
- *
- * Blink the LEDs which are set to be on.
- **/
-s32 e1000_blink_led_generic(struct e1000_hw *hw)
-{
- u32 ledctl_blink = 0;
- u32 i;
-
- DEBUGFUNC("e1000_blink_led_generic");
-
- if (hw->phy.media_type == e1000_media_type_fiber) {
- /* always blink LED0 for PCI-E fiber */
- ledctl_blink = E1000_LEDCTL_LED0_BLINK |
- (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
- } else {
- /* Set the blink bit for each LED that's "on" (0x0E)
- * (or "off" if inverted) in ledctl_mode2. The blink
- * logic in hardware only works when mode is set to "on"
- * so it must be changed accordingly when the mode is
- * "off" and inverted.
- */
- ledctl_blink = hw->mac.ledctl_mode2;
- for (i = 0; i < 32; i += 8) {
- u32 mode = (hw->mac.ledctl_mode2 >> i) &
- E1000_LEDCTL_LED0_MODE_MASK;
- u32 led_default = hw->mac.ledctl_default >> i;
-
- if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
- (mode == E1000_LEDCTL_MODE_LED_ON)) ||
- ((led_default & E1000_LEDCTL_LED0_IVRT) &&
- (mode == E1000_LEDCTL_MODE_LED_OFF))) {
- ledctl_blink &=
- ~(E1000_LEDCTL_LED0_MODE_MASK << i);
- ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
- E1000_LEDCTL_MODE_LED_ON) << i;
- }
- }
- }
-
- E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_led_on_generic - Turn LED on
- * @hw: pointer to the HW structure
- *
- * Turn LED on.
- **/
-s32 e1000_led_on_generic(struct e1000_hw *hw)
-{
- u32 ctrl;
-
- DEBUGFUNC("e1000_led_on_generic");
-
- switch (hw->phy.media_type) {
- case e1000_media_type_fiber:
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
- ctrl &= ~E1000_CTRL_SWDPIN0;
- ctrl |= E1000_CTRL_SWDPIO0;
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
- break;
- case e1000_media_type_copper:
- E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
- break;
- default:
- break;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_led_off_generic - Turn LED off
- * @hw: pointer to the HW structure
- *
- * Turn LED off.
- **/
-s32 e1000_led_off_generic(struct e1000_hw *hw)
-{
- u32 ctrl;
-
- DEBUGFUNC("e1000_led_off_generic");
-
- switch (hw->phy.media_type) {
- case e1000_media_type_fiber:
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
- ctrl |= E1000_CTRL_SWDPIN0;
- ctrl |= E1000_CTRL_SWDPIO0;
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
- break;
- case e1000_media_type_copper:
- E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
- break;
- default:
- break;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
- * @hw: pointer to the HW structure
- * @no_snoop: bitmap of no-snoop events
- *
- * Set the PCI-Express no-snoop bits in the GCR register for the events
- * enabled in 'no_snoop'.
- **/
-void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
-{
- u32 gcr;
-
- DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
-
- if (no_snoop) {
- gcr = E1000_READ_REG(hw, E1000_GCR);
- gcr &= ~(PCIE_NO_SNOOP_ALL);
- gcr |= no_snoop;
- E1000_WRITE_REG(hw, E1000_GCR, gcr);
- }
-}
-
-/**
- * e1000_disable_pcie_master_generic - Disables PCI-express master access
- * @hw: pointer to the HW structure
- *
- * Returns E1000_SUCCESS if successful, else returns -10
- * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
- * the master requests to be disabled.
- *
- * Disables PCI-Express master access and verifies there are no pending
- * requests.
- **/
-s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
-{
- u32 ctrl;
- s32 timeout = MASTER_DISABLE_TIMEOUT;
-
- DEBUGFUNC("e1000_disable_pcie_master_generic");
-
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
- ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-
- while (timeout) {
- if (!(E1000_READ_REG(hw, E1000_STATUS) &
- E1000_STATUS_GIO_MASTER_ENABLE))
- break;
- usec_delay(100);
- timeout--;
- }
-
- if (!timeout) {
- DEBUGOUT("Master requests are pending.\n");
- return -E1000_ERR_MASTER_REQUESTS_PENDING;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
- * @hw: pointer to the HW structure
- *
- * Reset the Adaptive Interframe Spacing throttle to default values.
- **/
-void e1000_reset_adaptive_generic(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
-
- DEBUGFUNC("e1000_reset_adaptive_generic");
-
- if (!mac->adaptive_ifs) {
- DEBUGOUT("Not in Adaptive IFS mode!\n");
- return;
- }
-
- mac->current_ifs_val = 0;
- mac->ifs_min_val = IFS_MIN;
- mac->ifs_max_val = IFS_MAX;
- mac->ifs_step_size = IFS_STEP;
- mac->ifs_ratio = IFS_RATIO;
-
- mac->in_ifs_mode = false;
- E1000_WRITE_REG(hw, E1000_AIT, 0);
-}
-
-/**
- * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
- * @hw: pointer to the HW structure
- *
- * Update the Adaptive Interframe Spacing Throttle value based on the
- * time between transmitted packets and time between collisions.
- **/
-void e1000_update_adaptive_generic(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
-
- DEBUGFUNC("e1000_update_adaptive_generic");
-
- if (!mac->adaptive_ifs) {
- DEBUGOUT("Not in Adaptive IFS mode!\n");
- return;
- }
-
- if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
- if (mac->tx_packet_delta > MIN_NUM_XMITS) {
- mac->in_ifs_mode = true;
- if (mac->current_ifs_val < mac->ifs_max_val) {
- if (!mac->current_ifs_val)
- mac->current_ifs_val = mac->ifs_min_val;
- else
- mac->current_ifs_val +=
- mac->ifs_step_size;
- E1000_WRITE_REG(hw, E1000_AIT,
- mac->current_ifs_val);
- }
- }
- } else {
- if (mac->in_ifs_mode &&
- (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
- mac->current_ifs_val = 0;
- mac->in_ifs_mode = false;
- E1000_WRITE_REG(hw, E1000_AIT, 0);
- }
- }
-}
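
Stripped of register access, the rule above reads: when collisions are high relative to transmitted packets, enter IFS mode and ramp the throttle from ifs_min_val in ifs_step_size increments while below ifs_max_val; otherwise drop back to zero. A sketch of that rule as a pure function; the name is hypothetical:

/* Sketch only: next AIT value given the current one and congestion state. */
static u16 sk_ifs_next(u16 cur, u16 min_val, u16 max_val, u16 step,
		       bool congested)
{
	if (!congested)
		return 0;			/* leave IFS mode */
	if (cur < max_val)
		return cur ? cur + step : min_val;
	return cur;				/* already at the ceiling */
}
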
-
-/**
- * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
- * @hw: pointer to the HW structure
- *
- * Verify that, when not using auto-negotiation, MDI/MDIx is correctly
- * set; forced operation supports MDI mode only.
- **/
-static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
-{
- DEBUGFUNC("e1000_validate_mdi_setting_generic");
-
- if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
- DEBUGOUT("Invalid MDI setting detected\n");
- hw->phy.mdix = 1;
- return -E1000_ERR_CONFIG;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings
- * @hw: pointer to the HW structure
- *
- * Validate the MDI/MDIx setting, allowing for auto-crossover during forced
- * operation.
- **/
-s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw)
-{
- DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic");
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_write_8bit_ctrl_reg_generic - Write an 8-bit CTRL register
- * @hw: pointer to the HW structure
- * @reg: 32bit register offset such as E1000_SCTL
- * @offset: register offset to write to
- * @data: data to write at register offset
- *
- * Writes an address/data control type register. There are several of these
- * and they all have the format address << 8 | data and bit 31 is polled for
- * completion.
- **/
-s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
- u32 offset, u8 data)
-{
- u32 i, regvalue = 0;
-
- DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
-
- /* Set up the address and data */
- regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
- E1000_WRITE_REG(hw, reg, regvalue);
-
-	/* Poll the ready bit to see if the write completed */
- for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
- usec_delay(5);
- regvalue = E1000_READ_REG(hw, reg);
- if (regvalue & E1000_GEN_CTL_READY)
- break;
- }
- if (!(regvalue & E1000_GEN_CTL_READY)) {
- DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
- return -E1000_ERR_PHY;
- }
-
- return E1000_SUCCESS;
-}
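
A hypothetical call through the helper above, using E1000_SCTL from the doc comment; the wrapper name and the offset and data values are illustrative only:

/* Sketch only: one 8-bit write via an address/data control register. */
static s32 sk_sctl_write_example(struct e1000_hw *hw)
{
	/* offset 0x10 and data 0x42 are illustrative values */
	return e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x10, 0x42);
}
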
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.h
deleted file mode 100755
index 6a1b0f52..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _E1000_MAC_H_
-#define _E1000_MAC_H_
-
-void e1000_init_mac_ops_generic(struct e1000_hw *hw);
-void e1000_null_mac_generic(struct e1000_hw *hw);
-s32 e1000_null_ops_generic(struct e1000_hw *hw);
-s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
-bool e1000_null_mng_mode(struct e1000_hw *hw);
-void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
-void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
-void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
-s32 e1000_blink_led_generic(struct e1000_hw *hw);
-s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw);
-s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
-s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
-s32 e1000_cleanup_led_generic(struct e1000_hw *hw);
-s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
-s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw);
-s32 e1000_force_mac_fc_generic(struct e1000_hw *hw);
-s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
-s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
-void e1000_set_lan_id_single_port(struct e1000_hw *hw);
-s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
-s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
- u16 *duplex);
-s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
- u16 *speed, u16 *duplex);
-s32 e1000_id_led_init_generic(struct e1000_hw *hw);
-s32 e1000_led_on_generic(struct e1000_hw *hw);
-s32 e1000_led_off_generic(struct e1000_hw *hw);
-void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count);
-s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
-s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
-s32 e1000_setup_led_generic(struct e1000_hw *hw);
-s32 e1000_setup_link_generic(struct e1000_hw *hw);
-s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
-s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
- u32 offset, u8 data);
-
-u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
-
-void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
-void e1000_clear_vfta_generic(struct e1000_hw *hw);
-void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
-void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
-s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
-void e1000_reset_adaptive_generic(struct e1000_hw *hw);
-void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
-void e1000_update_adaptive_generic(struct e1000_hw *hw);
-void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
-
-#endif
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.c
deleted file mode 100755
index e1a2abe0..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.c
+++ /dev/null
@@ -1,556 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "e1000_api.h"
-
-/**
- * e1000_calculate_checksum - Calculate checksum for buffer
- * @buffer: pointer to EEPROM
- * @length: size of EEPROM to calculate a checksum for
- *
- * Calculates the checksum for some buffer on a specified length. The
- * checksum calculated is returned.
- **/
-u8 e1000_calculate_checksum(u8 *buffer, u32 length)
-{
- u32 i;
- u8 sum = 0;
-
- DEBUGFUNC("e1000_calculate_checksum");
-
- if (!buffer)
- return 0;
-
- for (i = 0; i < length; i++)
- sum += buffer[i];
-
- return (u8) (0 - sum);
-}
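
Since the checksum is the two's complement of the byte sum, any buffer that includes its checksum byte must sum to zero, so validity can be checked by re-running the routine over the whole buffer. A sketch; the name is hypothetical:

/* Sketch only: a buffer that includes its checksum byte is valid iff the
 * byte sum wraps to zero, i.e. the routine above returns 0.
 */
static bool sk_checksum_ok(u8 *buffer, u32 length_with_csum)
{
	return e1000_calculate_checksum(buffer, length_with_csum) == 0;
}
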
-
-/**
- * e1000_mng_enable_host_if_generic - Checks host interface is enabled
- * @hw: pointer to the HW structure
- *
- * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
- *
- * This function checks whether the HOST IF is enabled for command operation
- * and also checks whether the previous command is completed. It busy-waits
- * if the previous command is not yet completed.
- **/
-s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
-{
- u32 hicr;
- u8 i;
-
- DEBUGFUNC("e1000_mng_enable_host_if_generic");
-
- if (!hw->mac.arc_subsystem_valid) {
- DEBUGOUT("ARC subsystem not valid.\n");
- return -E1000_ERR_HOST_INTERFACE_COMMAND;
- }
-
- /* Check that the host interface is enabled. */
- hicr = E1000_READ_REG(hw, E1000_HICR);
- if (!(hicr & E1000_HICR_EN)) {
- DEBUGOUT("E1000_HOST_EN bit disabled.\n");
- return -E1000_ERR_HOST_INTERFACE_COMMAND;
- }
- /* check the previous command is completed */
- for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
- hicr = E1000_READ_REG(hw, E1000_HICR);
- if (!(hicr & E1000_HICR_C))
- break;
- msec_delay_irq(1);
- }
-
- if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
-		DEBUGOUT("Previous command timed out.\n");
- return -E1000_ERR_HOST_INTERFACE_COMMAND;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_check_mng_mode_generic - Generic check management mode
- * @hw: pointer to the HW structure
- *
- * Reads the firmware semaphore register and returns true (>0) if
- * manageability is enabled, else false (0).
- **/
-bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
-{
- u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);
-
- DEBUGFUNC("e1000_check_mng_mode_generic");
-
- return (fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
-}
-
-/**
- * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx
- * @hw: pointer to the HW structure
- *
- * Enables packet filtering on transmit packets if manageability is enabled
- * and host interface is enabled.
- **/
-bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
-{
- struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
- u32 *buffer = (u32 *)&hw->mng_cookie;
- u32 offset;
- s32 ret_val, hdr_csum, csum;
- u8 i, len;
-
- DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
-
- hw->mac.tx_pkt_filtering = true;
-
- /* No manageability, no filtering */
- if (!hw->mac.ops.check_mng_mode(hw)) {
- hw->mac.tx_pkt_filtering = false;
- return hw->mac.tx_pkt_filtering;
- }
-
- /* If we can't read from the host interface for whatever
- * reason, disable filtering.
- */
- ret_val = e1000_mng_enable_host_if_generic(hw);
- if (ret_val != E1000_SUCCESS) {
- hw->mac.tx_pkt_filtering = false;
- return hw->mac.tx_pkt_filtering;
- }
-
- /* Read in the header. Length and offset are in dwords. */
- len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
- offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
- for (i = 0; i < len; i++)
- *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
- offset + i);
- hdr_csum = hdr->checksum;
- hdr->checksum = 0;
- csum = e1000_calculate_checksum((u8 *)hdr,
- E1000_MNG_DHCP_COOKIE_LENGTH);
- /* If either the checksums or signature don't match, then
- * the cookie area isn't considered valid, in which case we
- * take the safe route of assuming Tx filtering is enabled.
- */
- if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
- hw->mac.tx_pkt_filtering = true;
- return hw->mac.tx_pkt_filtering;
- }
-
- /* Cookie area is valid, make the final check for filtering. */
- if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
- hw->mac.tx_pkt_filtering = false;
-
- return hw->mac.tx_pkt_filtering;
-}
-
-/**
- * e1000_mng_write_cmd_header_generic - Writes manageability command header
- * @hw: pointer to the HW structure
- * @hdr: pointer to the host interface command header
- *
- * Writes the command header after performing the checksum calculation.
- **/
-s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
- struct e1000_host_mng_command_header *hdr)
-{
- u16 i, length = sizeof(struct e1000_host_mng_command_header);
-
- DEBUGFUNC("e1000_mng_write_cmd_header_generic");
-
- /* Write the whole command header structure with new checksum. */
-
- hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
-
- length >>= 2;
- /* Write the relevant command block into the ram area. */
- for (i = 0; i < length; i++) {
- E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
- *((u32 *) hdr + i));
- E1000_WRITE_FLUSH(hw);
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_mng_host_if_write_generic - Write to the manageability host interface
- * @hw: pointer to the HW structure
- * @buffer: pointer to the host interface buffer
- * @length: size of the buffer
- * @offset: location in the buffer to write to
- * @sum: sum of the data (not checksum)
- *
- * This function writes the buffer content at the given offset on the host
- * interface. It handles alignment so that the writes are done in the most
- * efficient way, and it accumulates the sum of the data into the *sum
- * parameter.
- **/
-s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
- u16 length, u16 offset, u8 *sum)
-{
- u8 *tmp;
- u8 *bufptr = buffer;
- u32 data = 0;
- u16 remaining, i, j, prev_bytes;
-
- DEBUGFUNC("e1000_mng_host_if_write_generic");
-
-	/* sum is only the sum of the data; it is not a checksum */
-
- if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
- return -E1000_ERR_PARAM;
-
- tmp = (u8 *)&data;
- prev_bytes = offset & 0x3;
- offset >>= 2;
-
- if (prev_bytes) {
- data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
- for (j = prev_bytes; j < sizeof(u32); j++) {
- *(tmp + j) = *bufptr++;
- *sum += *(tmp + j);
- }
- E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
- length -= j - prev_bytes;
- offset++;
- }
-
- remaining = length & 0x3;
- length -= remaining;
-
- /* Calculate length in DWORDs */
- length >>= 2;
-
- /* The device driver writes the relevant command block into the
- * ram area.
- */
- for (i = 0; i < length; i++) {
- for (j = 0; j < sizeof(u32); j++) {
- *(tmp + j) = *bufptr++;
- *sum += *(tmp + j);
- }
-
- E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
- data);
- }
- if (remaining) {
- for (j = 0; j < sizeof(u32); j++) {
- if (j < remaining)
- *(tmp + j) = *bufptr++;
- else
- *(tmp + j) = 0;
-
- *sum += *(tmp + j);
- }
- E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
- data);
- }
-
- return E1000_SUCCESS;
-}
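
The alignment handling above splits a write into three parts: an unaligned head that completes the first dword, a run of whole dwords, and a zero-padded tail. A sketch of that split; the helper name is hypothetical:

/* Sketch only: split an unaligned (offset, length) write into head bytes,
 * whole dwords and tail bytes, matching the loop structure above.
 */
static void sk_split_write(u16 offset, u16 length,
			   u16 *head, u16 *dwords, u16 *tail)
{
	*head = (4 - (offset & 0x3)) & 0x3;	/* bytes finishing 1st dword */
	*tail = (length - *head) & 0x3;		/* bytes padded in last dword */
	*dwords = (u16)((length - *head - *tail) >> 2);
}
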
-
-/**
- * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
- * @hw: pointer to the HW structure
- * @buffer: pointer to the host interface
- * @length: size of the buffer
- *
- * Writes the DHCP information to the host interface.
- **/
-s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer,
- u16 length)
-{
- struct e1000_host_mng_command_header hdr;
- s32 ret_val;
- u32 hicr;
-
- DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
-
- hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
- hdr.command_length = length;
- hdr.reserved1 = 0;
- hdr.reserved2 = 0;
- hdr.checksum = 0;
-
- /* Enable the host interface */
- ret_val = e1000_mng_enable_host_if_generic(hw);
- if (ret_val)
- return ret_val;
-
- /* Populate the host interface with the contents of "buffer". */
- ret_val = e1000_mng_host_if_write_generic(hw, buffer, length,
- sizeof(hdr), &(hdr.checksum));
- if (ret_val)
- return ret_val;
-
- /* Write the manageability command header */
- ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr);
- if (ret_val)
- return ret_val;
-
- /* Tell the ARC a new command is pending. */
- hicr = E1000_READ_REG(hw, E1000_HICR);
- E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_enable_mng_pass_thru - Check if management passthrough is needed
- * @hw: pointer to the HW structure
- *
- * Verifies whether the hardware needs to leave the interface enabled so that
- * frames can be directed to and from the management interface.
- **/
-bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
-{
- u32 manc;
- u32 fwsm, factps;
-
- DEBUGFUNC("e1000_enable_mng_pass_thru");
-
- if (!hw->mac.asf_firmware_present)
- return false;
-
- manc = E1000_READ_REG(hw, E1000_MANC);
-
- if (!(manc & E1000_MANC_RCV_TCO_EN))
- return false;
-
- if (hw->mac.has_fwsm) {
- fwsm = E1000_READ_REG(hw, E1000_FWSM);
- factps = E1000_READ_REG(hw, E1000_FACTPS);
-
- if (!(factps & E1000_FACTPS_MNGCG) &&
- ((fwsm & E1000_FWSM_MODE_MASK) ==
- (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)))
- return true;
- } else if ((manc & E1000_MANC_SMBUS_EN) &&
- !(manc & E1000_MANC_ASF_EN)) {
- return true;
- }
-
- return false;
-}
-
-/**
- * e1000_host_interface_command - Writes buffer to host interface
- * @hw: pointer to the HW structure
- * @buffer: contains a command to write
- * @length: the byte length of the buffer, must be a multiple of 4 bytes
- *
- * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS,
- * else returns E1000_ERR_HOST_INTERFACE_COMMAND.
- **/
-s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length)
-{
- u32 hicr, i;
-
- DEBUGFUNC("e1000_host_interface_command");
-
- if (!(hw->mac.arc_subsystem_valid)) {
- DEBUGOUT("Hardware doesn't support host interface command.\n");
- return E1000_SUCCESS;
- }
-
- if (!hw->mac.asf_firmware_present) {
- DEBUGOUT("Firmware is not present.\n");
- return E1000_SUCCESS;
- }
-
- if (length == 0 || length & 0x3 ||
- length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) {
- DEBUGOUT("Buffer length failure.\n");
- return -E1000_ERR_HOST_INTERFACE_COMMAND;
- }
-
- /* Check that the host interface is enabled. */
- hicr = E1000_READ_REG(hw, E1000_HICR);
- if (!(hicr & E1000_HICR_EN)) {
- DEBUGOUT("E1000_HOST_EN bit disabled.\n");
- return -E1000_ERR_HOST_INTERFACE_COMMAND;
- }
-
- /* Calculate length in DWORDs */
- length >>= 2;
-
- /* The device driver writes the relevant command block
- * into the ram area.
- */
- for (i = 0; i < length; i++)
- E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
- *((u32 *)buffer + i));
-
- /* Setting this bit tells the ARC that a new command is pending. */
- E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
-
- for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
- hicr = E1000_READ_REG(hw, E1000_HICR);
- if (!(hicr & E1000_HICR_C))
- break;
- msec_delay(1);
- }
-
- /* Check command successful completion. */
- if (i == E1000_HI_COMMAND_TIMEOUT ||
- (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) {
-		DEBUGOUT("Command failed or returned no valid status.\n");
- return -E1000_ERR_HOST_INTERFACE_COMMAND;
- }
-
- for (i = 0; i < length; i++)
- *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
- E1000_HOST_IF,
- i);
-
- return E1000_SUCCESS;
-}
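
A hypothetical round trip through the helper above; the same buffer carries the command out and, on success, the reply back. The wrapper name and payload bytes are illustrative only:

/* Sketch only: issue a dword-aligned command and read the reply in place. */
static s32 sk_hi_command_example(struct e1000_hw *hw)
{
	u8 cmd[8] = { 0x01, 0, 0, 0, 0, 0, 0, 0 };	/* illustrative payload */

	return e1000_host_interface_command(hw, cmd, sizeof(cmd));
}
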
-/**
- * e1000_load_firmware - Writes proxy FW code buffer to host interface
- *                       and executes it
- * @hw: pointer to the HW structure
- * @buffer: contains the firmware to write
- * @length: the byte length of the buffer, must be a multiple of 4 bytes
- *
- * Upon success returns E1000_SUCCESS; returns E1000_ERR_CONFIG if loading is
- * not enabled in HW, else returns E1000_ERR_HOST_INTERFACE_COMMAND.
- **/
-s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length)
-{
- u32 hicr, hibba, fwsm, icr, i;
-
- DEBUGFUNC("e1000_load_firmware");
-
- if (hw->mac.type < e1000_i210) {
- DEBUGOUT("Hardware doesn't support loading FW by the driver\n");
- return -E1000_ERR_CONFIG;
- }
-
- /* Check that the host interface is enabled. */
- hicr = E1000_READ_REG(hw, E1000_HICR);
- if (!(hicr & E1000_HICR_EN)) {
- DEBUGOUT("E1000_HOST_EN bit disabled.\n");
- return -E1000_ERR_CONFIG;
- }
- if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) {
- DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n");
- return -E1000_ERR_CONFIG;
- }
-
- if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) {
- DEBUGOUT("Buffer length failure.\n");
- return -E1000_ERR_INVALID_ARGUMENT;
- }
-
- /* Clear notification from ROM-FW by reading ICR register */
- icr = E1000_READ_REG(hw, E1000_ICR_V2);
-
- /* Reset ROM-FW */
- hicr = E1000_READ_REG(hw, E1000_HICR);
- hicr |= E1000_HICR_FW_RESET_ENABLE;
- E1000_WRITE_REG(hw, E1000_HICR, hicr);
- hicr |= E1000_HICR_FW_RESET;
- E1000_WRITE_REG(hw, E1000_HICR, hicr);
- E1000_WRITE_FLUSH(hw);
-
- /* Wait till MAC notifies about its readiness after ROM-FW reset */
- for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) {
- icr = E1000_READ_REG(hw, E1000_ICR_V2);
- if (icr & E1000_ICR_MNG)
- break;
- msec_delay(1);
- }
-
- /* Check for timeout */
- if (i == E1000_HI_COMMAND_TIMEOUT) {
- DEBUGOUT("FW reset failed.\n");
- return -E1000_ERR_HOST_INTERFACE_COMMAND;
- }
-
- /* Wait till MAC is ready to accept new FW code */
- for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
- fwsm = E1000_READ_REG(hw, E1000_FWSM);
- if ((fwsm & E1000_FWSM_FW_VALID) &&
- ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT ==
- E1000_FWSM_HI_EN_ONLY_MODE))
- break;
- msec_delay(1);
- }
-
- /* Check for timeout */
- if (i == E1000_HI_COMMAND_TIMEOUT) {
-		DEBUGOUT("MAC not ready to accept new FW code.\n");
- return -E1000_ERR_HOST_INTERFACE_COMMAND;
- }
-
- /* Calculate length in DWORDs */
- length >>= 2;
-
- /* The device driver writes the relevant FW code block
- * into the ram area in DWORDs via 1kB ram addressing window.
- */
- for (i = 0; i < length; i++) {
- if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) {
- /* Point to correct 1kB ram window */
- hibba = E1000_HI_FW_BASE_ADDRESS +
- ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) *
- (i / E1000_HI_FW_BLOCK_DWORD_LENGTH));
-
- E1000_WRITE_REG(hw, E1000_HIBBA, hibba);
- }
-
- E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
- i % E1000_HI_FW_BLOCK_DWORD_LENGTH,
- *((u32 *)buffer + i));
- }
-
- /* Setting this bit tells the ARC that a new FW is ready to execute. */
- hicr = E1000_READ_REG(hw, E1000_HICR);
- E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
-
- for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
- hicr = E1000_READ_REG(hw, E1000_HICR);
- if (!(hicr & E1000_HICR_C))
- break;
- msec_delay(1);
- }
-
- /* Check for successful FW start. */
- if (i == E1000_HI_COMMAND_TIMEOUT) {
- DEBUGOUT("New FW did not start within timeout period.\n");
- return -E1000_ERR_HOST_INTERFACE_COMMAND;
- }
-
- return E1000_SUCCESS;
-}
-
-
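The 1 KB window addressing in e1000_load_firmware maps firmware dword i to a window base (written to E1000_HIBBA) plus an index within the window. A sketch of the two mappings, reusing the constants defined in e1000_manage.h below; the helper names are hypothetical:

/* Sketch only: window base and in-window index for FW dword i. */
static u32 sk_fw_window_base(u32 i)
{
	return E1000_HI_FW_BASE_ADDRESS +
	       ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) *
		(i / E1000_HI_FW_BLOCK_DWORD_LENGTH));
}

static u32 sk_fw_window_index(u32 i)
{
	return i % E1000_HI_FW_BLOCK_DWORD_LENGTH;	/* dword in window */
}
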
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.h
deleted file mode 100755
index c94b2185..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _E1000_MANAGE_H_
-#define _E1000_MANAGE_H_
-
-bool e1000_check_mng_mode_generic(struct e1000_hw *hw);
-bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw);
-s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
-s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
- u16 length, u16 offset, u8 *sum);
-s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
- struct e1000_host_mng_command_header *hdr);
-s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
- u8 *buffer, u16 length);
-bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
-u8 e1000_calculate_checksum(u8 *buffer, u32 length);
-s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length);
-s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length);
-
-enum e1000_mng_mode {
- e1000_mng_mode_none = 0,
- e1000_mng_mode_asf,
- e1000_mng_mode_pt,
- e1000_mng_mode_ipmi,
- e1000_mng_mode_host_if_only
-};
-
-#define E1000_FACTPS_MNGCG 0x20000000
-
-#define E1000_FWSM_MODE_MASK 0xE
-#define E1000_FWSM_MODE_SHIFT 1
-#define E1000_FWSM_FW_VALID 0x00008000
-#define E1000_FWSM_HI_EN_ONLY_MODE 0x4
-
-#define E1000_MNG_IAMT_MODE 0x3
-#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
-#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
-#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
-#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
-#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
-
-#define E1000_VFTA_ENTRY_SHIFT 5
-#define E1000_VFTA_ENTRY_MASK 0x7F
-#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
-
-#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
-#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
-#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */
-#define E1000_HI_FW_BASE_ADDRESS 0x10000
-#define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */
-#define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */
-#define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */
-#define E1000_HICR_EN 0x01 /* Enable bit - RO */
-/* Driver sets this bit when done to put command in RAM */
-#define E1000_HICR_C 0x02
-#define E1000_HICR_SV 0x04 /* Status Validity */
-#define E1000_HICR_FW_RESET_ENABLE 0x40
-#define E1000_HICR_FW_RESET 0x80
-
-/* Intel(R) Active Management Technology signature */
-#define E1000_IAMT_SIGNATURE 0x544D4149
-
-#endif
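
A hedged sketch of how these HICR bits gate host-interface commands, as inferred from e1000_load_firmware above (not part of the driver):

/* Sketch: a command may be issued only while the interface is enabled
 * and no prior command is pending; the driver sets E1000_HICR_C and the
 * firmware clears it on completion (assumption based on usage above). */
static int hi_cmd_allowed(u32 hicr)
{
	return (hicr & E1000_HICR_EN) && !(hicr & E1000_HICR_C);
}
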
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c
deleted file mode 100755
index 6d004b65..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c
+++ /dev/null
@@ -1,526 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "e1000_mbx.h"
-
-/**
- * e1000_null_mbx_check_for_flag - No-op function, return 0
- * @hw: pointer to the HW structure
- **/
-static s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw,
- u16 E1000_UNUSEDARG mbx_id)
-{
-	DEBUGFUNC("e1000_null_mbx_check_for_flag");
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_null_mbx_transact - No-op function, return 0
- * @hw: pointer to the HW structure
- **/
-static s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw,
- u32 E1000_UNUSEDARG *msg,
- u16 E1000_UNUSEDARG size,
- u16 E1000_UNUSEDARG mbx_id)
-{
-	DEBUGFUNC("e1000_null_mbx_transact");
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_read_mbx - Reads a message from the mailbox
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- * @mbx_id: id of mailbox to read
- *
- * returns SUCCESS if it successfully read message from buffer
- **/
-s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -E1000_ERR_MBX;
-
- DEBUGFUNC("e1000_read_mbx");
-
- /* limit read to size of mailbox */
- if (size > mbx->size)
- size = mbx->size;
-
- if (mbx->ops.read)
- ret_val = mbx->ops.read(hw, msg, size, mbx_id);
-
- return ret_val;
-}
-
-/**
- * e1000_write_mbx - Write a message to the mailbox
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- * @mbx_id: id of mailbox to write
- *
- * returns SUCCESS if it successfully copied message into the buffer
- **/
-s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- s32 ret_val = E1000_SUCCESS;
-
- DEBUGFUNC("e1000_write_mbx");
-
- if (size > mbx->size)
- ret_val = -E1000_ERR_MBX;
-
- else if (mbx->ops.write)
- ret_val = mbx->ops.write(hw, msg, size, mbx_id);
-
- return ret_val;
-}
-
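
A minimal usage sketch of the two entry points above; the VF id and one-dword message are illustrative assumptions:

/* Sketch: post a control message to VF 0 and read back a reply through
 * the ops table; error handling is abbreviated. */
static s32 example_mbx_roundtrip(struct e1000_hw *hw)
{
	u32 msg[1] = { E1000_PF_CONTROL_MSG };
	s32 ret = e1000_write_mbx(hw, msg, 1, 0);

	if (!ret)
		ret = e1000_read_mbx(hw, msg, 1, 0);
	return ret;
}
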
-/**
- * e1000_check_for_msg - checks to see if someone sent us mail
- * @hw: pointer to the HW structure
- * @mbx_id: id of mailbox to check
- *
- * returns SUCCESS if the Status bit was found or else ERR_MBX
- **/
-s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -E1000_ERR_MBX;
-
- DEBUGFUNC("e1000_check_for_msg");
-
- if (mbx->ops.check_for_msg)
- ret_val = mbx->ops.check_for_msg(hw, mbx_id);
-
- return ret_val;
-}
-
-/**
- * e1000_check_for_ack - checks to see if someone sent us ACK
- * @hw: pointer to the HW structure
- * @mbx_id: id of mailbox to check
- *
- * returns SUCCESS if the Status bit was found or else ERR_MBX
- **/
-s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -E1000_ERR_MBX;
-
- DEBUGFUNC("e1000_check_for_ack");
-
- if (mbx->ops.check_for_ack)
- ret_val = mbx->ops.check_for_ack(hw, mbx_id);
-
- return ret_val;
-}
-
-/**
- * e1000_check_for_rst - checks to see if other side has reset
- * @hw: pointer to the HW structure
- * @mbx_id: id of mailbox to check
- *
- * returns SUCCESS if the Status bit was found or else ERR_MBX
- **/
-s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -E1000_ERR_MBX;
-
- DEBUGFUNC("e1000_check_for_rst");
-
- if (mbx->ops.check_for_rst)
- ret_val = mbx->ops.check_for_rst(hw, mbx_id);
-
- return ret_val;
-}
-
-/**
- * e1000_poll_for_msg - Wait for message notification
- * @hw: pointer to the HW structure
- * @mbx_id: id of mailbox to write
- *
- * returns SUCCESS if it successfully received a message notification
- **/
-static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- int countdown = mbx->timeout;
-
- DEBUGFUNC("e1000_poll_for_msg");
-
- if (!countdown || !mbx->ops.check_for_msg)
- goto out;
-
- while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
- countdown--;
- if (!countdown)
- break;
- usec_delay(mbx->usec_delay);
- }
-
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
-out:
- return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
-}
-
-/**
- * e1000_poll_for_ack - Wait for message acknowledgement
- * @hw: pointer to the HW structure
- * @mbx_id: id of mailbox to write
- *
- * returns SUCCESS if it successfully received a message acknowledgement
- **/
-static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- int countdown = mbx->timeout;
-
- DEBUGFUNC("e1000_poll_for_ack");
-
- if (!countdown || !mbx->ops.check_for_ack)
- goto out;
-
- while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
- countdown--;
- if (!countdown)
- break;
- usec_delay(mbx->usec_delay);
- }
-
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
-out:
- return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
-}
-
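
Both pollers above share one countdown pattern; a generic sketch under the same semantics (the helper name is hypothetical):

/* Sketch: call 'check' until it succeeds (returns 0) or 'countdown'
 * iterations elapse; on timeout, zeroing mbx->timeout makes all future
 * posted messages fail until reset, as in the pollers above. */
static s32 poll_generic(struct e1000_hw *hw, u16 mbx_id,
			s32 (*check)(struct e1000_hw *, u16))
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	int countdown = mbx->timeout;

	while (countdown && check(hw, mbx_id)) {
		countdown--;
		if (countdown)
			usec_delay(mbx->usec_delay);
	}
	if (!countdown)
		mbx->timeout = 0;
	return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
}
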
-/**
- * e1000_read_posted_mbx - Wait for message notification and receive message
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- * @mbx_id: id of mailbox to write
- *
- * returns SUCCESS if it successfully received a message notification and
- * copied it into the receive buffer.
- **/
-s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -E1000_ERR_MBX;
-
- DEBUGFUNC("e1000_read_posted_mbx");
-
- if (!mbx->ops.read)
- goto out;
-
- ret_val = e1000_poll_for_msg(hw, mbx_id);
-
- /* if ack received read message, otherwise we timed out */
- if (!ret_val)
- ret_val = mbx->ops.read(hw, msg, size, mbx_id);
-out:
- return ret_val;
-}
-
-/**
- * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- * @mbx_id: id of mailbox to write
- *
- * returns SUCCESS if it successfully copied message into the buffer and
- * received an ack to that message within delay * timeout period
- **/
-s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -E1000_ERR_MBX;
-
- DEBUGFUNC("e1000_write_posted_mbx");
-
- /* exit if either we can't write or there isn't a defined timeout */
- if (!mbx->ops.write || !mbx->timeout)
- goto out;
-
- /* send msg */
- ret_val = mbx->ops.write(hw, msg, size, mbx_id);
-
- /* if msg sent wait until we receive an ack */
- if (!ret_val)
- ret_val = e1000_poll_for_ack(hw, mbx_id);
-out:
- return ret_val;
-}
-
-/**
- * e1000_init_mbx_ops_generic - Initialize mbx function pointers
- * @hw: pointer to the HW structure
- *
- * Sets the function pointers to no-op functions
- **/
-void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- mbx->ops.init_params = e1000_null_ops_generic;
- mbx->ops.read = e1000_null_mbx_transact;
- mbx->ops.write = e1000_null_mbx_transact;
- mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag;
- mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag;
- mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag;
- mbx->ops.read_posted = e1000_read_posted_mbx;
- mbx->ops.write_posted = e1000_write_posted_mbx;
-}
-
-static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
-{
- u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR);
- s32 ret_val = -E1000_ERR_MBX;
-
- if (mbvficr & mask) {
- ret_val = E1000_SUCCESS;
- E1000_WRITE_REG(hw, E1000_MBVFICR, mask);
- }
-
- return ret_val;
-}
-
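
MBVFICR is a write-1-to-clear register; a sketch of what the helper above does for a single VF (bit layout per e1000_mbx.h, helper name hypothetical):

/* Sketch: test-and-clear VF n's request bit; writing the mask back
 * clears only the bits set in it. */
static bool vf_requested(struct e1000_hw *hw, u16 n)
{
	u32 mask = E1000_MBVFICR_VFREQ_VF1 << n;
	u32 v = E1000_READ_REG(hw, E1000_MBVFICR);

	if (v & mask)
		E1000_WRITE_REG(hw, E1000_MBVFICR, mask);
	return (v & mask) != 0;
}
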
-/**
- * e1000_check_for_msg_pf - checks to see if the VF has sent mail
- * @hw: pointer to the HW structure
- * @vf_number: the VF index
- *
- * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
- **/
-static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
-{
- s32 ret_val = -E1000_ERR_MBX;
-
- DEBUGFUNC("e1000_check_for_msg_pf");
-
- if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
- ret_val = E1000_SUCCESS;
- hw->mbx.stats.reqs++;
- }
-
- return ret_val;
-}
-
-/**
- * e1000_check_for_ack_pf - checks to see if the VF has ACKed
- * @hw: pointer to the HW structure
- * @vf_number: the VF index
- *
- * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
- **/
-static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
-{
- s32 ret_val = -E1000_ERR_MBX;
-
- DEBUGFUNC("e1000_check_for_ack_pf");
-
- if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
- ret_val = E1000_SUCCESS;
- hw->mbx.stats.acks++;
- }
-
- return ret_val;
-}
-
-/**
- * e1000_check_for_rst_pf - checks to see if the VF has reset
- * @hw: pointer to the HW structure
- * @vf_number: the VF index
- *
- * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
- **/
-static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
-{
- u32 vflre = E1000_READ_REG(hw, E1000_VFLRE);
- s32 ret_val = -E1000_ERR_MBX;
-
- DEBUGFUNC("e1000_check_for_rst_pf");
-
- if (vflre & (1 << vf_number)) {
- ret_val = E1000_SUCCESS;
- E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number));
- hw->mbx.stats.rsts++;
- }
-
- return ret_val;
-}
-
-/**
- * e1000_obtain_mbx_lock_pf - obtain mailbox lock
- * @hw: pointer to the HW structure
- * @vf_number: the VF index
- *
- * return SUCCESS if we obtained the mailbox lock
- **/
-static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
-{
- s32 ret_val = -E1000_ERR_MBX;
- u32 p2v_mailbox;
-
- DEBUGFUNC("e1000_obtain_mbx_lock_pf");
-
- /* Take ownership of the buffer */
- E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
-
- /* reserve mailbox for vf use */
- p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
- if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
- ret_val = E1000_SUCCESS;
-
- return ret_val;
-}
-
-/**
- * e1000_write_mbx_pf - Places a message in the mailbox
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- * @vf_number: the VF index
- *
- * returns SUCCESS if it successfully copied message into the buffer
- **/
-static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
-{
- s32 ret_val;
- u16 i;
-
- DEBUGFUNC("e1000_write_mbx_pf");
-
- /* lock the mailbox to prevent pf/vf race condition */
- ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
- if (ret_val)
- goto out_no_write;
-
- /* flush msg and acks as we are overwriting the message buffer */
- e1000_check_for_msg_pf(hw, vf_number);
- e1000_check_for_ack_pf(hw, vf_number);
-
- /* copy the caller specified message to the mailbox memory buffer */
- for (i = 0; i < size; i++)
- E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]);
-
-	/* Interrupt VF to tell it a message has been sent and release buffer */
- E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
-
- /* update stats */
- hw->mbx.stats.msgs_tx++;
-
-out_no_write:
- return ret_val;
-
-}
-
-/**
- * e1000_read_mbx_pf - Read a message from the mailbox
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- * @vf_number: the VF index
- *
- * This function copies a message from the mailbox buffer to the caller's
- * memory buffer. The presumption is that the caller knows that there was
- * a message due to a VF request so no polling for message is needed.
- **/
-static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
-{
- s32 ret_val;
- u16 i;
-
- DEBUGFUNC("e1000_read_mbx_pf");
-
- /* lock the mailbox to prevent pf/vf race condition */
- ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
- if (ret_val)
- goto out_no_read;
-
- /* copy the message to the mailbox memory buffer */
- for (i = 0; i < size; i++)
- msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i);
-
- /* Acknowledge the message and release buffer */
- E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
-
- /* update stats */
- hw->mbx.stats.msgs_rx++;
-
-out_no_read:
- return ret_val;
-}
-
-/**
- * e1000_init_mbx_params_pf - set initial values for pf mailbox
- * @hw: pointer to the HW structure
- *
- * Initializes the hw->mbx struct to correct values for pf mailbox
- */
-s32 e1000_init_mbx_params_pf(struct e1000_hw *hw)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
-
- switch (hw->mac.type) {
- case e1000_82576:
- case e1000_i350:
- case e1000_i354:
- mbx->timeout = 0;
- mbx->usec_delay = 0;
-
- mbx->size = E1000_VFMAILBOX_SIZE;
-
- mbx->ops.read = e1000_read_mbx_pf;
- mbx->ops.write = e1000_write_mbx_pf;
- mbx->ops.read_posted = e1000_read_posted_mbx;
- mbx->ops.write_posted = e1000_write_posted_mbx;
- mbx->ops.check_for_msg = e1000_check_for_msg_pf;
- mbx->ops.check_for_ack = e1000_check_for_ack_pf;
- mbx->ops.check_for_rst = e1000_check_for_rst_pf;
-
- mbx->stats.msgs_tx = 0;
- mbx->stats.msgs_rx = 0;
- mbx->stats.reqs = 0;
- mbx->stats.acks = 0;
- mbx->stats.rsts = 0;
-		/* fall through */
-	default:
- return E1000_SUCCESS;
- }
-}
-
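
A hedged usage sketch tying the PF mailbox pieces together; the VF number and the borrowed timeout constants are illustrative assumptions:

/* Sketch: init PF mailbox ops, then post a message to VF 2. init
 * leaves timeout at 0, which fails posted writes immediately, so a
 * caller would set a timeout first (constants from e1000_mbx.h). */
static s32 example_pf_mbx_send(struct e1000_hw *hw)
{
	u32 msg[1] = { E1000_PF_CONTROL_MSG };
	s32 ret = e1000_init_mbx_params_pf(hw);

	if (ret)
		return ret;
	hw->mbx.timeout = E1000_VF_MBX_INIT_TIMEOUT;
	hw->mbx.usec_delay = E1000_VF_MBX_INIT_DELAY;
	return hw->mbx.ops.write_posted(hw, msg, 1, 2);
}
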
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.h
deleted file mode 100755
index bbf838c8..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _E1000_MBX_H_
-#define _E1000_MBX_H_
-
-#include "e1000_api.h"
-
-#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
-#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
-#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
-#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
-#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
-
-#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
-#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
-#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
-#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
-
-#define E1000_VFMAILBOX_SIZE 16 /* 16 32-bit words - 64 bytes */
-
-/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the
- * PF. The reverse is true if it is E1000_PF_*.
- * Message ACKs are the value or'd with 0xF0000000
- */
-/* Msgs below or'd with this are the ACK */
-#define E1000_VT_MSGTYPE_ACK 0x80000000
-/* Msgs below or'd with this are the NACK */
-#define E1000_VT_MSGTYPE_NACK 0x40000000
-/* Indicates that VF is still clear to send requests */
-#define E1000_VT_MSGTYPE_CTS 0x20000000
-#define E1000_VT_MSGINFO_SHIFT 16
-/* bits 23:16 are used for extra info for certain messages */
-#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
-
-#define E1000_VF_RESET 0x01 /* VF requests reset */
-#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
-#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
-#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT)
-#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT)
-#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
-#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT)
-#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */
-#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/
-#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT)
-#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
-
-#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
-
-#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
-#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
-
-s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16);
-s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16);
-s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
-s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
-s32 e1000_check_for_msg(struct e1000_hw *, u16);
-s32 e1000_check_for_ack(struct e1000_hw *, u16);
-s32 e1000_check_for_rst(struct e1000_hw *, u16);
-void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
-s32 e1000_init_mbx_params_pf(struct e1000_hw *);
-
-#endif /* _E1000_MBX_H_ */
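
A small sketch of how the message-type bits above compose; the helper is hypothetical:

/* Sketch: the PF ACKs a VF reset request by or'ing the type bits in;
 * extra per-message info travels in bits 23:16 (E1000_VT_MSGINFO_MASK). */
static u32 make_vf_reset_ack(void)
{
	return E1000_VF_RESET | E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_CTS;
}
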
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c
deleted file mode 100755
index ff421986..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c
+++ /dev/null
@@ -1,967 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "e1000_api.h"
-
-static void e1000_reload_nvm_generic(struct e1000_hw *hw);
-
-/**
- * e1000_init_nvm_ops_generic - Initialize NVM function pointers
- * @hw: pointer to the HW structure
- *
- * Sets up the function pointers to no-op functions
- **/
-void e1000_init_nvm_ops_generic(struct e1000_hw *hw)
-{
- struct e1000_nvm_info *nvm = &hw->nvm;
- DEBUGFUNC("e1000_init_nvm_ops_generic");
-
- /* Initialize function pointers */
- nvm->ops.init_params = e1000_null_ops_generic;
- nvm->ops.acquire = e1000_null_ops_generic;
- nvm->ops.read = e1000_null_read_nvm;
- nvm->ops.release = e1000_null_nvm_generic;
- nvm->ops.reload = e1000_reload_nvm_generic;
- nvm->ops.update = e1000_null_ops_generic;
- nvm->ops.valid_led_default = e1000_null_led_default;
- nvm->ops.validate = e1000_null_ops_generic;
- nvm->ops.write = e1000_null_write_nvm;
-}
-
-/**
- * e1000_null_read_nvm - No-op function, return 0
- * @hw: pointer to the HW structure
- **/
-s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
- u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
- u16 E1000_UNUSEDARG *c)
-{
- DEBUGFUNC("e1000_null_read_nvm");
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_null_nvm_generic - No-op function, return void
- * @hw: pointer to the HW structure
- **/
-void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw)
-{
- DEBUGFUNC("e1000_null_nvm_generic");
- return;
-}
-
-/**
- * e1000_null_led_default - No-op function, return 0
- * @hw: pointer to the HW structure
- **/
-s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw,
- u16 E1000_UNUSEDARG *data)
-{
- DEBUGFUNC("e1000_null_led_default");
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_null_write_nvm - No-op function, return 0
- * @hw: pointer to the HW structure
- **/
-s32 e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
- u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
- u16 E1000_UNUSEDARG *c)
-{
- DEBUGFUNC("e1000_null_write_nvm");
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_raise_eec_clk - Raise EEPROM clock
- * @hw: pointer to the HW structure
- * @eecd: pointer to the EECD register value
- *
- * Enable/Raise the EEPROM clock bit.
- **/
-static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
-{
- *eecd = *eecd | E1000_EECD_SK;
- E1000_WRITE_REG(hw, E1000_EECD, *eecd);
- E1000_WRITE_FLUSH(hw);
- usec_delay(hw->nvm.delay_usec);
-}
-
-/**
- * e1000_lower_eec_clk - Lower EEPROM clock
- * @hw: pointer to the HW structure
- * @eecd: pointer to the EECD register value
- *
- * Clear/Lower the EEPROM clock bit.
- **/
-static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
-{
- *eecd = *eecd & ~E1000_EECD_SK;
- E1000_WRITE_REG(hw, E1000_EECD, *eecd);
- E1000_WRITE_FLUSH(hw);
- usec_delay(hw->nvm.delay_usec);
-}
-
-/**
- * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
- * @hw: pointer to the HW structure
- * @data: data to send to the EEPROM
- * @count: number of bits to shift out
- *
- * We need to shift 'count' bits out to the EEPROM. So, the value in the
- * "data" parameter will be shifted out to the EEPROM one bit at a time.
- * In order to do this, "data" must be broken down into bits.
- **/
-static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
-{
- struct e1000_nvm_info *nvm = &hw->nvm;
- u32 eecd = E1000_READ_REG(hw, E1000_EECD);
- u32 mask;
-
- DEBUGFUNC("e1000_shift_out_eec_bits");
-
- mask = 0x01 << (count - 1);
- if (nvm->type == e1000_nvm_eeprom_spi)
- eecd |= E1000_EECD_DO;
-
- do {
- eecd &= ~E1000_EECD_DI;
-
- if (data & mask)
- eecd |= E1000_EECD_DI;
-
- E1000_WRITE_REG(hw, E1000_EECD, eecd);
- E1000_WRITE_FLUSH(hw);
-
- usec_delay(nvm->delay_usec);
-
- e1000_raise_eec_clk(hw, &eecd);
- e1000_lower_eec_clk(hw, &eecd);
-
- mask >>= 1;
- } while (mask);
-
- eecd &= ~E1000_EECD_DI;
- E1000_WRITE_REG(hw, E1000_EECD, eecd);
-}
-
-/**
- * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
- * @hw: pointer to the HW structure
- * @count: number of bits to shift in
- *
- * In order to read a register from the EEPROM, we need to shift 'count' bits
- * in from the EEPROM. Bits are "shifted in" by raising the clock input to
- * the EEPROM (setting the SK bit), and then reading the value of the data out
- * "DO" bit. During this "shifting in" process the data in "DI" bit should
- * always be clear.
- **/
-static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
-{
- u32 eecd;
- u32 i;
- u16 data;
-
- DEBUGFUNC("e1000_shift_in_eec_bits");
-
- eecd = E1000_READ_REG(hw, E1000_EECD);
-
- eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
- data = 0;
-
- for (i = 0; i < count; i++) {
- data <<= 1;
- e1000_raise_eec_clk(hw, &eecd);
-
- eecd = E1000_READ_REG(hw, E1000_EECD);
-
- eecd &= ~E1000_EECD_DI;
- if (eecd & E1000_EECD_DO)
- data |= 1;
-
- e1000_lower_eec_clk(hw, &eecd);
- }
-
- return data;
-}
-
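
The two shifters above implement a bit-banged SPI transaction; a sketch of a status-register read built from them, mirroring what e1000_ready_nvm_eeprom does below (helper name hypothetical):

/* Sketch: shift out the RDSR opcode, then shift in the 8 status bits. */
static u8 example_spi_rdsr(struct e1000_hw *hw)
{
	e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
				 hw->nvm.opcode_bits);
	return (u8)e1000_shift_in_eec_bits(hw, 8);
}
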
-/**
- * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
- * @hw: pointer to the HW structure
- * @ee_reg: EEPROM flag for polling
- *
- * Polls the EEPROM status bit for either read or write completion based
- * upon the value of 'ee_reg'.
- **/
-s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
-{
- u32 attempts = 100000;
- u32 i, reg = 0;
-
- DEBUGFUNC("e1000_poll_eerd_eewr_done");
-
- for (i = 0; i < attempts; i++) {
- if (ee_reg == E1000_NVM_POLL_READ)
- reg = E1000_READ_REG(hw, E1000_EERD);
- else
- reg = E1000_READ_REG(hw, E1000_EEWR);
-
- if (reg & E1000_NVM_RW_REG_DONE)
- return E1000_SUCCESS;
-
- usec_delay(5);
- }
-
- return -E1000_ERR_NVM;
-}
-
-/**
- * e1000_acquire_nvm_generic - Generic request for access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Set the EEPROM access request bit and wait for EEPROM access grant bit.
- * Return successful if access grant bit set, else clear the request for
- * EEPROM access and return -E1000_ERR_NVM (-1).
- **/
-s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
-{
- u32 eecd = E1000_READ_REG(hw, E1000_EECD);
- s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
-
- DEBUGFUNC("e1000_acquire_nvm_generic");
-
- E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
- eecd = E1000_READ_REG(hw, E1000_EECD);
-
- while (timeout) {
- if (eecd & E1000_EECD_GNT)
- break;
- usec_delay(5);
- eecd = E1000_READ_REG(hw, E1000_EECD);
- timeout--;
- }
-
- if (!timeout) {
- eecd &= ~E1000_EECD_REQ;
- E1000_WRITE_REG(hw, E1000_EECD, eecd);
- DEBUGOUT("Could not acquire NVM grant\n");
- return -E1000_ERR_NVM;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_standby_nvm - Return EEPROM to standby state
- * @hw: pointer to the HW structure
- *
- * Return the EEPROM to a standby state.
- **/
-static void e1000_standby_nvm(struct e1000_hw *hw)
-{
- struct e1000_nvm_info *nvm = &hw->nvm;
- u32 eecd = E1000_READ_REG(hw, E1000_EECD);
-
- DEBUGFUNC("e1000_standby_nvm");
-
- if (nvm->type == e1000_nvm_eeprom_spi) {
- /* Toggle CS to flush commands */
- eecd |= E1000_EECD_CS;
- E1000_WRITE_REG(hw, E1000_EECD, eecd);
- E1000_WRITE_FLUSH(hw);
- usec_delay(nvm->delay_usec);
- eecd &= ~E1000_EECD_CS;
- E1000_WRITE_REG(hw, E1000_EECD, eecd);
- E1000_WRITE_FLUSH(hw);
- usec_delay(nvm->delay_usec);
- }
-}
-
-/**
- * e1000_stop_nvm - Terminate EEPROM command
- * @hw: pointer to the HW structure
- *
- * Terminates the current command by inverting the EEPROM's chip select pin.
- **/
-static void e1000_stop_nvm(struct e1000_hw *hw)
-{
- u32 eecd;
-
- DEBUGFUNC("e1000_stop_nvm");
-
- eecd = E1000_READ_REG(hw, E1000_EECD);
- if (hw->nvm.type == e1000_nvm_eeprom_spi) {
- /* Pull CS high */
- eecd |= E1000_EECD_CS;
- e1000_lower_eec_clk(hw, &eecd);
- }
-}
-
-/**
- * e1000_release_nvm_generic - Release exclusive access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Stop any current commands to the EEPROM and clear the EEPROM request bit.
- **/
-void e1000_release_nvm_generic(struct e1000_hw *hw)
-{
- u32 eecd;
-
- DEBUGFUNC("e1000_release_nvm_generic");
-
- e1000_stop_nvm(hw);
-
- eecd = E1000_READ_REG(hw, E1000_EECD);
- eecd &= ~E1000_EECD_REQ;
- E1000_WRITE_REG(hw, E1000_EECD, eecd);
-}
-
-/**
- * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
- * @hw: pointer to the HW structure
- *
- * Sets up the EEPROM for reading and writing.
- **/
-static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
-{
- struct e1000_nvm_info *nvm = &hw->nvm;
- u32 eecd = E1000_READ_REG(hw, E1000_EECD);
- u8 spi_stat_reg;
-
- DEBUGFUNC("e1000_ready_nvm_eeprom");
-
- if (nvm->type == e1000_nvm_eeprom_spi) {
- u16 timeout = NVM_MAX_RETRY_SPI;
-
- /* Clear SK and CS */
- eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
- E1000_WRITE_REG(hw, E1000_EECD, eecd);
- E1000_WRITE_FLUSH(hw);
- usec_delay(1);
-
- /* Read "Status Register" repeatedly until the LSB is cleared.
- * The EEPROM will signal that the command has been completed
- * by clearing bit 0 of the internal status register. If it's
- * not cleared within 'timeout', then error out.
- */
- while (timeout) {
- e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
- hw->nvm.opcode_bits);
- spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
- if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
- break;
-
- usec_delay(5);
- e1000_standby_nvm(hw);
- timeout--;
- }
-
- if (!timeout) {
- DEBUGOUT("SPI NVM Status error\n");
- return -E1000_ERR_NVM;
- }
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_read_nvm_spi - Read EEPROM using SPI
- * @hw: pointer to the HW structure
- * @offset: offset of word in the EEPROM to read
- * @words: number of words to read
- * @data: word read from the EEPROM
- *
- * Reads a 16 bit word from the EEPROM.
- **/
-s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
- struct e1000_nvm_info *nvm = &hw->nvm;
- u32 i = 0;
- s32 ret_val;
- u16 word_in;
- u8 read_opcode = NVM_READ_OPCODE_SPI;
-
- DEBUGFUNC("e1000_read_nvm_spi");
-
- /* A check for invalid values: offset too large, too many words,
- * and not enough words.
- */
- if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
- (words == 0)) {
- DEBUGOUT("nvm parameter(s) out of bounds\n");
- return -E1000_ERR_NVM;
- }
-
- ret_val = nvm->ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_ready_nvm_eeprom(hw);
- if (ret_val)
- goto release;
-
- e1000_standby_nvm(hw);
-
- if ((nvm->address_bits == 8) && (offset >= 128))
- read_opcode |= NVM_A8_OPCODE_SPI;
-
- /* Send the READ command (opcode + addr) */
- e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
- e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
-
- /* Read the data. SPI NVMs increment the address with each byte
-	/* Read the data. SPI NVMs increment the address with each byte
-	 * read and will roll over if reading beyond the end. This allows
-	 * us to read the whole NVM from any offset.
- for (i = 0; i < words; i++) {
- word_in = e1000_shift_in_eec_bits(hw, 16);
- data[i] = (word_in >> 8) | (word_in << 8);
- }
-
-release:
- nvm->ops.release(hw);
-
- return ret_val;
-}
-
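
One detail worth isolating from the read loop above: the EEPROM shifts the high byte out first, so each word arrives byte-swapped relative to host order (helper name hypothetical):

/* Sketch: swap the two bytes of a word read over SPI, exactly as the
 * loop above does with (word_in >> 8) | (word_in << 8). */
static u16 nvm_word_fixup(u16 word_in)
{
	return (word_in >> 8) | (word_in << 8);
}
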
-/**
- * e1000_read_nvm_eerd - Reads EEPROM using EERD register
- * @hw: pointer to the HW structure
- * @offset: offset of word in the EEPROM to read
- * @words: number of words to read
- * @data: word read from the EEPROM
- *
- * Reads a 16 bit word from the EEPROM using the EERD register.
- **/
-s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
- struct e1000_nvm_info *nvm = &hw->nvm;
- u32 i, eerd = 0;
- s32 ret_val = E1000_SUCCESS;
-
- DEBUGFUNC("e1000_read_nvm_eerd");
-
-	/* A check for invalid values: offset too large, too many words
-	 * for the offset, and not enough words.
-	 */
- if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
- (words == 0)) {
- DEBUGOUT("nvm parameter(s) out of bounds\n");
- return -E1000_ERR_NVM;
- }
-
- for (i = 0; i < words; i++) {
- eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
- E1000_NVM_RW_REG_START;
-
- E1000_WRITE_REG(hw, E1000_EERD, eerd);
- ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
- if (ret_val)
- break;
-
- data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
- E1000_NVM_RW_REG_DATA);
- }
-
- return ret_val;
-}
-
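
A sketch of the EERD command word composed in the loop above (field positions per the E1000_NVM_RW_* defines; helper name hypothetical):

/* Sketch: the word address sits above E1000_NVM_RW_ADDR_SHIFT and the
 * START bit kicks off the read; DONE and the data appear on readback. */
static u32 make_eerd_cmd(u16 word_offset)
{
	return ((u32)word_offset << E1000_NVM_RW_ADDR_SHIFT) +
	       E1000_NVM_RW_REG_START;
}
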
-/**
- * e1000_write_nvm_spi - Write to EEPROM using SPI
- * @hw: pointer to the HW structure
- * @offset: offset within the EEPROM to be written to
- * @words: number of words to write
- * @data: 16 bit word(s) to be written to the EEPROM
- *
- * Writes data to EEPROM at offset using SPI interface.
- *
- * If e1000_update_nvm_checksum is not called after this function, the
- * EEPROM will most likely contain an invalid checksum.
- **/
-s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
- struct e1000_nvm_info *nvm = &hw->nvm;
- s32 ret_val = -E1000_ERR_NVM;
- u16 widx = 0;
-
- DEBUGFUNC("e1000_write_nvm_spi");
-
- /* A check for invalid values: offset too large, too many words,
- * and not enough words.
- */
- if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
- (words == 0)) {
- DEBUGOUT("nvm parameter(s) out of bounds\n");
- return -E1000_ERR_NVM;
- }
-
- while (widx < words) {
- u8 write_opcode = NVM_WRITE_OPCODE_SPI;
-
- ret_val = nvm->ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_ready_nvm_eeprom(hw);
- if (ret_val) {
- nvm->ops.release(hw);
- return ret_val;
- }
-
- e1000_standby_nvm(hw);
-
- /* Send the WRITE ENABLE command (8 bit opcode) */
- e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
- nvm->opcode_bits);
-
- e1000_standby_nvm(hw);
-
- /* Some SPI eeproms use the 8th address bit embedded in the
- * opcode
- */
- if ((nvm->address_bits == 8) && (offset >= 128))
- write_opcode |= NVM_A8_OPCODE_SPI;
-
- /* Send the Write command (8-bit opcode + addr) */
- e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
- e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
- nvm->address_bits);
-
- /* Loop to allow for up to whole page write of eeprom */
- while (widx < words) {
- u16 word_out = data[widx];
- word_out = (word_out >> 8) | (word_out << 8);
- e1000_shift_out_eec_bits(hw, word_out, 16);
- widx++;
-
- if ((((offset + widx) * 2) % nvm->page_size) == 0) {
- e1000_standby_nvm(hw);
- break;
- }
- }
- msec_delay(10);
- nvm->ops.release(hw);
- }
-
- return ret_val;
-}
-
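
The inner loop above stops at SPI page boundaries; a sketch of that boundary test (page_size in bytes, as in struct e1000_nvm_info; helper name hypothetical):

/* Sketch: true when the next byte address after word index 'widx' at
 * word offset 'offset' is page-aligned, i.e. a page has just filled. */
static bool at_page_boundary(u16 offset, u16 widx, u16 page_size)
{
	return (((offset + widx) * 2) % page_size) == 0;
}
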
-/**
- * e1000_read_pba_string_generic - Read device part number
- * @hw: pointer to the HW structure
- * @pba_num: pointer to device part number
- * @pba_num_size: size of part number buffer
- *
- * Reads the product board assembly (PBA) number from the EEPROM and stores
- * the value in pba_num.
- **/
-s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
- u32 pba_num_size)
-{
- s32 ret_val;
- u16 nvm_data;
- u16 pba_ptr;
- u16 offset;
- u16 length;
-
- DEBUGFUNC("e1000_read_pba_string_generic");
-
- if (pba_num == NULL) {
- DEBUGOUT("PBA string buffer was null\n");
- return -E1000_ERR_INVALID_ARGUMENT;
- }
-
- ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
-	/* if nvm_data is not the ptr guard, the PBA must be in legacy format,
-	 * which means pba_ptr is actually our second data word for the PBA
-	 * number and we can decode it into an ASCII string
-	 */
- if (nvm_data != NVM_PBA_PTR_GUARD) {
- DEBUGOUT("NVM PBA number is not stored as string\n");
-
-		/* make sure the caller's buffer is big enough to store the PBA */
- if (pba_num_size < E1000_PBANUM_LENGTH) {
- DEBUGOUT("PBA string buffer too small\n");
-			return -E1000_ERR_NO_SPACE;
- }
-
- /* extract hex string from data and pba_ptr */
- pba_num[0] = (nvm_data >> 12) & 0xF;
- pba_num[1] = (nvm_data >> 8) & 0xF;
- pba_num[2] = (nvm_data >> 4) & 0xF;
- pba_num[3] = nvm_data & 0xF;
- pba_num[4] = (pba_ptr >> 12) & 0xF;
- pba_num[5] = (pba_ptr >> 8) & 0xF;
- pba_num[6] = '-';
- pba_num[7] = 0;
- pba_num[8] = (pba_ptr >> 4) & 0xF;
- pba_num[9] = pba_ptr & 0xF;
-
- /* put a null character on the end of our string */
- pba_num[10] = '\0';
-
- /* switch all the data but the '-' to hex char */
- for (offset = 0; offset < 10; offset++) {
- if (pba_num[offset] < 0xA)
- pba_num[offset] += '0';
- else if (pba_num[offset] < 0x10)
- pba_num[offset] += 'A' - 0xA;
- }
-
- return E1000_SUCCESS;
- }
-
- ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- if (length == 0xFFFF || length == 0) {
- DEBUGOUT("NVM PBA number section invalid length\n");
- return -E1000_ERR_NVM_PBA_SECTION;
- }
- /* check if pba_num buffer is big enough */
- if (pba_num_size < (((u32)length * 2) - 1)) {
- DEBUGOUT("PBA string buffer too small\n");
- return -E1000_ERR_NO_SPACE;
- }
-
- /* trim pba length from start of string */
- pba_ptr++;
- length--;
-
- for (offset = 0; offset < length; offset++) {
- ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
- pba_num[offset * 2] = (u8)(nvm_data >> 8);
- pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
- }
- pba_num[offset * 2] = '\0';
-
- return E1000_SUCCESS;
-}
-
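
The legacy-format branch above converts raw nibbles to ASCII hex digits in place; the same conversion as a standalone sketch (helper name hypothetical):

/* Sketch: map a 4-bit value to its ASCII hex character. */
static u8 nibble_to_ascii(u8 n)
{
	return (n < 0xA) ? (n + '0') : (n + 'A' - 0xA);
}
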
-/**
- * e1000_read_pba_length_generic - Read device part number length
- * @hw: pointer to the HW structure
- * @pba_num_size: size of part number buffer
- *
- * Reads the product board assembly (PBA) number length from the EEPROM and
- * stores the value in pba_num_size.
- **/
-s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size)
-{
- s32 ret_val;
- u16 nvm_data;
- u16 pba_ptr;
- u16 length;
-
- DEBUGFUNC("e1000_read_pba_length_generic");
-
- if (pba_num_size == NULL) {
- DEBUGOUT("PBA buffer size was null\n");
- return -E1000_ERR_INVALID_ARGUMENT;
- }
-
- ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- /* if data is not ptr guard the PBA must be in legacy format */
- if (nvm_data != NVM_PBA_PTR_GUARD) {
- *pba_num_size = E1000_PBANUM_LENGTH;
- return E1000_SUCCESS;
- }
-
- ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- if (length == 0xFFFF || length == 0) {
- DEBUGOUT("NVM PBA number section invalid length\n");
- return -E1000_ERR_NVM_PBA_SECTION;
- }
-
- /* Convert from length in u16 values to u8 chars, add 1 for NULL,
- * and subtract 2 because length field is included in length.
- */
- *pba_num_size = ((u32)length * 2) - 1;
-
- return E1000_SUCCESS;
-}
-
-
-
-
-
-/**
- * e1000_read_mac_addr_generic - Read device MAC address
- * @hw: pointer to the HW structure
- *
- * Reads the device MAC address from the EEPROM and stores the value.
- * Since devices with two ports use the same EEPROM, we increment the
- * last bit in the MAC address for the second port.
- **/
-s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
-{
- u32 rar_high;
- u32 rar_low;
- u16 i;
-
- rar_high = E1000_READ_REG(hw, E1000_RAH(0));
- rar_low = E1000_READ_REG(hw, E1000_RAL(0));
-
- for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
- hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
-
- for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
- hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
-
- for (i = 0; i < ETH_ADDR_LEN; i++)
- hw->mac.addr[i] = hw->mac.perm_addr[i];
-
- return E1000_SUCCESS;
-}
-
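
A sketch of the RAL/RAH unpacking above: RAL carries the first four MAC bytes and RAH the last two, least-significant byte first (helper name hypothetical):

/* Sketch: rebuild the 6-byte MAC from the receive address registers. */
static void unpack_mac(u32 rar_low, u32 rar_high, u8 mac[6])
{
	int i;

	for (i = 0; i < 4; i++)
		mac[i] = (u8)(rar_low >> (i * 8));
	for (i = 0; i < 2; i++)
		mac[i + 4] = (u8)(rar_high >> (i * 8));
}
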
-/**
- * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
- * @hw: pointer to the HW structure
- *
- * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
- * and then verifies that the sum of the EEPROM is equal to 0xBABA.
- **/
-s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
-{
- s32 ret_val;
- u16 checksum = 0;
- u16 i, nvm_data;
-
- DEBUGFUNC("e1000_validate_nvm_checksum_generic");
-
- for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
- ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
- checksum += nvm_data;
- }
-
- if (checksum != (u16) NVM_SUM) {
- DEBUGOUT("NVM Checksum Invalid\n");
- return -E1000_ERR_NVM;
- }
-
- return E1000_SUCCESS;
-}
-
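
The invariant behind validate/update above in one helper: the stored checksum word is chosen so that all words up to and including NVM_CHECKSUM_REG sum to NVM_SUM (helper name hypothetical):

/* Sketch: compute the checksum word from the preceding NVM words. */
static u16 compute_checksum_word(const u16 *words)
{
	u16 sum = 0, i;

	for (i = 0; i < NVM_CHECKSUM_REG; i++)
		sum += words[i];
	return (u16)NVM_SUM - sum;
}
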
-/**
- * e1000_update_nvm_checksum_generic - Update EEPROM checksum
- * @hw: pointer to the HW structure
- *
- * Updates the EEPROM checksum by reading/adding each word of the EEPROM
- * up to the checksum. Then calculates the EEPROM checksum and writes the
- * value to the EEPROM.
- **/
-s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
-{
- s32 ret_val;
- u16 checksum = 0;
- u16 i, nvm_data;
-
- DEBUGFUNC("e1000_update_nvm_checksum");
-
- for (i = 0; i < NVM_CHECKSUM_REG; i++) {
- ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error while updating checksum.\n");
- return ret_val;
- }
- checksum += nvm_data;
- }
- checksum = (u16) NVM_SUM - checksum;
- ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
- if (ret_val)
- DEBUGOUT("NVM Write Error while updating checksum.\n");
-
- return ret_val;
-}
-
-/**
- * e1000_reload_nvm_generic - Reloads EEPROM
- * @hw: pointer to the HW structure
- *
- * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
- * extended control register.
- **/
-static void e1000_reload_nvm_generic(struct e1000_hw *hw)
-{
- u32 ctrl_ext;
-
- DEBUGFUNC("e1000_reload_nvm_generic");
-
- usec_delay(10);
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- ctrl_ext |= E1000_CTRL_EXT_EE_RST;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
- E1000_WRITE_FLUSH(hw);
-}
-
-/**
- * e1000_get_fw_version - Get firmware version information
- * @hw: pointer to the HW structure
- * @fw_vers: pointer to output version structure
- *
- * Unsupported or absent features report 0 in the version structure.
- **/
-void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
-{
- u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
- u8 q, hval, rem, result;
- u16 comb_verh, comb_verl, comb_offset;
-
- memset(fw_vers, 0, sizeof(struct e1000_fw_version));
-
-	/* Basic EEPROM version numbers; which bits are used varies by part
-	 * and by the tool used to create the NVM images.
-	 * Check which data format we have. */
- hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
- switch (hw->mac.type) {
- case e1000_i211:
- e1000_read_invm_version(hw, fw_vers);
- return;
- case e1000_82575:
- case e1000_82576:
- case e1000_82580:
- /* Use this format, unless EETRACK ID exists,
- * then use alternate format
- */
- if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
- hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
- fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
- >> NVM_MAJOR_SHIFT;
- fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
- >> NVM_MINOR_SHIFT;
- fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
- goto etrack_id;
- }
- break;
- case e1000_i210:
- if (!(e1000_get_flash_presence_i210(hw))) {
- e1000_read_invm_version(hw, fw_vers);
- return;
- }
- /* fall through */
- case e1000_i350:
- case e1000_i354:
- /* find combo image version */
- hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
- if ((comb_offset != 0x0) &&
- (comb_offset != NVM_VER_INVALID)) {
-
- hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
- + 1), 1, &comb_verh);
- hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
- 1, &comb_verl);
-
- /* get Option Rom version if it exists and is valid */
- if ((comb_verh && comb_verl) &&
- ((comb_verh != NVM_VER_INVALID) &&
- (comb_verl != NVM_VER_INVALID))) {
-
- fw_vers->or_valid = true;
- fw_vers->or_major =
- comb_verl >> NVM_COMB_VER_SHFT;
- fw_vers->or_build =
- (comb_verl << NVM_COMB_VER_SHFT)
- | (comb_verh >> NVM_COMB_VER_SHFT);
- fw_vers->or_patch =
- comb_verh & NVM_COMB_VER_MASK;
- }
- }
- break;
- default:
- return;
- }
- hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
- fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
- >> NVM_MAJOR_SHIFT;
-
- /* check for old style version format in newer images*/
- if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
- eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
- } else {
- eeprom_verl = (fw_version & NVM_MINOR_MASK)
- >> NVM_MINOR_SHIFT;
- }
-	/* Convert the minor value to hex before assigning it to the output
-	 * struct. The value to be converted will not be higher than 99, per
-	 * tool output.
-	 */
- q = eeprom_verl / NVM_HEX_CONV;
- hval = q * NVM_HEX_TENS;
- rem = eeprom_verl % NVM_HEX_CONV;
- result = hval + rem;
- fw_vers->eep_minor = result;
-
-etrack_id:
- if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
- hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
- hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
- fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
- | eeprom_verl;
- }
- return;
-}
-
-
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.h
deleted file mode 100755
index fe62785a..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _E1000_NVM_H_
-#define _E1000_NVM_H_
-
-
-struct e1000_fw_version {
- u32 etrack_id;
- u16 eep_major;
- u16 eep_minor;
- u16 eep_build;
-
- u8 invm_major;
- u8 invm_minor;
- u8 invm_img_type;
-
- bool or_valid;
- u16 or_major;
- u16 or_build;
- u16 or_patch;
-};
-
-
-void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
-s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
-void e1000_null_nvm_generic(struct e1000_hw *hw);
-s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data);
-s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
-s32 e1000_acquire_nvm_generic(struct e1000_hw *hw);
-
-s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
-s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
-s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
- u32 pba_num_size);
-s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size);
-s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
-s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
-s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
-s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
-s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
-void e1000_release_nvm_generic(struct e1000_hw *hw);
-void e1000_get_fw_version(struct e1000_hw *hw,
- struct e1000_fw_version *fw_vers);
-
-#define E1000_STM_OPCODE 0xDB00
-
-#endif
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_osdep.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_osdep.h
deleted file mode 100755
index d1cf98e2..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_osdep.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-
-/* glue for the OS independent part of e1000
- * includes register access macros
- */
-
-#ifndef _E1000_OSDEP_H_
-#define _E1000_OSDEP_H_
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/sched.h>
-#include "kcompat.h"
-
-#ifndef __INTEL_COMPILER
-#pragma GCC diagnostic ignored "-Wunused-function"
-#endif
-
-#define usec_delay(x) udelay(x)
-#define usec_delay_irq(x) udelay(x)
-#ifndef msec_delay
-#define msec_delay(x) do { \
- /* Don't mdelay in interrupt context! */ \
- if (in_interrupt()) \
- BUG(); \
- else \
- msleep(x); \
-} while (0)
-
-/* Some workarounds require millisecond delays and are run during interrupt
- * context. Most notably, when establishing link, the phy may need tweaking
- * but cannot process phy register reads/writes faster than millisecond
- * intervals...and we establish link due to a "link status change" interrupt.
- */
-#define msec_delay_irq(x) mdelay(x)
-#endif
-
-#define PCI_COMMAND_REGISTER PCI_COMMAND
-#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
-#define ETH_ADDR_LEN ETH_ALEN
-
-#ifdef __BIG_ENDIAN
-#define E1000_BIG_ENDIAN __BIG_ENDIAN
-#endif
-
-
-#ifdef DEBUG
-#define DEBUGOUT(S) printk(KERN_DEBUG S)
-#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S, ## A)
-#else
-#define DEBUGOUT(S)
-#define DEBUGOUT1(S, A...)
-#endif
-
-#ifdef DEBUG_FUNC
-#define DEBUGFUNC(F) DEBUGOUT(F "\n")
-#else
-#define DEBUGFUNC(F)
-#endif
-#define DEBUGOUT2 DEBUGOUT1
-#define DEBUGOUT3 DEBUGOUT2
-#define DEBUGOUT7 DEBUGOUT3
-
-#define E1000_REGISTER(a, reg) reg
-
-#define E1000_WRITE_REG(a, reg, value) ( \
- writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg))))
-
-#define E1000_READ_REG(a, reg) (readl((a)->hw_addr + E1000_REGISTER(a, reg)))
-
-#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
- writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2))))
-
-#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
- readl((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2)))
-
-#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
-#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
-
-#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
- writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))))
-
-#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
- readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1)))
-
-#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
- writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))))
-
-#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
- readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))
-
-#define E1000_WRITE_REG_IO(a, reg, offset) do { \
- outl(reg, ((a)->io_base)); \
- outl(offset, ((a)->io_base + 4)); } while (0)
-
-#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
-
-#define E1000_WRITE_FLASH_REG(a, reg, value) ( \
- writel((value), ((a)->flash_address + reg)))
-
-#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \
- writew((value), ((a)->flash_address + reg)))
-
-#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg))
-
-#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg))
-
-#endif /* _E1000_OSDEP_H_ */
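
A hedged usage sketch of the access macros above; E1000_CTRL and E1000_CTRL_SLU are assumed from the driver's register headers, and the helper name is hypothetical:

/* Sketch: read-modify-write a register, then flush the posted write by
 * reading E1000_STATUS (what E1000_WRITE_FLUSH expands to). */
static void example_set_link_up(struct e1000_hw *hw)
{
	u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_SLU);
	E1000_WRITE_FLUSH(hw);
}
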
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c
deleted file mode 100755
index df224702..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c
+++ /dev/null
@@ -1,3405 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "e1000_api.h"
-
-static s32 e1000_wait_autoneg(struct e1000_hw *hw);
-/* Cable length tables */
-static const u16 e1000_m88_cable_length_table[] = {
- 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
-#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
- (sizeof(e1000_m88_cable_length_table) / \
- sizeof(e1000_m88_cable_length_table[0]))
-
-static const u16 e1000_igp_2_cable_length_table[] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
- 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
- 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
- 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
- 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
- 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
- 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
- 124};
-#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
- (sizeof(e1000_igp_2_cable_length_table) / \
- sizeof(e1000_igp_2_cable_length_table[0]))
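These tables translate the raw cable-length field reported by the PHY into an estimated length in meters; the driver indexes them with a small field read out of a PHY status register. A sketch of the bounds-checked lookup (the index value is a made-up example):

    #include <stdio.h>

    #define CABLE_LENGTH_UNDEFINED 0xFF

    static const unsigned short m88_table[] = {
        0, 50, 80, 110, 140, 140, CABLE_LENGTH_UNDEFINED };
    #define M88_TABLE_SIZE (sizeof(m88_table) / sizeof(m88_table[0]))

    int main(void)
    {
        unsigned short index = 3;        /* example field from the PHY */
        if (index >= M88_TABLE_SIZE)
            return 1;                    /* guard against a bad index */
        printf("cable is at least %u m\n", (unsigned)m88_table[index]);
        return 0;
    }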
-
-/**
- * e1000_init_phy_ops_generic - Initialize PHY function pointers
- * @hw: pointer to the HW structure
- *
- * Sets up the function pointers to no-op functions
- **/
-void e1000_init_phy_ops_generic(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- DEBUGFUNC("e1000_init_phy_ops_generic");
-
- /* Initialize function pointers */
- phy->ops.init_params = e1000_null_ops_generic;
- phy->ops.acquire = e1000_null_ops_generic;
- phy->ops.check_polarity = e1000_null_ops_generic;
- phy->ops.check_reset_block = e1000_null_ops_generic;
- phy->ops.commit = e1000_null_ops_generic;
- phy->ops.force_speed_duplex = e1000_null_ops_generic;
- phy->ops.get_cfg_done = e1000_null_ops_generic;
- phy->ops.get_cable_length = e1000_null_ops_generic;
- phy->ops.get_info = e1000_null_ops_generic;
- phy->ops.set_page = e1000_null_set_page;
- phy->ops.read_reg = e1000_null_read_reg;
- phy->ops.read_reg_locked = e1000_null_read_reg;
- phy->ops.read_reg_page = e1000_null_read_reg;
- phy->ops.release = e1000_null_phy_generic;
- phy->ops.reset = e1000_null_ops_generic;
- phy->ops.set_d0_lplu_state = e1000_null_lplu_state;
- phy->ops.set_d3_lplu_state = e1000_null_lplu_state;
- phy->ops.write_reg = e1000_null_write_reg;
- phy->ops.write_reg_locked = e1000_null_write_reg;
- phy->ops.write_reg_page = e1000_null_write_reg;
- phy->ops.power_up = e1000_null_phy_generic;
- phy->ops.power_down = e1000_null_phy_generic;
- phy->ops.read_i2c_byte = e1000_read_i2c_byte_null;
- phy->ops.write_i2c_byte = e1000_write_i2c_byte_null;
-}
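The point of the block above is the safe-defaults idiom: every operation is first aimed at a no-op, and family-specific init code later overrides only what it implements, so callers can invoke phy->ops unconditionally without NULL checks. A compact sketch of the pattern (names are illustrative, not the driver's):

    #include <stdio.h>

    struct ops { int (*reset)(void); int (*commit)(void); };

    static int null_op(void)    { return 0; }          /* harmless default */
    static int real_reset(void) { puts("reset"); return 0; }

    int main(void)
    {
        struct ops ops = { .reset = null_op, .commit = null_op };
        ops.reset = real_reset;          /* family-specific override */
        ops.reset();                     /* always safe to call */
        ops.commit();                    /* still the no-op default */
        return 0;
    }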
-
-/**
- * e1000_null_set_page - No-op function, return 0
- * @hw: pointer to the HW structure
- **/
-s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw,
- u16 E1000_UNUSEDARG data)
-{
- DEBUGFUNC("e1000_null_set_page");
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_null_read_reg - No-op function, return 0
- * @hw: pointer to the HW structure
- **/
-s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw,
- u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data)
-{
- DEBUGFUNC("e1000_null_read_reg");
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_null_phy_generic - No-op function, return void
- * @hw: pointer to the HW structure
- **/
-void e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw)
-{
- DEBUGFUNC("e1000_null_phy_generic");
- return;
-}
-
-/**
- * e1000_null_lplu_state - No-op function, return 0
- * @hw: pointer to the HW structure
- **/
-s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw,
- bool E1000_UNUSEDARG active)
-{
- DEBUGFUNC("e1000_null_lplu_state");
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_null_write_reg - No-op function, return 0
- * @hw: pointer to the HW structure
- **/
-s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw,
- u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data)
-{
- DEBUGFUNC("e1000_null_write_reg");
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_read_i2c_byte_null - No-op function, return 0
- * @hw: pointer to hardware structure
- * @byte_offset: byte offset to read
- * @dev_addr: device address
- * @data: data value read
- *
- **/
-s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw,
- u8 E1000_UNUSEDARG byte_offset,
- u8 E1000_UNUSEDARG dev_addr,
- u8 E1000_UNUSEDARG *data)
-{
- DEBUGFUNC("e1000_read_i2c_byte_null");
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_write_i2c_byte_null - No-op function, return 0
- * @hw: pointer to hardware structure
- * @byte_offset: byte offset to write
- * @dev_addr: device address
- * @data: data value to write
- *
- **/
-s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw,
- u8 E1000_UNUSEDARG byte_offset,
- u8 E1000_UNUSEDARG dev_addr,
- u8 E1000_UNUSEDARG data)
-{
- DEBUGFUNC("e1000_write_i2c_byte_null");
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_check_reset_block_generic - Check if PHY reset is blocked
- * @hw: pointer to the HW structure
- *
- * Read the PHY management control register and check whether a PHY reset
- * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise
- * return E1000_BLK_PHY_RESET (12).
- **/
-s32 e1000_check_reset_block_generic(struct e1000_hw *hw)
-{
- u32 manc;
-
- DEBUGFUNC("e1000_check_reset_block");
-
- manc = E1000_READ_REG(hw, E1000_MANC);
-
- return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
- E1000_BLK_PHY_RESET : E1000_SUCCESS;
-}
-
-/**
- * e1000_get_phy_id - Retrieve the PHY ID and revision
- * @hw: pointer to the HW structure
- *
- * Reads the PHY registers and stores the PHY ID and possibly the PHY
- * revision in the hardware structure.
- **/
-s32 e1000_get_phy_id(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = E1000_SUCCESS;
- u16 phy_id;
-
- DEBUGFUNC("e1000_get_phy_id");
-
- if (!phy->ops.read_reg)
- return E1000_SUCCESS;
-
- ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
- if (ret_val)
- return ret_val;
-
- phy->id = (u32)(phy_id << 16);
- usec_delay(20);
- ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
- if (ret_val)
- return ret_val;
-
- phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
- phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
-
- return E1000_SUCCESS;
-}
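The ID is assembled from two 16-bit MII reads: PHY_ID1 supplies the high half, and PHY_ID2 supplies the low half with its bottom nibble split off as the silicon revision. A sketch of that assembly, assuming the usual e1000 revision mask of 0xFFFFFFF0 (the register values are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define PHY_REVISION_MASK 0xFFFFFFF0u

    int main(void)
    {
        uint16_t id1 = 0x0141, id2 = 0x0CC2;    /* example register reads */
        uint32_t id  = ((uint32_t)id1 << 16) | (id2 & PHY_REVISION_MASK);
        uint32_t rev = id2 & ~PHY_REVISION_MASK;
        printf("phy id 0x%08x rev %u\n", id, rev);  /* 0x01410cc0 rev 2 */
        return 0;
    }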
-
-/**
- * e1000_phy_reset_dsp_generic - Reset PHY DSP
- * @hw: pointer to the HW structure
- *
- * Reset the digital signal processor.
- **/
-s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw)
-{
- s32 ret_val;
-
- DEBUGFUNC("e1000_phy_reset_dsp_generic");
-
- if (!hw->phy.ops.write_reg)
- return E1000_SUCCESS;
-
- ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
- if (ret_val)
- return ret_val;
-
- return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
-}
-
-/**
- * e1000_read_phy_reg_mdic - Read MDI control register
- * @hw: pointer to the HW structure
- * @offset: register offset to be read
- * @data: pointer to the read data
- *
- * Reads the MDI control register in the PHY at offset and stores the
- * information read in data.
- **/
-s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
-{
- struct e1000_phy_info *phy = &hw->phy;
- u32 i, mdic = 0;
-
- DEBUGFUNC("e1000_read_phy_reg_mdic");
-
- if (offset > MAX_PHY_REG_ADDRESS) {
- DEBUGOUT1("PHY Address %d is out of range\n", offset);
- return -E1000_ERR_PARAM;
- }
-
- /* Set up Op-code, Phy Address, and register offset in the MDI
- * Control register. The MAC will take care of interfacing with the
- * PHY to retrieve the desired data.
- */
- mdic = ((offset << E1000_MDIC_REG_SHIFT) |
- (phy->addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_READ));
-
- E1000_WRITE_REG(hw, E1000_MDIC, mdic);
-
- /* Poll the ready bit to see if the MDI read completed.
- * The timeout was increased because testing showed failures with
- * a lower timeout.
- */
- for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- usec_delay_irq(50);
- mdic = E1000_READ_REG(hw, E1000_MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- DEBUGOUT("MDI Read did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- DEBUGOUT("MDI Error\n");
- return -E1000_ERR_PHY;
- }
- if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
- DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n",
- offset,
- (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
- return -E1000_ERR_PHY;
- }
- *data = (u16) mdic;
-
- return E1000_SUCCESS;
-}
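Everything the MAC needs for an MDIO transaction travels in one 32-bit MDIC word: the register offset, the PHY address, and the opcode go in on the write, and the completed command comes back with READY set and the 16-bit data in the low half. A sketch of that packing, with shift and flag values that assume the usual e1000 register layout rather than being taken from this file:

    #include <stdint.h>
    #include <stdio.h>

    #define MDIC_REG_SHIFT 16
    #define MDIC_PHY_SHIFT 21
    #define MDIC_OP_READ   0x08000000u
    #define MDIC_READY     0x10000000u

    int main(void)
    {
        uint32_t cmd = (2u << MDIC_REG_SHIFT) |   /* PHY register 2 */
                       (1u << MDIC_PHY_SHIFT) |   /* PHY address 1 */
                       MDIC_OP_READ;
        /* pretend the MAC finished and returned the data 0x0141 */
        uint32_t done = cmd | MDIC_READY | 0x0141u;
        if (done & MDIC_READY)
            printf("data = 0x%04x\n", (unsigned)(done & 0xFFFFu));
        return 0;
    }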
-
-/**
- * e1000_write_phy_reg_mdic - Write MDI control register
- * @hw: pointer to the HW structure
- * @offset: register offset to write to
- * @data: data to write to register at offset
- *
- * Writes data to MDI control register in the PHY at offset.
- **/
-s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
-{
- struct e1000_phy_info *phy = &hw->phy;
- u32 i, mdic = 0;
-
- DEBUGFUNC("e1000_write_phy_reg_mdic");
-
- if (offset > MAX_PHY_REG_ADDRESS) {
- DEBUGOUT1("PHY Address %d is out of range\n", offset);
- return -E1000_ERR_PARAM;
- }
-
- /* Set up Op-code, Phy Address, and register offset in the MDI
- * Control register. The MAC will take care of interfacing with the
- * PHY to write the desired data.
- */
- mdic = (((u32)data) |
- (offset << E1000_MDIC_REG_SHIFT) |
- (phy->addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_WRITE));
-
- E1000_WRITE_REG(hw, E1000_MDIC, mdic);
-
- /* Poll the ready bit to see if the MDI write completed.
- * The timeout was increased because testing showed failures with
- * a lower timeout.
- */
- for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- usec_delay_irq(50);
- mdic = E1000_READ_REG(hw, E1000_MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- DEBUGOUT("MDI Write did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- DEBUGOUT("MDI Error\n");
- return -E1000_ERR_PHY;
- }
- if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
- DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n",
- offset,
- (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
- return -E1000_ERR_PHY;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_read_phy_reg_i2c - Read PHY register using i2c
- * @hw: pointer to the HW structure
- * @offset: register offset to be read
- * @data: pointer to the read data
- *
- * Reads the PHY register at offset using the i2c interface and stores the
- * retrieved information in data.
- **/
-s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
-{
- struct e1000_phy_info *phy = &hw->phy;
- u32 i, i2ccmd = 0;
-
- DEBUGFUNC("e1000_read_phy_reg_i2c");
-
- /* Set up Op-code, Phy Address, and register address in the I2CCMD
- * register. The MAC will take care of interfacing with the
- * PHY to retrieve the desired data.
- */
- i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
- (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
- (E1000_I2CCMD_OPCODE_READ));
-
- E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
-
- /* Poll the ready bit to see if the I2C read completed */
- for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
- usec_delay(50);
- i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
- if (i2ccmd & E1000_I2CCMD_READY)
- break;
- }
- if (!(i2ccmd & E1000_I2CCMD_READY)) {
- DEBUGOUT("I2CCMD Read did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (i2ccmd & E1000_I2CCMD_ERROR) {
- DEBUGOUT("I2CCMD Error bit set\n");
- return -E1000_ERR_PHY;
- }
-
- /* Need to byte-swap the 16-bit value. */
- *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
-
- return E1000_SUCCESS;
-}
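The data half-word in I2CCMD arrives in the opposite byte order from the host, so the driver exchanges the two bytes on every read (and, symmetrically, before every write). The swap is its own inverse, as this sketch shows:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t swap16(uint16_t v)
    {
        /* same expression as the driver: exchange high and low bytes */
        return (uint16_t)(((v >> 8) & 0x00FF) | ((v << 8) & 0xFF00));
    }

    int main(void)
    {
        uint16_t wire = 0x1234;
        printf("host 0x%04x, back to wire 0x%04x\n",
               swap16(wire), swap16(swap16(wire)));
        return 0;
    }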
-
-/**
- * e1000_write_phy_reg_i2c - Write PHY register using i2c
- * @hw: pointer to the HW structure
- * @offset: register offset to write to
- * @data: data to write at register offset
- *
- * Writes the data to PHY register at the offset using the i2c interface.
- **/
-s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
-{
- struct e1000_phy_info *phy = &hw->phy;
- u32 i, i2ccmd = 0;
- u16 phy_data_swapped;
-
- DEBUGFUNC("e1000_write_phy_reg_i2c");
-
- /* Prevent overwriting the SFP I2C EEPROM, which is at the A0 address. */
- if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) {
- DEBUGOUT1("PHY I2C Address %d is out of range.\n",
- hw->phy.addr);
- return -E1000_ERR_CONFIG;
- }
-
- /* Swap the data bytes for the I2C interface */
- phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
-
- /* Set up Op-code, Phy Address, and register address in the I2CCMD
- * register. The MAC will take care of interfacing with the
- * PHY to write the desired data.
- */
- i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
- (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
- E1000_I2CCMD_OPCODE_WRITE |
- phy_data_swapped);
-
- E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
-
- /* Poll the ready bit to see if the I2C write completed */
- for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
- usec_delay(50);
- i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
- if (i2ccmd & E1000_I2CCMD_READY)
- break;
- }
- if (!(i2ccmd & E1000_I2CCMD_READY)) {
- DEBUGOUT("I2CCMD Write did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (i2ccmd & E1000_I2CCMD_ERROR) {
- DEBUGOUT("I2CCMD Error bit set\n");
- return -E1000_ERR_PHY;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_read_sfp_data_byte - Reads SFP module data.
- * @hw: pointer to the HW structure
- * @offset: byte location offset to be read
- * @data: read data buffer pointer
- *
- * Reads one byte of SFP module data stored
- * in the SFP's resident EEPROM memory or the SFP diagnostic area.
- * Function should be called with
- * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
- * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
- * access
- **/
-s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
-{
- u32 i = 0;
- u32 i2ccmd = 0;
- u32 data_local = 0;
-
- DEBUGFUNC("e1000_read_sfp_data_byte");
-
- if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
- DEBUGOUT("I2CCMD command address exceeds upper limit\n");
- return -E1000_ERR_PHY;
- }
-
- /* Set up Op-code and EEPROM address in the I2CCMD
- * register. The MAC will take care of interfacing with the
- * EEPROM to retrieve the desired data.
- */
- i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
- E1000_I2CCMD_OPCODE_READ);
-
- E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
-
- /* Poll the ready bit to see if the I2C read completed */
- for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
- usec_delay(50);
- data_local = E1000_READ_REG(hw, E1000_I2CCMD);
- if (data_local & E1000_I2CCMD_READY)
- break;
- }
- if (!(data_local & E1000_I2CCMD_READY)) {
- DEBUGOUT("I2CCMD Read did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (data_local & E1000_I2CCMD_ERROR) {
- DEBUGOUT("I2CCMD Error bit set\n");
- return -E1000_ERR_PHY;
- }
- *data = (u8) data_local & 0xFF;
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_write_sfp_data_byte - Writes SFP module data.
- * @hw: pointer to the HW structure
- * @offset: byte location offset to write to
- * @data: data to write
- *
- * Writes one byte of SFP module data stored
- * in the SFP's resident EEPROM memory or the SFP diagnostic area.
- * Function should be called with
- * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
- * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
- * access
- **/
-s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
-{
- u32 i = 0;
- u32 i2ccmd = 0;
- u32 data_local = 0;
-
- DEBUGFUNC("e1000_write_sfp_data_byte");
-
- if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
- DEBUGOUT("I2CCMD command address exceeds upper limit\n");
- return -E1000_ERR_PHY;
- }
- /* The programming interface is 16 bits wide
- * so we need to read the whole word first
- * then update appropriate byte lane and write
- * the updated word back.
- */
- /* Set up Op-code and EEPROM address in the I2CCMD
- * register. The MAC will take care of interfacing
- * with an EEPROM to write the data given.
- */
- i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
- E1000_I2CCMD_OPCODE_READ);
- /* Set a command to read single word */
- E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
- for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
- usec_delay(50);
- /* Poll the ready bit to see if the last
- * launched I2C operation completed
- */
- i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
- if (i2ccmd & E1000_I2CCMD_READY) {
- /* Check if this is READ or WRITE phase */
- if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
- E1000_I2CCMD_OPCODE_READ) {
- /* Write the selected byte
- * lane and update whole word
- */
- data_local = i2ccmd & 0xFF00;
- data_local |= data;
- i2ccmd = ((offset <<
- E1000_I2CCMD_REG_ADDR_SHIFT) |
- E1000_I2CCMD_OPCODE_WRITE | data_local);
- E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
- } else {
- break;
- }
- }
- }
- if (!(i2ccmd & E1000_I2CCMD_READY)) {
- DEBUGOUT("I2CCMD Write did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (i2ccmd & E1000_I2CCMD_ERROR) {
- DEBUGOUT("I2CCMD Error bit set\n");
- return -E1000_ERR_PHY;
- }
- return E1000_SUCCESS;
-}
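Because the programming interface is 16 bits wide, a single-byte write is really a read-modify-write: read the word, keep its high byte lane, splice in the new low byte, then issue the write. The merge step in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t word_read = 0xAB12;     /* word returned by the READ phase */
        uint8_t  new_byte  = 0x7F;       /* byte the caller wants stored */
        uint16_t merged = (uint16_t)((word_read & 0xFF00) | new_byte);
        printf("write back 0x%04x\n", merged);   /* 0xAB7F */
        return 0;
    }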
-
-/**
- * e1000_read_phy_reg_m88 - Read m88 PHY register
- * @hw: pointer to the HW structure
- * @offset: register offset to be read
- * @data: pointer to the read data
- *
- * Acquires semaphore, if necessary, then reads the PHY register at offset
- * and stores the retrieved information in data. Release any acquired
- * semaphores before exiting.
- **/
-s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
-{
- s32 ret_val;
-
- DEBUGFUNC("e1000_read_phy_reg_m88");
-
- if (!hw->phy.ops.acquire)
- return E1000_SUCCESS;
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
-
- hw->phy.ops.release(hw);
-
- return ret_val;
-}
-
-/**
- * e1000_write_phy_reg_m88 - Write m88 PHY register
- * @hw: pointer to the HW structure
- * @offset: register offset to write to
- * @data: data to write at register offset
- *
- * Acquires semaphore, if necessary, then writes the data to PHY register
- * at the offset. Release any acquired semaphores before exiting.
- **/
-s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
-{
- s32 ret_val;
-
- DEBUGFUNC("e1000_write_phy_reg_m88");
-
- if (!hw->phy.ops.acquire)
- return E1000_SUCCESS;
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
-
- hw->phy.ops.release(hw);
-
- return ret_val;
-}
-
-/**
- * e1000_set_page_igp - Set page as on IGP-like PHY(s)
- * @hw: pointer to the HW structure
- * @page: page to set (shifted left when necessary)
- *
- * Sets PHY page required for PHY register access. Assumes semaphore is
- * already acquired. Note, this function sets phy.addr to 1 so the caller
- * must set it appropriately (if necessary) after this function returns.
- **/
-s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
-{
- DEBUGFUNC("e1000_set_page_igp");
-
- DEBUGOUT1("Setting page 0x%x\n", page);
-
- hw->phy.addr = 1;
-
- return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
-}
-
-/**
- * __e1000_read_phy_reg_igp - Read igp PHY register
- * @hw: pointer to the HW structure
- * @offset: register offset to be read
- * @data: pointer to the read data
- * @locked: semaphore has already been acquired or not
- *
- * Acquires semaphore, if necessary, then reads the PHY register at offset
- * and stores the retrieved information in data. Release any acquired
- * semaphores before exiting.
- **/
-static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
- bool locked)
-{
- s32 ret_val = E1000_SUCCESS;
-
- DEBUGFUNC("__e1000_read_phy_reg_igp");
-
- if (!locked) {
- if (!hw->phy.ops.acquire)
- return E1000_SUCCESS;
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
- }
-
- if (offset > MAX_PHY_MULTI_PAGE_REG)
- ret_val = e1000_write_phy_reg_mdic(hw,
- IGP01E1000_PHY_PAGE_SELECT,
- (u16)offset);
- if (!ret_val)
- ret_val = e1000_read_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
- if (!locked)
- hw->phy.ops.release(hw);
-
- return ret_val;
-}
-
-/**
- * e1000_read_phy_reg_igp - Read igp PHY register
- * @hw: pointer to the HW structure
- * @offset: register offset to be read
- * @data: pointer to the read data
- *
- * Acquires semaphore then reads the PHY register at offset and stores the
- * retrieved information in data.
- * Release the acquired semaphore before exiting.
- **/
-s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
-{
- return __e1000_read_phy_reg_igp(hw, offset, data, false);
-}
-
-/**
- * e1000_read_phy_reg_igp_locked - Read igp PHY register
- * @hw: pointer to the HW structure
- * @offset: register offset to be read
- * @data: pointer to the read data
- *
- * Reads the PHY register at offset and stores the retrieved information
- * in data. Assumes semaphore already acquired.
- **/
-s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
-{
- return __e1000_read_phy_reg_igp(hw, offset, data, true);
-}
-
-/**
- * __e1000_write_phy_reg_igp - Write igp PHY register
- * @hw: pointer to the HW structure
- * @offset: register offset to write to
- * @data: data to write at register offset
- * @locked: semaphore has already been acquired or not
- *
- * Acquires semaphore, if necessary, then writes the data to PHY register
- * at the offset. Release any acquired semaphores before exiting.
- **/
-static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
- bool locked)
-{
- s32 ret_val = E1000_SUCCESS;
-
- DEBUGFUNC("e1000_write_phy_reg_igp");
-
- if (!locked) {
- if (!hw->phy.ops.acquire)
- return E1000_SUCCESS;
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
- }
-
- if (offset > MAX_PHY_MULTI_PAGE_REG)
- ret_val = e1000_write_phy_reg_mdic(hw,
- IGP01E1000_PHY_PAGE_SELECT,
- (u16)offset);
- if (!ret_val)
- ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS &
- offset,
- data);
- if (!locked)
- hw->phy.ops.release(hw);
-
- return ret_val;
-}
-
-/**
- * e1000_write_phy_reg_igp - Write igp PHY register
- * @hw: pointer to the HW structure
- * @offset: register offset to write to
- * @data: data to write at register offset
- *
- * Acquires semaphore then writes the data to PHY register
- * at the offset. Release any acquired semaphores before exiting.
- **/
-s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
-{
- return __e1000_write_phy_reg_igp(hw, offset, data, false);
-}
-
-/**
- * e1000_write_phy_reg_igp_locked - Write igp PHY register
- * @hw: pointer to the HW structure
- * @offset: register offset to write to
- * @data: data to write at register offset
- *
- * Writes the data to PHY register at the offset.
- * Assumes semaphore already acquired.
- **/
-s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
-{
- return __e1000_write_phy_reg_igp(hw, offset, data, true);
-}
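The igp (and, below, kumeran) accessors all follow one wrapper pattern: a single internal helper takes a locked flag, and the two public entry points differ only in whether the helper must acquire the hardware semaphore itself. A sketch with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    static void acquire(void) { puts("acquire"); }
    static void release(void) { puts("release"); }

    static int do_access(bool locked)
    {
        if (!locked)
            acquire();                   /* take the HW semaphore */
        puts("register access");
        if (!locked)
            release();
        return 0;
    }

    static int access_unlocked(void) { return do_access(false); }
    static int access_locked(void)   { return do_access(true); }

    int main(void)
    {
        access_unlocked();               /* acquires and releases itself */
        access_locked();                 /* caller already holds the lock */
        return 0;
    }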
-
-/**
- * __e1000_read_kmrn_reg - Read kumeran register
- * @hw: pointer to the HW structure
- * @offset: register offset to be read
- * @data: pointer to the read data
- * @locked: semaphore has already been acquired or not
- *
- * Acquires semaphore, if necessary. Then reads the PHY register at offset
- * using the kumeran interface. The information retrieved is stored in data.
- * Release any acquired semaphores before exiting.
- **/
-static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
- bool locked)
-{
- u32 kmrnctrlsta;
-
- DEBUGFUNC("__e1000_read_kmrn_reg");
-
- if (!locked) {
- s32 ret_val = E1000_SUCCESS;
-
- if (!hw->phy.ops.acquire)
- return E1000_SUCCESS;
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
- }
-
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
- E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
- E1000_WRITE_FLUSH(hw);
-
- usec_delay(2);
-
- kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
- *data = (u16)kmrnctrlsta;
-
- if (!locked)
- hw->phy.ops.release(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_read_kmrn_reg_generic - Read kumeran register
- * @hw: pointer to the HW structure
- * @offset: register offset to be read
- * @data: pointer to the read data
- *
- * Acquires semaphore then reads the PHY register at offset using the
- * kumeran interface. The information retrieved is stored in data.
- * Release the acquired semaphore before exiting.
- **/
-s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
-{
- return __e1000_read_kmrn_reg(hw, offset, data, false);
-}
-
-/**
- * e1000_read_kmrn_reg_locked - Read kumeran register
- * @hw: pointer to the HW structure
- * @offset: register offset to be read
- * @data: pointer to the read data
- *
- * Reads the PHY register at offset using the kumeran interface. The
- * information retrieved is stored in data.
- * Assumes semaphore already acquired.
- **/
-s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
-{
- return __e1000_read_kmrn_reg(hw, offset, data, true);
-}
-
-/**
- * __e1000_write_kmrn_reg - Write kumeran register
- * @hw: pointer to the HW structure
- * @offset: register offset to write to
- * @data: data to write at register offset
- * @locked: semaphore has already been acquired or not
- *
- * Acquires semaphore, if necessary. Then writes the data to PHY register
- * at the offset using the kumeran interface. Release any acquired semaphores
- * before exiting.
- **/
-static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
- bool locked)
-{
- u32 kmrnctrlsta;
-
- DEBUGFUNC("e1000_write_kmrn_reg_generic");
-
- if (!locked) {
- s32 ret_val = E1000_SUCCESS;
-
- if (!hw->phy.ops.acquire)
- return E1000_SUCCESS;
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
- }
-
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | data;
- E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
- E1000_WRITE_FLUSH(hw);
-
- usec_delay(2);
-
- if (!locked)
- hw->phy.ops.release(hw);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_write_kmrn_reg_generic - Write kumeran register
- * @hw: pointer to the HW structure
- * @offset: register offset to write to
- * @data: data to write at register offset
- *
- * Acquires semaphore then writes the data to the PHY register at the offset
- * using the kumeran interface. Release the acquired semaphore before exiting.
- **/
-s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
-{
- return __e1000_write_kmrn_reg(hw, offset, data, false);
-}
-
-/**
- * e1000_write_kmrn_reg_locked - Write kumeran register
- * @hw: pointer to the HW structure
- * @offset: register offset to write to
- * @data: data to write at register offset
- *
- * Write the data to PHY register at the offset using the kumeran interface.
- * Assumes semaphore already acquired.
- **/
-s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
-{
- return __e1000_write_kmrn_reg(hw, offset, data, true);
-}
-
-/**
- * e1000_set_master_slave_mode - Setup PHY for Master/slave mode
- * @hw: pointer to the HW structure
- *
- * Sets up Master/slave mode
- **/
-static s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
-{
- s32 ret_val;
- u16 phy_data;
-
- /* Resolve Master/Slave mode */
- ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- /* load defaults for future use */
- hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
- ((phy_data & CR_1000T_MS_VALUE) ?
- e1000_ms_force_master :
- e1000_ms_force_slave) : e1000_ms_auto;
-
- switch (hw->phy.ms_type) {
- case e1000_ms_force_master:
- phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
- break;
- case e1000_ms_force_slave:
- phy_data |= CR_1000T_MS_ENABLE;
- phy_data &= ~(CR_1000T_MS_VALUE);
- break;
- case e1000_ms_auto:
- phy_data &= ~CR_1000T_MS_ENABLE;
- /* fall-through */
- default:
- break;
- }
-
- return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data);
-}
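Master/slave resolution for 1000BASE-T comes down to two bits of the 1000T control register: MS_ENABLE switches from automatic resolution to manual, and MS_VALUE then picks master (1) or slave (0). A sketch of the encoding, with bit positions that assume the standard 1000BASE-T control layout:

    #include <stdint.h>
    #include <stdio.h>

    #define MS_ENABLE 0x1000u
    #define MS_VALUE  0x0800u

    enum ms_mode { MS_AUTO, MS_FORCE_MASTER, MS_FORCE_SLAVE };

    static uint16_t apply_ms_mode(uint16_t reg, enum ms_mode mode)
    {
        switch (mode) {
        case MS_FORCE_MASTER:
            return (uint16_t)(reg | MS_ENABLE | MS_VALUE);
        case MS_FORCE_SLAVE:
            return (uint16_t)((reg | MS_ENABLE) & ~MS_VALUE);
        case MS_AUTO:
        default:
            return (uint16_t)(reg & ~MS_ENABLE);
        }
    }

    int main(void)
    {
        printf("master 0x%04x, slave 0x%04x, auto 0x%04x\n",
               apply_ms_mode(0, MS_FORCE_MASTER),
               apply_ms_mode(0, MS_FORCE_SLAVE),
               apply_ms_mode(MS_ENABLE, MS_AUTO));
        return 0;
    }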
-
-/**
- * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
- * @hw: pointer to the HW structure
- *
- * Sets up Carrier-sense on Transmit and downshift values.
- **/
-s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
-{
- s32 ret_val;
- u16 phy_data;
-
- DEBUGFUNC("e1000_copper_link_setup_82577");
-
- if (hw->phy.reset_disable)
- return E1000_SUCCESS;
-
- if (hw->phy.type == e1000_phy_82580) {
- ret_val = hw->phy.ops.reset(hw);
- if (ret_val) {
- DEBUGOUT("Error resetting the PHY.\n");
- return ret_val;
- }
- }
-
- /* Enable CRS on Tx. This must be set for half-duplex operation. */
- ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
-
- /* Enable downshift */
- phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
-
- ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data);
- if (ret_val)
- return ret_val;
-
- /* Set MDI/MDIX mode */
- ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
- if (ret_val)
- return ret_val;
- phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
- /* Options:
- * 0 - Auto (default)
- * 1 - MDI mode
- * 2 - MDI-X mode
- */
- switch (hw->phy.mdix) {
- case 1:
- break;
- case 2:
- phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
- break;
- case 0:
- default:
- phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
- break;
- }
- ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
- if (ret_val)
- return ret_val;
-
- return e1000_set_master_slave_mode(hw);
-}
-
-/**
- * e1000_copper_link_setup_m88 - Setup m88 PHYs for copper link
- * @hw: pointer to the HW structure
- *
- * Sets up MDI/MDI-X and polarity for m88 PHYs. If necessary, transmit clock
- * and downshift values are set also.
- **/
-s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 phy_data;
-
- DEBUGFUNC("e1000_copper_link_setup_m88");
-
- if (phy->reset_disable)
- return E1000_SUCCESS;
-
- /* Enable CRS on Tx. This must be set for half-duplex operation. */
- ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
-
- /* Options:
- * MDI/MDI-X = 0 (default)
- * 0 - Auto for all speeds
- * 1 - MDI mode
- * 2 - MDI-X mode
- * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
- */
- phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
-
- switch (phy->mdix) {
- case 1:
- phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
- break;
- case 2:
- phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
- break;
- case 3:
- phy_data |= M88E1000_PSCR_AUTO_X_1000T;
- break;
- case 0:
- default:
- phy_data |= M88E1000_PSCR_AUTO_X_MODE;
- break;
- }
-
- /* Options:
- * disable_polarity_correction = 0 (default)
- * Automatic Correction for Reversed Cable Polarity
- * 0 - Disabled
- * 1 - Enabled
- */
- phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
- if (phy->disable_polarity_correction)
- phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
-
- ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- return ret_val;
-
- if (phy->revision < E1000_REVISION_4) {
- /* Force TX_CLK in the Extended PHY Specific Control Register
- * to 25MHz clock.
- */
- ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data |= M88E1000_EPSCR_TX_CLK_25;
-
- if ((phy->revision == E1000_REVISION_2) &&
- (phy->id == M88E1111_I_PHY_ID)) {
- /* 82573L PHY - set the downshift counter to 5x. */
- phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
- phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
- } else {
- /* Configure Master and Slave downshift values */
- phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
- M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
- phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
- M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
- }
- ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
- phy_data);
- if (ret_val)
- return ret_val;
- }
-
- /* Commit the changes. */
- ret_val = phy->ops.commit(hw);
- if (ret_val) {
- DEBUGOUT("Error committing the PHY changes\n");
- return ret_val;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_copper_link_setup_m88_gen2 - Setup m88 PHYs for copper link
- * @hw: pointer to the HW structure
- *
- * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHYs.
- * Also enables and sets the downshift parameters.
- **/
-s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 phy_data;
-
- DEBUGFUNC("e1000_copper_link_setup_m88_gen2");
-
- if (phy->reset_disable)
- return E1000_SUCCESS;
-
- /* Enable CRS on Tx. This must be set for half-duplex operation. */
- ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- /* Options:
- * MDI/MDI-X = 0 (default)
- * 0 - Auto for all speeds
- * 1 - MDI mode
- * 2 - MDI-X mode
- * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
- */
- phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
-
- switch (phy->mdix) {
- case 1:
- phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
- break;
- case 2:
- phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
- break;
- case 3:
- /* M88E1112 does not support this mode */
- if (phy->id != M88E1112_E_PHY_ID) {
- phy_data |= M88E1000_PSCR_AUTO_X_1000T;
- break;
- }
- case 0:
- default:
- phy_data |= M88E1000_PSCR_AUTO_X_MODE;
- break;
- }
-
- /* Options:
- * disable_polarity_correction = 0 (default)
- * Automatic Correction for Reversed Cable Polarity
- * 0 - Disabled
- * 1 - Enabled
- */
- phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
- if (phy->disable_polarity_correction)
- phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
-
- /* Enable downshift and set it to 6X */
- if (phy->id == M88E1543_E_PHY_ID) {
- phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE;
- ret_val =
- phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- return ret_val;
-
- ret_val = phy->ops.commit(hw);
- if (ret_val) {
- DEBUGOUT("Error committing the PHY changes\n");
- return ret_val;
- }
- }
-
- phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
- phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
- phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
-
- ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- return ret_val;
-
- /* Commit the changes. */
- ret_val = phy->ops.commit(hw);
- if (ret_val) {
- DEBUGOUT("Error committing the PHY changes\n");
- return ret_val;
- }
-
- ret_val = e1000_set_master_slave_mode(hw);
- if (ret_val)
- return ret_val;
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_copper_link_setup_igp - Setup igp PHYs for copper link
- * @hw: pointer to the HW structure
- *
- * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
- * igp PHYs.
- **/
-s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 data;
-
- DEBUGFUNC("e1000_copper_link_setup_igp");
-
- if (phy->reset_disable)
- return E1000_SUCCESS;
-
- ret_val = hw->phy.ops.reset(hw);
- if (ret_val) {
- DEBUGOUT("Error resetting the PHY.\n");
- return ret_val;
- }
-
- /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
- * timeout issues when LFS is enabled.
- */
- msec_delay(100);
-
- /* disable lplu d0 during driver init */
- if (hw->phy.ops.set_d0_lplu_state) {
- ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
- if (ret_val) {
- DEBUGOUT("Error Disabling LPLU D0\n");
- return ret_val;
- }
- }
- /* Configure mdi-mdix settings */
- ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
- if (ret_val)
- return ret_val;
-
- data &= ~IGP01E1000_PSCR_AUTO_MDIX;
-
- switch (phy->mdix) {
- case 1:
- data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
- break;
- case 2:
- data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
- break;
- case 0:
- default:
- data |= IGP01E1000_PSCR_AUTO_MDIX;
- break;
- }
- ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
- if (ret_val)
- return ret_val;
-
- /* set auto-master slave resolution settings */
- if (hw->mac.autoneg) {
- /* when autonegotiation advertisement is only 1000Mbps then we
- * should disable SmartSpeed and enable Auto MasterSlave
- * resolution as hardware default.
- */
- if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
- /* Disable SmartSpeed */
- ret_val = phy->ops.read_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- &data);
- if (ret_val)
- return ret_val;
-
- data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = phy->ops.write_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- data);
- if (ret_val)
- return ret_val;
-
- /* Set auto Master/Slave resolution process */
- ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
- if (ret_val)
- return ret_val;
-
- data &= ~CR_1000T_MS_ENABLE;
- ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
- if (ret_val)
- return ret_val;
- }
-
- ret_val = e1000_set_master_slave_mode(hw);
- }
-
- return ret_val;
-}
-
-/**
- * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
- * @hw: pointer to the HW structure
- *
- * Reads the MII auto-neg advertisement register and/or the 1000T control
- * register and, if the PHY is already set up for auto-negotiation,
- * returns successfully. Otherwise, sets up the advertisement and flow
- * control to the appropriate values for the desired auto-negotiation.
- **/
-static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 mii_autoneg_adv_reg;
- u16 mii_1000t_ctrl_reg = 0;
-
- DEBUGFUNC("e1000_phy_setup_autoneg");
-
- phy->autoneg_advertised &= phy->autoneg_mask;
-
- /* Read the MII Auto-Neg Advertisement Register (Address 4). */
- ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
- if (ret_val)
- return ret_val;
-
- if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
- /* Read the MII 1000Base-T Control Register (Address 9). */
- ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
- &mii_1000t_ctrl_reg);
- if (ret_val)
- return ret_val;
- }
-
- /* Need to parse both autoneg_advertised and fc and set up
- * the appropriate PHY registers. First we will parse for
- * autoneg_advertised software override. Since we can advertise
- * a plethora of combinations, we need to check each bit
- * individually.
- */
-
- /* First we clear all the 10/100 mb speed bits in the Auto-Neg
- * Advertisement Register (Address 4) and the 1000 mb speed bits in
- * the 1000Base-T Control Register (Address 9).
- */
- mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
- NWAY_AR_100TX_HD_CAPS |
- NWAY_AR_10T_FD_CAPS |
- NWAY_AR_10T_HD_CAPS);
- mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
-
- DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
-
- /* Do we want to advertise 10 Mb Half Duplex? */
- if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
- DEBUGOUT("Advertise 10mb Half duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
- }
-
- /* Do we want to advertise 10 Mb Full Duplex? */
- if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
- DEBUGOUT("Advertise 10mb Full duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
- }
-
- /* Do we want to advertise 100 Mb Half Duplex? */
- if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
- DEBUGOUT("Advertise 100mb Half duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
- }
-
- /* Do we want to advertise 100 Mb Full Duplex? */
- if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
- DEBUGOUT("Advertise 100mb Full duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
- }
-
- /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
- if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
- DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
-
- /* Do we want to advertise 1000 Mb Full Duplex? */
- if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
- DEBUGOUT("Advertise 1000mb Full duplex\n");
- mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
- }
-
- /* Check for a software override of the flow control settings, and
- * setup the PHY advertisement registers accordingly. If
- * auto-negotiation is enabled, then software will have to set the
- * "PAUSE" bits to the correct value in the Auto-Negotiation
- * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
- * negotiation.
- *
- * The possible values of the "fc" parameter are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames
- * but we do not support receiving pause frames).
- * 3: Both Rx and Tx flow control (symmetric) are enabled.
- * other: No software override. The flow control configuration
- * in the EEPROM is used.
- */
- switch (hw->fc.current_mode) {
- case e1000_fc_none:
- /* Flow control (Rx & Tx) is completely disabled by a
- * software over-ride.
- */
- mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
- break;
- case e1000_fc_rx_pause:
- /* Rx Flow control is enabled, and Tx Flow control is
- * disabled, by a software over-ride.
- *
- * Since there really isn't a way to advertise that we are
- * capable of Rx Pause ONLY, we will advertise that we
- * support both symmetric and asymmetric Rx PAUSE. Later
- * (in e1000_config_fc_after_link_up) we will disable the
- * hw's ability to send PAUSE frames.
- */
- mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
- break;
- case e1000_fc_tx_pause:
- /* Tx Flow control is enabled, and Rx Flow control is
- * disabled, by a software over-ride.
- */
- mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
- mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
- break;
- case e1000_fc_full:
- /* Flow control (both Rx and Tx) is enabled by a software
- * over-ride.
- */
- mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
- break;
- default:
- DEBUGOUT("Flow control param set incorrectly\n");
- return -E1000_ERR_CONFIG;
- }
-
- ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
- if (ret_val)
- return ret_val;
-
- DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
-
- if (phy->autoneg_mask & ADVERTISE_1000_FULL)
- ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
- mii_1000t_ctrl_reg);
-
- return ret_val;
-}
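The flow-control portion of the function maps hw->fc.current_mode onto the PAUSE and ASM_DIR advertisement bits: none clears both, rx_pause advertises both (rx-only cannot be expressed on its own), tx_pause advertises ASM_DIR alone, and full sets both. A sketch of that mapping, with bit values that assume the standard MII advertisement register layout:

    #include <stdint.h>
    #include <stdio.h>

    #define AR_PAUSE   0x0400u     /* symmetric pause */
    #define AR_ASM_DIR 0x0800u     /* asymmetric pause direction */

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    static uint16_t fc_adv_bits(enum fc_mode fc)
    {
        switch (fc) {
        case FC_RX_PAUSE: return AR_PAUSE | AR_ASM_DIR;
        case FC_TX_PAUSE: return AR_ASM_DIR;
        case FC_FULL:     return AR_PAUSE | AR_ASM_DIR;
        case FC_NONE:
        default:          return 0;
        }
    }

    int main(void)
    {
        printf("rx_pause advertises 0x%04x\n", fc_adv_bits(FC_RX_PAUSE));
        printf("tx_pause advertises 0x%04x\n", fc_adv_bits(FC_TX_PAUSE));
        return 0;
    }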
-
-/**
- * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
- * @hw: pointer to the HW structure
- *
- * Performs initial bounds checking on the autoneg advertisement parameter,
- * then configures the PHY to advertise the full capability if none was
- * requested. Sets up the PHY to autonegotiate and restarts the negotiation
- * process with the link partner. If autoneg_wait_to_complete is set, waits
- * for autoneg to complete before exiting.
- **/
-static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 phy_ctrl;
-
- DEBUGFUNC("e1000_copper_link_autoneg");
-
- /* Perform some bounds checking on the autoneg advertisement
- * parameter.
- */
- phy->autoneg_advertised &= phy->autoneg_mask;
-
- /* If autoneg_advertised is zero, we assume it was not defaulted
- * by the calling code so we set to advertise full capability.
- */
- if (!phy->autoneg_advertised)
- phy->autoneg_advertised = phy->autoneg_mask;
-
- DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
- ret_val = e1000_phy_setup_autoneg(hw);
- if (ret_val) {
- DEBUGOUT("Error Setting up Auto-Negotiation\n");
- return ret_val;
- }
- DEBUGOUT("Restarting Auto-Neg\n");
-
- /* Restart auto-negotiation by setting the Auto Neg Enable bit and
- * the Auto Neg Restart bit in the PHY control register.
- */
- ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
- if (ret_val)
- return ret_val;
-
- phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
- ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
- if (ret_val)
- return ret_val;
-
- /* Does the user want to wait for Auto-Neg to complete here, or
- * check at a later time (for example, callback routine).
- */
- if (phy->autoneg_wait_to_complete) {
- ret_val = e1000_wait_autoneg(hw);
- if (ret_val) {
- DEBUGOUT("Error while waiting for autoneg to complete\n");
- return ret_val;
- }
- }
-
- hw->mac.get_link_status = true;
-
- return ret_val;
-}
-
-/**
- * e1000_setup_copper_link_generic - Configure copper link settings
- * @hw: pointer to the HW structure
- *
- * Calls the appropriate function to configure the link for auto-neg or forced
- * speed and duplex. Then we check for link; once link is established, the
- * functions to configure collision distance and flow control are called.
- * If link is not established, we return -E1000_ERR_PHY (-2).
- **/
-s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
-{
- s32 ret_val;
- bool link;
-
- DEBUGFUNC("e1000_setup_copper_link_generic");
-
- if (hw->mac.autoneg) {
- /* Setup autoneg and flow control advertisement and perform
- * autonegotiation.
- */
- ret_val = e1000_copper_link_autoneg(hw);
- if (ret_val)
- return ret_val;
- } else {
- /* PHY will be set to 10H, 10F, 100H or 100F
- * depending on user settings.
- */
- DEBUGOUT("Forcing Speed and Duplex\n");
- ret_val = hw->phy.ops.force_speed_duplex(hw);
- if (ret_val) {
- DEBUGOUT("Error Forcing Speed and Duplex\n");
- return ret_val;
- }
- }
-
- /* Check link status. Wait up to 100 microseconds for link to become
- * valid.
- */
- ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
- &link);
- if (ret_val)
- return ret_val;
-
- if (link) {
- DEBUGOUT("Valid link established!!!\n");
- hw->mac.ops.config_collision_dist(hw);
- ret_val = e1000_config_fc_after_link_up_generic(hw);
- } else {
- DEBUGOUT("Unable to establish link!!!\n");
- }
-
- return ret_val;
-}
-
-/**
- * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
- * @hw: pointer to the HW structure
- *
- * Calls the PHY setup function to force speed and duplex. Clears the
- * auto-crossover to force MDI manually. Waits for link and returns
- * success if the link comes up, else -E1000_ERR_PHY (-2).
- **/
-s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 phy_data;
- bool link;
-
- DEBUGFUNC("e1000_phy_force_speed_duplex_igp");
-
- ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
- if (ret_val)
- return ret_val;
-
- e1000_phy_force_speed_duplex_setup(hw, &phy_data);
-
- ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
- if (ret_val)
- return ret_val;
-
- /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
- * forced whenever speed and duplex are forced.
- */
- ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
- phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
-
- ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
- if (ret_val)
- return ret_val;
-
- DEBUGOUT1("IGP PSCR: %X\n", phy_data);
-
- usec_delay(1);
-
- if (phy->autoneg_wait_to_complete) {
- DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n");
-
- ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
- if (ret_val)
- return ret_val;
-
- if (!link)
- DEBUGOUT("Link taking longer than expected.\n");
-
- /* Try once more */
- ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
- }
-
- return ret_val;
-}
-
-/**
- * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
- * @hw: pointer to the HW structure
- *
- * Calls the PHY setup function to force speed and duplex. Clears the
- * auto-crossover to force MDI manually. Resets the PHY to commit the
- * changes. If time expires while waiting for link up, we reset the DSP.
- * After reset, TX_CLK and CRS on Tx must be set. Returns success upon
- * successful completion, else the corresponding error code.
- **/
-s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 phy_data;
- bool link;
-
- DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
-
- /* I210 and I211 devices support Auto-Crossover in forced operation. */
- if (phy->type != e1000_phy_i210) {
- /* Clear Auto-Crossover to force MDI manually. M88E1000
- * requires MDI forced whenever speed and duplex are forced.
- */
- ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
- ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
- phy_data);
- if (ret_val)
- return ret_val;
- DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
- }
-
-
- ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
- if (ret_val)
- return ret_val;
-
- e1000_phy_force_speed_duplex_setup(hw, &phy_data);
-
- ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
- if (ret_val)
- return ret_val;
-
- /* Reset the phy to commit changes. */
- ret_val = hw->phy.ops.commit(hw);
- if (ret_val)
- return ret_val;
-
- if (phy->autoneg_wait_to_complete) {
- DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n");
-
- ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
- if (ret_val)
- return ret_val;
-
- if (!link) {
- bool reset_dsp = true;
-
- switch (hw->phy.id) {
- case I347AT4_E_PHY_ID:
- case M88E1340M_E_PHY_ID:
- case M88E1112_E_PHY_ID:
- case M88E1543_E_PHY_ID:
- case I210_I_PHY_ID:
- reset_dsp = false;
- break;
- default:
- if (hw->phy.type != e1000_phy_m88)
- reset_dsp = false;
- break;
- }
-
- if (!reset_dsp) {
- DEBUGOUT("Link taking longer than expected.\n");
- } else {
- /* We didn't get link.
- * Reset the DSP and cross our fingers.
- */
- ret_val = phy->ops.write_reg(hw,
- M88E1000_PHY_PAGE_SELECT,
- 0x001d);
- if (ret_val)
- return ret_val;
- ret_val = e1000_phy_reset_dsp_generic(hw);
- if (ret_val)
- return ret_val;
- }
- }
-
- /* Try once more */
- ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
- if (ret_val)
- return ret_val;
- }
-
- if (hw->phy.type != e1000_phy_m88)
- return E1000_SUCCESS;
-
- if (hw->phy.id == I347AT4_E_PHY_ID ||
- hw->phy.id == M88E1340M_E_PHY_ID ||
- hw->phy.id == M88E1112_E_PHY_ID)
- return E1000_SUCCESS;
- if (hw->phy.id == I210_I_PHY_ID)
- return E1000_SUCCESS;
- if ((hw->phy.id == M88E1543_E_PHY_ID))
- return E1000_SUCCESS;
- ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- /* Resetting the phy means we need to re-force TX_CLK in the
- * Extended PHY Specific Control Register to 25MHz clock from
- * the reset value of 2.5MHz.
- */
- phy_data |= M88E1000_EPSCR_TX_CLK_25;
- ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- return ret_val;
-
- /* In addition, we must re-enable CRS on Tx for both half and full
- * duplex.
- */
- ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
- ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-
- return ret_val;
-}
-
-/**
- * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
- * @hw: pointer to the HW structure
- *
- * Forces the speed and duplex settings of the PHY.
- * This is a function pointer entry point only called by
- * PHY setup routines.
- **/
-s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 data;
- bool link;
-
- DEBUGFUNC("e1000_phy_force_speed_duplex_ife");
-
- ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
- if (ret_val)
- return ret_val;
-
- e1000_phy_force_speed_duplex_setup(hw, &data);
-
- ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data);
- if (ret_val)
- return ret_val;
-
- /* Disable MDI-X support for 10/100 */
- ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
- if (ret_val)
- return ret_val;
-
- data &= ~IFE_PMC_AUTO_MDIX;
- data &= ~IFE_PMC_FORCE_MDIX;
-
- ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data);
- if (ret_val)
- return ret_val;
-
- DEBUGOUT1("IFE PMC: %X\n", data);
-
- usec_delay(1);
-
- if (phy->autoneg_wait_to_complete) {
- DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
-
- ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
- if (ret_val)
- return ret_val;
-
- if (!link)
- DEBUGOUT("Link taking longer than expected.\n");
-
- /* Try once more */
- ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
- if (ret_val)
- return ret_val;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
- * @hw: pointer to the HW structure
- * @phy_ctrl: pointer to current value of PHY_CONTROL
- *
- * Forces speed and duplex on the PHY by doing the following: disable flow
- * control, force speed/duplex on the MAC, disable auto speed detection,
- * disable auto-negotiation, configure duplex, configure speed, configure
- * the collision distance, write configuration to CTRL register. The
- * caller must write to the PHY_CONTROL register for these settings to
- * take effect.
- **/
-void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
-{
- struct e1000_mac_info *mac = &hw->mac;
- u32 ctrl;
-
- DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
-
- /* Turn off flow control when forcing speed/duplex */
- hw->fc.current_mode = e1000_fc_none;
-
- /* Force speed/duplex on the mac */
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
- ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
- ctrl &= ~E1000_CTRL_SPD_SEL;
-
- /* Disable Auto Speed Detection */
- ctrl &= ~E1000_CTRL_ASDE;
-
- /* Disable autoneg on the phy */
- *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
-
- /* Forcing Full or Half Duplex? */
- if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
- ctrl &= ~E1000_CTRL_FD;
- *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
- DEBUGOUT("Half Duplex\n");
- } else {
- ctrl |= E1000_CTRL_FD;
- *phy_ctrl |= MII_CR_FULL_DUPLEX;
- DEBUGOUT("Full Duplex\n");
- }
-
- /* Forcing 10mb or 100mb? */
- if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
- ctrl |= E1000_CTRL_SPD_100;
- *phy_ctrl |= MII_CR_SPEED_100;
- *phy_ctrl &= ~MII_CR_SPEED_1000;
- DEBUGOUT("Forcing 100mb\n");
- } else {
- ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
- *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
- DEBUGOUT("Forcing 10mb\n");
- }
-
- hw->mac.ops.config_collision_dist(hw);
-
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-}
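For a concrete case, forcing 100 Mb/s full duplex turns into the following PHY control word: autoneg disabled, the 100 Mb/s speed bit set, the 1000 Mb/s bit cleared, and full duplex selected. A sketch assuming the standard MII BMCR bit layout (the starting value is a typical reset-time example):

    #include <stdint.h>
    #include <stdio.h>

    #define CR_AUTO_NEG_EN  0x1000u
    #define CR_FULL_DUPLEX  0x0100u
    #define CR_SPEED_100    0x2000u
    #define CR_SPEED_1000   0x0040u

    int main(void)
    {
        uint16_t phy_ctrl = 0x1140;      /* example reset-time value */

        phy_ctrl &= (uint16_t)~CR_AUTO_NEG_EN;             /* no autoneg */
        phy_ctrl |= CR_FULL_DUPLEX;                        /* full duplex */
        phy_ctrl |= CR_SPEED_100;                          /* 100 Mb/s */
        phy_ctrl &= (uint16_t)~CR_SPEED_1000;
        printf("forced PHY_CONTROL = 0x%04x\n", phy_ctrl); /* 0x2100 */
        return 0;
    }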
-
-/**
- * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
- * @hw: pointer to the HW structure
- * @active: boolean used to enable/disable lplu
- *
- * Returns 0 for success, a negative error code on failure.
- *
- * The low power link up (lplu) state is set to the power management level D3
- * and SmartSpeed is disabled when active is true, else clear lplu for D3
- * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
- * is used during Dx states where the power conservation is most important.
- * During driver activity, SmartSpeed should be enabled so performance is
- * maintained.
- **/
-s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 data;
-
- DEBUGFUNC("e1000_set_d3_lplu_state_generic");
-
- if (!hw->phy.ops.read_reg)
- return E1000_SUCCESS;
-
- ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
- if (ret_val)
- return ret_val;
-
- if (!active) {
- data &= ~IGP02E1000_PM_D3_LPLU;
- ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
- data);
- if (ret_val)
- return ret_val;
- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
- * during Dx states where the power conservation is most
- * important. During driver activity we should enable
- * SmartSpeed, so performance is maintained.
- */
- if (phy->smart_speed == e1000_smart_speed_on) {
- ret_val = phy->ops.read_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- &data);
- if (ret_val)
- return ret_val;
-
- data |= IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = phy->ops.write_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- data);
- if (ret_val)
- return ret_val;
- } else if (phy->smart_speed == e1000_smart_speed_off) {
- ret_val = phy->ops.read_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- &data);
- if (ret_val)
- return ret_val;
-
- data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = phy->ops.write_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- data);
- if (ret_val)
- return ret_val;
- }
- } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
- (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
- (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
- data |= IGP02E1000_PM_D3_LPLU;
- ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
- data);
- if (ret_val)
- return ret_val;
-
- /* When LPLU is enabled, we should disable SmartSpeed */
- ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- &data);
- if (ret_val)
- return ret_val;
-
- data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- data);
- }
-
- return ret_val;
-}
-
-/**
- * e1000_check_downshift_generic - Checks whether a downshift in speed occurred
- * @hw: pointer to the HW structure
- *
- * Success returns 0, failure returns a negative error code.
- *
- * A downshift is detected by querying the PHY link health.
- **/
-s32 e1000_check_downshift_generic(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 phy_data, offset, mask;
-
- DEBUGFUNC("e1000_check_downshift_generic");
-
- switch (phy->type) {
- case e1000_phy_i210:
- case e1000_phy_m88:
- case e1000_phy_gg82563:
- offset = M88E1000_PHY_SPEC_STATUS;
- mask = M88E1000_PSSR_DOWNSHIFT;
- break;
- case e1000_phy_igp_2:
- case e1000_phy_igp_3:
- offset = IGP01E1000_PHY_LINK_HEALTH;
- mask = IGP01E1000_PLHR_SS_DOWNGRADE;
- break;
- default:
- /* speed downshift not supported */
- phy->speed_downgraded = false;
- return E1000_SUCCESS;
- }
-
- ret_val = phy->ops.read_reg(hw, offset, &phy_data);
-
- if (!ret_val)
- phy->speed_downgraded = !!(phy_data & mask);
-
- return ret_val;
-}
-
-/**
- * e1000_check_polarity_m88 - Checks the polarity.
- * @hw: pointer to the HW structure
- *
- * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
- *
- * Polarity is determined based on the PHY specific status register.
- **/
-s32 e1000_check_polarity_m88(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 data;
-
- DEBUGFUNC("e1000_check_polarity_m88");
-
- ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
-
- if (!ret_val)
- phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal);
-
- return ret_val;
-}
-
-/**
- * e1000_check_polarity_igp - Checks the polarity.
- * @hw: pointer to the HW structure
- *
- * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
- *
- * Polarity is determined based on the PHY port status register, and the
- * current speed (since there is no polarity at 100Mbps).
- **/
-s32 e1000_check_polarity_igp(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 data, offset, mask;
-
- DEBUGFUNC("e1000_check_polarity_igp");
-
- /* Polarity is determined based on the speed of
- * our connection.
- */
- ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
- if (ret_val)
- return ret_val;
-
- if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
- IGP01E1000_PSSR_SPEED_1000MBPS) {
- offset = IGP01E1000_PHY_PCS_INIT_REG;
- mask = IGP01E1000_PHY_POLARITY_MASK;
- } else {
- /* This really only applies to 10Mbps since
- * there is no polarity for 100Mbps (always 0).
- */
- offset = IGP01E1000_PHY_PORT_STATUS;
- mask = IGP01E1000_PSSR_POLARITY_REVERSED;
- }
-
- ret_val = phy->ops.read_reg(hw, offset, &data);
-
- if (!ret_val)
- phy->cable_polarity = ((data & mask)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal);
-
- return ret_val;
-}
-
-/**
- * e1000_check_polarity_ife - Check cable polarity for IFE PHY
- * @hw: pointer to the HW structure
- *
- * Polarity is determined based on whether the polarity reversal feature is
- * enabled.
- **/
-s32 e1000_check_polarity_ife(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 phy_data, offset, mask;
-
- DEBUGFUNC("e1000_check_polarity_ife");
-
- /* Polarity is determined based on the reversal feature being enabled.
- */
- if (phy->polarity_correction) {
- offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
- mask = IFE_PESC_POLARITY_REVERSED;
- } else {
- offset = IFE_PHY_SPECIAL_CONTROL;
- mask = IFE_PSC_FORCE_POLARITY;
- }
-
- ret_val = phy->ops.read_reg(hw, offset, &phy_data);
-
- if (!ret_val)
- phy->cable_polarity = ((phy_data & mask)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal);
-
- return ret_val;
-}
-
-/**
- * e1000_wait_autoneg - Wait for auto-neg completion
- * @hw: pointer to the HW structure
- *
- * Waits for auto-negotiation to complete or for the auto-negotiation time
- * limit to expire, whichever happens first.
- **/
-static s32 e1000_wait_autoneg(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
- u16 i, phy_status;
-
- DEBUGFUNC("e1000_wait_autoneg");
-
- if (!hw->phy.ops.read_reg)
- return E1000_SUCCESS;
-
- /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
- for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
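- /* The status register is read twice because on some PHYs the status
- * bits are latched ("sticky"), so the first read can return a stale
- * value; see the note in e1000_phy_has_link_generic() below.
- */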
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
- if (ret_val)
- break;
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
- if (ret_val)
- break;
- if (phy_status & MII_SR_AUTONEG_COMPLETE)
- break;
- msec_delay(100);
- }
-
- /* PHY_AUTO_NEG_LIMIT expiration doesn't guarantee auto-negotiation
- * has completed.
- */
- return ret_val;
-}
-
-/**
- * e1000_phy_has_link_generic - Polls PHY for link
- * @hw: pointer to the HW structure
- * @iterations: number of times to poll for link
- * @usec_interval: delay between polling attempts
- * @success: pointer to whether polling was successful or not
- *
- * Polls the PHY status register for link, 'iterations' number of times.
- **/
-s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
- u32 usec_interval, bool *success)
-{
- s32 ret_val = E1000_SUCCESS;
- u16 i, phy_status;
-
- DEBUGFUNC("e1000_phy_has_link_generic");
-
- if (!hw->phy.ops.read_reg)
- return E1000_SUCCESS;
-
- for (i = 0; i < iterations; i++) {
- /* Some PHYs require the PHY_STATUS register to be read
- * twice due to the link bit being sticky. No harm doing
- * it across the board.
- */
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
- if (ret_val)
- /* If the first read fails, another entity may have
- * ownership of the resources, wait and try again to
- * see if they have relinquished the resources yet.
- */
- usec_delay(usec_interval);
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
- if (ret_val)
- break;
- if (phy_status & MII_SR_LINK_STATUS)
- break;
- if (usec_interval >= 1000)
- msec_delay_irq(usec_interval/1000);
- else
- usec_delay(usec_interval);
- }
-
- *success = (i < iterations);
-
- return ret_val;
-}
-
-/**
- * e1000_get_cable_length_m88 - Determine cable length for m88 PHY
- * @hw: pointer to the HW structure
- *
- * Reads the PHY specific status register to retrieve the cable length
- * information. The cable length is determined by averaging the minimum and
- * maximum values to get the "average" cable length. The m88 PHY has five
- * possible cable length values, which are:
- * Register Value Cable Length
- * 0 < 50 meters
- * 1 50 - 80 meters
- * 2 80 - 110 meters
- * 3 110 - 140 meters
- * 4 > 140 meters
- **/
-s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 phy_data, index;
-
- DEBUGFUNC("e1000_get_cable_length_m88");
-
- ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
-
- index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT);
-
- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
- return -E1000_ERR_PHY;
-
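- /* Example: index 1 selects the 50 - 80 meter row of the table in the
- * doc comment above, giving cable_length = (50 + 80) / 2 = 65 meters.
- */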
- phy->min_cable_length = e1000_m88_cable_length_table[index];
- phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
-
- phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
-
- return E1000_SUCCESS;
-}
-
-s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 phy_data, phy_data2, is_cm;
- u16 index, default_page;
-
- DEBUGFUNC("e1000_get_cable_length_m88_gen2");
-
- switch (hw->phy.id) {
- case I210_I_PHY_ID:
- /* Get cable length from PHY Cable Diagnostics Control Reg */
- ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
- (I347AT4_PCDL + phy->addr),
- &phy_data);
- if (ret_val)
- return ret_val;
-
- /* Check if the unit of cable length is meters or cm */
- ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
- I347AT4_PCDC, &phy_data2);
- if (ret_val)
- return ret_val;
-
- is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
-
- /* Populate the phy structure with cable length in meters */
- phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
- phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
- phy->cable_length = phy_data / (is_cm ? 100 : 1);
- break;
- case M88E1543_E_PHY_ID:
- case M88E1340M_E_PHY_ID:
- case I347AT4_E_PHY_ID:
- /* Remember the original page select and set it to 7 */
- ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
- &default_page);
- if (ret_val)
- return ret_val;
-
- ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
- if (ret_val)
- return ret_val;
-
- /* Get cable length from PHY Cable Diagnostics Control Reg */
- ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
- &phy_data);
- if (ret_val)
- return ret_val;
-
- /* Check if the unit of cable length is meters or cm */
- ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
- if (ret_val)
- return ret_val;
-
- is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
-
- /* Populate the phy structure with cable length in meters */
- phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
- phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
- phy->cable_length = phy_data / (is_cm ? 100 : 1);
-
- /* Reset the page select to its original value */
- ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
- default_page);
- if (ret_val)
- return ret_val;
- break;
-
- case M88E1112_E_PHY_ID:
- /* Remember the original page select and set it to 5 */
- ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
- &default_page);
- if (ret_val)
- return ret_val;
-
- ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
- if (ret_val)
- return ret_val;
-
- ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
-
- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
- return -E1000_ERR_PHY;
-
- phy->min_cable_length = e1000_m88_cable_length_table[index];
- phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
-
- phy->cable_length = (phy->min_cable_length +
- phy->max_cable_length) / 2;
-
- /* Reset the page select to its original value */
- ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
- default_page);
- if (ret_val)
- return ret_val;
-
- break;
- default:
- return -E1000_ERR_PHY;
- }
-
- return ret_val;
-}
-
-/**
- * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
- * @hw: pointer to the HW structure
- *
- * The automatic gain control (agc) normalizes the amplitude of the
- * received signal, adjusting for the attenuation produced by the
- * cable. By reading the AGC registers, which represent the
- * combination of coarse and fine gain value, the value can be put
- * into a lookup table to obtain the approximate cable length
- * for each channel.
- **/
-s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 phy_data, i, agc_value = 0;
- u16 cur_agc_index, max_agc_index = 0;
- u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
- static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
- IGP02E1000_PHY_AGC_A,
- IGP02E1000_PHY_AGC_B,
- IGP02E1000_PHY_AGC_C,
- IGP02E1000_PHY_AGC_D
- };
-
- DEBUGFUNC("e1000_get_cable_length_igp_2");
-
- /* Read the AGC registers for all channels */
- for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
- ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
- if (ret_val)
- return ret_val;
-
- /* Getting bits 15:9, which represent the combination of
- * coarse and fine gain values. The result is a number
- * that can be put into the lookup table to obtain the
- * approximate cable length.
- */
- cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
- IGP02E1000_AGC_LENGTH_MASK);
-
- /* Array index bound check. */
- if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
- (cur_agc_index == 0))
- return -E1000_ERR_PHY;
-
- /* Remove min & max AGC values from calculation. */
- if (e1000_igp_2_cable_length_table[min_agc_index] >
- e1000_igp_2_cable_length_table[cur_agc_index])
- min_agc_index = cur_agc_index;
- if (e1000_igp_2_cable_length_table[max_agc_index] <
- e1000_igp_2_cable_length_table[cur_agc_index])
- max_agc_index = cur_agc_index;
-
- agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
- }
-
- agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
- e1000_igp_2_cable_length_table[max_agc_index]);
- agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
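- /* Worked example (illustrative values, not from the lookup table):
- * per-channel lookups of 40, 50, 60 and 80 meters sum to 230; dropping
- * the 40 and 80 extremes leaves (50 + 60) / 2 = 55 meters.
- */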
-
- /* Calculate cable length with the error range of +/- 10 meters. */
- phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
- (agc_value - IGP02E1000_AGC_RANGE) : 0);
- phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
-
- phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_get_phy_info_m88 - Retrieve PHY information
- * @hw: pointer to the HW structure
- *
- * Valid for only copper links. Read the PHY status register (sticky read)
- * to verify that link is up. Read the PHY special control register to
- * determine the polarity and 10base-T extended distance. Read the PHY
- * special status register to determine MDI/MDIx and current speed. If
- * speed is 1000, then determine the cable length and the local and remote
- * receiver status.
- **/
-s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 phy_data;
- bool link;
-
- DEBUGFUNC("e1000_get_phy_info_m88");
-
- if (phy->media_type != e1000_media_type_copper) {
- DEBUGOUT("Phy info is only valid for copper media\n");
- return -E1000_ERR_CONFIG;
- }
-
- ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
- if (ret_val)
- return ret_val;
-
- if (!link) {
- DEBUGOUT("Phy info is only valid if link is up\n");
- return -E1000_ERR_CONFIG;
- }
-
- ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy->polarity_correction = !!(phy_data &
- M88E1000_PSCR_POLARITY_REVERSAL);
-
- ret_val = e1000_check_polarity_m88(hw);
- if (ret_val)
- return ret_val;
-
- ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
-
- if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
- ret_val = hw->phy.ops.get_cable_length(hw);
- if (ret_val)
- return ret_val;
-
- ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
-
- phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
- } else {
- /* Set values to "undefined" */
- phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
- phy->local_rx = e1000_1000t_rx_status_undefined;
- phy->remote_rx = e1000_1000t_rx_status_undefined;
- }
-
- return ret_val;
-}
-
-/**
- * e1000_get_phy_info_igp - Retrieve igp PHY information
- * @hw: pointer to the HW structure
- *
- * Read PHY status to determine if link is up. If link is up, then
- * set/determine 10base-T extended distance and polarity correction. Read
- * PHY port status to determine MDI/MDIx and speed. Based on the speed,
- * determine the cable length and the local and remote receiver status.
- **/
-s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 data;
- bool link;
-
- DEBUGFUNC("e1000_get_phy_info_igp");
-
- ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
- if (ret_val)
- return ret_val;
-
- if (!link) {
- DEBUGOUT("Phy info is only valid if link is up\n");
- return -E1000_ERR_CONFIG;
- }
-
- phy->polarity_correction = true;
-
- ret_val = e1000_check_polarity_igp(hw);
- if (ret_val)
- return ret_val;
-
- ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
- if (ret_val)
- return ret_val;
-
- phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX);
-
- if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
- IGP01E1000_PSSR_SPEED_1000MBPS) {
- ret_val = phy->ops.get_cable_length(hw);
- if (ret_val)
- return ret_val;
-
- ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
- if (ret_val)
- return ret_val;
-
- phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
-
- phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
- } else {
- phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
- phy->local_rx = e1000_1000t_rx_status_undefined;
- phy->remote_rx = e1000_1000t_rx_status_undefined;
- }
-
- return ret_val;
-}
-
-/**
- * e1000_get_phy_info_ife - Retrieves various IFE PHY states
- * @hw: pointer to the HW structure
- *
- * Populates "phy" structure with various feature states.
- **/
-s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 data;
- bool link;
-
- DEBUGFUNC("e1000_get_phy_info_ife");
-
- ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
- if (ret_val)
- return ret_val;
-
- if (!link) {
- DEBUGOUT("Phy info is only valid if link is up\n");
- return -E1000_ERR_CONFIG;
- }
-
- ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
- if (ret_val)
- return ret_val;
- phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE);
-
- if (phy->polarity_correction) {
- ret_val = e1000_check_polarity_ife(hw);
- if (ret_val)
- return ret_val;
- } else {
- /* Polarity is forced */
- phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal);
- }
-
- ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
- if (ret_val)
- return ret_val;
-
- phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS);
-
- /* The following parameters are undefined for 10/100 operation. */
- phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
- phy->local_rx = e1000_1000t_rx_status_undefined;
- phy->remote_rx = e1000_1000t_rx_status_undefined;
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_phy_sw_reset_generic - PHY software reset
- * @hw: pointer to the HW structure
- *
- * Does a software reset of the PHY by reading the PHY control register and
- * setting the reset bit, then writing the control register back to the PHY.
- **/
-s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
-{
- s32 ret_val;
- u16 phy_ctrl;
-
- DEBUGFUNC("e1000_phy_sw_reset_generic");
-
- if (!hw->phy.ops.read_reg)
- return E1000_SUCCESS;
-
- ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
- if (ret_val)
- return ret_val;
-
- phy_ctrl |= MII_CR_RESET;
- ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
- if (ret_val)
- return ret_val;
-
- usec_delay(1);
-
- return ret_val;
-}
-
-/**
- * e1000_phy_hw_reset_generic - PHY hardware reset
- * @hw: pointer to the HW structure
- *
- * Verify the reset block is not blocking us from resetting. Acquire
- * semaphore (if necessary) and read/set/write the device control reset
- * bit in the PHY. Wait the appropriate delay time for the device to
- * reset and release the semaphore (if necessary).
- **/
-s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u32 ctrl;
-
- DEBUGFUNC("e1000_phy_hw_reset_generic");
-
- if (phy->ops.check_reset_block) {
- ret_val = phy->ops.check_reset_block(hw);
- if (ret_val)
- return E1000_SUCCESS;
- }
-
- ret_val = phy->ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
- E1000_WRITE_FLUSH(hw);
-
- usec_delay(phy->reset_delay_us);
-
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
- E1000_WRITE_FLUSH(hw);
-
- usec_delay(150);
-
- phy->ops.release(hw);
-
- return phy->ops.get_cfg_done(hw);
-}
-
-/**
- * e1000_get_cfg_done_generic - Generic configuration done
- * @hw: pointer to the HW structure
- *
- * Generic function that waits 10 milliseconds for configuration to complete
- * and returns success.
- **/
-s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw)
-{
- DEBUGFUNC("e1000_get_cfg_done_generic");
-
- msec_delay_irq(10);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_phy_init_script_igp3 - Inits the IGP3 PHY
- * @hw: pointer to the HW structure
- *
- * Initializes an Intel Gigabit PHY 3 (IGP3) when an EEPROM is not present.
- **/
-s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
-{
- DEBUGOUT("Running IGP 3 PHY init script\n");
-
- /* PHY init IGP 3 */
- /* Enable rise/fall, 10-mode work in class-A */
- hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
- /* Remove all caps from Replica path filter */
- hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
- /* Bias trimming for ADC, AFE and Driver (Default) */
- hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
- /* Increase Hybrid poly bias */
- hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
- /* Add 4% to Tx amplitude in Gig mode */
- hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
- /* Disable trimming (TTT) */
- hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
- /* Poly DC correction to 94.6% + 2% for all channels */
- hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
- /* ABS DC correction to 95.9% */
- hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
- /* BG temp curve trim */
- hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
- /* Increasing ADC OPAMP stage 1 currents to max */
- hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
- /* Force 1000 (required for enabling PHY regs configuration) */
- hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
- /* Set upd_freq to 6 */
- hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
- /* Disable NPDFE */
- hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
- /* Disable adaptive fixed FFE (Default) */
- hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
- /* Enable FFE hysteresis */
- hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
- /* Fixed FFE for short cable lengths */
- hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
- /* Fixed FFE for medium cable lengths */
- hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
- /* Fixed FFE for long cable lengths */
- hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
- /* Enable Adaptive Clip Threshold */
- hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
- /* AHT reset limit to 1 */
- hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
- /* Set AHT master delay to 127 msec */
- hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
- /* Set scan bits for AHT */
- hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
- /* Set AHT Preset bits */
- hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
- /* Change integ_factor of channel A to 3 */
- hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
- /* Change prop_factor of channels BCD to 8 */
- hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
- /* Change cg_icount + enable integbp for channels BCD */
- hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
- /* Change cg_icount + enable integbp + change prop_factor_master
- * to 8 for channel A
- */
- hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
- /* Disable AHT in Slave mode on channel A */
- hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
- /* Enable LPLU and disable AN to 1000 in non-D0a states,
- * Enable SPD+B2B
- */
- hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
- /* Enable restart AN on an1000_dis change */
- hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
- /* Enable wh_fifo read clock in 10/100 modes */
- hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
- /* Restart AN, Speed selection is 1000 */
- hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_get_phy_type_from_id - Get PHY type from id
- * @phy_id: phy_id read from the phy
- *
- * Returns the phy type from the id.
- **/
-enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
-{
- enum e1000_phy_type phy_type = e1000_phy_unknown;
-
- switch (phy_id) {
- case M88E1000_I_PHY_ID:
- case M88E1000_E_PHY_ID:
- case M88E1111_I_PHY_ID:
- case M88E1011_I_PHY_ID:
- case M88E1543_E_PHY_ID:
- case I347AT4_E_PHY_ID:
- case M88E1112_E_PHY_ID:
- case M88E1340M_E_PHY_ID:
- phy_type = e1000_phy_m88;
- break;
- case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
- phy_type = e1000_phy_igp_2;
- break;
- case GG82563_E_PHY_ID:
- phy_type = e1000_phy_gg82563;
- break;
- case IGP03E1000_E_PHY_ID:
- phy_type = e1000_phy_igp_3;
- break;
- case IFE_E_PHY_ID:
- case IFE_PLUS_E_PHY_ID:
- case IFE_C_E_PHY_ID:
- phy_type = e1000_phy_ife;
- break;
- case I82580_I_PHY_ID:
- phy_type = e1000_phy_82580;
- break;
- case I210_I_PHY_ID:
- phy_type = e1000_phy_i210;
- break;
- default:
- phy_type = e1000_phy_unknown;
- break;
- }
- return phy_type;
-}
-
-/**
- * e1000_determine_phy_address - Determines PHY address.
- * @hw: pointer to the HW structure
- *
- * This uses a trial and error method to loop through possible PHY
- * addresses. It tests each by reading the PHY ID registers and
- * checking for a match.
- **/
-s32 e1000_determine_phy_address(struct e1000_hw *hw)
-{
- u32 phy_addr = 0;
- u32 i;
- enum e1000_phy_type phy_type = e1000_phy_unknown;
-
- hw->phy.id = phy_type;
-
- for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
- hw->phy.addr = phy_addr;
- i = 0;
-
- do {
- e1000_get_phy_id(hw);
- phy_type = e1000_get_phy_type_from_id(hw->phy.id);
-
- /* If phy_type is valid, break - we found our
- * PHY address
- */
- if (phy_type != e1000_phy_unknown)
- return E1000_SUCCESS;
-
- msec_delay(1);
- i++;
- } while (i < 10);
- }
-
- return -E1000_ERR_PHY_TYPE;
-}
-
-/**
- * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
- * @hw: pointer to the HW structure
- *
- * Restores the link to its previous settings after the PHY was powered down
- * to save power, to turn off the link during a driver unload, or because
- * wake on LAN was not enabled.
- **/
-void e1000_power_up_phy_copper(struct e1000_hw *hw)
-{
- u16 mii_reg = 0;
- u16 power_reg = 0;
-
- /* The PHY will retain its settings across a power down/up cycle */
- hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
- mii_reg &= ~MII_CR_POWER_DOWN;
- if (hw->phy.type == e1000_phy_i210) {
- hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
- power_reg &= ~GS40G_CS_POWER_DOWN;
- hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
- }
- hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
-}
-
-/**
- * e1000_power_down_phy_copper - Power down copper PHY
- * @hw: pointer to the HW structure
- *
- * Powers down the PHY to save power, or to turn off the link during a
- * driver unload, when wake on LAN is not enabled.
- **/
-void e1000_power_down_phy_copper(struct e1000_hw *hw)
-{
- u16 mii_reg = 0;
- u16 power_reg = 0;
-
- /* The PHY will retain its settings across a power down/up cycle */
- hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
- mii_reg |= MII_CR_POWER_DOWN;
- /* i210 Phy requires an additional bit for power up/down */
- if (hw->phy.type == e1000_phy_i210) {
- hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
- power_reg |= GS40G_CS_POWER_DOWN;
- hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
- }
- hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
- msec_delay(1);
-}
-
-/**
- * e1000_check_polarity_82577 - Checks the polarity.
- * @hw: pointer to the HW structure
- *
- * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
- *
- * Polarity is determined based on the PHY specific status register.
- **/
-s32 e1000_check_polarity_82577(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 data;
-
- DEBUGFUNC("e1000_check_polarity_82577");
-
- ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
-
- if (!ret_val)
- phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal);
-
- return ret_val;
-}
-
-/**
- * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
- * @hw: pointer to the HW structure
- *
- * Calls the PHY setup function to force speed and duplex.
- **/
-s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 phy_data;
- bool link;
-
- DEBUGFUNC("e1000_phy_force_speed_duplex_82577");
-
- ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
- if (ret_val)
- return ret_val;
-
- e1000_phy_force_speed_duplex_setup(hw, &phy_data);
-
- ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
- if (ret_val)
- return ret_val;
-
- usec_delay(1);
-
- if (phy->autoneg_wait_to_complete) {
- DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n");
-
- ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
- if (ret_val)
- return ret_val;
-
- if (!link)
- DEBUGOUT("Link taking longer than expected.\n");
-
- /* Try once more */
- ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
- }
-
- return ret_val;
-}
-
-/**
- * e1000_get_phy_info_82577 - Retrieve I82577 PHY information
- * @hw: pointer to the HW structure
- *
- * Read PHY status to determine if link is up. If link is up, then
- * set/determine 10base-T extended distance and polarity correction. Read
- * PHY port status to determine MDI/MDIx and speed. Based on the speed,
- * determine the cable length and the local and remote receiver status.
- **/
-s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 data;
- bool link;
-
- DEBUGFUNC("e1000_get_phy_info_82577");
-
- ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
- if (ret_val)
- return ret_val;
-
- if (!link) {
- DEBUGOUT("Phy info is only valid if link is up\n");
- return -E1000_ERR_CONFIG;
- }
-
- phy->polarity_correction = true;
-
- ret_val = e1000_check_polarity_82577(hw);
- if (ret_val)
- return ret_val;
-
- ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
- if (ret_val)
- return ret_val;
-
- phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);
-
- if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
- I82577_PHY_STATUS2_SPEED_1000MBPS) {
- ret_val = hw->phy.ops.get_cable_length(hw);
- if (ret_val)
- return ret_val;
-
- ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
- if (ret_val)
- return ret_val;
-
- phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
-
- phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
- } else {
- phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
- phy->local_rx = e1000_1000t_rx_status_undefined;
- phy->remote_rx = e1000_1000t_rx_status_undefined;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
- * @hw: pointer to the HW structure
- *
- * Reads the diagnostic status register and verifies the result is valid
- * before placing it in the phy->cable_length field.
- **/
-s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 phy_data, length;
-
- DEBUGFUNC("e1000_get_cable_length_82577");
-
- ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
-
- length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
- I82577_DSTATUS_CABLE_LENGTH_SHIFT);
-
- if (length == E1000_CABLE_LENGTH_UNDEFINED)
- return -E1000_ERR_PHY;
-
- phy->cable_length = length;
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_write_phy_reg_gs40g - Write GS40G PHY register
- * @hw: pointer to the HW structure
- * @offset: register offset to write to
- * @data: data to write at register offset
- *
- * Acquires semaphore, if necessary, then writes the data to PHY register
- * at the offset. Release any acquired semaphores before exiting.
- **/
-s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
-{
- s32 ret_val;
- u16 page = offset >> GS40G_PAGE_SHIFT;
-
- DEBUGFUNC("e1000_write_phy_reg_gs40g");
-
- offset = offset & GS40G_OFFSET_MASK;
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
- if (ret_val)
- goto release;
- ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
-
-release:
- hw->phy.ops.release(hw);
- return ret_val;
-}
-
-/**
- * e1000_read_phy_reg_gs40g - Read GS40G PHY register
- * @hw: pointer to the HW structure
- * @offset: lower half is register offset to read from
- * upper half is page to use.
- * @data: data to read at register offset
- *
- * Acquires semaphore, if necessary, then reads the data in the PHY register
- * at the offset. Release any acquired semaphores before exiting.
- **/
-s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
-{
- s32 ret_val;
- u16 page = offset >> GS40G_PAGE_SHIFT;
-
- DEBUGFUNC("e1000_read_phy_reg_gs40g");
-
- offset = offset & GS40G_OFFSET_MASK;
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
- if (ret_val)
- goto release;
- ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
-
-release:
- hw->phy.ops.release(hw);
- return ret_val;
-}
-
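-/* Usage note (the ops wiring described here is an assumption, not shown in
- * this file): the GS40G accessors pack the page into the upper 16 bits of
- * 'offset' and the register into the lower 16. For example, the I210 branch
- * of e1000_get_cable_length_m88_gen2() above passes
- * (0x7 << GS40G_PAGE_SHIFT) + I347AT4_PCDC, i.e. register I347AT4_PCDC on
- * page 7, through phy->ops.read_reg.
- */
-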
-/**
- * e1000_read_phy_reg_mphy - Read mPHY control register
- * @hw: pointer to the HW structure
- * @address: address to be read
- * @data: pointer to the read data
- *
- * Reads the mPHY control register in the PHY at the given address and
- * stores the value read in *data.
- **/
-s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data)
-{
- u32 mphy_ctrl = 0;
- bool locked = false;
- bool ready = false;
-
- DEBUGFUNC("e1000_read_phy_reg_mphy");
-
- /* Check if mPHY is ready for read/write operations */
- ready = e1000_is_mphy_ready(hw);
- if (!ready)
- return -E1000_ERR_PHY;
-
- /* Check if mPHY access is disabled and enable it if so */
- mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
- if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
- locked = true;
- ready = e1000_is_mphy_ready(hw);
- if (!ready)
- return -E1000_ERR_PHY;
- mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
- E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
- }
-
- /* Set the address that we want to read */
- ready = e1000_is_mphy_ready(hw);
- if (!ready)
- return -E1000_ERR_PHY;
-
- /* Mask the address because we want to use only the current lane */
- mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK &
- ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) |
- (address & E1000_MPHY_ADDRESS_MASK);
- E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
-
- /* Read data from the address */
- ready = e1000_is_mphy_ready(hw);
- if (!ready)
- return -E1000_ERR_PHY;
- *data = E1000_READ_REG(hw, E1000_MPHY_DATA);
-
- /* Disable access to mPHY if it was originally disabled */
- if (locked) {
- ready = e1000_is_mphy_ready(hw);
- if (!ready)
- return -E1000_ERR_PHY;
- E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
- E1000_MPHY_DIS_ACCESS);
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_write_phy_reg_mphy - Write mPHY control register
- * @hw: pointer to the HW structure
- * @address: address to write to
- * @data: data to write to register at offset
- * @line_override: used when we want to use a line other than the default one
- *
- * Writes data to mPHY control register.
- **/
-s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
- bool line_override)
-{
- u32 mphy_ctrl = 0;
- bool locked = false;
- bool ready = false;
-
- DEBUGFUNC("e1000_write_phy_reg_mphy");
-
- /* Check if mPHY is ready for read/write operations */
- ready = e1000_is_mphy_ready(hw);
- if (!ready)
- return -E1000_ERR_PHY;
-
- /* Check if mPHY access is disabled and enable it if so */
- mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
- if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
- locked = true;
- ready = e1000_is_mphy_ready(hw);
- if (!ready)
- return -E1000_ERR_PHY;
- mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
- E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
- }
-
- /* Set the address that we want to write */
- ready = e1000_is_mphy_ready(hw);
- if (!ready)
- return -E1000_ERR_PHY;
-
- /* Mask the address because we want to use only the current lane */
- if (line_override)
- mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE;
- else
- mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE;
- mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) |
- (address & E1000_MPHY_ADDRESS_MASK);
- E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
-
- /* Write data to the address */
- ready = e1000_is_mphy_ready(hw);
- if (!ready)
- return -E1000_ERR_PHY;
- E1000_WRITE_REG(hw, E1000_MPHY_DATA, data);
-
- /* Disable access to mPHY if it was originally disabled */
- if (locked) {
- ready = e1000_is_mphy_ready(hw);
- if (!ready)
- return -E1000_ERR_PHY;
- E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
- E1000_MPHY_DIS_ACCESS);
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * e1000_is_mphy_ready - Check if mPHY control register is not busy
- * @hw: pointer to the HW structure
- *
- * Returns true if the mPHY control register is not busy.
- **/
-bool e1000_is_mphy_ready(struct e1000_hw *hw)
-{
- u16 retry_count = 0;
- u32 mphy_ctrl = 0;
- bool ready = false;
-
- while (retry_count < 2) {
- mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
- if (mphy_ctrl & E1000_MPHY_BUSY) {
- usec_delay(20);
- retry_count++;
- continue;
- }
- ready = true;
- break;
- }
-
- if (!ready)
- DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n");
-
- return ready;
-}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.h
deleted file mode 100755
index 5387c5e7..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.h
+++ /dev/null
@@ -1,256 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _E1000_PHY_H_
-#define _E1000_PHY_H_
-
-void e1000_init_phy_ops_generic(struct e1000_hw *hw);
-s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-void e1000_null_phy_generic(struct e1000_hw *hw);
-s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active);
-s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_null_set_page(struct e1000_hw *hw, u16 data);
-s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data);
-s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data);
-s32 e1000_check_downshift_generic(struct e1000_hw *hw);
-s32 e1000_check_polarity_m88(struct e1000_hw *hw);
-s32 e1000_check_polarity_igp(struct e1000_hw *hw);
-s32 e1000_check_polarity_ife(struct e1000_hw *hw);
-s32 e1000_check_reset_block_generic(struct e1000_hw *hw);
-s32 e1000_copper_link_setup_igp(struct e1000_hw *hw);
-s32 e1000_copper_link_setup_m88(struct e1000_hw *hw);
-s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw);
-s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
-s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
-s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
-s32 e1000_get_cable_length_m88(struct e1000_hw *hw);
-s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw);
-s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw);
-s32 e1000_get_cfg_done_generic(struct e1000_hw *hw);
-s32 e1000_get_phy_id(struct e1000_hw *hw);
-s32 e1000_get_phy_info_igp(struct e1000_hw *hw);
-s32 e1000_get_phy_info_m88(struct e1000_hw *hw);
-s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
-s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw);
-void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
-s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw);
-s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
-s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
-s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
-s32 e1000_setup_copper_link_generic(struct e1000_hw *hw);
-s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
- u32 usec_interval, bool *success);
-s32 e1000_phy_init_script_igp3(struct e1000_hw *hw);
-enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
-s32 e1000_determine_phy_address(struct e1000_hw *hw);
-s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
-s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
-void e1000_power_up_phy_copper(struct e1000_hw *hw);
-void e1000_power_down_phy_copper(struct e1000_hw *hw);
-s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
-s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
-s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
-s32 e1000_check_polarity_82577(struct e1000_hw *hw);
-s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
-s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
-s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
-s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data);
-s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
- bool line_override);
-bool e1000_is_mphy_ready(struct e1000_hw *hw);
-
-#define E1000_MAX_PHY_ADDR 8
-
-/* IGP01E1000 Specific Registers */
-#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
-#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
-#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
-#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
-#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
-#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
-#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
-#define IGP_PAGE_SHIFT 5
-#define PHY_REG_MASK 0x1F
-
-/* GS40G - I210 PHY defines */
-#define GS40G_PAGE_SELECT 0x16
-#define GS40G_PAGE_SHIFT 16
-#define GS40G_OFFSET_MASK 0xFFFF
-#define GS40G_PAGE_2 0x20000
-#define GS40G_MAC_REG2 0x15
-#define GS40G_MAC_LB 0x4140
-#define GS40G_MAC_SPEED_1G 0x0006
-#define GS40G_COPPER_SPEC 0x0010
-#define GS40G_CS_POWER_DOWN 0x0002
-
-#define HV_INTC_FC_PAGE_START 768
-#define I82578_ADDR_REG 29
-#define I82577_ADDR_REG 16
-#define I82577_CFG_REG 22
-#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
-#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
-#define I82577_CTRL_REG 23
-
-/* 82577 specific PHY registers */
-#define I82577_PHY_CTRL_2 18
-#define I82577_PHY_LBK_CTRL 19
-#define I82577_PHY_STATUS_2 26
-#define I82577_PHY_DIAG_STATUS 31
-
-/* I82577 PHY Status 2 */
-#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
-#define I82577_PHY_STATUS2_MDIX 0x0800
-#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
-#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
-
-/* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
-#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
-#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
-
-/* I82577 PHY Diagnostics Status */
-#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
-#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
-
-/* 82580 PHY Power Management */
-#define E1000_82580_PHY_POWER_MGMT 0xE14
-#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
-#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
-#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
-#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
-
-#define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */
-#define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */
-#define E1000_MPHY_BUSY 0x00010000 /* busy bit */
-#define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */
-#define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */
-
-#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
-#define IGP01E1000_PHY_POLARITY_MASK 0x0078
-
-#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
-#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
-
-#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
-
-#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
-#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
-#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
-
-#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
-
-#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
-#define IGP01E1000_PSSR_MDIX 0x0800
-#define IGP01E1000_PSSR_SPEED_MASK 0xC000
-#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
-
-#define IGP02E1000_PHY_CHANNEL_NUM 4
-#define IGP02E1000_PHY_AGC_A 0x11B1
-#define IGP02E1000_PHY_AGC_B 0x12B1
-#define IGP02E1000_PHY_AGC_C 0x14B1
-#define IGP02E1000_PHY_AGC_D 0x18B1
-
-#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse=15:13, Fine=12:9 */
-#define IGP02E1000_AGC_LENGTH_MASK 0x7F
-#define IGP02E1000_AGC_RANGE 15
-
-#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
-
-#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
-#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
-#define E1000_KMRNCTRLSTA_REN 0x00200000
-#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
-#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
-#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
-#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
-#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
-
-#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
-#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */
-#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */
-#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
-
-/* IFE PHY Extended Status Control */
-#define IFE_PESC_POLARITY_REVERSED 0x0100
-
-/* IFE PHY Special Control */
-#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
-#define IFE_PSC_FORCE_POLARITY 0x0020
-
-/* IFE PHY Special Control and LED Control */
-#define IFE_PSCL_PROBE_MODE 0x0020
-#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
-#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
-
-/* IFE PHY MDIX Control */
-#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
-#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
-#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */
-
-/* SFP modules ID memory locations */
-#define E1000_SFF_IDENTIFIER_OFFSET 0x00
-#define E1000_SFF_IDENTIFIER_SFF 0x02
-#define E1000_SFF_IDENTIFIER_SFP 0x03
-
-#define E1000_SFF_ETH_FLAGS_OFFSET 0x06
-/* Flags for SFP modules compatible with ETH up to 1Gb */
-struct sfp_e1000_flags {
- u8 e1000_base_sx:1;
- u8 e1000_base_lx:1;
- u8 e1000_base_cx:1;
- u8 e1000_base_t:1;
- u8 e100_base_lx:1;
- u8 e100_base_fx:1;
- u8 e10_base_bx10:1;
- u8 e10_base_px:1;
-};
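-
-/* Illustrative sketch (not from the original driver): the flags byte read
- * from E1000_SFF_ETH_FLAGS_OFFSET of the module ID memory can be overlaid
- * with this struct to test individual media bits:
- *
- *   union { u8 byte; struct sfp_e1000_flags flags; } eth;
- *   eth.byte = flags_byte;  (byte previously read from the module)
- *   if (eth.flags.e1000_base_t)
- *           (module is 1000BASE-T copper)
- */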
-
-/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
-#define E1000_SFF_VENDOR_OUI_TYCO 0x00407600
-#define E1000_SFF_VENDOR_OUI_FTL 0x00906500
-#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00
-#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100
-
-#endif
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_regs.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_regs.h
deleted file mode 100755
index 0e083c54..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_regs.h
+++ /dev/null
@@ -1,646 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _E1000_REGS_H_
-#define _E1000_REGS_H_
-
-#define E1000_CTRL 0x00000 /* Device Control - RW */
-#define E1000_STATUS 0x00008 /* Device Status - RO */
-#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
-#define E1000_EERD 0x00014 /* EEPROM Read - RW */
-#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
-#define E1000_FLA 0x0001C /* Flash Access - RW */
-#define E1000_MDIC 0x00020 /* MDI Control - RW */
-#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */
-#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */
-#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */
-#define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */
-#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */
-#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */
-#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */
-#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */
-#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */
-#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */
-#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */
-#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */
-#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg */
-#define E1000_SCTL 0x00024 /* SerDes Control - RW */
-#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
-#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
-#define E1000_FCT 0x00030 /* Flow Control Type - RW */
-#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
-#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
-#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
-#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
-#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
-#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
-#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
-#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
-#define E1000_RCTL 0x00100 /* Rx Control - RW */
-#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
-#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
-#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */
-#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
-#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
-#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */
-#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
-#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
-#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
-#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
-#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
-#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
-#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
-#define E1000_TCTL 0x00400 /* Tx Control - RW */
-#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
-#define E1000_TIPG 0x00410 /* Tx Inter-packet gap - RW */
-#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
-#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
-#define E1000_LEDMUX 0x08130 /* LED MUX Control */
-#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
-#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
-#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
-#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
-#define E1000_PBS 0x01008 /* Packet Buffer Size */
-#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
-#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
-#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
-#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
-#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
-#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
-#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */
-#define E1000_I2C_CLK_OUT 0x00000200 /* I2C - Clock */
-#define E1000_I2C_DATA_OUT 0x00000400 /* I2C - Data Out */
-#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C - Data Output Enable */
-#define E1000_I2C_DATA_IN 0x00001000 /* I2C - Data In */
-#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C - Clock Output Enable */
-#define E1000_I2C_CLK_IN 0x00004000 /* I2C - Clock In */
-#define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C - Dis Clk Stretching */
-#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */
-#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */
-#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
-#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
-#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */
-#define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */
-#define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */
-#define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */
-#define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */
-#define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */
-#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
-#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
-#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
-#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
-#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
-#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
-#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
-#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
-#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
-#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */
-#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
-/* Split and Replication Rx Control - RW */
-#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */
-#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */
-#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */
-#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */
-#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */
-#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */
-#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
-#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */
-#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */
-#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
-#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
-#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */
-#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */
-#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */
-#define E1000_I210_FLMNGCTL 0x12038
-#define E1000_I210_FLMNGDATA 0x1203C
-#define E1000_I210_FLMNGCNT 0x12040
-
-#define E1000_I210_FLSWCTL 0x12048
-#define E1000_I210_FLSWDATA 0x1204C
-#define E1000_I210_FLSWCNT 0x12050
-
-#define E1000_I210_FLA 0x1201C
-
-#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
-#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
-
-/* QAV Tx mode control register */
-#define E1000_I210_TQAVCTRL 0x3570
-
-/* QAV Tx mode control register bitfields masks */
-/* QAV enable */
-#define E1000_TQAVCTRL_MODE (1 << 0)
-/* Fetching arbitration type */
-#define E1000_TQAVCTRL_FETCH_ARB (1 << 4)
-/* Fetching timer enable */
-#define E1000_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5)
-/* Launch arbitration type */
-#define E1000_TQAVCTRL_LAUNCH_ARB (1 << 8)
-/* Launch timer enable */
-#define E1000_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9)
-/* SP waits for SR enable */
-#define E1000_TQAVCTRL_SP_WAIT_SR (1 << 10)
-/* Fetching timer correction */
-#define E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16
-#define E1000_TQAVCTRL_FETCH_TIMER_DELTA \
- (0xFFFF << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET)
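/* Hedged aside (not part of the deleted file): the usual mask-and-shift
 * pattern with the two defines above; the variable name tqavctrl is
 * illustrative only.
 *
 *   u32 delta = (tqavctrl & E1000_TQAVCTRL_FETCH_TIMER_DELTA) >>
 *               E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET;
 */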
-
-/* High credit registers where _n can be 0 or 1. */
-#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n))
-
-/* Queues fetch arbitration priority control register */
-#define E1000_I210_TQAVARBCTRL 0x3574
-/* Queues priority masks where _n and _p can be 0-3. */
-#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n)))
-/* QAV Tx mode control registers where _n can be 0 or 1. */
-#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n))
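/* Worked expansions (hedged aside, derived purely from the macros above):
 *   E1000_TQAVARBCTRL_QUEUE_PRI(2, 3) -> 3 << (2 * 2) = 0x30
 *   E1000_I210_TQAVCC(1)              -> 0x3004 + 0x40 = 0x3044
 */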
-
-/* QAV Tx mode control register bitfields masks */
-#define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */
-#define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */
-#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. SR Tx mode */
-
-/* Good transmitted packets counter registers */
-#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n)))
-
-/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */
-#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n)))
-
-#define E1000_MMDAC 13 /* MMD Access Control */
-#define E1000_MMDAAD 14 /* MMD Access Address/Data */
-
-/* Convenience macros
- *
- * Note: "_n" is the queue number of the register to be written to.
- *
- * Example usage:
- * E1000_RDBAL(current_rx_queue)
- */
-#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
- (0x0C000 + ((_n) * 0x40)))
-#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
- (0x0C004 + ((_n) * 0x40)))
-#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
- (0x0C008 + ((_n) * 0x40)))
-#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
- (0x0C00C + ((_n) * 0x40)))
-#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
- (0x0C010 + ((_n) * 0x40)))
-#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
- (0x0C014 + ((_n) * 0x40)))
-#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
-#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
- (0x0C018 + ((_n) * 0x40)))
-#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
- (0x0C028 + ((_n) * 0x40)))
-#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \
- (0x0C030 + ((_n) * 0x40)))
-#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
- (0x0E000 + ((_n) * 0x40)))
-#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
- (0x0E004 + ((_n) * 0x40)))
-#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
- (0x0E008 + ((_n) * 0x40)))
-#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
- (0x0E010 + ((_n) * 0x40)))
-#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
- (0x0E014 + ((_n) * 0x40)))
-#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
-#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
- (0x0E018 + ((_n) * 0x40)))
-#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
- (0x0E028 + ((_n) * 0x40)))
-#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \
- (0x0E038 + ((_n) * 0x40)))
-#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \
- (0x0E03C + ((_n) * 0x40)))
-#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
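/* Hedged usage sketch for the queue-indexed macros above; rd32() is a
 * hypothetical MMIO read accessor, not something defined in this file.
 *
 *   u32 base_lo = rd32(hw, E1000_RDBAL(current_rx_queue));
 *
 * Worked expansions: E1000_RDBAL(2) -> 0x02800 + 2 * 0x100 = 0x02A00
 * (legacy block for queues 0-3), E1000_RDBAL(6) -> 0x0C000 + 6 * 0x40 =
 * 0x0C180 (extended block, 0x40 stride, for queues 4 and up).
 */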
-#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */
-#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
-#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
-#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
-#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x054E0 + ((_i - 16) * 8)))
-#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x054E4 + ((_i - 16) * 8)))
-#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
-#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
-#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
-#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
-#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
-#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
-#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
-#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
-#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */
-#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */
-#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
-/* Same as TXPBS, renamed for newer Si - RW */
-#define E1000_ITPBS 0x03404
-#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
-#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
-#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
-#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
-#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
-#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */
-#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */
-#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */
-#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */
-#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */
-#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */
-#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */
-#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */
-/* DMA Tx Max Total Allow Size Reqs - RW */
-#define E1000_DTXMXSZRQ 0x03540
-#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
-#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
-#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
-#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
-#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
-#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
-#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
-#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
-#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
-#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
-#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
-#define E1000_COLC 0x04028 /* Collision Count - R/clr */
-#define E1000_DC 0x04030 /* Defer Count - R/clr */
-#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
-#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
-#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
-#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
-#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
-#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
-#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
-#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
-#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
-#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
-#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
-#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
-#define E1000_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
-#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
-#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
-#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
-#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
-#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
-#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
-#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
-#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
-#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
-#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
-#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
-#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */
-#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */
-#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */
-#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */
-#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
-#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
-#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
-#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
-#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */
-#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
-#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */
-#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */
-#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */
-#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
-#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
-#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
-#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
-#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
-#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 bytes) - R/clr */
-#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
-#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
-#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
-#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
-#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
-#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */
-#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */
-#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
-#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
-#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
-#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
-#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
-#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
-
-/* Virtualization statistical counters */
-#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n)))
-#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n)))
-#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n)))
-#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n)))
-#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n)))
-#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n)))
-#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n)))
-#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n)))
-#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n)))
-
-/* LinkSec */
-#define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */
-#define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */
-#define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */
-#define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */
-#define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */
-#define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */
-#define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */
-#define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */
-#define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */
-#define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */
-#define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */
-#define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */
-#define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */
-#define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */
-#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */
-#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */
-#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */
-#define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */
-#define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */
-#define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */
-#define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */
-#define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */
-#define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */
-#define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */
-#define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */
-#define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */
-#define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */
-#define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */
-#define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */
-#define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */
-/* LinkSec Tx 128-bit Key 0 - WO */
-#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n)))
-/* LinkSec Tx 128-bit Key 1 - WO */
-#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n)))
-#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */
-#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */
-/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit
- * key - RW.
- */
-#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m)))
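/* Worked expansion (hedged aside): dword 2 of the key for SA 1 lives at
 * E1000_LSECRXKEY(1, 2) = 0x0B350 + 0x10 * 1 + 0x04 * 2 = 0x0B368.
 */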
-
-#define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */
-#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */
-#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */
-#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */
-/* IPSec Rx IPv4/v6 Address - RW */
-#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n)))
-/* IPSec Rx 128-bit Key - RW */
-#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n)))
-#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */
-#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */
-/* IPSec Tx 128-bit Key - RW */
-#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n)))
-#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */
-#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */
-#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */
-#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
-#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
-#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */
-#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
-#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */
-#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */
-#define E1000_RPTHC 0x04104 /* Rx Packets To Host */
-#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */
-#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */
-#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */
-#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */
-#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
-#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
-#define E1000_LENERRS 0x04138 /* Length Errors Count */
-#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
-#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
-#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
-#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
-#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */
-#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */
-#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */
-#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */
-#define E1000_RFCTL 0x05008 /* Receive Filter Control */
-#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
-#define E1000_RA 0x05400 /* Receive Address - RW Array */
-#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
-#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
-#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
-#define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */
-#define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */
-#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */
-#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */
-#define E1000_WUC 0x05800 /* Wakeup Control - RW */
-#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
-#define E1000_WUS 0x05810 /* Wakeup Status - RO */
-#define E1000_MANC 0x05820 /* Management Control - RW */
-#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
-#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
-#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
-#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
-#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
-#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */
-#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
-#define E1000_HOST_IF 0x08800 /* Host Interface */
-#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
-#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
-#define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */
-/* Flexible Host Filter Table */
-#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100))
-/* Ext Flexible Host Filter Table */
-#define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100))
-
-
-#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
-#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
-/* Management Decision Filters */
-#define E1000_MDEF(_n) (0x05890 + (4 * (_n)))
-#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
-#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
-#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
-#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */
-#define E1000_GCR 0x05B00 /* PCI-Ex Control */
-#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */
-#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
-#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
-#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
-#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
-#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
-#define E1000_SWSM 0x05B50 /* SW Semaphore */
-#define E1000_FWSM 0x05B54 /* FW Semaphore */
-/* Driver-only SW semaphore (not used by BOOT agents) */
-#define E1000_SWSM2 0x05B58
-#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */
-#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
-#define E1000_UFUSE 0x05B78 /* UFUSE - RO */
-#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
-#define E1000_HICR 0x08F00 /* Host Interface Control */
-#define E1000_FWSTS 0x08F0C /* FW Status */
-
-/* RSS registers */
-#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
-#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
-#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
-#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/
-#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */
-#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */
-#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
-#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
-#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
-#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
-/* VT Registers */
-#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */
-#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
-#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
-#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
-#define E1000_VFRE 0x00C8C /* VF Receive Enables */
-#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
-#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
-#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
-#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */
-#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
-#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
-#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
-#define E1000_VMRCTL 0x05D80 /* Virtual Mirror Rule Control */
-#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */
-#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */
-#define E1000_MDFB 0x03558 /* Malicious Driver free block */
-#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */
-#define E1000_TXSWC 0x05ACC /* Tx Switch Control */
-#define E1000_SCCRL 0x05DB0 /* Storm Control Control */
-#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */
-#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */
-/* These act per VF so an array friendly macro is used */
-#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n)))
-#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
-#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
-#define E1000_VFVMBMEM(_n) (0x00800 + (_n))
-#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
-/* VLAN Virtual Machine Filter - RW */
-#define E1000_VLVF(_n) (0x05D00 + (4 * (_n)))
-#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
-#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */
-#define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */
-#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
-#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
-#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
-#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
-#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
-#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
-#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
-#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
-#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
-#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
-#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
-#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
-#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */
-#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */
-#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
-#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
-#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
-#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
-
-/* Filtering Registers */
-#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
-#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
-#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */
-#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
-#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */
-#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
-#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
-
-#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */
-#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */
-#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */
-#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */
-#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */
-/* Tx Desc plane TC Rate-scheduler config */
-#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4))
-/* Tx Packet plane TC Rate-Scheduler Config */
-#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4))
-/* Rx Packet plane TC Rate-Scheduler Config */
-#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4))
-/* Tx Desc Plane TC Rate-Scheduler Status */
-#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4))
-/* Tx Desc Plane TC Rate-Scheduler MMW */
-#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4))
-/* Tx Packet plane TC Rate-Scheduler Status */
-#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4))
-/* Tx Packet plane TC Rate-scheduler MMW */
-#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4))
-/* Rx Packet plane TC Rate-Scheduler Status */
-#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4))
-/* Rx Packet plane TC Rate-Scheduler MMW */
-#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4))
-/* Tx Desc plane VM Rate-Scheduler MMW */
-#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4))
-/* Tx BCN Rate-Scheduler MMW */
-#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4))
-#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */
-#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */
-#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */
-#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */
-#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */
-#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */
-#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */
-#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */
-#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */
-#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */
-#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */
-#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */
-#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */
-#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */
-
-/* DMA Coalescing registers */
-#define E1000_DMACR 0x02508 /* Control Register */
-#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
-#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
-#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
-#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
-#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
-#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
-
-/* PCIe Parity Status Register */
-#define E1000_PCIEERRSTS 0x05BA8
-
-#define E1000_PROXYS 0x5F64 /* Proxying Status */
-#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */
-/* Thermal sensor configuration and status registers */
-#define E1000_THMJT 0x08100 /* Junction Temperature */
-#define E1000_THLOWTC 0x08104 /* Low Threshold Control */
-#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */
-#define E1000_THHIGHTC 0x0810C /* High Threshold Control */
-#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
-
-/* Energy Efficient Ethernet "EEE" registers */
-#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
-#define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
-#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE" */
-#define E1000_EEE_SU 0x0E34 /* EEE Setup */
-#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */
-#define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */
-
-/* OS2BMC Registers */
-#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
-#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
-#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
-#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
-
-
-
-#endif
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb.h
deleted file mode 100755
index a582f52e..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb.h
+++ /dev/null
@@ -1,859 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-/* Linux PRO/1000 Ethernet Driver main header file */
-
-#ifndef _IGB_H_
-#define _IGB_H_
-
-#include <linux/kobject.h>
-
-#ifndef IGB_NO_LRO
-#include <net/tcp.h>
-#endif
-
-#undef HAVE_HW_TIME_STAMP
-#ifdef HAVE_HW_TIME_STAMP
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-
-#endif
-#ifdef SIOCETHTOOL
-#include <linux/ethtool.h>
-#endif
-
-struct igb_adapter;
-
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
-//#define IGB_DCA
-#endif
-#ifdef IGB_DCA
-#include <linux/dca.h>
-#endif
-
-#include "kcompat.h"
-
-#ifdef HAVE_SCTP
-#include <linux/sctp.h>
-#endif
-
-#include "e1000_api.h"
-#include "e1000_82575.h"
-#include "e1000_manage.h"
-#include "e1000_mbx.h"
-
-#define IGB_ERR(args...) printk(KERN_ERR "igb: " args)
-
-#define PFX "igb: "
-#define DPRINTK(nlevel, klevel, fmt, args...) \
- (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
- printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
- __FUNCTION__ , ## args))
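/* Hedged usage example (not from the deleted file); the NETIF_MSG_* level
 * names referenced by the macro come from <linux/netdevice.h>:
 *
 *   DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps\n", adapter->link_speed);
 */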
-
-#ifdef HAVE_PTP_1588_CLOCK
-#include <linux/clocksource.h>
-#include <linux/net_tstamp.h>
-#include <linux/ptp_clock_kernel.h>
-#endif /* HAVE_PTP_1588_CLOCK */
-
-#ifdef HAVE_I2C_SUPPORT
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#endif /* HAVE_I2C_SUPPORT */
-
-/* Interrupt defines */
-#define IGB_START_ITR 648 /* ~6000 ints/sec */
-#define IGB_4K_ITR 980
-#define IGB_20K_ITR 196
-#define IGB_70K_ITR 56
-
-/* Interrupt modes, as used by the IntMode parameter */
-#define IGB_INT_MODE_LEGACY 0
-#define IGB_INT_MODE_MSI 1
-#define IGB_INT_MODE_MSIX 2
-
-/* TX/RX descriptor defines */
-#define IGB_DEFAULT_TXD 256
-#define IGB_DEFAULT_TX_WORK 128
-#define IGB_MIN_TXD 80
-#define IGB_MAX_TXD 4096
-
-#define IGB_DEFAULT_RXD 256
-#define IGB_MIN_RXD 80
-#define IGB_MAX_RXD 4096
-
-#define IGB_MIN_ITR_USECS 10 /* 100k irq/sec */
-#define IGB_MAX_ITR_USECS 8191 /* 120 irq/sec */
-
-#define NON_Q_VECTORS 1
-#define MAX_Q_VECTORS 10
-
-/* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES 16
-#define IGB_MAX_TX_QUEUES 16
-
-#define IGB_MAX_VF_MC_ENTRIES 30
-#define IGB_MAX_VF_FUNCTIONS 8
-#define IGB_82576_VF_DEV_ID 0x10CA
-#define IGB_I350_VF_DEV_ID 0x1520
-#define IGB_MAX_UTA_ENTRIES 128
-#define MAX_EMULATION_MAC_ADDRS 16
-#define OUI_LEN 3
-#define IGB_MAX_VMDQ_QUEUES 8
-
-
-struct vf_data_storage {
- unsigned char vf_mac_addresses[ETH_ALEN];
- u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
- u16 num_vf_mc_hashes;
- u16 default_vf_vlan_id;
- u16 vlans_enabled;
- unsigned char em_mac_addresses[MAX_EMULATION_MAC_ADDRS * ETH_ALEN];
- u32 uta_table_copy[IGB_MAX_UTA_ENTRIES];
- u32 flags;
- unsigned long last_nack;
-#ifdef IFLA_VF_MAX
- u16 pf_vlan; /* When set, guest VLAN config not allowed. */
- u16 pf_qos;
- u16 tx_rate;
-#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
- bool spoofchk_enabled;
-#endif
-#endif
-};
-
-#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
-#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
-#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
-#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
-
-/* RX descriptor control thresholds.
- * PTHRESH - MAC will consider prefetch if it has fewer than this number of
- * descriptors available in its onboard memory.
- * Setting this to 0 disables RX descriptor prefetch.
- * HTHRESH - MAC will only prefetch if there are at least this many descriptors
- * available in host memory.
- * If PTHRESH is 0, this should also be 0.
- * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
- * descriptors until either it has this many to write back, or the
- * ITR timer expires.
- */
-#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8)
-#define IGB_RX_HTHRESH 8
-#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
-#define IGB_TX_HTHRESH 1
-#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 4)
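/* Hedged sketch of how these thresholds are typically folded into the
 * per-queue RXDCTL register: on this family PTHRESH occupies bits 4:0,
 * HTHRESH bits 12:8 and WTHRESH bits 20:16. wr32() is an assumed MMIO
 * write helper and E1000_RXDCTL_QUEUE_ENABLE is defined elsewhere in the
 * e1000 headers.
 *
 *   u32 rxdctl = IGB_RX_PTHRESH | (IGB_RX_HTHRESH << 8) |
 *                (IGB_RX_WTHRESH << 16) | E1000_RXDCTL_QUEUE_ENABLE;
 *   wr32(hw, E1000_RXDCTL(ring->reg_idx), rxdctl);
 */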
-
-/* this is the size past which hardware will drop packets when setting LPE=0 */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
-
-/* NOTE: netdev_alloc_skb reserves 16 bytes, NET_IP_ALIGN means we
- * reserve 2 more, and skb_shared_info adds an additional 384 more,
- * this adds roughly 448 bytes of extra data meaning the smallest
- * allocation we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
- */
-/* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_256 256
-#define IGB_RXBUFFER_2048 2048
-#define IGB_RXBUFFER_16384 16384
-#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
-#if MAX_SKB_FRAGS < 8
-#define IGB_RX_BUFSZ ALIGN(MAX_JUMBO_FRAME_SIZE / MAX_SKB_FRAGS, 1024)
-#else
-#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
-#endif
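/* Arithmetic behind the NOTE above (hedged; the exact overhead is
 * kernel-version dependent): a 512-byte buffer request costs roughly
 * 512 + 448 = 960 bytes, which the slab allocator rounds up to its
 * 1024-byte cache, hence "RXBUFFER_512 --> size-1024 slab".
 */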
-
-
-/* Packet Buffer allocations */
-#define IGB_PBA_BYTES_SHIFT 0xA
-#define IGB_TX_HEAD_ADDR_SHIFT 7
-#define IGB_PBA_TX_MASK 0xFFFF0000
-
-#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */
-
-/* How many Rx Buffers do we bundle into one write to the hardware? */
-#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-
-#define IGB_EEPROM_APME 0x0400
-#define AUTO_ALL_MODES 0
-
-#ifndef IGB_MASTER_SLAVE
-/* Switch to override PHY master/slave setting */
-#define IGB_MASTER_SLAVE e1000_ms_hw_default
-#endif
-
-#define IGB_MNG_VLAN_NONE -1
-
-#ifndef IGB_NO_LRO
-#define IGB_LRO_MAX 32 /* Maximum number of LRO descriptors */
-struct igb_lro_stats {
- u32 flushed;
- u32 coal;
-};
-
-/*
- * igb_lro_header - header format to be aggregated by LRO
- * @iph: IP header without options
- * @th: TCP header
- * @ts: Optional TCP timestamp data in TCP options
- *
- * This structure relies on the check above that verifies that the header
- * is IPv4 and does not contain any options.
- */
-struct igb_lrohdr {
- struct iphdr iph;
- struct tcphdr th;
- __be32 ts[0];
-};
-
-struct igb_lro_list {
- struct sk_buff_head active;
- struct igb_lro_stats stats;
-};
-
-#endif /* IGB_NO_LRO */
-struct igb_cb {
-#ifndef IGB_NO_LRO
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- union { /* Union defining head/tail partner */
- struct sk_buff *head;
- struct sk_buff *tail;
- };
-#endif
- __be32 tsecr; /* timestamp echo response */
- u32 tsval; /* timestamp value in host order */
- u32 next_seq; /* next expected sequence number */
- u16 free; /* 65521 minus total size */
- u16 mss; /* size of data portion of packet */
- u16 append_cnt; /* number of skb's appended */
-#endif /* IGB_NO_LRO */
-#ifdef HAVE_VLAN_RX_REGISTER
- u16 vid; /* VLAN tag */
-#endif
-};
-#define IGB_CB(skb) ((struct igb_cb *)(skb)->cb)
-
-enum igb_tx_flags {
- /* cmd_type flags */
- IGB_TX_FLAGS_VLAN = 0x01,
- IGB_TX_FLAGS_TSO = 0x02,
- IGB_TX_FLAGS_TSTAMP = 0x04,
-
- /* olinfo flags */
- IGB_TX_FLAGS_IPV4 = 0x10,
- IGB_TX_FLAGS_CSUM = 0x20,
-};
-
-/* VLAN info */
-#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
-#define IGB_TX_FLAGS_VLAN_SHIFT 16
-
-/*
- * The largest size we can write to the descriptor is 65535. In order to
- * maintain a power of two alignment we have to limit ourselves to 32K.
- */
-#define IGB_MAX_TXD_PWR 15
-#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
-#ifndef MAX_SKB_FRAGS
-#define DESC_NEEDED 4
-#elif (MAX_SKB_FRAGS < 16)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
-#else
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
-#endif
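/* Worked example (hedged): on a typical kernel with MAX_SKB_FRAGS = 17 and
 * PAGE_SIZE = 4096, the last branch applies and DESC_NEEDED = 17 + 4 = 21;
 * with MAX_SKB_FRAGS = 8 and 4K pages the middle branch gives
 * (8 * TXD_USE_COUNT(4096)) + 4 = (8 * 1) + 4 = 12.
 */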
-
-/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
-struct igb_tx_buffer {
- union e1000_adv_tx_desc *next_to_watch;
- unsigned long time_stamp;
- struct sk_buff *skb;
- unsigned int bytecount;
- u16 gso_segs;
- __be16 protocol;
- DEFINE_DMA_UNMAP_ADDR(dma);
- DEFINE_DMA_UNMAP_LEN(len);
- u32 tx_flags;
-};
-
-struct igb_rx_buffer {
- dma_addr_t dma;
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- struct sk_buff *skb;
-#else
- struct page *page;
- u32 page_offset;
-#endif
-};
-
-struct igb_tx_queue_stats {
- u64 packets;
- u64 bytes;
- u64 restart_queue;
-};
-
-struct igb_rx_queue_stats {
- u64 packets;
- u64 bytes;
- u64 drops;
- u64 csum_err;
- u64 alloc_failed;
- u64 ipv4_packets; /* IPv4 headers processed */
- u64 ipv4e_packets; /* IPv4E headers with extensions processed */
- u64 ipv6_packets; /* IPv6 headers processed */
- u64 ipv6e_packets; /* IPv6E headers with extensions processed */
- u64 tcp_packets; /* TCP headers processed */
- u64 udp_packets; /* UDP headers processed */
- u64 sctp_packets; /* SCTP headers processed */
- u64 nfs_packets; /* NFS headers processed */
-};
-
-struct igb_ring_container {
- struct igb_ring *ring; /* pointer to linked list of rings */
- unsigned int total_bytes; /* total bytes processed this int */
- unsigned int total_packets; /* total packets processed this int */
- u16 work_limit; /* total work allowed per interrupt */
- u8 count; /* total number of rings in vector */
- u8 itr; /* current ITR setting for ring */
-};
-
-struct igb_ring {
- struct igb_q_vector *q_vector; /* backlink to q_vector */
- struct net_device *netdev; /* back pointer to net_device */
- struct device *dev; /* device for dma mapping */
- union { /* array of buffer info structs */
- struct igb_tx_buffer *tx_buffer_info;
- struct igb_rx_buffer *rx_buffer_info;
- };
-#ifdef HAVE_PTP_1588_CLOCK
- unsigned long last_rx_timestamp;
-#endif /* HAVE_PTP_1588_CLOCK */
- void *desc; /* descriptor ring memory */
- unsigned long flags; /* ring specific flags */
- void __iomem *tail; /* pointer to ring tail register */
- dma_addr_t dma; /* phys address of the ring */
- unsigned int size; /* length of desc. ring in bytes */
-
- u16 count; /* number of desc. in the ring */
- u8 queue_index; /* logical index of the ring */
- u8 reg_idx; /* physical index of the ring */
-
- /* everything past this point are written often */
- u16 next_to_clean;
- u16 next_to_use;
- u16 next_to_alloc;
-
- union {
- /* TX */
- struct {
- struct igb_tx_queue_stats tx_stats;
- };
- /* RX */
- struct {
- struct igb_rx_queue_stats rx_stats;
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- u16 rx_buffer_len;
-#else
- struct sk_buff *skb;
-#endif
- };
- };
-#ifdef CONFIG_IGB_VMDQ_NETDEV
- struct net_device *vmdq_netdev;
- int vqueue_index; /* queue index for virtual netdev */
-#endif
-} ____cacheline_internodealigned_in_smp;
-
-struct igb_q_vector {
- struct igb_adapter *adapter; /* backlink */
- int cpu; /* CPU for DCA */
- u32 eims_value; /* EIMS mask value */
-
- u16 itr_val;
- u8 set_itr;
- void __iomem *itr_register;
-
- struct igb_ring_container rx, tx;
-
- struct napi_struct napi;
-#ifndef IGB_NO_LRO
- struct igb_lro_list lrolist; /* LRO list for queue vector */
-#endif
- char name[IFNAMSIZ + 9];
-#ifndef HAVE_NETDEV_NAPI_LIST
- struct net_device poll_dev;
-#endif
-
- /* for dynamic allocation of rings associated with this q_vector */
- struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
-};
-
-enum e1000_ring_flags_t {
-#ifndef HAVE_NDO_SET_FEATURES
- IGB_RING_FLAG_RX_CSUM,
-#endif
- IGB_RING_FLAG_RX_SCTP_CSUM,
- IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
- IGB_RING_FLAG_TX_CTX_IDX,
- IGB_RING_FLAG_TX_DETECT_HANG,
-};
-
-struct igb_mac_addr {
- u8 addr[ETH_ALEN];
- u16 queue;
- u16 state; /* bitmask */
-};
-#define IGB_MAC_STATE_DEFAULT 0x1
-#define IGB_MAC_STATE_MODIFIED 0x2
-#define IGB_MAC_STATE_IN_USE 0x4
-
-#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
-
-#define IGB_RX_DESC(R, i) \
- (&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
-#define IGB_TX_DESC(R, i) \
- (&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
-#define IGB_TX_CTXTDESC(R, i) \
- (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
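/* Hedged usage sketch for the accessors above, paired with
 * igb_test_staterr() defined further down; E1000_RXD_STAT_DD is the
 * descriptor-done status bit defined elsewhere in the e1000 headers:
 *
 *   union e1000_adv_rx_desc *rx_desc = IGB_RX_DESC(rx_ring, i);
 *   if (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
 *           // descriptor i has been written back by hardware
 */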
-
-#ifdef CONFIG_IGB_VMDQ_NETDEV
-#define netdev_ring(ring) \
- ((ring->vmdq_netdev ? ring->vmdq_netdev : ring->netdev))
-#define ring_queue_index(ring) \
- ((ring->vmdq_netdev ? ring->vqueue_index : ring->queue_index))
-#else
-#define netdev_ring(ring) (ring->netdev)
-#define ring_queue_index(ring) (ring->queue_index)
-#endif /* CONFIG_IGB_VMDQ_NETDEV */
-
-/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
-static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
- const u32 stat_err_bits)
-{
- return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
-}
-
-/* igb_desc_unused - calculate if we have unused descriptors */
-static inline u16 igb_desc_unused(const struct igb_ring *ring)
-{
- u16 ntc = ring->next_to_clean;
- u16 ntu = ring->next_to_use;
-
- return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
-}
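/* Worked example (hedged aside): with count = 256, next_to_clean = 10 and
 * next_to_use = 250, ntc <= ntu, so the result is 256 + 10 - 250 - 1 = 15.
 * The trailing "- 1" keeps one descriptor permanently unused so that
 * head == tail can only ever mean an empty ring.
 */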
-
-#ifdef CONFIG_BQL
-static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
-{
- return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
-}
-#endif /* CONFIG_BQL */
-
-// #ifdef EXT_THERMAL_SENSOR_SUPPORT
-// #ifdef IGB_PROCFS
-struct igb_therm_proc_data
-{
- struct e1000_hw *hw;
- struct e1000_thermal_diode_data *sensor_data;
-};
-
-// #endif /* IGB_PROCFS */
-// #endif /* EXT_THERMAL_SENSOR_SUPPORT */
-
-#ifdef IGB_HWMON
-#define IGB_HWMON_TYPE_LOC 0
-#define IGB_HWMON_TYPE_TEMP 1
-#define IGB_HWMON_TYPE_CAUTION 2
-#define IGB_HWMON_TYPE_MAX 3
-
-struct hwmon_attr {
- struct device_attribute dev_attr;
- struct e1000_hw *hw;
- struct e1000_thermal_diode_data *sensor;
- char name[12];
-};
-
-struct hwmon_buff {
- struct device *device;
- struct hwmon_attr *hwmon_list;
- unsigned int n_hwmon;
-};
-#endif /* IGB_HWMON */
-
-/* board specific private data structure */
-struct igb_adapter {
-#ifdef HAVE_VLAN_RX_REGISTER
- /* vlgrp must be first member of structure */
- struct vlan_group *vlgrp;
-#else
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-#endif
- struct net_device *netdev;
-
- unsigned long state;
- unsigned int flags;
-
- unsigned int num_q_vectors;
- struct msix_entry *msix_entries;
-
-
- /* TX */
- u16 tx_work_limit;
- u32 tx_timeout_count;
- int num_tx_queues;
- struct igb_ring *tx_ring[IGB_MAX_TX_QUEUES];
-
- /* RX */
- int num_rx_queues;
- struct igb_ring *rx_ring[IGB_MAX_RX_QUEUES];
-
- struct timer_list watchdog_timer;
- struct timer_list dma_err_timer;
- struct timer_list phy_info_timer;
- u16 mng_vlan_id;
- u32 bd_number;
- u32 wol;
- u32 en_mng_pt;
- u16 link_speed;
- u16 link_duplex;
- u8 port_num;
-
- /* Interrupt Throttle Rate */
- u32 rx_itr_setting;
- u32 tx_itr_setting;
-
- struct work_struct reset_task;
- struct work_struct watchdog_task;
- struct work_struct dma_err_task;
- bool fc_autoneg;
- u8 tx_timeout_factor;
-
-#ifdef DEBUG
- bool tx_hang_detected;
- bool disable_hw_reset;
-#endif
- u32 max_frame_size;
-
- /* OS defined structs */
- struct pci_dev *pdev;
-#ifndef HAVE_NETDEV_STATS_IN_NETDEV
- struct net_device_stats net_stats;
-#endif
-#ifndef IGB_NO_LRO
- struct igb_lro_stats lro_stats;
-#endif
-
- /* structs defined in e1000_hw.h */
- struct e1000_hw hw;
- struct e1000_hw_stats stats;
- struct e1000_phy_info phy_info;
- struct e1000_phy_stats phy_stats;
-
-#ifdef ETHTOOL_TEST
- u32 test_icr;
- struct igb_ring test_tx_ring;
- struct igb_ring test_rx_ring;
-#endif
-
- int msg_enable;
-
- struct igb_q_vector *q_vector[MAX_Q_VECTORS];
- u32 eims_enable_mask;
- u32 eims_other;
-
- /* to not mess up cache alignment, always add to the bottom */
- u32 *config_space;
- u16 tx_ring_count;
- u16 rx_ring_count;
- struct vf_data_storage *vf_data;
-#ifdef IFLA_VF_MAX
- int vf_rate_link_speed;
-#endif
- u32 lli_port;
- u32 lli_size;
- unsigned int vfs_allocated_count;
- /* Malicious Driver Detection flag. Valid only when SR-IOV is enabled */
- bool mdd;
- int int_mode;
- u32 rss_queues;
- u32 vmdq_pools;
- char fw_version[32];
- u32 wvbr;
- struct igb_mac_addr *mac_table;
-#ifdef CONFIG_IGB_VMDQ_NETDEV
- struct net_device *vmdq_netdev[IGB_MAX_VMDQ_QUEUES];
-#endif
- int vferr_refcount;
- int dmac;
- u32 *shadow_vfta;
-
- /* External Thermal Sensor support flag */
- bool ets;
-#ifdef IGB_HWMON
- struct hwmon_buff igb_hwmon_buff;
-#else /* IGB_HWMON */
-#ifdef IGB_PROCFS
- struct proc_dir_entry *eth_dir;
- struct proc_dir_entry *info_dir;
- struct proc_dir_entry *therm_dir[E1000_MAX_SENSORS];
- struct igb_therm_proc_data therm_data[E1000_MAX_SENSORS];
- bool old_lsc;
-#endif /* IGB_PROCFS */
-#endif /* IGB_HWMON */
- u32 etrack_id;
-
-#ifdef HAVE_PTP_1588_CLOCK
- struct ptp_clock *ptp_clock;
- struct ptp_clock_info ptp_caps;
- struct delayed_work ptp_overflow_work;
- struct work_struct ptp_tx_work;
- struct sk_buff *ptp_tx_skb;
- unsigned long ptp_tx_start;
- unsigned long last_rx_ptp_check;
- spinlock_t tmreg_lock;
- struct cyclecounter cc;
- struct timecounter tc;
- u32 tx_hwtstamp_timeouts;
- u32 rx_hwtstamp_cleared;
-#endif /* HAVE_PTP_1588_CLOCK */
-
-#ifdef HAVE_I2C_SUPPORT
- struct i2c_algo_bit_data i2c_algo;
- struct i2c_adapter i2c_adap;
- struct i2c_client *i2c_client;
-#endif /* HAVE_I2C_SUPPORT */
- unsigned long link_check_timeout;
-
-
- int devrc;
-
- int copper_tries;
- u16 eee_advert;
-};
-
-#ifdef CONFIG_IGB_VMDQ_NETDEV
-struct igb_vmdq_adapter {
-#ifdef HAVE_VLAN_RX_REGISTER
- /* vlgrp must be first member of structure */
- struct vlan_group *vlgrp;
-#else
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-#endif
- struct igb_adapter *real_adapter;
- struct net_device *vnetdev;
- struct net_device_stats net_stats;
- struct igb_ring *tx_ring;
- struct igb_ring *rx_ring;
-};
-#endif
-
-#define IGB_FLAG_HAS_MSI (1 << 0)
-#define IGB_FLAG_DCA_ENABLED (1 << 1)
-#define IGB_FLAG_LLI_PUSH (1 << 2)
-#define IGB_FLAG_QUAD_PORT_A (1 << 3)
-#define IGB_FLAG_QUEUE_PAIRS (1 << 4)
-#define IGB_FLAG_EEE (1 << 5)
-#define IGB_FLAG_DMAC (1 << 6)
-#define IGB_FLAG_DETECT_BAD_DMA (1 << 7)
-#define IGB_FLAG_PTP (1 << 8)
-#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 9)
-#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 10)
-#define IGB_FLAG_WOL_SUPPORTED (1 << 11)
-#define IGB_FLAG_NEED_LINK_UPDATE (1 << 12)
-#define IGB_FLAG_LOOPBACK_ENABLE (1 << 13)
-#define IGB_FLAG_MEDIA_RESET (1 << 14)
-#define IGB_FLAG_MAS_ENABLE (1 << 15)
-
-/* Media Auto Sense */
-#define IGB_MAS_ENABLE_0 0x0001
-#define IGB_MAS_ENABLE_1 0x0002
-#define IGB_MAS_ENABLE_2 0x0004
-#define IGB_MAS_ENABLE_3 0x0008
-
-#define IGB_MIN_TXPBSIZE 20408
-#define IGB_TX_BUF_4096 4096
-
-#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
-
-/* DMA Coalescing defines */
-#define IGB_DMAC_DISABLE 0
-#define IGB_DMAC_MIN 250
-#define IGB_DMAC_500 500
-#define IGB_DMAC_EN_DEFAULT 1000
-#define IGB_DMAC_2000 2000
-#define IGB_DMAC_3000 3000
-#define IGB_DMAC_4000 4000
-#define IGB_DMAC_5000 5000
-#define IGB_DMAC_6000 6000
-#define IGB_DMAC_7000 7000
-#define IGB_DMAC_8000 8000
-#define IGB_DMAC_9000 9000
-#define IGB_DMAC_MAX 10000
-
-#define IGB_82576_TSYNC_SHIFT 19
-#define IGB_82580_TSYNC_SHIFT 24
-#define IGB_TS_HDR_LEN 16
-
-/* CEM Support */
-#define FW_HDR_LEN 0x4
-#define FW_CMD_DRV_INFO 0xDD
-#define FW_CMD_DRV_INFO_LEN 0x5
-#define FW_CMD_RESERVED 0x0
-#define FW_RESP_SUCCESS 0x1
-#define FW_UNUSED_VER 0x0
-#define FW_MAX_RETRIES 3
-#define FW_STATUS_SUCCESS 0x1
-#define FW_FAMILY_DRV_VER 0xffffffff
-
-#define IGB_MAX_LINK_TRIES 20
-
-struct e1000_fw_hdr {
- u8 cmd;
- u8 buf_len;
- union
- {
- u8 cmd_resv;
- u8 ret_status;
- } cmd_or_resp;
- u8 checksum;
-};
-
-#pragma pack(push,1)
-struct e1000_fw_drv_info {
- struct e1000_fw_hdr hdr;
- u8 port_num;
- u32 drv_version;
- u16 pad; /* end spacing to ensure length is mult. of dword */
- u8 pad2; /* end spacing to ensure length is mult. of dword2 */
-};
-#pragma pack(pop)
-
-enum e1000_state_t {
- __IGB_TESTING,
- __IGB_RESETTING,
- __IGB_DOWN
-};
-
-extern char igb_driver_name[];
-extern char igb_driver_version[];
-
-extern int igb_up(struct igb_adapter *);
-extern void igb_down(struct igb_adapter *);
-extern void igb_reinit_locked(struct igb_adapter *);
-extern void igb_reset(struct igb_adapter *);
-extern int igb_set_spd_dplx(struct igb_adapter *, u16);
-extern int igb_setup_tx_resources(struct igb_ring *);
-extern int igb_setup_rx_resources(struct igb_ring *);
-extern void igb_free_tx_resources(struct igb_ring *);
-extern void igb_free_rx_resources(struct igb_ring *);
-extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_setup_tctl(struct igb_adapter *);
-extern void igb_setup_rctl(struct igb_adapter *);
-extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
-extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
- struct igb_tx_buffer *);
-extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
-extern void igb_clean_rx_ring(struct igb_ring *);
-extern void igb_update_stats(struct igb_adapter *);
-extern bool igb_has_link(struct igb_adapter *adapter);
-extern void igb_set_ethtool_ops(struct net_device *);
-extern void igb_check_options(struct igb_adapter *);
-extern void igb_power_up_link(struct igb_adapter *);
-#ifdef HAVE_PTP_1588_CLOCK
-extern void igb_ptp_init(struct igb_adapter *adapter);
-extern void igb_ptp_stop(struct igb_adapter *adapter);
-extern void igb_ptp_reset(struct igb_adapter *adapter);
-extern void igb_ptp_tx_work(struct work_struct *work);
-extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
-extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
- struct sk_buff *skb);
-extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
- unsigned char *va,
- struct sk_buff *skb);
-static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
- skb_pull(skb, IGB_TS_HDR_LEN);
-#endif
- return;
- }
-
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS))
- igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
-
- /* Update the last_rx_timestamp timer in order to enable watchdog check
- * for error case of latched timestamp on a dropped packet.
- */
- rx_ring->last_rx_timestamp = jiffies;
-}
-
-extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd);
-#endif /* HAVE_PTP_1588_CLOCK */
-#ifdef ETHTOOL_OPS_COMPAT
-extern int ethtool_ioctl(struct ifreq *);
-#endif
-extern int igb_write_mc_addr_list(struct net_device *netdev);
-extern int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue);
-extern int igb_del_mac_filter(struct igb_adapter *adapter, u8* addr, u16 queue);
-extern int igb_available_rars(struct igb_adapter *adapter);
-extern s32 igb_vlvf_set(struct igb_adapter *, u32, bool, u32);
-extern void igb_configure_vt_default_pool(struct igb_adapter *adapter);
-extern void igb_enable_vlan_tags(struct igb_adapter *adapter);
-#ifndef HAVE_VLAN_RX_REGISTER
-extern void igb_vlan_mode(struct net_device *, u32);
-#endif
-
-#define E1000_PCS_CFG_IGN_SD 1
-
-#ifdef IGB_HWMON
-void igb_sysfs_exit(struct igb_adapter *adapter);
-int igb_sysfs_init(struct igb_adapter *adapter);
-#else
-#ifdef IGB_PROCFS
-int igb_procfs_init(struct igb_adapter* adapter);
-void igb_procfs_exit(struct igb_adapter* adapter);
-int igb_procfs_topdir_init(void);
-void igb_procfs_topdir_exit(void);
-#endif /* IGB_PROCFS */
-#endif /* IGB_HWMON */
-
-
-
-#endif /* _IGB_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_debugfs.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_debugfs.c
deleted file mode 100755
index d33c814a..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_debugfs.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "igb.h"
-
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c
deleted file mode 100755
index f3c48b25..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c
+++ /dev/null
@@ -1,2859 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-/* ethtool support for igb */
-
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-
-#ifdef SIOCETHTOOL
-#include <linux/ethtool.h>
-#ifdef CONFIG_PM_RUNTIME
-#include <linux/pm_runtime.h>
-#endif /* CONFIG_PM_RUNTIME */
-#include <linux/highmem.h>
-
-#include "igb.h"
-#include "igb_regtest.h"
-#include <linux/if_vlan.h>
-#ifdef ETHTOOL_GEEE
-#include <linux/mdio.h>
-#endif
-
-#ifdef ETHTOOL_OPS_COMPAT
-#include "kcompat_ethtool.c"
-#endif
-#ifdef ETHTOOL_GSTATS
-struct igb_stats {
- char stat_string[ETH_GSTRING_LEN];
- int sizeof_stat;
- int stat_offset;
-};
-
-#define IGB_STAT(_name, _stat) { \
- .stat_string = _name, \
- .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
- .stat_offset = offsetof(struct igb_adapter, _stat) \
-}
-static const struct igb_stats igb_gstrings_stats[] = {
- IGB_STAT("rx_packets", stats.gprc),
- IGB_STAT("tx_packets", stats.gptc),
- IGB_STAT("rx_bytes", stats.gorc),
- IGB_STAT("tx_bytes", stats.gotc),
- IGB_STAT("rx_broadcast", stats.bprc),
- IGB_STAT("tx_broadcast", stats.bptc),
- IGB_STAT("rx_multicast", stats.mprc),
- IGB_STAT("tx_multicast", stats.mptc),
- IGB_STAT("multicast", stats.mprc),
- IGB_STAT("collisions", stats.colc),
- IGB_STAT("rx_crc_errors", stats.crcerrs),
- IGB_STAT("rx_no_buffer_count", stats.rnbc),
- IGB_STAT("rx_missed_errors", stats.mpc),
- IGB_STAT("tx_aborted_errors", stats.ecol),
- IGB_STAT("tx_carrier_errors", stats.tncrs),
- IGB_STAT("tx_window_errors", stats.latecol),
- IGB_STAT("tx_abort_late_coll", stats.latecol),
- IGB_STAT("tx_deferred_ok", stats.dc),
- IGB_STAT("tx_single_coll_ok", stats.scc),
- IGB_STAT("tx_multi_coll_ok", stats.mcc),
- IGB_STAT("tx_timeout_count", tx_timeout_count),
- IGB_STAT("rx_long_length_errors", stats.roc),
- IGB_STAT("rx_short_length_errors", stats.ruc),
- IGB_STAT("rx_align_errors", stats.algnerrc),
- IGB_STAT("tx_tcp_seg_good", stats.tsctc),
- IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
- IGB_STAT("rx_flow_control_xon", stats.xonrxc),
- IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
- IGB_STAT("tx_flow_control_xon", stats.xontxc),
- IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
- IGB_STAT("rx_long_byte_count", stats.gorc),
- IGB_STAT("tx_dma_out_of_sync", stats.doosync),
-#ifndef IGB_NO_LRO
- IGB_STAT("lro_aggregated", lro_stats.coal),
- IGB_STAT("lro_flushed", lro_stats.flushed),
-#endif /* IGB_LRO */
- IGB_STAT("tx_smbus", stats.mgptc),
- IGB_STAT("rx_smbus", stats.mgprc),
- IGB_STAT("dropped_smbus", stats.mgpdc),
- IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
- IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
- IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
- IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
-#ifdef HAVE_PTP_1588_CLOCK
- IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
- IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
-#endif /* HAVE_PTP_1588_CLOCK */
-};
-
-#define IGB_NETDEV_STAT(_net_stat) { \
- .stat_string = #_net_stat, \
- .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
- .stat_offset = offsetof(struct net_device_stats, _net_stat) \
-}
-static const struct igb_stats igb_gstrings_net_stats[] = {
- IGB_NETDEV_STAT(rx_errors),
- IGB_NETDEV_STAT(tx_errors),
- IGB_NETDEV_STAT(tx_dropped),
- IGB_NETDEV_STAT(rx_length_errors),
- IGB_NETDEV_STAT(rx_over_errors),
- IGB_NETDEV_STAT(rx_frame_errors),
- IGB_NETDEV_STAT(rx_fifo_errors),
- IGB_NETDEV_STAT(tx_fifo_errors),
- IGB_NETDEV_STAT(tx_heartbeat_errors)
-};
-
-#define IGB_GLOBAL_STATS_LEN ARRAY_SIZE(igb_gstrings_stats)
-#define IGB_NETDEV_STATS_LEN ARRAY_SIZE(igb_gstrings_net_stats)
-#define IGB_RX_QUEUE_STATS_LEN \
- (sizeof(struct igb_rx_queue_stats) / sizeof(u64))
-#define IGB_TX_QUEUE_STATS_LEN \
- (sizeof(struct igb_tx_queue_stats) / sizeof(u64))
-#define IGB_QUEUE_STATS_LEN \
- ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
- IGB_RX_QUEUE_STATS_LEN) + \
- (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
- IGB_TX_QUEUE_STATS_LEN))
-#define IGB_STATS_LEN \
- (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
-
-#endif /* ETHTOOL_GSTATS */
-#ifdef ETHTOOL_TEST
-static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
- "Register test (offline)", "Eeprom test (offline)",
- "Interrupt test (offline)", "Loopback test (offline)",
- "Link test (on/offline)"
-};
-#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
-#endif /* ETHTOOL_TEST */
-
-static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 status;
-
- if (hw->phy.media_type == e1000_media_type_copper) {
-
- ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full|
- SUPPORTED_Autoneg |
- SUPPORTED_TP |
- SUPPORTED_Pause);
- ecmd->advertising = ADVERTISED_TP;
-
- if (hw->mac.autoneg == 1) {
- ecmd->advertising |= ADVERTISED_Autoneg;
- /* the e1000 autoneg seems to match ethtool nicely */
- ecmd->advertising |= hw->phy.autoneg_advertised;
- }
-
- ecmd->port = PORT_TP;
- ecmd->phy_address = hw->phy.addr;
- ecmd->transceiver = XCVR_INTERNAL;
-
- } else {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause);
- if (hw->mac.type == e1000_i354)
- ecmd->supported |= (SUPPORTED_2500baseX_Full);
-
- ecmd->advertising = ADVERTISED_FIBRE;
-
- switch (adapter->link_speed) {
- case SPEED_2500:
- ecmd->advertising = ADVERTISED_2500baseX_Full;
- break;
- case SPEED_1000:
- ecmd->advertising = ADVERTISED_1000baseT_Full;
- break;
- case SPEED_100:
- ecmd->advertising = ADVERTISED_100baseT_Full;
- break;
- default:
- break;
- }
-
- if (hw->mac.autoneg == 1)
- ecmd->advertising |= ADVERTISED_Autoneg;
-
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
- }
-
- if (hw->mac.autoneg != 1)
- ecmd->advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
-
- if (hw->fc.requested_mode == e1000_fc_full)
- ecmd->advertising |= ADVERTISED_Pause;
- else if (hw->fc.requested_mode == e1000_fc_rx_pause)
- ecmd->advertising |= (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- else if (hw->fc.requested_mode == e1000_fc_tx_pause)
- ecmd->advertising |= ADVERTISED_Asym_Pause;
- else
- ecmd->advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
-
- status = E1000_READ_REG(hw, E1000_STATUS);
-
- if (status & E1000_STATUS_LU) {
- if ((hw->mac.type == e1000_i354) &&
- (status & E1000_STATUS_2P5_SKU) &&
- !(status & E1000_STATUS_2P5_SKU_OVER))
- ecmd->speed = SPEED_2500;
- else if (status & E1000_STATUS_SPEED_1000)
- ecmd->speed = SPEED_1000;
- else if (status & E1000_STATUS_SPEED_100)
- ecmd->speed = SPEED_100;
- else
- ecmd->speed = SPEED_10;
-
- if ((status & E1000_STATUS_FD) ||
- hw->phy.media_type != e1000_media_type_copper)
- ecmd->duplex = DUPLEX_FULL;
- else
- ecmd->duplex = DUPLEX_HALF;
-
- } else {
- ecmd->speed = -1;
- ecmd->duplex = -1;
- }
-
- if ((hw->phy.media_type == e1000_media_type_fiber) ||
- hw->mac.autoneg)
- ecmd->autoneg = AUTONEG_ENABLE;
- else
- ecmd->autoneg = AUTONEG_DISABLE;
-#ifdef ETH_TP_MDI_X
-
- /* MDI-X => 2; MDI =>1; Invalid =>0 */
- if (hw->phy.media_type == e1000_media_type_copper)
- ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
- ETH_TP_MDI;
- else
- ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
-
-#ifdef ETH_TP_MDI_AUTO
- if (hw->phy.mdix == AUTO_ALL_MODES)
- ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
- else
- ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
-
-#endif
-#endif /* ETH_TP_MDI_X */
- return 0;
-}
-
-static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- if (ecmd->duplex == DUPLEX_HALF) {
- if (!hw->dev_spec._82575.eee_disable)
- dev_info(pci_dev_to_dev(adapter->pdev), "EEE disabled: not supported with half duplex\n");
- hw->dev_spec._82575.eee_disable = true;
- } else {
- if (hw->dev_spec._82575.eee_disable)
- dev_info(pci_dev_to_dev(adapter->pdev), "EEE enabled\n");
- hw->dev_spec._82575.eee_disable = false;
- }
-
- /* When SoL/IDER sessions are active, autoneg/speed/duplex
- * cannot be changed */
- if (e1000_check_reset_block(hw)) {
- dev_err(pci_dev_to_dev(adapter->pdev), "Cannot change link "
- "characteristics when SoL/IDER is active.\n");
- return -EINVAL;
- }
-
-#ifdef ETH_TP_MDI_AUTO
- /*
- * MDI setting is only allowed when autoneg enabled because
- * some hardware doesn't allow MDI setting when speed or
- * duplex is forced.
- */
- if (ecmd->eth_tp_mdix_ctrl) {
- if (hw->phy.media_type != e1000_media_type_copper)
- return -EOPNOTSUPP;
-
- if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
- (ecmd->autoneg != AUTONEG_ENABLE)) {
- dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
- return -EINVAL;
- }
- }
-
-#endif /* ETH_TP_MDI_AUTO */
- while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
-
- if (ecmd->autoneg == AUTONEG_ENABLE) {
- hw->mac.autoneg = 1;
- if (hw->phy.media_type == e1000_media_type_fiber) {
- hw->phy.autoneg_advertised = ecmd->advertising |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg;
- switch (adapter->link_speed) {
- case SPEED_2500:
- hw->phy.autoneg_advertised =
- ADVERTISED_2500baseX_Full;
- break;
- case SPEED_1000:
- hw->phy.autoneg_advertised =
- ADVERTISED_1000baseT_Full;
- break;
- case SPEED_100:
- hw->phy.autoneg_advertised =
- ADVERTISED_100baseT_Full;
- break;
- default:
- break;
- }
- } else {
- hw->phy.autoneg_advertised = ecmd->advertising |
- ADVERTISED_TP |
- ADVERTISED_Autoneg;
- }
- ecmd->advertising = hw->phy.autoneg_advertised;
- if (adapter->fc_autoneg)
- hw->fc.requested_mode = e1000_fc_default;
- } else {
- if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
- clear_bit(__IGB_RESETTING, &adapter->state);
- return -EINVAL;
- }
- }
-
-#ifdef ETH_TP_MDI_AUTO
- /* MDI-X => 2; MDI => 1; Auto => 3 */
- if (ecmd->eth_tp_mdix_ctrl) {
- /* fix up the value for auto (3 => 0) as zero is mapped
- * internally to auto
- */
- if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
- hw->phy.mdix = AUTO_ALL_MODES;
- else
- hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
- }
-
-#endif /* ETH_TP_MDI_AUTO */
- /* reset the link */
- if (netif_running(adapter->netdev)) {
- igb_down(adapter);
- igb_up(adapter);
- } else
- igb_reset(adapter);
-
- clear_bit(__IGB_RESETTING, &adapter->state);
- return 0;
-}
-
-static u32 igb_get_link(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_mac_info *mac = &adapter->hw.mac;
-
- /*
- * If the link is not reported up to netdev, interrupts are disabled,
- * and so the physical link state may have changed since we last
- * looked. Set get_link_status to make sure that the true link
- * state is interrogated, rather than pulling a cached and possibly
- * stale link state from the driver.
- */
- if (!netif_carrier_ok(netdev))
- mac->get_link_status = 1;
-
- return igb_has_link(adapter);
-}
-
-static void igb_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- pause->autoneg =
- (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
-
- if (hw->fc.current_mode == e1000_fc_rx_pause)
- pause->rx_pause = 1;
- else if (hw->fc.current_mode == e1000_fc_tx_pause)
- pause->tx_pause = 1;
- else if (hw->fc.current_mode == e1000_fc_full) {
- pause->rx_pause = 1;
- pause->tx_pause = 1;
- }
-}
-
-static int igb_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- int retval = 0;
-
- adapter->fc_autoneg = pause->autoneg;
-
- while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
-
- if (adapter->fc_autoneg == AUTONEG_ENABLE) {
- hw->fc.requested_mode = e1000_fc_default;
- if (netif_running(adapter->netdev)) {
- igb_down(adapter);
- igb_up(adapter);
- } else {
- igb_reset(adapter);
- }
- } else {
- if (pause->rx_pause && pause->tx_pause)
- hw->fc.requested_mode = e1000_fc_full;
- else if (pause->rx_pause && !pause->tx_pause)
- hw->fc.requested_mode = e1000_fc_rx_pause;
- else if (!pause->rx_pause && pause->tx_pause)
- hw->fc.requested_mode = e1000_fc_tx_pause;
- else if (!pause->rx_pause && !pause->tx_pause)
- hw->fc.requested_mode = e1000_fc_none;
-
- hw->fc.current_mode = hw->fc.requested_mode;
-
- if (hw->phy.media_type == e1000_media_type_fiber) {
- retval = hw->mac.ops.setup_link(hw);
- /* implicit goto out */
- } else {
- retval = e1000_force_mac_fc(hw);
- if (retval)
- goto out;
- e1000_set_fc_watermarks_generic(hw);
- }
- }
-
-out:
- clear_bit(__IGB_RESETTING, &adapter->state);
- return retval;
-}
-
-static u32 igb_get_msglevel(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- return adapter->msg_enable;
-}
-
-static void igb_set_msglevel(struct net_device *netdev, u32 data)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- adapter->msg_enable = data;
-}
-
-static int igb_get_regs_len(struct net_device *netdev)
-{
-#define IGB_REGS_LEN 555
- return IGB_REGS_LEN * sizeof(u32);
-}
-
-static void igb_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 *regs_buff = p;
- u8 i;
-
- memset(p, 0, IGB_REGS_LEN * sizeof(u32));
-
- regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
-
- /* General Registers */
- regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
- regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
- regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
- regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC);
- regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL);
- regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW);
- regs_buff[6] = E1000_READ_REG(hw, E1000_VET);
- regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL);
- regs_buff[8] = E1000_READ_REG(hw, E1000_PBA);
- regs_buff[9] = E1000_READ_REG(hw, E1000_PBS);
- regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER);
- regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER);
-
- /* NVM Register */
- regs_buff[12] = E1000_READ_REG(hw, E1000_EECD);
-
- /* Interrupt */
- /* Reading EICS for EICR because they read the
- * same but EICS does not clear on read */
- regs_buff[13] = E1000_READ_REG(hw, E1000_EICS);
- regs_buff[14] = E1000_READ_REG(hw, E1000_EICS);
- regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS);
- regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC);
- regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC);
- regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM);
- /* Reading ICS for ICR because they read the
- * same but ICS does not clear on read */
- regs_buff[19] = E1000_READ_REG(hw, E1000_ICS);
- regs_buff[20] = E1000_READ_REG(hw, E1000_ICS);
- regs_buff[21] = E1000_READ_REG(hw, E1000_IMS);
- regs_buff[22] = E1000_READ_REG(hw, E1000_IMC);
- regs_buff[23] = E1000_READ_REG(hw, E1000_IAC);
- regs_buff[24] = E1000_READ_REG(hw, E1000_IAM);
- regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP);
-
- /* Flow Control */
- regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL);
- regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH);
- regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV);
- regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL);
- regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH);
- regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV);
-
- /* Receive */
- regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL);
- regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM);
- regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML);
- regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL);
- regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC);
- regs_buff[37] = E1000_READ_REG(hw, E1000_VT_CTL);
-
- /* Transmit */
- regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL);
- regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT);
- regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG);
- regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL);
-
- /* Wake Up */
- regs_buff[42] = E1000_READ_REG(hw, E1000_WUC);
- regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC);
- regs_buff[44] = E1000_READ_REG(hw, E1000_WUS);
- regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV);
- regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL);
-
- /* MAC */
- regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0);
- regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL);
- regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT);
- regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV);
- regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB);
- regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX);
- regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP);
-
- /* Statistics */
- regs_buff[54] = adapter->stats.crcerrs;
- regs_buff[55] = adapter->stats.algnerrc;
- regs_buff[56] = adapter->stats.symerrs;
- regs_buff[57] = adapter->stats.rxerrc;
- regs_buff[58] = adapter->stats.mpc;
- regs_buff[59] = adapter->stats.scc;
- regs_buff[60] = adapter->stats.ecol;
- regs_buff[61] = adapter->stats.mcc;
- regs_buff[62] = adapter->stats.latecol;
- regs_buff[63] = adapter->stats.colc;
- regs_buff[64] = adapter->stats.dc;
- regs_buff[65] = adapter->stats.tncrs;
- regs_buff[66] = adapter->stats.sec;
- regs_buff[67] = adapter->stats.htdpmc;
- regs_buff[68] = adapter->stats.rlec;
- regs_buff[69] = adapter->stats.xonrxc;
- regs_buff[70] = adapter->stats.xontxc;
- regs_buff[71] = adapter->stats.xoffrxc;
- regs_buff[72] = adapter->stats.xofftxc;
- regs_buff[73] = adapter->stats.fcruc;
- regs_buff[74] = adapter->stats.prc64;
- regs_buff[75] = adapter->stats.prc127;
- regs_buff[76] = adapter->stats.prc255;
- regs_buff[77] = adapter->stats.prc511;
- regs_buff[78] = adapter->stats.prc1023;
- regs_buff[79] = adapter->stats.prc1522;
- regs_buff[80] = adapter->stats.gprc;
- regs_buff[81] = adapter->stats.bprc;
- regs_buff[82] = adapter->stats.mprc;
- regs_buff[83] = adapter->stats.gptc;
- regs_buff[84] = adapter->stats.gorc;
- regs_buff[86] = adapter->stats.gotc;
- regs_buff[88] = adapter->stats.rnbc;
- regs_buff[89] = adapter->stats.ruc;
- regs_buff[90] = adapter->stats.rfc;
- regs_buff[91] = adapter->stats.roc;
- regs_buff[92] = adapter->stats.rjc;
- regs_buff[93] = adapter->stats.mgprc;
- regs_buff[94] = adapter->stats.mgpdc;
- regs_buff[95] = adapter->stats.mgptc;
- regs_buff[96] = adapter->stats.tor;
- regs_buff[98] = adapter->stats.tot;
- regs_buff[100] = adapter->stats.tpr;
- regs_buff[101] = adapter->stats.tpt;
- regs_buff[102] = adapter->stats.ptc64;
- regs_buff[103] = adapter->stats.ptc127;
- regs_buff[104] = adapter->stats.ptc255;
- regs_buff[105] = adapter->stats.ptc511;
- regs_buff[106] = adapter->stats.ptc1023;
- regs_buff[107] = adapter->stats.ptc1522;
- regs_buff[108] = adapter->stats.mptc;
- regs_buff[109] = adapter->stats.bptc;
- regs_buff[110] = adapter->stats.tsctc;
- regs_buff[111] = adapter->stats.iac;
- regs_buff[112] = adapter->stats.rpthc;
- regs_buff[113] = adapter->stats.hgptc;
- regs_buff[114] = adapter->stats.hgorc;
- regs_buff[116] = adapter->stats.hgotc;
- regs_buff[118] = adapter->stats.lenerrs;
- regs_buff[119] = adapter->stats.scvpc;
- regs_buff[120] = adapter->stats.hrmpc;
-
- for (i = 0; i < 4; i++)
- regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i));
- for (i = 0; i < 4; i++)
- regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE(i));
- for (i = 0; i < 4; i++)
- regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i));
- for (i = 0; i < 4; i++)
- regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i));
- for (i = 0; i < 4; i++)
- regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i));
- for (i = 0; i < 4; i++)
- regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i));
- for (i = 0; i < 4; i++)
- regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i));
- for (i = 0; i < 4; i++)
- regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
-
- for (i = 0; i < 10; i++)
- regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i));
- for (i = 0; i < 8; i++)
- regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i));
- for (i = 0; i < 8; i++)
- regs_buff[171 + i] = E1000_READ_REG(hw, E1000_IMIREXT(i));
- for (i = 0; i < 16; i++)
- regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i));
- for (i = 0; i < 16; i++)
- regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i));
-
- for (i = 0; i < 4; i++)
- regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i));
- for (i = 0; i < 4; i++)
- regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i));
- for (i = 0; i < 4; i++)
- regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i));
- for (i = 0; i < 4; i++)
- regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i));
- for (i = 0; i < 4; i++)
- regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i));
- for (i = 0; i < 4; i++)
- regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i));
- for (i = 0; i < 4; i++)
- regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i));
- for (i = 0; i < 4; i++)
- regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i));
- for (i = 0; i < 4; i++)
- regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
-
- for (i = 0; i < 4; i++)
- regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i));
- for (i = 0; i < 4; i++)
- regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i));
- for (i = 0; i < 32; i++)
- regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i));
- for (i = 0; i < 128; i++)
- regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i));
- for (i = 0; i < 128; i++)
- regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i));
- for (i = 0; i < 4; i++)
- regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i));
-
- regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH);
- regs_buff[548] = E1000_READ_REG(hw, E1000_TDFT);
- regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS);
- regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC);
- if (hw->mac.type > e1000_82580) {
- regs_buff[551] = adapter->stats.o2bgptc;
- regs_buff[552] = adapter->stats.b2ospc;
- regs_buff[553] = adapter->stats.o2bspc;
- regs_buff[554] = adapter->stats.b2ogprc;
- }
-}
-
-static int igb_get_eeprom_len(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- return adapter->hw.nvm.word_size * 2;
-}
-
-static int igb_get_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u16 *eeprom_buff;
- int first_word, last_word;
- int ret_val = 0;
- u16 i;
-
- if (eeprom->len == 0)
- return -EINVAL;
-
- eeprom->magic = hw->vendor_id | (hw->device_id << 16);
-
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
-
- eeprom_buff = kmalloc(sizeof(u16) *
- (last_word - first_word + 1), GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
-
- if (hw->nvm.type == e1000_nvm_eeprom_spi)
- ret_val = e1000_read_nvm(hw, first_word,
- last_word - first_word + 1,
- eeprom_buff);
- else {
- for (i = 0; i < last_word - first_word + 1; i++) {
- ret_val = e1000_read_nvm(hw, first_word + i, 1,
- &eeprom_buff[i]);
- if (ret_val)
- break;
- }
- }
-
- /* Device's eeprom is always little-endian, word addressable */
- for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = le16_to_cpu(eeprom_buff[i]);
-
- memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
- eeprom->len);
- kfree(eeprom_buff);
-
- return ret_val;
-}
-
-static int igb_set_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u16 *eeprom_buff;
- void *ptr;
- int max_len, first_word, last_word, ret_val = 0;
- u16 i;
-
- if (eeprom->len == 0)
- return -EOPNOTSUPP;
-
- if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
- return -EFAULT;
-
- max_len = hw->nvm.word_size * 2;
-
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(max_len, GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
-
- ptr = (void *)eeprom_buff;
-
- if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word */
- /* only the second byte of the word is being modified */
- ret_val = e1000_read_nvm(hw, first_word, 1,
- &eeprom_buff[0]);
- ptr++;
- }
- if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
- /* need read/modify/write of last changed EEPROM word */
- /* only the first byte of the word is being modified */
- ret_val = e1000_read_nvm(hw, last_word, 1,
- &eeprom_buff[last_word - first_word]);
- }
-
- /* Device's eeprom is always little-endian, word addressable */
- for (i = 0; i < last_word - first_word + 1; i++)
- le16_to_cpus(&eeprom_buff[i]);
-
- memcpy(ptr, bytes, eeprom->len);
-
- for (i = 0; i < last_word - first_word + 1; i++)
- cpu_to_le16s(&eeprom_buff[i]);
-
- ret_val = e1000_write_nvm(hw, first_word,
- last_word - first_word + 1, eeprom_buff);
-
- /* Update the checksum if write succeeded.
- * and flush shadow RAM for 82573 controllers */
- if (ret_val == 0)
- e1000_update_nvm_checksum(hw);
-
- kfree(eeprom_buff);
- return ret_val;
-}
-
-static void igb_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
-
- strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
- strncpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version) - 1);
-
- strncpy(drvinfo->fw_version, adapter->fw_version,
- sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info) -1);
- drvinfo->n_stats = IGB_STATS_LEN;
- drvinfo->testinfo_len = IGB_TEST_LEN;
- drvinfo->regdump_len = igb_get_regs_len(netdev);
- drvinfo->eedump_len = igb_get_eeprom_len(netdev);
-}
-
-static void igb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
-
- ring->rx_max_pending = IGB_MAX_RXD;
- ring->tx_max_pending = IGB_MAX_TXD;
- ring->rx_mini_max_pending = 0;
- ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = adapter->rx_ring_count;
- ring->tx_pending = adapter->tx_ring_count;
- ring->rx_mini_pending = 0;
- ring->rx_jumbo_pending = 0;
-}
-
-static int igb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct igb_ring *temp_ring;
- int i, err = 0;
- u16 new_rx_count, new_tx_count;
-
- if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
- return -EINVAL;
-
- new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD);
- new_rx_count = max(new_rx_count, (u16)IGB_MIN_RXD);
- new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
-
- new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD);
- new_tx_count = max(new_tx_count, (u16)IGB_MIN_TXD);
- new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
-
- if ((new_tx_count == adapter->tx_ring_count) &&
- (new_rx_count == adapter->rx_ring_count)) {
- /* nothing to do */
- return 0;
- }
-
- while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
-
- if (!netif_running(adapter->netdev)) {
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->count = new_tx_count;
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->count = new_rx_count;
- adapter->tx_ring_count = new_tx_count;
- adapter->rx_ring_count = new_rx_count;
- goto clear_reset;
- }
-
- if (adapter->num_tx_queues > adapter->num_rx_queues)
- temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
- else
- temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
-
- if (!temp_ring) {
- err = -ENOMEM;
- goto clear_reset;
- }
-
- igb_down(adapter);
-
- /*
- * We can't just free everything and then setup again,
- * because the ISRs in MSI-X mode get passed pointers
- * to the tx and rx ring structs.
- */
- if (new_tx_count != adapter->tx_ring_count) {
- for (i = 0; i < adapter->num_tx_queues; i++) {
- memcpy(&temp_ring[i], adapter->tx_ring[i],
- sizeof(struct igb_ring));
-
- temp_ring[i].count = new_tx_count;
- err = igb_setup_tx_resources(&temp_ring[i]);
- if (err) {
- while (i) {
- i--;
- igb_free_tx_resources(&temp_ring[i]);
- }
- goto err_setup;
- }
- }
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- igb_free_tx_resources(adapter->tx_ring[i]);
-
- memcpy(adapter->tx_ring[i], &temp_ring[i],
- sizeof(struct igb_ring));
- }
-
- adapter->tx_ring_count = new_tx_count;
- }
-
- if (new_rx_count != adapter->rx_ring_count) {
- for (i = 0; i < adapter->num_rx_queues; i++) {
- memcpy(&temp_ring[i], adapter->rx_ring[i],
- sizeof(struct igb_ring));
-
- temp_ring[i].count = new_rx_count;
- err = igb_setup_rx_resources(&temp_ring[i]);
- if (err) {
- while (i) {
- i--;
- igb_free_rx_resources(&temp_ring[i]);
- }
- goto err_setup;
- }
-
- }
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- igb_free_rx_resources(adapter->rx_ring[i]);
-
- memcpy(adapter->rx_ring[i], &temp_ring[i],
- sizeof(struct igb_ring));
- }
-
- adapter->rx_ring_count = new_rx_count;
- }
-err_setup:
- igb_up(adapter);
- vfree(temp_ring);
-clear_reset:
- clear_bit(__IGB_RESETTING, &adapter->state);
- return err;
-}
-static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
- int reg, u32 mask, u32 write)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 pat, val;
- static const u32 _test[] =
- {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
- for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
- E1000_WRITE_REG(hw, reg, (_test[pat] & write));
- val = E1000_READ_REG(hw, reg) & mask;
- if (val != (_test[pat] & write & mask)) {
- dev_err(pci_dev_to_dev(adapter->pdev), "pattern test reg %04X "
- "failed: got 0x%08X expected 0x%08X\n",
- E1000_REGISTER(hw, reg), val, (_test[pat] & write & mask));
- *data = E1000_REGISTER(hw, reg);
- return 1;
- }
- }
-
- return 0;
-}
-
-static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
- int reg, u32 mask, u32 write)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 val;
- E1000_WRITE_REG(hw, reg, write & mask);
- val = E1000_READ_REG(hw, reg);
- if ((write & mask) != (val & mask)) {
- dev_err(pci_dev_to_dev(adapter->pdev), "set/check reg %04X test failed:"
- " got 0x%08X expected 0x%08X\n", reg,
- (val & mask), (write & mask));
- *data = E1000_REGISTER(hw, reg);
- return 1;
- }
-
- return 0;
-}
-
-#define REG_PATTERN_TEST(reg, mask, write) \
- do { \
- if (reg_pattern_test(adapter, data, reg, mask, write)) \
- return 1; \
- } while (0)
-
-#define REG_SET_AND_CHECK(reg, mask, write) \
- do { \
- if (reg_set_and_check(adapter, data, reg, mask, write)) \
- return 1; \
- } while (0)
-
-static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct igb_reg_test *test;
- u32 value, before, after;
- u32 i, toggle;
-
- switch (adapter->hw.mac.type) {
- case e1000_i350:
- case e1000_i354:
- test = reg_test_i350;
- toggle = 0x7FEFF3FF;
- break;
- case e1000_i210:
- case e1000_i211:
- test = reg_test_i210;
- toggle = 0x7FEFF3FF;
- break;
- case e1000_82580:
- test = reg_test_82580;
- toggle = 0x7FEFF3FF;
- break;
- case e1000_82576:
- test = reg_test_82576;
- toggle = 0x7FFFF3FF;
- break;
- default:
- test = reg_test_82575;
- toggle = 0x7FFFF3FF;
- break;
- }
-
- /* Because the status register is such a special case,
- * we handle it separately from the rest of the register
- * tests. Some bits are read-only, some toggle, and some
- * are writable on newer MACs.
- */
- before = E1000_READ_REG(hw, E1000_STATUS);
- value = (E1000_READ_REG(hw, E1000_STATUS) & toggle);
- E1000_WRITE_REG(hw, E1000_STATUS, toggle);
- after = E1000_READ_REG(hw, E1000_STATUS) & toggle;
- if (value != after) {
- dev_err(pci_dev_to_dev(adapter->pdev), "failed STATUS register test "
- "got: 0x%08X expected: 0x%08X\n", after, value);
- *data = 1;
- return 1;
- }
- /* restore previous status */
- E1000_WRITE_REG(hw, E1000_STATUS, before);
-
- /* Perform the remainder of the register test, looping through
- * the test table until we either fail or reach the null entry.
- */
- while (test->reg) {
- for (i = 0; i < test->array_len; i++) {
- switch (test->test_type) {
- case PATTERN_TEST:
- REG_PATTERN_TEST(test->reg +
- (i * test->reg_offset),
- test->mask,
- test->write);
- break;
- case SET_READ_TEST:
- REG_SET_AND_CHECK(test->reg +
- (i * test->reg_offset),
- test->mask,
- test->write);
- break;
- case WRITE_NO_TEST:
- writel(test->write,
- (adapter->hw.hw_addr + test->reg)
- + (i * test->reg_offset));
- break;
- case TABLE32_TEST:
- REG_PATTERN_TEST(test->reg + (i * 4),
- test->mask,
- test->write);
- break;
- case TABLE64_TEST_LO:
- REG_PATTERN_TEST(test->reg + (i * 8),
- test->mask,
- test->write);
- break;
- case TABLE64_TEST_HI:
- REG_PATTERN_TEST((test->reg + 4) + (i * 8),
- test->mask,
- test->write);
- break;
- }
- }
- test++;
- }
-
- *data = 0;
- return 0;
-}
-
-static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
-{
- *data = 0;
-
- /* Validate NVM checksum */
- if (e1000_validate_nvm_checksum(&adapter->hw) < 0)
- *data = 2;
-
- return *data;
-}
-
-static irqreturn_t igb_test_intr(int irq, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *) data;
- struct e1000_hw *hw = &adapter->hw;
-
- adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR);
-
- return IRQ_HANDLED;
-}
-
-static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- u32 mask, ics_mask, i = 0, shared_int = TRUE;
- u32 irq = adapter->pdev->irq;
-
- *data = 0;
-
- /* Hook up test interrupt handler just for this test */
- if (adapter->msix_entries) {
- if (request_irq(adapter->msix_entries[0].vector,
- &igb_test_intr, 0, netdev->name, adapter)) {
- *data = 1;
- return -1;
- }
- } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
- shared_int = FALSE;
- if (request_irq(irq,
- igb_test_intr, 0, netdev->name, adapter)) {
- *data = 1;
- return -1;
- }
- } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
- netdev->name, adapter)) {
- shared_int = FALSE;
- } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
- netdev->name, adapter)) {
- *data = 1;
- return -1;
- }
- dev_info(pci_dev_to_dev(adapter->pdev), "testing %s interrupt\n",
- (shared_int ? "shared" : "unshared"));
-
- /* Disable all the interrupts */
- E1000_WRITE_REG(hw, E1000_IMC, ~0);
- E1000_WRITE_FLUSH(hw);
- usleep_range(10000, 20000);
-
- /* Define all writable bits for ICS */
- switch (hw->mac.type) {
- case e1000_82575:
- ics_mask = 0x37F47EDD;
- break;
- case e1000_82576:
- ics_mask = 0x77D4FBFD;
- break;
- case e1000_82580:
- ics_mask = 0x77DCFED5;
- break;
- case e1000_i350:
- case e1000_i354:
- ics_mask = 0x77DCFED5;
- break;
- case e1000_i210:
- case e1000_i211:
- ics_mask = 0x774CFED5;
- break;
- default:
- ics_mask = 0x7FFFFFFF;
- break;
- }
-
- /* Test each interrupt */
- for (; i < 31; i++) {
- /* Interrupt to test */
- mask = 1 << i;
-
- if (!(mask & ics_mask))
- continue;
-
- if (!shared_int) {
- /* Disable the interrupt to be reported in
- * the cause register and then force the same
- * interrupt and see if one gets posted. If
- * an interrupt was posted to the bus, the
- * test failed.
- */
- adapter->test_icr = 0;
-
- /* Flush any pending interrupts */
- E1000_WRITE_REG(hw, E1000_ICR, ~0);
-
- E1000_WRITE_REG(hw, E1000_IMC, mask);
- E1000_WRITE_REG(hw, E1000_ICS, mask);
- E1000_WRITE_FLUSH(hw);
- usleep_range(10000, 20000);
-
- if (adapter->test_icr & mask) {
- *data = 3;
- break;
- }
- }
-
- /* Enable the interrupt to be reported in
- * the cause register and then force the same
- * interrupt and see if one gets posted. If
- * an interrupt was not posted to the bus, the
- * test failed.
- */
- adapter->test_icr = 0;
-
- /* Flush any pending interrupts */
- E1000_WRITE_REG(hw, E1000_ICR, ~0);
-
- E1000_WRITE_REG(hw, E1000_IMS, mask);
- E1000_WRITE_REG(hw, E1000_ICS, mask);
- E1000_WRITE_FLUSH(hw);
- usleep_range(10000, 20000);
-
- if (!(adapter->test_icr & mask)) {
- *data = 4;
- break;
- }
-
- if (!shared_int) {
- /* Disable the other interrupts to be reported in
- * the cause register and then force the other
- * interrupts and see if any get posted. If
- * an interrupt was posted to the bus, the
- * test failed.
- */
- adapter->test_icr = 0;
-
- /* Flush any pending interrupts */
- E1000_WRITE_REG(hw, E1000_ICR, ~0);
-
- E1000_WRITE_REG(hw, E1000_IMC, ~mask);
- E1000_WRITE_REG(hw, E1000_ICS, ~mask);
- E1000_WRITE_FLUSH(hw);
- usleep_range(10000, 20000);
-
- if (adapter->test_icr & mask) {
- *data = 5;
- break;
- }
- }
- }
-
- /* Disable all the interrupts */
- E1000_WRITE_REG(hw, E1000_IMC, ~0);
- E1000_WRITE_FLUSH(hw);
- usleep_range(10000, 20000);
-
- /* Unhook test interrupt handler */
- if (adapter->msix_entries)
- free_irq(adapter->msix_entries[0].vector, adapter);
- else
- free_irq(irq, adapter);
-
- return *data;
-}
-
-static void igb_free_desc_rings(struct igb_adapter *adapter)
-{
- igb_free_tx_resources(&adapter->test_tx_ring);
- igb_free_rx_resources(&adapter->test_rx_ring);
-}
-
-static int igb_setup_desc_rings(struct igb_adapter *adapter)
-{
- struct igb_ring *tx_ring = &adapter->test_tx_ring;
- struct igb_ring *rx_ring = &adapter->test_rx_ring;
- struct e1000_hw *hw = &adapter->hw;
- int ret_val;
-
- /* Setup Tx descriptor ring and Tx buffers */
- tx_ring->count = IGB_DEFAULT_TXD;
- tx_ring->dev = pci_dev_to_dev(adapter->pdev);
- tx_ring->netdev = adapter->netdev;
- tx_ring->reg_idx = adapter->vfs_allocated_count;
-
- if (igb_setup_tx_resources(tx_ring)) {
- ret_val = 1;
- goto err_nomem;
- }
-
- igb_setup_tctl(adapter);
- igb_configure_tx_ring(adapter, tx_ring);
-
- /* Setup Rx descriptor ring and Rx buffers */
- rx_ring->count = IGB_DEFAULT_RXD;
- rx_ring->dev = pci_dev_to_dev(adapter->pdev);
- rx_ring->netdev = adapter->netdev;
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- rx_ring->rx_buffer_len = IGB_RX_HDR_LEN;
-#endif
- rx_ring->reg_idx = adapter->vfs_allocated_count;
-
- if (igb_setup_rx_resources(rx_ring)) {
- ret_val = 2;
- goto err_nomem;
- }
-
- /* set the default queue to queue 0 of PF */
- E1000_WRITE_REG(hw, E1000_MRQC, adapter->vfs_allocated_count << 3);
-
- /* enable receive ring */
- igb_setup_rctl(adapter);
- igb_configure_rx_ring(adapter, rx_ring);
-
- igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
-
- return 0;
-
-err_nomem:
- igb_free_desc_rings(adapter);
- return ret_val;
-}
-
-static void igb_phy_disable_receiver(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- /* Write out to PHY registers 29 and 30 to disable the Receiver. */
- e1000_write_phy_reg(hw, 29, 0x001F);
- e1000_write_phy_reg(hw, 30, 0x8FFC);
- e1000_write_phy_reg(hw, 29, 0x001A);
- e1000_write_phy_reg(hw, 30, 0x8FF0);
-}
-
-static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl_reg = 0;
-
- hw->mac.autoneg = FALSE;
-
- if (hw->phy.type == e1000_phy_m88) {
- if (hw->phy.id != I210_I_PHY_ID) {
- /* Auto-MDI/MDIX Off */
- e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
- /* reset to update Auto-MDI/MDIX */
- e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140);
- /* autoneg off */
- e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140);
- } else {
- /* force 1000, set loopback */
- e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
- e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
- }
- } else {
- /* enable MII loopback */
- if (hw->phy.type == e1000_phy_82580)
- e1000_write_phy_reg(hw, I82577_PHY_LBK_CTRL, 0x8041);
- }
-
- /* force 1000, set loopback */
- e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
-
- /* Now set up the MAC to the same speed/duplex as the PHY. */
- ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
- ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
- ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
- E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
- E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
- E1000_CTRL_FD | /* Force Duplex to FULL */
- E1000_CTRL_SLU); /* Set link up enable bit */
-
- if (hw->phy.type == e1000_phy_m88)
- ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
-
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
-
- /* Disable the receiver on the PHY so when a cable is plugged in, the
- * PHY does not begin to autoneg when a cable is reconnected to the NIC.
- */
- if (hw->phy.type == e1000_phy_m88)
- igb_phy_disable_receiver(adapter);
-
- mdelay(500);
- return 0;
-}
-
-static int igb_set_phy_loopback(struct igb_adapter *adapter)
-{
- return igb_integrated_phy_loopback(adapter);
-}
-
-static int igb_setup_loopback_test(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 reg;
-
- reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
-
- /* use CTRL_EXT to identify link type as SGMII can appear as copper */
- if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
- if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
-
- /* Enable DH89xxCC MPHY for near end loopback */
- reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
- reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
- E1000_MPHY_PCS_CLK_REG_OFFSET;
- E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);
-
- reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
- reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
- E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
- }
-
- reg = E1000_READ_REG(hw, E1000_RCTL);
- reg |= E1000_RCTL_LBM_TCVR;
- E1000_WRITE_REG(hw, E1000_RCTL, reg);
-
- E1000_WRITE_REG(hw, E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
-
- reg = E1000_READ_REG(hw, E1000_CTRL);
- reg &= ~(E1000_CTRL_RFCE |
- E1000_CTRL_TFCE |
- E1000_CTRL_LRST);
- reg |= E1000_CTRL_SLU |
- E1000_CTRL_FD;
- E1000_WRITE_REG(hw, E1000_CTRL, reg);
-
- /* Unset switch control to serdes energy detect */
- reg = E1000_READ_REG(hw, E1000_CONNSW);
- reg &= ~E1000_CONNSW_ENRGSRC;
- E1000_WRITE_REG(hw, E1000_CONNSW, reg);
-
- /* Unset sigdetect for SERDES loopback on
- * 82580 and newer devices
- */
- if (hw->mac.type >= e1000_82580) {
- reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
- reg |= E1000_PCS_CFG_IGN_SD;
- E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
- }
-
- /* Set PCS register for forced speed */
- reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
- reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/
- reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
- E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
- E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
- E1000_PCS_LCTL_FSD | /* Force Speed */
- E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
- E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
-
- return 0;
- }
-
- return igb_set_phy_loopback(adapter);
-}
-
-static void igb_loopback_cleanup(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 rctl;
- u16 phy_reg;
-
- if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
- u32 reg;
-
- /* Disable near end loopback on DH89xxCC */
- reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
- reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK ) |
- E1000_MPHY_PCS_CLK_REG_OFFSET;
- E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);
-
- reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
- reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
- E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
- }
-
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-
- hw->mac.autoneg = TRUE;
- e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
- if (phy_reg & MII_CR_LOOPBACK) {
- phy_reg &= ~MII_CR_LOOPBACK;
- if (hw->phy.type == I210_I_PHY_ID)
- e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
- e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
- e1000_phy_commit(hw);
- }
-}
-static void igb_create_lbtest_frame(struct sk_buff *skb,
- unsigned int frame_size)
-{
- memset(skb->data, 0xFF, frame_size);
- frame_size /= 2;
- memset(&skb->data[frame_size], 0xAA, frame_size - 1);
- memset(&skb->data[frame_size + 10], 0xBE, 1);
- memset(&skb->data[frame_size + 12], 0xAF, 1);
-}
-
-static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
- unsigned int frame_size)
-{
- unsigned char *data;
- bool match = true;
-
- frame_size >>= 1;
-
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- data = rx_buffer->skb->data;
-#else
- data = kmap(rx_buffer->page);
-#endif
-
- if (data[3] != 0xFF ||
- data[frame_size + 10] != 0xBE ||
- data[frame_size + 12] != 0xAF)
- match = false;
-
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- kunmap(rx_buffer->page);
-
-#endif
- return match;
-}
-
-static u16 igb_clean_test_rings(struct igb_ring *rx_ring,
- struct igb_ring *tx_ring,
- unsigned int size)
-{
- union e1000_adv_rx_desc *rx_desc;
- struct igb_rx_buffer *rx_buffer_info;
- struct igb_tx_buffer *tx_buffer_info;
- u16 rx_ntc, tx_ntc, count = 0;
-
- /* initialize next to clean and descriptor values */
- rx_ntc = rx_ring->next_to_clean;
- tx_ntc = tx_ring->next_to_clean;
- rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
-
- while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
- /* check rx buffer */
- rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
-
- /* sync Rx buffer for CPU read */
- dma_sync_single_for_cpu(rx_ring->dev,
- rx_buffer_info->dma,
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- IGB_RX_HDR_LEN,
-#else
- IGB_RX_BUFSZ,
-#endif
- DMA_FROM_DEVICE);
-
- /* verify contents of skb */
- if (igb_check_lbtest_frame(rx_buffer_info, size))
- count++;
-
- /* sync Rx buffer for device write */
- dma_sync_single_for_device(rx_ring->dev,
- rx_buffer_info->dma,
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- IGB_RX_HDR_LEN,
-#else
- IGB_RX_BUFSZ,
-#endif
- DMA_FROM_DEVICE);
-
- /* unmap buffer on tx side */
- tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
- igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-
- /* increment rx/tx next to clean counters */
- rx_ntc++;
- if (rx_ntc == rx_ring->count)
- rx_ntc = 0;
- tx_ntc++;
- if (tx_ntc == tx_ring->count)
- tx_ntc = 0;
-
- /* fetch next descriptor */
- rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
- }
-
- /* re-map buffers to ring, store next to clean values */
- igb_alloc_rx_buffers(rx_ring, count);
- rx_ring->next_to_clean = rx_ntc;
- tx_ring->next_to_clean = tx_ntc;
-
- return count;
-}
-
-static int igb_run_loopback_test(struct igb_adapter *adapter)
-{
- struct igb_ring *tx_ring = &adapter->test_tx_ring;
- struct igb_ring *rx_ring = &adapter->test_rx_ring;
- u16 i, j, lc, good_cnt;
- int ret_val = 0;
- unsigned int size = IGB_RX_HDR_LEN;
- netdev_tx_t tx_ret_val;
- struct sk_buff *skb;
-
- /* allocate test skb */
- skb = alloc_skb(size, GFP_KERNEL);
- if (!skb)
- return 11;
-
- /* place data into test skb */
- igb_create_lbtest_frame(skb, size);
- skb_put(skb, size);
-
- /*
- * Calculate the loop count based on the largest descriptor ring
- * The idea is to wrap the largest ring a number of times using 64
- * send/receive pairs during each loop
- */
-
- if (rx_ring->count <= tx_ring->count)
- lc = ((tx_ring->count / 64) * 2) + 1;
- else
- lc = ((rx_ring->count / 64) * 2) + 1;
-
- for (j = 0; j <= lc; j++) { /* loop count loop */
- /* reset count of good packets */
- good_cnt = 0;
-
- /* place 64 packets on the transmit queue*/
- for (i = 0; i < 64; i++) {
- skb_get(skb);
- tx_ret_val = igb_xmit_frame_ring(skb, tx_ring);
- if (tx_ret_val == NETDEV_TX_OK)
- good_cnt++;
- }
-
- if (good_cnt != 64) {
- ret_val = 12;
- break;
- }
-
- /* allow 200 milliseconds for packets to go from tx to rx */
- msleep(200);
-
- good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
- if (good_cnt != 64) {
- ret_val = 13;
- break;
- }
- } /* end loop count loop */
-
- /* free the original skb */
- kfree_skb(skb);
-
- return ret_val;
-}
-
-static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
-{
- /* PHY loopback cannot be performed if SoL/IDER
- * sessions are active */
- if (e1000_check_reset_block(&adapter->hw)) {
- dev_err(pci_dev_to_dev(adapter->pdev),
- "Cannot do PHY loopback test "
- "when SoL/IDER is active.\n");
- *data = 0;
- goto out;
- }
- if (adapter->hw.mac.type == e1000_i354) {
- dev_info(&adapter->pdev->dev,
- "Loopback test not supported on i354.\n");
- *data = 0;
- goto out;
- }
- *data = igb_setup_desc_rings(adapter);
- if (*data)
- goto out;
- *data = igb_setup_loopback_test(adapter);
- if (*data)
- goto err_loopback;
- *data = igb_run_loopback_test(adapter);
-
- igb_loopback_cleanup(adapter);
-
-err_loopback:
- igb_free_desc_rings(adapter);
-out:
- return *data;
-}
-
-static int igb_link_test(struct igb_adapter *adapter, u64 *data)
-{
- u32 link;
- int i, time;
-
- *data = 0;
- time = 0;
- if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
- int i = 0;
- adapter->hw.mac.serdes_has_link = FALSE;
-
- /* On some blade server designs, link establishment
- * could take as long as 2-3 minutes */
- do {
- e1000_check_for_link(&adapter->hw);
- if (adapter->hw.mac.serdes_has_link)
- goto out;
- msleep(20);
- } while (i++ < 3750);
-
- *data = 1;
- } else {
- for (i=0; i < IGB_MAX_LINK_TRIES; i++) {
- link = igb_has_link(adapter);
- if (link)
- goto out;
- else {
- time++;
- msleep(1000);
- }
- }
- if (!link)
- *data = 1;
- }
- out:
- return *data;
-}
-
-static void igb_diag_test(struct net_device *netdev,
- struct ethtool_test *eth_test, u64 *data)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- u16 autoneg_advertised;
- u8 forced_speed_duplex, autoneg;
- bool if_running = netif_running(netdev);
-
- set_bit(__IGB_TESTING, &adapter->state);
- if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
- /* Offline tests */
-
- /* save speed, duplex, autoneg settings */
- autoneg_advertised = adapter->hw.phy.autoneg_advertised;
- forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
- autoneg = adapter->hw.mac.autoneg;
-
- dev_info(pci_dev_to_dev(adapter->pdev), "offline testing starting\n");
-
- /* power up link for link test */
- igb_power_up_link(adapter);
-
- /* Link test performed before hardware reset so autoneg doesn't
- * interfere with test result */
- if (igb_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- if (if_running)
- /* indicate we're in test mode */
- dev_close(netdev);
- else
- igb_reset(adapter);
-
- if (igb_reg_test(adapter, &data[0]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- igb_reset(adapter);
- if (igb_eeprom_test(adapter, &data[1]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- igb_reset(adapter);
- if (igb_intr_test(adapter, &data[2]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- igb_reset(adapter);
-
- /* power up link for loopback test */
- igb_power_up_link(adapter);
-
- if (igb_loopback_test(adapter, &data[3]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- /* restore speed, duplex, autoneg settings */
- adapter->hw.phy.autoneg_advertised = autoneg_advertised;
- adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
- adapter->hw.mac.autoneg = autoneg;
-
- /* force this routine to wait until autoneg complete/timeout */
- adapter->hw.phy.autoneg_wait_to_complete = TRUE;
- igb_reset(adapter);
- adapter->hw.phy.autoneg_wait_to_complete = FALSE;
-
- clear_bit(__IGB_TESTING, &adapter->state);
- if (if_running)
- dev_open(netdev);
- } else {
- dev_info(pci_dev_to_dev(adapter->pdev), "online testing starting\n");
-
- /* PHY is powered down when interface is down */
- if (if_running && igb_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
- else
- data[4] = 0;
-
- /* Online tests aren't run; pass by default */
- data[0] = 0;
- data[1] = 0;
- data[2] = 0;
- data[3] = 0;
-
- clear_bit(__IGB_TESTING, &adapter->state);
- }
- msleep_interruptible(4 * 1000);
-}
-
-static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
-
- wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC |
- WAKE_PHY;
- wol->wolopts = 0;
-
- if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
- return;
-
- /* apply any specific unsupported masks here */
- switch (adapter->hw.device_id) {
- default:
- break;
- }
-
- if (adapter->wol & E1000_WUFC_EX)
- wol->wolopts |= WAKE_UCAST;
- if (adapter->wol & E1000_WUFC_MC)
- wol->wolopts |= WAKE_MCAST;
- if (adapter->wol & E1000_WUFC_BC)
- wol->wolopts |= WAKE_BCAST;
- if (adapter->wol & E1000_WUFC_MAG)
- wol->wolopts |= WAKE_MAGIC;
- if (adapter->wol & E1000_WUFC_LNKC)
- wol->wolopts |= WAKE_PHY;
-}
-
-static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
-
- if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
- return -EOPNOTSUPP;
-
- if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
- return wol->wolopts ? -EOPNOTSUPP : 0;
-
- /* these settings will always override what we currently have */
- adapter->wol = 0;
-
- if (wol->wolopts & WAKE_UCAST)
- adapter->wol |= E1000_WUFC_EX;
- if (wol->wolopts & WAKE_MCAST)
- adapter->wol |= E1000_WUFC_MC;
- if (wol->wolopts & WAKE_BCAST)
- adapter->wol |= E1000_WUFC_BC;
- if (wol->wolopts & WAKE_MAGIC)
- adapter->wol |= E1000_WUFC_MAG;
- if (wol->wolopts & WAKE_PHY)
- adapter->wol |= E1000_WUFC_LNKC;
- device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
-
- return 0;
-}
-
-/* bit defines for adapter->led_status */
-#ifdef HAVE_ETHTOOL_SET_PHYS_ID
-static int igb_set_phys_id(struct net_device *netdev,
- enum ethtool_phys_id_state state)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- e1000_blink_led(hw);
- return 2;
- case ETHTOOL_ID_ON:
- e1000_led_on(hw);
- break;
- case ETHTOOL_ID_OFF:
- e1000_led_off(hw);
- break;
- case ETHTOOL_ID_INACTIVE:
- e1000_led_off(hw);
- e1000_cleanup_led(hw);
- break;
- }
-
- return 0;
-}
-#else
-static int igb_phys_id(struct net_device *netdev, u32 data)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- unsigned long timeout;
-
- timeout = data * 1000;
-
- /*
- * msleep_interruptable only accepts unsigned int so we are limited
- * in how long a duration we can wait
- */
- if (!timeout || timeout > UINT_MAX)
- timeout = UINT_MAX;
-
- e1000_blink_led(hw);
- msleep_interruptible(timeout);
-
- e1000_led_off(hw);
- e1000_cleanup_led(hw);
-
- return 0;
-}
-#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
-
-static int igb_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- int i;
-
- if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 3) &&
- (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
- (ec->rx_coalesce_usecs == 2))
- {
- printk("set_coalesce:invalid parameter..");
- return -EINVAL;
- }
-
- if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
- ((ec->tx_coalesce_usecs > 3) &&
- (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
- (ec->tx_coalesce_usecs == 2))
- return -EINVAL;
-
- if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
- return -EINVAL;
-
- if (ec->tx_max_coalesced_frames_irq)
- adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
-
- /* If ITR is disabled, disable DMAC */
- if (ec->rx_coalesce_usecs == 0) {
- adapter->dmac = IGB_DMAC_DISABLE;
- }
-
- /* convert to rate of irq's per second */
- if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
- adapter->rx_itr_setting = ec->rx_coalesce_usecs;
- else
- adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
-
- /* convert to rate of irq's per second */
- if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
- adapter->tx_itr_setting = adapter->rx_itr_setting;
- else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
- adapter->tx_itr_setting = ec->tx_coalesce_usecs;
- else
- adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
-
- for (i = 0; i < adapter->num_q_vectors; i++) {
- struct igb_q_vector *q_vector = adapter->q_vector[i];
- q_vector->tx.work_limit = adapter->tx_work_limit;
- if (q_vector->rx.ring)
- q_vector->itr_val = adapter->rx_itr_setting;
- else
- q_vector->itr_val = adapter->tx_itr_setting;
- if (q_vector->itr_val && q_vector->itr_val <= 3)
- q_vector->itr_val = IGB_START_ITR;
- q_vector->set_itr = 1;
- }
-
- return 0;
-}
-
-static int igb_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
-
- if (adapter->rx_itr_setting <= 3)
- ec->rx_coalesce_usecs = adapter->rx_itr_setting;
- else
- ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
-
- ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
-
- if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
- if (adapter->tx_itr_setting <= 3)
- ec->tx_coalesce_usecs = adapter->tx_itr_setting;
- else
- ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
- }
-
- return 0;
-}
-
-static int igb_nway_reset(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- if (netif_running(netdev))
- igb_reinit_locked(adapter);
- return 0;
-}
-
-#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
-static int igb_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return IGB_STATS_LEN;
- case ETH_SS_TEST:
- return IGB_TEST_LEN;
- default:
- return -ENOTSUPP;
- }
-}
-#else
-static int igb_get_stats_count(struct net_device *netdev)
-{
- return IGB_STATS_LEN;
-}
-
-static int igb_diag_test_count(struct net_device *netdev)
-{
- return IGB_TEST_LEN;
-}
-#endif
-
-static void igb_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
-#ifdef HAVE_NETDEV_STATS_IN_NETDEV
- struct net_device_stats *net_stats = &netdev->stats;
-#else
- struct net_device_stats *net_stats = &adapter->net_stats;
-#endif
- u64 *queue_stat;
- int i, j, k;
- char *p;
-
- igb_update_stats(adapter);
-
- for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
- p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
- data[i] = (igb_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
- p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
- data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- for (j = 0; j < adapter->num_tx_queues; j++) {
- queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
- for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
- data[i] = queue_stat[k];
- }
- for (j = 0; j < adapter->num_rx_queues; j++) {
- queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
- for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
- data[i] = queue_stat[k];
- }
-}
-
-static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- u8 *p = data;
- int i;
-
- switch (stringset) {
- case ETH_SS_TEST:
- memcpy(data, *igb_gstrings_test,
- IGB_TEST_LEN*ETH_GSTRING_LEN);
- break;
- case ETH_SS_STATS:
- for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
- memcpy(p, igb_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
- memcpy(p, igb_gstrings_net_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < adapter->num_tx_queues; i++) {
- sprintf(p, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_restart", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < adapter->num_rx_queues; i++) {
- sprintf(p, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_drops", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_csum_err", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_alloc_failed", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_ipv4_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_ipv4e_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_ipv6_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_ipv6e_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_tcp_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_udp_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_sctp_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_nfs_packets", i);
- p += ETH_GSTRING_LEN;
- }
-/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
- break;
- }
-}
-
-#ifdef HAVE_ETHTOOL_GET_TS_INFO
-static int igb_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
-{
- struct igb_adapter *adapter = netdev_priv(dev);
-
- switch (adapter->hw.mac.type) {
-#ifdef HAVE_PTP_1588_CLOCK
- case e1000_82575:
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- return 0;
- case e1000_82576:
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- case e1000_i210:
- case e1000_i211:
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
-
- if (adapter->ptp_clock)
- info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
-
- info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
-
-		/* 82580 and newer can timestamp all packets; 82576 cannot */
- if (adapter->hw.mac.type >= e1000_82580)
- info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
- else
- info->rx_filters |=
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
-
- return 0;
-#endif /* HAVE_PTP_1588_CLOCK */
- default:
- return -EOPNOTSUPP;
- }
-}
-#endif /* HAVE_ETHTOOL_GET_TS_INFO */
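-
-/* Userspace reads the capabilities reported above with the standard ethtool
- * timestamping query (interface name illustrative):
- *   ethtool -T eth0
- * which lists the SOF_TIMESTAMPING_* flags, the rx filter modes and the PHC
- * index filled in by igb_get_ts_info().
- */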
-
-#ifdef CONFIG_PM_RUNTIME
-static int igb_ethtool_begin(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
-
- pm_runtime_get_sync(&adapter->pdev->dev);
-
- return 0;
-}
-
-static void igb_ethtool_complete(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
-
- pm_runtime_put(&adapter->pdev->dev);
-}
-#endif /* CONFIG_PM_RUNTIME */
-
-#ifndef HAVE_NDO_SET_FEATURES
-static u32 igb_get_rx_csum(struct net_device *netdev)
-{
- return !!(netdev->features & NETIF_F_RXCSUM);
-}
-
-static int igb_set_rx_csum(struct net_device *netdev, u32 data)
-{
- const u32 feature_list = NETIF_F_RXCSUM;
-
- if (data)
- netdev->features |= feature_list;
- else
- netdev->features &= ~feature_list;
-
- return 0;
-}
-
-static int igb_set_tx_csum(struct net_device *netdev, u32 data)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
-#ifdef NETIF_F_IPV6_CSUM
- u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-#else
- u32 feature_list = NETIF_F_IP_CSUM;
-#endif
-
- if (adapter->hw.mac.type >= e1000_82576)
- feature_list |= NETIF_F_SCTP_CSUM;
-
- if (data)
- netdev->features |= feature_list;
- else
- netdev->features &= ~feature_list;
-
- return 0;
-}
-
-#ifdef NETIF_F_TSO
-static int igb_set_tso(struct net_device *netdev, u32 data)
-{
-#ifdef NETIF_F_TSO6
- const u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6;
-#else
- const u32 feature_list = NETIF_F_TSO;
-#endif
-
- if (data)
- netdev->features |= feature_list;
- else
- netdev->features &= ~feature_list;
-
-#ifndef HAVE_NETDEV_VLAN_FEATURES
- if (!data) {
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct net_device *v_netdev;
- int i;
-
- /* disable TSO on all VLANs if they're present */
- if (!adapter->vlgrp)
- goto tso_out;
-
- for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- v_netdev = vlan_group_get_device(adapter->vlgrp, i);
- if (!v_netdev)
- continue;
-
- v_netdev->features &= ~feature_list;
- vlan_group_set_device(adapter->vlgrp, i, v_netdev);
- }
- }
-
-tso_out:
-
-#endif /* HAVE_NETDEV_VLAN_FEATURES */
- return 0;
-}
-
-#endif /* NETIF_F_TSO */
-#ifdef ETHTOOL_GFLAGS
-static int igb_set_flags(struct net_device *netdev, u32 data)
-{
- u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
- ETH_FLAG_RXHASH;
-#ifndef HAVE_VLAN_RX_REGISTER
- u32 changed = netdev->features ^ data;
-#endif
- int rc;
-#ifndef IGB_NO_LRO
-
- supported_flags |= ETH_FLAG_LRO;
-#endif
-	/*
-	 * Since there is no support for enabling tx vlan acceleration
-	 * separately, make sure the tx flag is cleared whenever rx is.
-	 */
- if (!(data & ETH_FLAG_RXVLAN))
- data &= ~ETH_FLAG_TXVLAN;
-
- rc = ethtool_op_set_flags(netdev, data, supported_flags);
- if (rc)
- return rc;
-#ifndef HAVE_VLAN_RX_REGISTER
-
- if (changed & ETH_FLAG_RXVLAN)
- igb_vlan_mode(netdev, data);
-#endif
-
- return 0;
-}
-
-#endif /* ETHTOOL_GFLAGS */
-#endif /* HAVE_NDO_SET_FEATURES */
-#ifdef ETHTOOL_SADV_COAL
-static int igb_set_adv_coal(struct net_device *netdev, struct ethtool_value *edata)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
-
-	switch (edata->data) {
-	case IGB_DMAC_DISABLE:
-	case IGB_DMAC_MIN:
-	case IGB_DMAC_500:
-	case IGB_DMAC_EN_DEFAULT:
-	case IGB_DMAC_2000:
-	case IGB_DMAC_3000:
-	case IGB_DMAC_4000:
-	case IGB_DMAC_5000:
-	case IGB_DMAC_6000:
-	case IGB_DMAC_7000:
-	case IGB_DMAC_8000:
-	case IGB_DMAC_9000:
-	case IGB_DMAC_MAX:
-		adapter->dmac = edata->data;
-		break;
-	default:
-		adapter->dmac = IGB_DMAC_DISABLE;
-		printk(KERN_WARNING "set_dmac: invalid setting, setting DMAC to %d\n",
-		       adapter->dmac);
-	}
-	printk(KERN_INFO "%s: setting DMAC to %d\n", netdev->name, adapter->dmac);
- return 0;
-}
-#endif /* ETHTOOL_SADV_COAL */
-#ifdef ETHTOOL_GADV_COAL
-static void igb_get_dmac(struct net_device *netdev,
- struct ethtool_value *edata)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- edata->data = adapter->dmac;
-}
-#endif
-
-#ifdef ETHTOOL_GEEE
-static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 ret_val;
- u16 phy_data;
-
- if ((hw->mac.type < e1000_i350) ||
- (hw->phy.media_type != e1000_media_type_copper))
- return -EOPNOTSUPP;
-
- edata->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full);
-
- if (!hw->dev_spec._82575.eee_disable)
- edata->advertised =
- mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
-
- /* The IPCNFG and EEER registers are not supported on I354. */
- if (hw->mac.type == e1000_i354) {
- e1000_get_eee_status_i354(hw, (bool *)&edata->eee_active);
- } else {
- u32 eeer;
-
- eeer = E1000_READ_REG(hw, E1000_EEER);
-
- /* EEE status on negotiated link */
- if (eeer & E1000_EEER_EEE_NEG)
- edata->eee_active = true;
-
- if (eeer & E1000_EEER_TX_LPI_EN)
- edata->tx_lpi_enabled = true;
- }
-
- /* EEE Link Partner Advertised */
- switch (hw->mac.type) {
- case e1000_i350:
- ret_val = e1000_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350,
- &phy_data);
- if (ret_val)
- return -ENODATA;
-
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
-
- break;
- case e1000_i354:
- case e1000_i210:
- case e1000_i211:
- ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
- E1000_EEE_LP_ADV_DEV_I210,
- &phy_data);
- if (ret_val)
- return -ENODATA;
-
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
-
- break;
- default:
- break;
- }
-
- edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
-
- if ((hw->mac.type == e1000_i354) &&
- (edata->eee_enabled))
- edata->tx_lpi_enabled = true;
-
- /*
- * report correct negotiated EEE status for devices that
- * wrongly report EEE at half-duplex
- */
- if (adapter->link_duplex == HALF_DUPLEX) {
- edata->eee_enabled = false;
- edata->eee_active = false;
- edata->tx_lpi_enabled = false;
-		edata->advertised = 0;
- }
-
- return 0;
-}
-#endif
-
-#ifdef ETHTOOL_SEEE
-static int igb_set_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
- s32 ret_val;
-
- if ((hw->mac.type < e1000_i350) ||
- (hw->phy.media_type != e1000_media_type_copper))
- return -EOPNOTSUPP;
-
- ret_val = igb_get_eee(netdev, &eee_curr);
- if (ret_val)
- return ret_val;
-
- if (eee_curr.eee_enabled) {
- if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
- dev_err(pci_dev_to_dev(adapter->pdev),
- "Setting EEE tx-lpi is not supported\n");
- return -EINVAL;
- }
-
- /* Tx LPI time is not implemented currently */
- if (edata->tx_lpi_timer) {
- dev_err(pci_dev_to_dev(adapter->pdev),
- "Setting EEE Tx LPI timer is not supported\n");
- return -EINVAL;
- }
-
- if (edata->advertised &
- ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
- dev_err(pci_dev_to_dev(adapter->pdev),
- "EEE Advertisement supports only 100Tx and or 100T full duplex\n");
- return -EINVAL;
- }
-
- } else if (!edata->eee_enabled) {
- dev_err(pci_dev_to_dev(adapter->pdev),
- "Setting EEE options is not supported with EEE disabled\n");
- return -EINVAL;
- }
-
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
-
- if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
- hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
-
- /* reset link */
- if (netif_running(netdev))
- igb_reinit_locked(adapter);
- else
- igb_reset(adapter);
- }
-
- return 0;
-}
-#endif /* ETHTOOL_SEEE */
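-
-/* A typical entry point for the setter above (interface name illustrative):
- *   ethtool --set-eee eth0 eee on
- * toggles dev_spec._82575.eee_disable and reinitializes the link, while the
- * advertisement mask is validated against 100/1000 full duplex only.
- */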
-
-#ifdef ETHTOOL_GRXRINGS
-static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
- struct ethtool_rxnfc *cmd)
-{
- cmd->data = 0;
-
- /* Report default options for RSS on igb */
- switch (cmd->flow_type) {
-	case TCP_V4_FLOW:
-		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-		/* fall through */
-	case UDP_V4_FLOW:
-		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
-			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-		/* fall through */
-	case SCTP_V4_FLOW:
-	case AH_ESP_V4_FLOW:
-	case AH_V4_FLOW:
-	case ESP_V4_FLOW:
-	case IPV4_FLOW:
-		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
-		break;
-	case TCP_V6_FLOW:
-		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-		/* fall through */
-	case UDP_V6_FLOW:
-		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
-			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-		/* fall through */
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case AH_V6_FLOW:
- case ESP_V6_FLOW:
- case IPV6_FLOW:
- cmd->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
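-
-/* These defaults are what userspace sees via ETHTOOL_GRXFH, e.g.
- * (interface name illustrative):
- *   ethtool -n eth0 rx-flow-hash tcp4
- * reports IP SA/DA plus L4 bytes 0 & 1 and 2 & 3, i.e. a 4-tuple hash.
- */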
-
-static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
-#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
- void *rule_locs)
-#else
- u32 *rule_locs)
-#endif
-{
- struct igb_adapter *adapter = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
- ret = 0;
- break;
- case ETHTOOL_GRXFH:
- ret = igb_get_rss_hash_opts(adapter, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
- IGB_FLAG_RSS_FIELD_IPV6_UDP)
-static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
- struct ethtool_rxnfc *nfc)
-{
- u32 flags = adapter->flags;
-
- /*
- * RSS does not support anything other than hashing
- * to queues on src and dst IPs and ports
- */
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- switch (nfc->flow_type) {
- case TCP_V4_FLOW:
- case TCP_V6_FLOW:
- if (!(nfc->data & RXH_IP_SRC) ||
- !(nfc->data & RXH_IP_DST) ||
- !(nfc->data & RXH_L4_B_0_1) ||
- !(nfc->data & RXH_L4_B_2_3))
- return -EINVAL;
- break;
- case UDP_V4_FLOW:
- if (!(nfc->data & RXH_IP_SRC) ||
- !(nfc->data & RXH_IP_DST))
- return -EINVAL;
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
- break;
- default:
- return -EINVAL;
- }
- break;
- case UDP_V6_FLOW:
- if (!(nfc->data & RXH_IP_SRC) ||
- !(nfc->data & RXH_IP_DST))
- return -EINVAL;
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
- break;
- default:
- return -EINVAL;
- }
- break;
- case AH_ESP_V4_FLOW:
- case AH_V4_FLOW:
- case ESP_V4_FLOW:
- case SCTP_V4_FLOW:
- case AH_ESP_V6_FLOW:
- case AH_V6_FLOW:
- case ESP_V6_FLOW:
- case SCTP_V6_FLOW:
- if (!(nfc->data & RXH_IP_SRC) ||
- !(nfc->data & RXH_IP_DST) ||
- (nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- /* if we changed something we need to update flags */
- if (flags != adapter->flags) {
- struct e1000_hw *hw = &adapter->hw;
- u32 mrqc = E1000_READ_REG(hw, E1000_MRQC);
-
- if ((flags & UDP_RSS_FLAGS) &&
- !(adapter->flags & UDP_RSS_FLAGS))
- DPRINTK(DRV, WARNING,
- "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
-
- adapter->flags = flags;
-
- /* Perform hash on these packet types */
- mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
- E1000_MRQC_RSS_FIELD_IPV4_TCP |
- E1000_MRQC_RSS_FIELD_IPV6 |
- E1000_MRQC_RSS_FIELD_IPV6_TCP;
-
- mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
- E1000_MRQC_RSS_FIELD_IPV6_UDP);
-
- if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
- mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
-
- if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
- mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
-
- E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
- }
-
- return 0;
-}
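-
-/* A hypothetical invocation exercising the UDP_V4_FLOW path above:
- *   ethtool -N eth0 rx-flow-hash udp4 sdfn
- * ("sdfn" = src IP, dst IP, and both L4 port halves) sets
- * IGB_FLAG_RSS_FIELD_IPV4_UDP and ORs E1000_MRQC_RSS_FIELD_IPV4_UDP into MRQC.
- */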
-
-static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- struct igb_adapter *adapter = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = igb_set_rss_hash_opt(adapter, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-#endif /* ETHTOOL_GRXRINGS */
-
-static const struct ethtool_ops igb_ethtool_ops = {
- .get_settings = igb_get_settings,
- .set_settings = igb_set_settings,
- .get_drvinfo = igb_get_drvinfo,
- .get_regs_len = igb_get_regs_len,
- .get_regs = igb_get_regs,
- .get_wol = igb_get_wol,
- .set_wol = igb_set_wol,
- .get_msglevel = igb_get_msglevel,
- .set_msglevel = igb_set_msglevel,
- .nway_reset = igb_nway_reset,
- .get_link = igb_get_link,
- .get_eeprom_len = igb_get_eeprom_len,
- .get_eeprom = igb_get_eeprom,
- .set_eeprom = igb_set_eeprom,
- .get_ringparam = igb_get_ringparam,
- .set_ringparam = igb_set_ringparam,
- .get_pauseparam = igb_get_pauseparam,
- .set_pauseparam = igb_set_pauseparam,
- .self_test = igb_diag_test,
- .get_strings = igb_get_strings,
-#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
-#ifdef HAVE_ETHTOOL_SET_PHYS_ID
- .set_phys_id = igb_set_phys_id,
-#else
- .phys_id = igb_phys_id,
-#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
-#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
-#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
- .get_sset_count = igb_get_sset_count,
-#else
- .get_stats_count = igb_get_stats_count,
- .self_test_count = igb_diag_test_count,
-#endif
- .get_ethtool_stats = igb_get_ethtool_stats,
-#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
- .get_perm_addr = ethtool_op_get_perm_addr,
-#endif
- .get_coalesce = igb_get_coalesce,
- .set_coalesce = igb_set_coalesce,
-#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
-#ifdef HAVE_ETHTOOL_GET_TS_INFO
- .get_ts_info = igb_get_ts_info,
-#endif /* HAVE_ETHTOOL_GET_TS_INFO */
-#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
-#ifdef CONFIG_PM_RUNTIME
- .begin = igb_ethtool_begin,
- .complete = igb_ethtool_complete,
-#endif /* CONFIG_PM_RUNTIME */
-#ifndef HAVE_NDO_SET_FEATURES
- .get_rx_csum = igb_get_rx_csum,
- .set_rx_csum = igb_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = igb_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
-#ifdef NETIF_F_TSO
- .get_tso = ethtool_op_get_tso,
- .set_tso = igb_set_tso,
-#endif
-#ifdef ETHTOOL_GFLAGS
- .get_flags = ethtool_op_get_flags,
- .set_flags = igb_set_flags,
-#endif /* ETHTOOL_GFLAGS */
-#endif /* HAVE_NDO_SET_FEATURES */
-#ifdef ETHTOOL_GADV_COAL
-	.get_advcoal = igb_get_dmac,
-	.set_advcoal = igb_set_adv_coal,
-#endif /* ETHTOOL_GADV_COAL */
-#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
-#ifdef ETHTOOL_GEEE
- .get_eee = igb_get_eee,
-#endif
-#ifdef ETHTOOL_SEEE
- .set_eee = igb_set_eee,
-#endif
-#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
-#ifdef ETHTOOL_GRXRINGS
- .get_rxnfc = igb_get_rxnfc,
- .set_rxnfc = igb_set_rxnfc,
-#endif
-};
-
-#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
-static const struct ethtool_ops_ext igb_ethtool_ops_ext = {
- .size = sizeof(struct ethtool_ops_ext),
- .get_ts_info = igb_get_ts_info,
- .set_phys_id = igb_set_phys_id,
- .get_eee = igb_get_eee,
- .set_eee = igb_set_eee,
-};
-
-void igb_set_ethtool_ops(struct net_device *netdev)
-{
- SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
- set_ethtool_ops_ext(netdev, &igb_ethtool_ops_ext);
-}
-#else
-void igb_set_ethtool_ops(struct net_device *netdev)
-{
- /* have to "undeclare" const on this struct to remove warnings */
- SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igb_ethtool_ops);
-}
-#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
-#endif /* SIOCETHTOOL */
-
-
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_hwmon.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_hwmon.c
deleted file mode 100755
index 07a1ae07..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_hwmon.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "igb.h"
-#include "e1000_82575.h"
-#include "e1000_hw.h"
-#ifdef IGB_HWMON
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/sysfs.h>
-#include <linux/kobject.h>
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include <linux/hwmon.h>
-#include <linux/pci.h>
-
-#ifdef HAVE_I2C_SUPPORT
-static struct i2c_board_info i350_sensor_info = {
- I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
-};
-#endif /* HAVE_I2C_SUPPORT */
-
-/* hwmon callback functions */
-static ssize_t igb_hwmon_show_location(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
- dev_attr);
- return sprintf(buf, "loc%u\n",
- igb_attr->sensor->location);
-}
-
-static ssize_t igb_hwmon_show_temp(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
- dev_attr);
- unsigned int value;
-
- /* reset the temp field */
- igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw);
-
- value = igb_attr->sensor->temp;
-
- /* display millidegree */
- value *= 1000;
-
- return sprintf(buf, "%u\n", value);
-}
-
-static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
- dev_attr);
- unsigned int value = igb_attr->sensor->caution_thresh;
-
- /* display millidegree */
- value *= 1000;
-
- return sprintf(buf, "%u\n", value);
-}
-
-static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
- dev_attr);
- unsigned int value = igb_attr->sensor->max_op_thresh;
-
- /* display millidegree */
- value *= 1000;
-
- return sprintf(buf, "%u\n", value);
-}
-
-/* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
- * @adapter: pointer to the adapter structure
- * @offset: offset in the eeprom sensor data table
- * @type: type of sensor data to display
- *
- * For each file we want in hwmon's sysfs interface we need a device_attribute.
- * It is included in our hwmon_attr struct, which holds the references to the
- * data structures we need to get the data to display.
- */
-static int igb_add_hwmon_attr(struct igb_adapter *adapter,
-			      unsigned int offset, int type)
-{
- int rc;
- unsigned int n_attr;
- struct hwmon_attr *igb_attr;
-
- n_attr = adapter->igb_hwmon_buff.n_hwmon;
- igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr];
-
- switch (type) {
- case IGB_HWMON_TYPE_LOC:
- igb_attr->dev_attr.show = igb_hwmon_show_location;
- snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_label", offset);
- break;
- case IGB_HWMON_TYPE_TEMP:
- igb_attr->dev_attr.show = igb_hwmon_show_temp;
- snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_input", offset);
- break;
- case IGB_HWMON_TYPE_CAUTION:
- igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
- snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_max", offset);
- break;
- case IGB_HWMON_TYPE_MAX:
- igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
- snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_crit", offset);
- break;
- default:
- rc = -EPERM;
- return rc;
- }
-
-	/* These are always the same regardless of type */
- igb_attr->sensor =
- &adapter->hw.mac.thermal_sensor_data.sensor[offset];
- igb_attr->hw = &adapter->hw;
- igb_attr->dev_attr.store = NULL;
- igb_attr->dev_attr.attr.mode = S_IRUGO;
- igb_attr->dev_attr.attr.name = igb_attr->name;
- sysfs_attr_init(&igb_attr->dev_attr.attr);
- rc = device_create_file(&adapter->pdev->dev,
- &igb_attr->dev_attr);
- if (rc == 0)
- ++adapter->igb_hwmon_buff.n_hwmon;
-
- return rc;
-}
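-
-/* For the sensor at offset 1, the four types above yield the conventional
- * hwmon files temp1_label, temp1_input, temp1_max and temp1_crit under the
- * PCI device's sysfs directory.
- */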
-
-static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
-{
- int i;
-
- if (adapter == NULL)
- return;
-
- for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) {
- device_remove_file(&adapter->pdev->dev,
- &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr);
- }
-
- kfree(adapter->igb_hwmon_buff.hwmon_list);
-
- if (adapter->igb_hwmon_buff.device)
- hwmon_device_unregister(adapter->igb_hwmon_buff.device);
-}
-
-/* called from igb_main.c */
-void igb_sysfs_exit(struct igb_adapter *adapter)
-{
- igb_sysfs_del_adapter(adapter);
-}
-
-/* called from igb_main.c */
-int igb_sysfs_init(struct igb_adapter *adapter)
-{
- struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
- unsigned int i;
- int n_attrs;
- int rc = 0;
-#ifdef HAVE_I2C_SUPPORT
- struct i2c_client *client = NULL;
-#endif /* HAVE_I2C_SUPPORT */
-
- /* If this method isn't defined we don't support thermals */
- if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
- goto exit;
-
- /* Don't create thermal hwmon interface if no sensors present */
- rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
- if (rc)
- goto exit;
-#ifdef HAVE_I2C_SUPPORT
- /* init i2c_client */
- client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
- if (client == NULL) {
- dev_info(&adapter->pdev->dev,
- "Failed to create new i2c device..\n");
- goto exit;
- }
- adapter->i2c_client = client;
-#endif /* HAVE_I2C_SUPPORT */
-
-	/* Allocate space for the maximum number of attributes:
-	 * max num sensors * values (loc, temp, max, caution)
-	 */
- n_attrs = E1000_MAX_SENSORS * 4;
- igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
- GFP_KERNEL);
- if (!igb_hwmon->hwmon_list) {
- rc = -ENOMEM;
- goto err;
- }
-
- igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
- if (IS_ERR(igb_hwmon->device)) {
- rc = PTR_ERR(igb_hwmon->device);
- goto err;
- }
-
- for (i = 0; i < E1000_MAX_SENSORS; i++) {
-
- /* Only create hwmon sysfs entries for sensors that have
- * meaningful data.
- */
- if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
- continue;
-
- /* Bail if any hwmon attr struct fails to initialize */
- rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
- if (rc)
- goto err;
- }
-
- goto exit;
-
-err:
- igb_sysfs_del_adapter(adapter);
-exit:
- return rc;
-}
-#endif /* IGB_HWMON */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
deleted file mode 100755
index a802a021..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
+++ /dev/null
@@ -1,10263 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/netdevice.h>
-#include <linux/tcp.h>
-#ifdef NETIF_F_TSO
-#include <net/checksum.h>
-#ifdef NETIF_F_TSO6
-#include <linux/ipv6.h>
-#include <net/ip6_checksum.h>
-#endif
-#endif
-#ifdef SIOCGMIIPHY
-#include <linux/mii.h>
-#endif
-#ifdef SIOCETHTOOL
-#include <linux/ethtool.h>
-#endif
-#include <linux/if_vlan.h>
-#ifdef CONFIG_PM_RUNTIME
-#include <linux/pm_runtime.h>
-#endif /* CONFIG_PM_RUNTIME */
-
-#include <linux/if_bridge.h>
-#include "igb.h"
-#include "igb_vmdq.h"
-
-#include <linux/uio_driver.h>
-
-#if defined(DEBUG) || defined(DEBUG_DUMP) || defined(DEBUG_ICR) || defined(DEBUG_ITR)
-#define DRV_DEBUG "_debug"
-#else
-#define DRV_DEBUG
-#endif
-#define DRV_HW_PERF
-#define VERSION_SUFFIX
-
-#define MAJ 5
-#define MIN 0
-#define BUILD 6
-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." __stringify(BUILD) VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF
-
-char igb_driver_name[] = "igb";
-char igb_driver_version[] = DRV_VERSION;
-static const char igb_driver_string[] =
- "Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] =
- "Copyright (c) 2007-2013 Intel Corporation.";
-
-static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER) },
- /* required last entry */
- {0, }
-};
-
-//MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
-static void igb_set_sriov_capability(struct igb_adapter *adapter) __attribute__((__unused__));
-void igb_reset(struct igb_adapter *);
-static int igb_setup_all_tx_resources(struct igb_adapter *);
-static int igb_setup_all_rx_resources(struct igb_adapter *);
-static void igb_free_all_tx_resources(struct igb_adapter *);
-static void igb_free_all_rx_resources(struct igb_adapter *);
-static void igb_setup_mrqc(struct igb_adapter *);
-void igb_update_stats(struct igb_adapter *);
-static int igb_probe(struct pci_dev *, const struct pci_device_id *);
-static void __devexit igb_remove(struct pci_dev *pdev);
-static int igb_sw_init(struct igb_adapter *);
-static int igb_open(struct net_device *);
-static int igb_close(struct net_device *);
-static void igb_configure(struct igb_adapter *);
-static void igb_configure_tx(struct igb_adapter *);
-static void igb_configure_rx(struct igb_adapter *);
-static void igb_clean_all_tx_rings(struct igb_adapter *);
-static void igb_clean_all_rx_rings(struct igb_adapter *);
-static void igb_clean_tx_ring(struct igb_ring *);
-static void igb_set_rx_mode(struct net_device *);
-static void igb_update_phy_info(unsigned long);
-static void igb_watchdog(unsigned long);
-static void igb_watchdog_task(struct work_struct *);
-static void igb_dma_err_task(struct work_struct *);
-static void igb_dma_err_timer(unsigned long data);
-static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
-static struct net_device_stats *igb_get_stats(struct net_device *);
-static int igb_change_mtu(struct net_device *, int);
-void igb_full_sync_mac_table(struct igb_adapter *adapter);
-static int igb_set_mac(struct net_device *, void *);
-static void igb_set_uta(struct igb_adapter *adapter);
-static irqreturn_t igb_intr(int irq, void *);
-static irqreturn_t igb_intr_msi(int irq, void *);
-static irqreturn_t igb_msix_other(int irq, void *);
-static irqreturn_t igb_msix_ring(int irq, void *);
-#ifdef IGB_DCA
-static void igb_update_dca(struct igb_q_vector *);
-static void igb_setup_dca(struct igb_adapter *);
-#endif /* IGB_DCA */
-static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_tx_irq(struct igb_q_vector *);
-static bool igb_clean_rx_irq(struct igb_q_vector *, int);
-static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
-static void igb_tx_timeout(struct net_device *);
-static void igb_reset_task(struct work_struct *);
-#ifdef HAVE_VLAN_RX_REGISTER
-static void igb_vlan_mode(struct net_device *, struct vlan_group *);
-#endif
-#ifdef HAVE_VLAN_PROTOCOL
-static int igb_vlan_rx_add_vid(struct net_device *,
- __be16 proto, u16);
-static int igb_vlan_rx_kill_vid(struct net_device *,
- __be16 proto, u16);
-#elif defined HAVE_INT_NDO_VLAN_RX_ADD_VID
-#ifdef NETIF_F_HW_VLAN_CTAG_RX
-static int igb_vlan_rx_add_vid(struct net_device *,
- __always_unused __be16 proto, u16);
-static int igb_vlan_rx_kill_vid(struct net_device *,
- __always_unused __be16 proto, u16);
-#else
-static int igb_vlan_rx_add_vid(struct net_device *, u16);
-static int igb_vlan_rx_kill_vid(struct net_device *, u16);
-#endif
-#else
-static void igb_vlan_rx_add_vid(struct net_device *, u16);
-static void igb_vlan_rx_kill_vid(struct net_device *, u16);
-#endif
-static void igb_restore_vlan(struct igb_adapter *);
-void igb_rar_set(struct igb_adapter *adapter, u32 index);
-static void igb_ping_all_vfs(struct igb_adapter *);
-static void igb_msg_task(struct igb_adapter *);
-static void igb_vmm_control(struct igb_adapter *);
-static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
-static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
-static void igb_process_mdd_event(struct igb_adapter *);
-#ifdef IFLA_VF_MAX
-static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
-static int igb_ndo_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos);
-#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
-static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
- bool setting);
-#endif
-#ifdef HAVE_VF_MIN_MAX_TXRATE
-static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
-#else /* HAVE_VF_MIN_MAX_TXRATE */
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
-#endif /* HAVE_VF_MIN_MAX_TXRATE */
-static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
- struct ifla_vf_info *ivi);
-static void igb_check_vf_rate_limit(struct igb_adapter *);
-#endif
-static int igb_vf_configure(struct igb_adapter *adapter, int vf);
-#ifdef CONFIG_PM
-#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
-static int igb_suspend(struct device *dev);
-static int igb_resume(struct device *dev);
-#ifdef CONFIG_PM_RUNTIME
-static int igb_runtime_suspend(struct device *dev);
-static int igb_runtime_resume(struct device *dev);
-static int igb_runtime_idle(struct device *dev);
-#endif /* CONFIG_PM_RUNTIME */
-static const struct dev_pm_ops igb_pm_ops = {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
- .suspend = igb_suspend,
- .resume = igb_resume,
- .freeze = igb_suspend,
- .thaw = igb_resume,
- .poweroff = igb_suspend,
- .restore = igb_resume,
-#ifdef CONFIG_PM_RUNTIME
- .runtime_suspend = igb_runtime_suspend,
- .runtime_resume = igb_runtime_resume,
- .runtime_idle = igb_runtime_idle,
-#endif
-#else /* Linux >= 2.6.34 */
- SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
-#ifdef CONFIG_PM_RUNTIME
- SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
- igb_runtime_idle)
-#endif /* CONFIG_PM_RUNTIME */
-#endif /* Linux version */
-};
-#else
-static int igb_suspend(struct pci_dev *pdev, pm_message_t state);
-static int igb_resume(struct pci_dev *pdev);
-#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
-#endif /* CONFIG_PM */
-#ifndef USE_REBOOT_NOTIFIER
-static void igb_shutdown(struct pci_dev *);
-#else
-static int igb_notify_reboot(struct notifier_block *, unsigned long, void *);
-static struct notifier_block igb_notifier_reboot = {
- .notifier_call = igb_notify_reboot,
- .next = NULL,
- .priority = 0
-};
-#endif
-#ifdef IGB_DCA
-static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
-static struct notifier_block dca_notifier = {
- .notifier_call = igb_notify_dca,
- .next = NULL,
- .priority = 0
-};
-#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* for netdump / net console */
-static void igb_netpoll(struct net_device *);
-#endif
-
-#ifdef HAVE_PCI_ERS
-static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
- pci_channel_state_t);
-static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
-static void igb_io_resume(struct pci_dev *);
-
-static struct pci_error_handlers igb_err_handler = {
- .error_detected = igb_io_error_detected,
- .slot_reset = igb_io_slot_reset,
- .resume = igb_io_resume,
-};
-#endif
-
-static void igb_init_fw(struct igb_adapter *adapter);
-static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
-
-static struct pci_driver igb_driver = {
- .name = igb_driver_name,
- .id_table = igb_pci_tbl,
- .probe = igb_probe,
- .remove = __devexit_p(igb_remove),
-#ifdef CONFIG_PM
-#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
- .driver.pm = &igb_pm_ops,
-#else
- .suspend = igb_suspend,
- .resume = igb_resume,
-#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
-#endif /* CONFIG_PM */
-#ifndef USE_REBOOT_NOTIFIER
- .shutdown = igb_shutdown,
-#endif
-#ifdef HAVE_PCI_ERS
- .err_handler = &igb_err_handler
-#endif
-};
-
-//MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
-//MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
-//MODULE_LICENSE("GPL");
-//MODULE_VERSION(DRV_VERSION);
-
-static void igb_vfta_set(struct igb_adapter *adapter, u32 vid, bool add)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct e1000_host_mng_dhcp_cookie *mng_cookie = &hw->mng_cookie;
- u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
- u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
- u32 vfta;
-
- /*
- * if this is the management vlan the only option is to add it in so
- * that the management pass through will continue to work
- */
- if ((mng_cookie->status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
- (vid == mng_cookie->vlan_id))
- add = TRUE;
-
- vfta = adapter->shadow_vfta[index];
-
- if (add)
- vfta |= mask;
- else
- vfta &= ~mask;
-
- e1000_write_vfta(hw, index, vfta);
- adapter->shadow_vfta[index] = vfta;
-}
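-
-/* Index/bit arithmetic above, worked for an illustrative vid = 100 (assuming
- * the usual 5-bit entry shift and 0x1F bit mask behind those macros):
- * index = 100 >> 5 = 3, mask = 1 << (100 & 0x1F) = 1 << 4, so bit 4 of
- * shadow_vfta[3] and of the VFTA[3] register is set or cleared.
- */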
-
-static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
-//module_param(debug, int, 0);
-//MODULE_PARM_DESC(debug, "Debug level (0=none, ..., 16=all)");
-
-/**
- * igb_init_module - Driver Registration Routine
- *
- * igb_init_module is the first routine called when the driver is
- * loaded. All it does is register with the PCI subsystem.
- **/
-static int __init igb_init_module(void)
-{
- int ret;
-
- printk(KERN_INFO "%s - version %s\n",
- igb_driver_string, igb_driver_version);
-
- printk(KERN_INFO "%s\n", igb_copyright);
-#ifdef IGB_HWMON
-/* only use IGB_PROCFS if IGB_HWMON is not defined */
-#else
-#ifdef IGB_PROCFS
- if (igb_procfs_topdir_init())
- printk(KERN_INFO "Procfs failed to initialize topdir\n");
-#endif /* IGB_PROCFS */
-#endif /* IGB_HWMON */
-
-#ifdef IGB_DCA
- dca_register_notify(&dca_notifier);
-#endif
- ret = pci_register_driver(&igb_driver);
-#ifdef USE_REBOOT_NOTIFIER
- if (ret >= 0) {
- register_reboot_notifier(&igb_notifier_reboot);
- }
-#endif
- return ret;
-}
-
-#undef module_init
-#define module_init(x) static int x(void) __attribute__((__unused__));
-module_init(igb_init_module);
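-
-/* The module_init() redefinition above turns igb_init_module into an unused
- * static function, presumably so this file can be built into the larger KNI
- * module without registering its own initcall or a second PCI driver.
- */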
-
-/**
- * igb_exit_module - Driver Exit Cleanup Routine
- *
- * igb_exit_module is called just before the driver is removed
- * from memory.
- **/
-static void __exit igb_exit_module(void)
-{
-#ifdef IGB_DCA
- dca_unregister_notify(&dca_notifier);
-#endif
-#ifdef USE_REBOOT_NOTIFIER
- unregister_reboot_notifier(&igb_notifier_reboot);
-#endif
- pci_unregister_driver(&igb_driver);
-
-#ifdef IGB_HWMON
-/* only compile IGB_PROCFS if IGB_HWMON is not defined */
-#else
-#ifdef IGB_PROCFS
- igb_procfs_topdir_exit();
-#endif /* IGB_PROCFS */
-#endif /* IGB_HWMON */
-}
-
-#undef module_exit
-#define module_exit(x) static void x(void) __attribute__((__unused__));
-module_exit(igb_exit_module);
-
-#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
-/**
- * igb_cache_ring_register - Descriptor ring to register mapping
- * @adapter: board private structure to initialize
- *
- * Once we know the feature-set enabled for the device, we'll cache
- * the register offset the descriptor ring is assigned to.
- **/
-static void igb_cache_ring_register(struct igb_adapter *adapter)
-{
- int i = 0, j = 0;
- u32 rbase_offset = adapter->vfs_allocated_count;
-
- switch (adapter->hw.mac.type) {
- case e1000_82576:
- /* The queues are allocated for virtualization such that VF 0
- * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
- * In order to avoid collision we start at the first free queue
- * and continue consuming queues in the same sequence
- */
- if ((adapter->rss_queues > 1) && adapter->vmdq_pools) {
- for (; i < adapter->rss_queues; i++)
- adapter->rx_ring[i]->reg_idx = rbase_offset +
- Q_IDX_82576(i);
-		}
-		/* fall through */
-	case e1000_82575:
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- case e1000_i210:
- case e1000_i211:
- default:
- for (; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = rbase_offset + i;
- for (; j < adapter->num_tx_queues; j++)
- adapter->tx_ring[j]->reg_idx = rbase_offset + j;
- break;
- }
-}
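-
-/* Q_IDX_82576 interleaves indices so each VF's queue pair lands 8 apart:
- * for i = 0..3 it yields 0, 8, 1, 9; e.g. Q_IDX_82576(3) =
- * ((3 & 0x1) << 3) + (3 >> 1) = 8 + 1 = 9, matching the VF 0 -> queues
- * 0 and 8 layout described in the comment above.
- */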
-
-static void igb_configure_lli(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u16 port;
-
- /* LLI should only be enabled for MSI-X or MSI interrupts */
- if (!adapter->msix_entries && !(adapter->flags & IGB_FLAG_HAS_MSI))
- return;
-
- if (adapter->lli_port) {
- /* use filter 0 for port */
- port = htons((u16)adapter->lli_port);
- E1000_WRITE_REG(hw, E1000_IMIR(0),
- (port | E1000_IMIR_PORT_IM_EN));
- E1000_WRITE_REG(hw, E1000_IMIREXT(0),
- (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
- }
-
- if (adapter->flags & IGB_FLAG_LLI_PUSH) {
- /* use filter 1 for push flag */
- E1000_WRITE_REG(hw, E1000_IMIR(1),
- (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
- E1000_WRITE_REG(hw, E1000_IMIREXT(1),
- (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_PSH));
- }
-
- if (adapter->lli_size) {
- /* use filter 2 for size */
- E1000_WRITE_REG(hw, E1000_IMIR(2),
- (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
- E1000_WRITE_REG(hw, E1000_IMIREXT(2),
- (adapter->lli_size | E1000_IMIREXT_CTRL_BP));
- }
-
-}
-
-/**
- * igb_write_ivar - configure ivar for given MSI-X vector
- * @hw: pointer to the HW structure
- * @msix_vector: vector number we are allocating to a given ring
- * @index: row index of IVAR register to write within IVAR table
- * @offset: column offset in IVAR, should be a multiple of 8
- *
- * This function is intended to handle the writing of the IVAR register
- * for adapters 82576 and newer. The IVAR table consists of 2 columns,
- * each containing a cause allocation for an Rx and Tx ring, and a
- * variable number of rows depending on the number of queues supported.
- **/
-static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
- int index, int offset)
-{
- u32 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
-
- /* clear any bits that are currently set */
- ivar &= ~((u32)0xFF << offset);
-
- /* write vector and valid bit */
- ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
-
- E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
-}
-
-#define IGB_N0_QUEUE -1
-static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
-{
- struct igb_adapter *adapter = q_vector->adapter;
- struct e1000_hw *hw = &adapter->hw;
- int rx_queue = IGB_N0_QUEUE;
- int tx_queue = IGB_N0_QUEUE;
- u32 msixbm = 0;
-
- if (q_vector->rx.ring)
- rx_queue = q_vector->rx.ring->reg_idx;
- if (q_vector->tx.ring)
- tx_queue = q_vector->tx.ring->reg_idx;
-
- switch (hw->mac.type) {
- case e1000_82575:
- /* The 82575 assigns vectors using a bitmask, which matches the
- bitmask for the EICR/EIMS/EIMC registers. To assign one
- or more queues to a vector, we write the appropriate bits
- into the MSIXBM register for that vector. */
- if (rx_queue > IGB_N0_QUEUE)
- msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
- if (tx_queue > IGB_N0_QUEUE)
- msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
- if (!adapter->msix_entries && msix_vector == 0)
- msixbm |= E1000_EIMS_OTHER;
- E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), msix_vector, msixbm);
- q_vector->eims_value = msixbm;
- break;
- case e1000_82576:
- /*
- * 82576 uses a table that essentially consists of 2 columns
- * with 8 rows. The ordering is column-major so we use the
- * lower 3 bits as the row index, and the 4th bit as the
- * column offset.
- */
- if (rx_queue > IGB_N0_QUEUE)
- igb_write_ivar(hw, msix_vector,
- rx_queue & 0x7,
- (rx_queue & 0x8) << 1);
- if (tx_queue > IGB_N0_QUEUE)
- igb_write_ivar(hw, msix_vector,
- tx_queue & 0x7,
- ((tx_queue & 0x8) << 1) + 8);
- q_vector->eims_value = 1 << msix_vector;
- break;
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- case e1000_i210:
- case e1000_i211:
- /*
- * On 82580 and newer adapters the scheme is similar to 82576
- * however instead of ordering column-major we have things
- * ordered row-major. So we traverse the table by using
- * bit 0 as the column offset, and the remaining bits as the
- * row index.
- */
- if (rx_queue > IGB_N0_QUEUE)
- igb_write_ivar(hw, msix_vector,
- rx_queue >> 1,
- (rx_queue & 0x1) << 4);
- if (tx_queue > IGB_N0_QUEUE)
- igb_write_ivar(hw, msix_vector,
- tx_queue >> 1,
- ((tx_queue & 0x1) << 4) + 8);
- q_vector->eims_value = 1 << msix_vector;
- break;
- default:
- BUG();
- break;
- }
-
- /* add q_vector eims value to global eims_enable_mask */
- adapter->eims_enable_mask |= q_vector->eims_value;
-
- /* configure q_vector to set itr on first interrupt */
- q_vector->set_itr = 1;
-}
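-
-/* Worked example (queue numbers illustrative): rx_queue = 5 on an 82580+
- * part maps to igb_write_ivar(hw, vector, 5 >> 1 = 2, (5 & 0x1) << 4 = 16),
- * i.e. row 2, second byte pair; on the column-major 82576 table the same
- * queue uses row 5 & 0x7 = 5 and offset (5 & 0x8) << 1 = 0.
- */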
-
-/**
- * igb_configure_msix - Configure MSI-X hardware
- *
- * igb_configure_msix sets up the hardware to properly
- * generate MSI-X interrupts.
- **/
-static void igb_configure_msix(struct igb_adapter *adapter)
-{
- u32 tmp;
- int i, vector = 0;
- struct e1000_hw *hw = &adapter->hw;
-
- adapter->eims_enable_mask = 0;
-
- /* set vector for other causes, i.e. link changes */
- switch (hw->mac.type) {
- case e1000_82575:
- tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
- /* enable MSI-X PBA support*/
- tmp |= E1000_CTRL_EXT_PBA_CLR;
-
- /* Auto-Mask interrupts upon ICR read. */
- tmp |= E1000_CTRL_EXT_EIAME;
- tmp |= E1000_CTRL_EXT_IRCA;
-
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
-
- /* enable msix_other interrupt */
- E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), vector++,
- E1000_EIMS_OTHER);
- adapter->eims_other = E1000_EIMS_OTHER;
-
- break;
-
- case e1000_82576:
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- case e1000_i210:
- case e1000_i211:
- /* Turn on MSI-X capability first, or our settings
- * won't stick. And it will take days to debug. */
- E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
- E1000_GPIE_PBA | E1000_GPIE_EIAME |
- E1000_GPIE_NSICR);
-
- /* enable msix_other interrupt */
- adapter->eims_other = 1 << vector;
- tmp = (vector++ | E1000_IVAR_VALID) << 8;
-
- E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmp);
- break;
- default:
- /* do nothing, since nothing else supports MSI-X */
- break;
- } /* switch (hw->mac.type) */
-
- adapter->eims_enable_mask |= adapter->eims_other;
-
- for (i = 0; i < adapter->num_q_vectors; i++)
- igb_assign_vector(adapter->q_vector[i], vector++);
-
- E1000_WRITE_FLUSH(hw);
-}
-
-/**
- * igb_request_msix - Initialize MSI-X interrupts
- *
- * igb_request_msix allocates MSI-X vectors and requests interrupts from the
- * kernel.
- **/
-static int igb_request_msix(struct igb_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct e1000_hw *hw = &adapter->hw;
- int i, err = 0, vector = 0, free_vector = 0;
-
- err = request_irq(adapter->msix_entries[vector].vector,
- &igb_msix_other, 0, netdev->name, adapter);
- if (err)
- goto err_out;
-
- for (i = 0; i < adapter->num_q_vectors; i++) {
- struct igb_q_vector *q_vector = adapter->q_vector[i];
-
- vector++;
-
- q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
-
- if (q_vector->rx.ring && q_vector->tx.ring)
- sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
- q_vector->rx.ring->queue_index);
- else if (q_vector->tx.ring)
- sprintf(q_vector->name, "%s-tx-%u", netdev->name,
- q_vector->tx.ring->queue_index);
- else if (q_vector->rx.ring)
- sprintf(q_vector->name, "%s-rx-%u", netdev->name,
- q_vector->rx.ring->queue_index);
- else
- sprintf(q_vector->name, "%s-unused", netdev->name);
-
- err = request_irq(adapter->msix_entries[vector].vector,
- igb_msix_ring, 0, q_vector->name,
- q_vector);
- if (err)
- goto err_free;
- }
-
- igb_configure_msix(adapter);
- return 0;
-
-err_free:
- /* free already assigned IRQs */
- free_irq(adapter->msix_entries[free_vector++].vector, adapter);
-
- vector--;
- for (i = 0; i < vector; i++) {
- free_irq(adapter->msix_entries[free_vector++].vector,
- adapter->q_vector[i]);
- }
-err_out:
- return err;
-}
-
-static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
-{
- if (adapter->msix_entries) {
- pci_disable_msix(adapter->pdev);
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
- } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
- pci_disable_msi(adapter->pdev);
- }
-}
-
-/**
- * igb_free_q_vector - Free memory allocated for specific interrupt vector
- * @adapter: board private structure to initialize
- * @v_idx: Index of vector to be freed
- *
- * This function frees the memory allocated to the q_vector. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
- **/
-static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
-{
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
-
- if (q_vector->tx.ring)
- adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
-
- if (q_vector->rx.ring)
-		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
-
- adapter->q_vector[v_idx] = NULL;
- netif_napi_del(&q_vector->napi);
-#ifndef IGB_NO_LRO
- __skb_queue_purge(&q_vector->lrolist.active);
-#endif
- kfree(q_vector);
-}
-
-/**
- * igb_free_q_vectors - Free memory allocated for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * This function frees the memory allocated to the q_vectors. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
- **/
-static void igb_free_q_vectors(struct igb_adapter *adapter)
-{
- int v_idx = adapter->num_q_vectors;
-
- adapter->num_tx_queues = 0;
- adapter->num_rx_queues = 0;
- adapter->num_q_vectors = 0;
-
- while (v_idx--)
- igb_free_q_vector(adapter, v_idx);
-}
-
-/**
- * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
- *
- * This function resets the device so that it has 0 rx queues, tx queues, and
- * MSI-X interrupts allocated.
- */
-static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
-{
- igb_free_q_vectors(adapter);
- igb_reset_interrupt_capability(adapter);
-}
-
-/**
- * igb_process_mdd_event
- * @adapter - board private structure
- *
- * Identify a malicious VF, disable the VF TX/RX queues and log a message.
- */
-static void igb_process_mdd_event(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 lvmmc, vfte, vfre, mdfb;
- u8 vf_queue;
-
- lvmmc = E1000_READ_REG(hw, E1000_LVMMC);
- vf_queue = lvmmc >> 29;
-
-	/* VF index cannot be greater than or equal to the number of allocated VFs */
- if (vf_queue >= adapter->vfs_allocated_count)
- return;
-
- netdev_info(adapter->netdev,
- "VF %d misbehaved. VF queues are disabled. "
- "VM misbehavior code is 0x%x\n", vf_queue, lvmmc);
-
- /* Disable VFTE and VFRE related bits */
- vfte = E1000_READ_REG(hw, E1000_VFTE);
- vfte &= ~(1 << vf_queue);
- E1000_WRITE_REG(hw, E1000_VFTE, vfte);
-
- vfre = E1000_READ_REG(hw, E1000_VFRE);
- vfre &= ~(1 << vf_queue);
- E1000_WRITE_REG(hw, E1000_VFRE, vfre);
-
- /* Disable MDFB related bit. Clear on write */
- mdfb = E1000_READ_REG(hw, E1000_MDFB);
- mdfb |= (1 << vf_queue);
- E1000_WRITE_REG(hw, E1000_MDFB, mdfb);
-
- /* Reset the specific VF */
- E1000_WRITE_REG(hw, E1000_VTCTRL(vf_queue), E1000_VTCTRL_RST);
-}
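-
-/* The queue number lives in LVMMC bits 31:29, so an illustrative
- * lvmmc = 0x60000005 decodes to vf_queue = 3 while the whole register value
- * is logged as the misbehavior code.
- */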
-
-/**
- * igb_disable_mdd
- * @adapter - board private structure
- *
- * Disable MDD behavior in the HW
- **/
-static void igb_disable_mdd(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 reg;
-
-	if ((hw->mac.type != e1000_i350) &&
-	    (hw->mac.type != e1000_i354))
-		return;
-
- reg = E1000_READ_REG(hw, E1000_DTXCTL);
- reg &= (~E1000_DTXCTL_MDP_EN);
- E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
-}
-
-/**
- * igb_enable_mdd
- * @adapter - board private structure
- *
- * Enable the HW to detect malicious driver and sends an interrupt to
- * the driver.
- **/
-static void igb_enable_mdd(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 reg;
-
- /* Only available on i350 device */
- if (hw->mac.type != e1000_i350)
- return;
-
- reg = E1000_READ_REG(hw, E1000_DTXCTL);
- reg |= E1000_DTXCTL_MDP_EN;
- E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
-}
-
-/**
- * igb_reset_sriov_capability - disable SR-IOV if enabled
- *
- * Attempt to disable the single root I/O virtualization capabilities
- * currently enabled on the device.
- **/
-static void igb_reset_sriov_capability(struct igb_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_hw *hw = &adapter->hw;
-
- /* reclaim resources allocated to VFs */
- if (adapter->vf_data) {
- if (!pci_vfs_assigned(pdev)) {
- /*
- * disable iov and allow time for transactions to
- * clear
- */
- pci_disable_sriov(pdev);
- msleep(500);
-
- dev_info(pci_dev_to_dev(pdev), "IOV Disabled\n");
- } else {
- dev_info(pci_dev_to_dev(pdev), "IOV Not Disabled\n "
- "VF(s) are assigned to guests!\n");
- }
- /* Disable Malicious Driver Detection */
- igb_disable_mdd(adapter);
-
- /* free vf data storage */
- kfree(adapter->vf_data);
- adapter->vf_data = NULL;
-
- /* switch rings back to PF ownership */
- E1000_WRITE_REG(hw, E1000_IOVCTL,
- E1000_IOVCTL_REUSE_VFQ);
- E1000_WRITE_FLUSH(hw);
- msleep(100);
- }
-
- adapter->vfs_allocated_count = 0;
-}
-
-/**
- * igb_set_sriov_capability - setup SR-IOV if supported
- *
- * Attempt to enable the single root I/O virtualization capabilities
- * supported by the device and the kernel.
- **/
-static void igb_set_sriov_capability(struct igb_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- int old_vfs = 0;
- int i;
-
- old_vfs = pci_num_vf(pdev);
- if (old_vfs) {
- dev_info(pci_dev_to_dev(pdev),
- "%d pre-allocated VFs found - override "
- "max_vfs setting of %d\n", old_vfs,
- adapter->vfs_allocated_count);
- adapter->vfs_allocated_count = old_vfs;
- }
- /* no VFs requested, do nothing */
- if (!adapter->vfs_allocated_count)
- return;
-
- /* allocate vf data storage */
- adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
- sizeof(struct vf_data_storage),
- GFP_KERNEL);
-
- if (adapter->vf_data) {
- if (!old_vfs) {
- if (pci_enable_sriov(pdev,
- adapter->vfs_allocated_count))
- goto err_out;
- }
- for (i = 0; i < adapter->vfs_allocated_count; i++)
- igb_vf_configure(adapter, i);
-
- switch (adapter->hw.mac.type) {
- case e1000_82576:
- case e1000_i350:
- /* Enable VM to VM loopback by default */
- adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE;
- break;
- default:
- /* Currently no other hardware supports loopback */
- break;
- }
-
- /* DMA Coalescing is not supported in IOV mode. */
- if (adapter->hw.mac.type >= e1000_i350)
- adapter->dmac = IGB_DMAC_DISABLE;
- if (adapter->hw.mac.type < e1000_i350)
- adapter->flags |= IGB_FLAG_DETECT_BAD_DMA;
- return;
-
- }
-
-err_out:
- kfree(adapter->vf_data);
- adapter->vf_data = NULL;
- adapter->vfs_allocated_count = 0;
- dev_warn(pci_dev_to_dev(pdev),
- "Failed to initialize SR-IOV virtualization\n");
-}
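-
-/*
- * Usage sketch (annotation): vfs_allocated_count is normally seeded
- * from the driver's max_vfs module parameter, e.g.
- *   modprobe igb max_vfs=2
- * after which this function calls pci_enable_sriov() for two VFs,
- * unless the kernel has already pre-allocated VFs, in which case the
- * pre-allocated count overrides the request as logged above. The
- * parameter name follows the out-of-tree igb convention and is shown
- * here as an assumption.
- */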
-
-/**
- * igb_set_interrupt_capability - set MSI or MSI-X if supported
- *
- * Attempt to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- **/
-static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
-{
- struct pci_dev *pdev = adapter->pdev;
- int err;
- int numvecs, i;
-
- if (!msix)
- adapter->int_mode = IGB_INT_MODE_MSI;
-
- /* Number of supported queues. */
- adapter->num_rx_queues = adapter->rss_queues;
-
- if (adapter->vmdq_pools > 1)
- adapter->num_rx_queues += adapter->vmdq_pools - 1;
-
-#ifdef HAVE_TX_MQ
- if (adapter->vmdq_pools)
- adapter->num_tx_queues = adapter->vmdq_pools;
- else
- adapter->num_tx_queues = adapter->num_rx_queues;
-#else
- adapter->num_tx_queues = max_t(u32, 1, adapter->vmdq_pools);
-#endif
-
- switch (adapter->int_mode) {
- case IGB_INT_MODE_MSIX:
- /* start with one vector for every rx queue */
- numvecs = adapter->num_rx_queues;
-
- /* if tx handler is separate add 1 for every tx queue */
- if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
- numvecs += adapter->num_tx_queues;
-
- /* store the number of vectors reserved for queues */
- adapter->num_q_vectors = numvecs;
-
- /* add 1 vector for link status interrupts */
- numvecs++;
- adapter->msix_entries = kcalloc(numvecs,
- sizeof(struct msix_entry),
- GFP_KERNEL);
- if (adapter->msix_entries) {
- for (i = 0; i < numvecs; i++)
- adapter->msix_entries[i].entry = i;
-
- err = pci_enable_msix(pdev,
- adapter->msix_entries, numvecs);
- if (err == 0)
- break;
- }
- /* MSI-X failed, so fall through and try MSI */
- dev_warn(pci_dev_to_dev(pdev), "Failed to initialize MSI-X interrupts. "
- "Falling back to MSI interrupts.\n");
- igb_reset_interrupt_capability(adapter);
- case IGB_INT_MODE_MSI:
- if (!pci_enable_msi(pdev))
- adapter->flags |= IGB_FLAG_HAS_MSI;
- else
- dev_warn(pci_dev_to_dev(pdev), "Failed to initialize MSI "
- "interrupts. Falling back to legacy "
- "interrupts.\n");
- /* Fall through */
- case IGB_INT_MODE_LEGACY:
- /* disable advanced features and set number of queues to 1 */
- igb_reset_sriov_capability(adapter);
- adapter->vmdq_pools = 0;
- adapter->rss_queues = 1;
- adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
- adapter->num_q_vectors = 1;
- /* nothing more to enable; legacy interrupts are the system default */
- break;
- }
-}
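-
-/*
- * Worked example (annotation): with rss_queues = 4, no VMDq pools and
- * IGB_FLAG_QUEUE_PAIRS set, the MSI-X branch above requests
- * numvecs = 4 (one per paired Rx/Tx queue) + 1 (link status) = 5
- * vectors and records num_q_vectors = 4; without queue pairing the
- * same configuration would request 4 + 4 + 1 = 9 vectors.
- */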
-
-static void igb_add_ring(struct igb_ring *ring,
- struct igb_ring_container *head)
-{
- head->ring = ring;
- head->count++;
-}
-
-/**
- * igb_alloc_q_vector - Allocate memory for a single interrupt vector
- * @adapter: board private structure to initialize
- * @v_count: q_vectors allocated on adapter, used for ring interleaving
- * @v_idx: index of vector in adapter struct
- * @txr_count: total number of Tx rings to allocate
- * @txr_idx: index of first Tx ring to allocate
- * @rxr_count: total number of Rx rings to allocate
- * @rxr_idx: index of first Rx ring to allocate
- *
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
- **/
-static int igb_alloc_q_vector(struct igb_adapter *adapter,
- unsigned int v_count, unsigned int v_idx,
- unsigned int txr_count, unsigned int txr_idx,
- unsigned int rxr_count, unsigned int rxr_idx)
-{
- struct igb_q_vector *q_vector;
- struct igb_ring *ring;
- int ring_count, size;
-
- /* igb only supports 1 Tx and/or 1 Rx queue per vector */
- if (txr_count > 1 || rxr_count > 1)
- return -ENOMEM;
-
- ring_count = txr_count + rxr_count;
- size = sizeof(struct igb_q_vector) +
- (sizeof(struct igb_ring) * ring_count);
-
- /* allocate q_vector and rings */
- q_vector = kzalloc(size, GFP_KERNEL);
- if (!q_vector)
- return -ENOMEM;
-
-#ifndef IGB_NO_LRO
- /* initialize LRO */
- __skb_queue_head_init(&q_vector->lrolist.active);
-
-#endif
- /* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- igb_poll, 64);
-
- /* tie q_vector and adapter together */
- adapter->q_vector[v_idx] = q_vector;
- q_vector->adapter = adapter;
-
- /* initialize work limits */
- q_vector->tx.work_limit = adapter->tx_work_limit;
-
- /* initialize ITR configuration */
- q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
- q_vector->itr_val = IGB_START_ITR;
-
- /* initialize pointer to rings */
- ring = q_vector->ring;
-
- /* initialize ITR */
- if (rxr_count) {
- /* rx or rx/tx vector */
- if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
- q_vector->itr_val = adapter->rx_itr_setting;
- } else {
- /* tx only vector */
- if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
- q_vector->itr_val = adapter->tx_itr_setting;
- }
-
- if (txr_count) {
- /* assign generic ring traits */
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
-
- /* configure backlink on ring */
- ring->q_vector = q_vector;
-
- /* update q_vector Tx values */
- igb_add_ring(ring, &q_vector->tx);
-
- /* For 82575, context index must be unique per ring. */
- if (adapter->hw.mac.type == e1000_82575)
- set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
-
- /* apply Tx specific ring traits */
- ring->count = adapter->tx_ring_count;
- ring->queue_index = txr_idx;
-
- /* assign ring to adapter */
- adapter->tx_ring[txr_idx] = ring;
-
- /* push pointer to next ring */
- ring++;
- }
-
- if (rxr_count) {
- /* assign generic ring traits */
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
-
- /* configure backlink on ring */
- ring->q_vector = q_vector;
-
- /* update q_vector Rx values */
- igb_add_ring(ring, &q_vector->rx);
-
-#ifndef HAVE_NDO_SET_FEATURES
- /* enable rx checksum */
- set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags);
-
-#endif
- /* set flag indicating ring supports SCTP checksum offload */
- if (adapter->hw.mac.type >= e1000_82576)
- set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
-
- if ((adapter->hw.mac.type == e1000_i350) ||
- (adapter->hw.mac.type == e1000_i354))
- set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
-
- /* apply Rx specific ring traits */
- ring->count = adapter->rx_ring_count;
- ring->queue_index = rxr_idx;
-
- /* assign ring to adapter */
- adapter->rx_ring[rxr_idx] = ring;
- }
-
- return 0;
-}
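-
-/*
- * Layout note (annotation): the single kzalloc() above places the
- * rings directly behind the q_vector, so a paired vector
- * (txr_count = 1, rxr_count = 1) occupies
- * sizeof(struct igb_q_vector) + 2 * sizeof(struct igb_ring) bytes,
- * with the first embedded ring used for Tx and, after the ring++
- * above, the second used for Rx.
- */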
-
-/**
- * igb_alloc_q_vectors - Allocate memory for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
- **/
-static int igb_alloc_q_vectors(struct igb_adapter *adapter)
-{
- int q_vectors = adapter->num_q_vectors;
- int rxr_remaining = adapter->num_rx_queues;
- int txr_remaining = adapter->num_tx_queues;
- int rxr_idx = 0, txr_idx = 0, v_idx = 0;
- int err;
-
- if (q_vectors >= (rxr_remaining + txr_remaining)) {
- for (; rxr_remaining; v_idx++) {
- err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
- 0, 0, 1, rxr_idx);
-
- if (err)
- goto err_out;
-
- /* update counts and index */
- rxr_remaining--;
- rxr_idx++;
- }
- }
-
- for (; v_idx < q_vectors; v_idx++) {
- int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
- int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
- err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
- tqpv, txr_idx, rqpv, rxr_idx);
-
- if (err)
- goto err_out;
-
- /* update counts and index */
- rxr_remaining -= rqpv;
- txr_remaining -= tqpv;
- rxr_idx++;
- txr_idx++;
- }
-
- return 0;
-
-err_out:
- adapter->num_tx_queues = 0;
- adapter->num_rx_queues = 0;
- adapter->num_q_vectors = 0;
-
- while (v_idx--)
- igb_free_q_vector(adapter, v_idx);
-
- return -ENOMEM;
-}
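-
-/*
- * Worked example (annotation): with 4 Rx and 4 Tx queues and
- * q_vectors = 4 (queue pairs), 4 < 4 + 4 so the first loop is skipped
- * and every pass of the second loop computes
- * rqpv = tqpv = DIV_ROUND_UP(remaining, vectors left) = 1, giving
- * each q_vector one Rx/Tx pair. With q_vectors = 8 (no pairing), the
- * first loop hands out four Rx-only vectors and the second loop the
- * four Tx-only ones.
- */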
-
-/**
- * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
- *
- * This function initializes the interrupts and allocates all of the queues.
- **/
-static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
-{
- struct pci_dev *pdev = adapter->pdev;
- int err;
-
- igb_set_interrupt_capability(adapter, msix);
-
- err = igb_alloc_q_vectors(adapter);
- if (err) {
- dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for vectors\n");
- goto err_alloc_q_vectors;
- }
-
- igb_cache_ring_register(adapter);
-
- return 0;
-
-err_alloc_q_vectors:
- igb_reset_interrupt_capability(adapter);
- return err;
-}
-
-/**
- * igb_request_irq - initialize interrupts
- *
- * Attempts to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- **/
-static int igb_request_irq(struct igb_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- int err = 0;
-
- if (adapter->msix_entries) {
- err = igb_request_msix(adapter);
- if (!err)
- goto request_done;
- /* fall back to MSI */
- igb_free_all_tx_resources(adapter);
- igb_free_all_rx_resources(adapter);
-
- igb_clear_interrupt_scheme(adapter);
- igb_reset_sriov_capability(adapter);
- err = igb_init_interrupt_scheme(adapter, false);
- if (err)
- goto request_done;
- igb_setup_all_tx_resources(adapter);
- igb_setup_all_rx_resources(adapter);
- igb_configure(adapter);
- }
-
- igb_assign_vector(adapter->q_vector[0], 0);
-
- if (adapter->flags & IGB_FLAG_HAS_MSI) {
- err = request_irq(pdev->irq, &igb_intr_msi, 0,
- netdev->name, adapter);
- if (!err)
- goto request_done;
-
- /* fall back to legacy interrupts */
- igb_reset_interrupt_capability(adapter);
- adapter->flags &= ~IGB_FLAG_HAS_MSI;
- }
-
- err = request_irq(pdev->irq, &igb_intr, IRQF_SHARED,
- netdev->name, adapter);
-
- if (err)
- dev_err(pci_dev_to_dev(pdev), "Error %d getting interrupt\n",
- err);
-
-request_done:
- return err;
-}
-
-static void igb_free_irq(struct igb_adapter *adapter)
-{
- if (adapter->msix_entries) {
- int vector = 0, i;
-
- free_irq(adapter->msix_entries[vector++].vector, adapter);
-
- for (i = 0; i < adapter->num_q_vectors; i++)
- free_irq(adapter->msix_entries[vector++].vector,
- adapter->q_vector[i]);
- } else {
- free_irq(adapter->pdev->irq, adapter);
- }
-}
-
-/**
- * igb_irq_disable - Mask off interrupt generation on the NIC
- * @adapter: board private structure
- **/
-static void igb_irq_disable(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- /*
- * we need to be careful when disabling interrupts. The VFs are also
- * mapped into these registers, and clearing the bits can cause
- * issues for the VF drivers, so we only clear what we set
- */
- if (adapter->msix_entries) {
- u32 regval = E1000_READ_REG(hw, E1000_EIAM);
- E1000_WRITE_REG(hw, E1000_EIAM, regval & ~adapter->eims_enable_mask);
- E1000_WRITE_REG(hw, E1000_EIMC, adapter->eims_enable_mask);
- regval = E1000_READ_REG(hw, E1000_EIAC);
- E1000_WRITE_REG(hw, E1000_EIAC, regval & ~adapter->eims_enable_mask);
- }
-
- E1000_WRITE_REG(hw, E1000_IAM, 0);
- E1000_WRITE_REG(hw, E1000_IMC, ~0);
- E1000_WRITE_FLUSH(hw);
-
- if (adapter->msix_entries) {
- int vector = 0, i;
-
- synchronize_irq(adapter->msix_entries[vector++].vector);
-
- for (i = 0; i < adapter->num_q_vectors; i++)
- synchronize_irq(adapter->msix_entries[vector++].vector);
- } else {
- synchronize_irq(adapter->pdev->irq);
- }
-}
-
-/**
- * igb_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
- **/
-static void igb_irq_enable(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- if (adapter->msix_entries) {
- u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
- u32 regval = E1000_READ_REG(hw, E1000_EIAC);
- E1000_WRITE_REG(hw, E1000_EIAC, regval | adapter->eims_enable_mask);
- regval = E1000_READ_REG(hw, E1000_EIAM);
- E1000_WRITE_REG(hw, E1000_EIAM, regval | adapter->eims_enable_mask);
- E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_enable_mask);
- if (adapter->vfs_allocated_count) {
- E1000_WRITE_REG(hw, E1000_MBVFIMR, 0xFF);
- ims |= E1000_IMS_VMMB;
- if (adapter->mdd)
- if ((adapter->hw.mac.type == e1000_i350) ||
- (adapter->hw.mac.type == e1000_i354))
- ims |= E1000_IMS_MDDET;
- }
- E1000_WRITE_REG(hw, E1000_IMS, ims);
- } else {
- E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK |
- E1000_IMS_DRSTA);
- E1000_WRITE_REG(hw, E1000_IAM, IMS_ENABLE_MASK |
- E1000_IMS_DRSTA);
- }
-}
-
-static void igb_update_mng_vlan(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u16 vid = adapter->hw.mng_cookie.vlan_id;
- u16 old_vid = adapter->mng_vlan_id;
-
- if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
- /* add VID to filter table */
- igb_vfta_set(adapter, vid, TRUE);
- adapter->mng_vlan_id = vid;
- } else {
- adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
- }
-
- if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
- (vid != old_vid) &&
-#ifdef HAVE_VLAN_RX_REGISTER
- !vlan_group_get_device(adapter->vlgrp, old_vid)) {
-#else
- !test_bit(old_vid, adapter->active_vlans)) {
-#endif
- /* remove VID from filter table */
- igb_vfta_set(adapter, old_vid, FALSE);
- }
-}
-
-/**
- * igb_release_hw_control - release control of the h/w to f/w
- * @adapter: address of board private structure
- *
- * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that the
- * driver is no longer loaded.
- *
- **/
-static void igb_release_hw_control(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl_ext;
-
- /* Let firmware take over control of h/w */
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- E1000_WRITE_REG(hw, E1000_CTRL_EXT,
- ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
-}
-
-/**
- * igb_get_hw_control - get control of the h/w from f/w
- * @adapter: address of board private structure
- *
- * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that
- * the driver is loaded.
- *
- **/
-static void igb_get_hw_control(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl_ext;
-
- /* Let firmware know the driver has taken over */
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- E1000_WRITE_REG(hw, E1000_CTRL_EXT,
- ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
-}
-
-/**
- * igb_configure - configure the hardware for RX and TX
- * @adapter: private board structure
- **/
-static void igb_configure(struct igb_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- int i;
-
- igb_get_hw_control(adapter);
- igb_set_rx_mode(netdev);
-
- igb_restore_vlan(adapter);
-
- igb_setup_tctl(adapter);
- igb_setup_mrqc(adapter);
- igb_setup_rctl(adapter);
-
- igb_configure_tx(adapter);
- igb_configure_rx(adapter);
-
- e1000_rx_fifo_flush_82575(&adapter->hw);
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
- if (adapter->num_tx_queues > 1)
- netdev->features |= NETIF_F_MULTI_QUEUE;
- else
- netdev->features &= ~NETIF_F_MULTI_QUEUE;
-#endif
-
- /* call igb_desc_unused which always leaves
- * at least 1 descriptor unused to make sure
- * next_to_use != next_to_clean */
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct igb_ring *ring = adapter->rx_ring[i];
- igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
- }
-}
-
-/**
- * igb_power_up_link - Power up the phy/serdes link
- * @adapter: address of board private structure
- **/
-void igb_power_up_link(struct igb_adapter *adapter)
-{
- e1000_phy_hw_reset(&adapter->hw);
-
- if (adapter->hw.phy.media_type == e1000_media_type_copper)
- e1000_power_up_phy(&adapter->hw);
- else
- e1000_power_up_fiber_serdes_link(&adapter->hw);
-}
-
-/**
- * igb_power_down_link - Power down the phy/serdes link
- * @adapter: address of board private structure
- */
-static void igb_power_down_link(struct igb_adapter *adapter)
-{
- if (adapter->hw.phy.media_type == e1000_media_type_copper)
- e1000_power_down_phy(&adapter->hw);
- else
- e1000_shutdown_fiber_serdes_link(&adapter->hw);
-}
-
-/* Detect and switch function for Media Auto Sense */
-static void igb_check_swap_media(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl_ext, connsw;
- bool swap_now = false;
- bool link;
-
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- connsw = E1000_READ_REG(hw, E1000_CONNSW);
- link = igb_has_link(adapter);
-
- /* we need to live-swap if the current media is copper and there is
- * a fiber/serdes interface to switch to.
- */
-
- if ((hw->phy.media_type == e1000_media_type_copper) &&
- (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
- swap_now = true;
- } else if (!(connsw & E1000_CONNSW_SERDESD)) {
- /* copper signal takes time to appear */
- if (adapter->copper_tries < 2) {
- adapter->copper_tries++;
- connsw |= E1000_CONNSW_AUTOSENSE_CONF;
- E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
- return;
- } else {
- adapter->copper_tries = 0;
- if ((connsw & E1000_CONNSW_PHYSD) &&
- (!(connsw & E1000_CONNSW_PHY_PDN))) {
- swap_now = true;
- connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
- E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
- }
- }
- }
-
- if (swap_now) {
- switch (hw->phy.media_type) {
- case e1000_media_type_copper:
- dev_info(pci_dev_to_dev(adapter->pdev),
- "%s:MAS: changing media to fiber/serdes\n",
- adapter->netdev->name);
- ctrl_ext |=
- E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
- adapter->flags |= IGB_FLAG_MEDIA_RESET;
- adapter->copper_tries = 0;
- break;
- case e1000_media_type_internal_serdes:
- case e1000_media_type_fiber:
- dev_info(pci_dev_to_dev(adapter->pdev),
- "%s:MAS: changing media to copper\n",
- adapter->netdev->name);
- ctrl_ext &=
- ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
- adapter->flags |= IGB_FLAG_MEDIA_RESET;
- break;
- default:
- /* shouldn't get here during regular operation */
- dev_err(pci_dev_to_dev(adapter->pdev),
- "%s:AMS: Invalid media type found, returning\n",
- adapter->netdev->name);
- break;
- }
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
- }
-}
-
-#ifdef HAVE_I2C_SUPPORT
-/* igb_get_i2c_data - Reads the I2C SDA data bit
- * @data: pointer to the adapter structure
- *
- * Returns the I2C data bit value
- */
-static int igb_get_i2c_data(void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct e1000_hw *hw = &adapter->hw;
- s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
-
- return ((i2cctl & E1000_I2C_DATA_IN) != 0);
-}
-
-/* igb_set_i2c_data - Sets the I2C data bit
- * @data: pointer to hardware structure
- * @state: I2C data value (0 or 1) to set
- *
- * Sets the I2C data bit
- */
-static void igb_set_i2c_data(void *data, int state)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct e1000_hw *hw = &adapter->hw;
- s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
-
- if (state)
- i2cctl |= E1000_I2C_DATA_OUT;
- else
- i2cctl &= ~E1000_I2C_DATA_OUT;
-
- i2cctl &= ~E1000_I2C_DATA_OE_N;
- i2cctl |= E1000_I2C_CLK_OE_N;
-
- E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
- E1000_WRITE_FLUSH(hw);
-
-}
-
-/* igb_set_i2c_clk - Sets the I2C SCL clock
- * @data: pointer to hardware structure
- * @state: state to set clock
- *
- * Sets the I2C clock line to state
- */
-static void igb_set_i2c_clk(void *data, int state)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct e1000_hw *hw = &adapter->hw;
- s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
-
- if (state) {
- i2cctl |= E1000_I2C_CLK_OUT;
- i2cctl &= ~E1000_I2C_CLK_OE_N;
- } else {
- i2cctl &= ~E1000_I2C_CLK_OUT;
- i2cctl &= ~E1000_I2C_CLK_OE_N;
- }
- E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
- E1000_WRITE_FLUSH(hw);
-}
-
-/* igb_get_i2c_clk - Gets the I2C SCL clock state
- * @data: pointer to hardware structure
- *
- * Gets the I2C clock state
- */
-static int igb_get_i2c_clk(void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct e1000_hw *hw = &adapter->hw;
- s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
-
- return ((i2cctl & E1000_I2C_CLK_IN) != 0);
-}
-
-static const struct i2c_algo_bit_data igb_i2c_algo = {
- .setsda = igb_set_i2c_data,
- .setscl = igb_set_i2c_clk,
- .getsda = igb_get_i2c_data,
- .getscl = igb_get_i2c_clk,
- .udelay = 5,
- .timeout = 20,
-};
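-
-/*
- * Note (annotation): with .udelay = 5 the i2c-algo-bit core holds
- * each SCL half-cycle for about 5 us, i.e. roughly a 100 kHz
- * standard-mode bus, and .timeout = 20 jiffies bounds how long it
- * waits for a slave that stretches the clock.
- */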
-
-/* igb_init_i2c - Init I2C interface
- * @adapter: pointer to adapter structure
- *
- */
-static s32 igb_init_i2c(struct igb_adapter *adapter)
-{
- s32 status = E1000_SUCCESS;
-
- /* I2C interface supported on i350 devices */
- if (adapter->hw.mac.type != e1000_i350)
- return E1000_SUCCESS;
-
- /* Initialize the i2c bus which is controlled by the registers.
- * This bus will use the i2c_algo_bit structure that implements
- * the protocol through toggling of the 4 bits in the register.
- */
- adapter->i2c_adap.owner = THIS_MODULE;
- adapter->i2c_algo = igb_i2c_algo;
- adapter->i2c_algo.data = adapter;
- adapter->i2c_adap.algo_data = &adapter->i2c_algo;
- adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
- strlcpy(adapter->i2c_adap.name, "igb BB",
- sizeof(adapter->i2c_adap.name));
- status = i2c_bit_add_bus(&adapter->i2c_adap);
- return status;
-}
-
-#endif /* HAVE_I2C_SUPPORT */
-/**
- * igb_up - Open the interface and prepare it to handle traffic
- * @adapter: board private structure
- **/
-int igb_up(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- int i;
-
- /* hardware has been reset, we need to reload some things */
- igb_configure(adapter);
-
- clear_bit(__IGB_DOWN, &adapter->state);
-
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&(adapter->q_vector[i]->napi));
-
- if (adapter->msix_entries)
- igb_configure_msix(adapter);
- else
- igb_assign_vector(adapter->q_vector[0], 0);
-
- igb_configure_lli(adapter);
-
- /* Clear any pending interrupts. */
- E1000_READ_REG(hw, E1000_ICR);
- igb_irq_enable(adapter);
-
- /* notify VFs that reset has been completed */
- if (adapter->vfs_allocated_count) {
- u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT);
- reg_data |= E1000_CTRL_EXT_PFRSTD;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data);
- }
-
- netif_tx_start_all_queues(adapter->netdev);
-
- if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
- schedule_work(&adapter->dma_err_task);
- /* start the watchdog. */
- hw->mac.get_link_status = 1;
- schedule_work(&adapter->watchdog_task);
-
- if ((adapter->flags & IGB_FLAG_EEE) &&
- (!hw->dev_spec._82575.eee_disable))
- adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
-
- return 0;
-}
-
-void igb_down(struct igb_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct e1000_hw *hw = &adapter->hw;
- u32 tctl, rctl;
- int i;
-
- /* signal that we're down so the interrupt handler does not
- * reschedule our watchdog timer */
- set_bit(__IGB_DOWN, &adapter->state);
-
- /* disable receives in the hardware */
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
- /* flush and sleep below */
-
- netif_tx_stop_all_queues(netdev);
-
- /* disable transmits in the hardware */
- tctl = E1000_READ_REG(hw, E1000_TCTL);
- tctl &= ~E1000_TCTL_EN;
- E1000_WRITE_REG(hw, E1000_TCTL, tctl);
- /* flush both disables and wait for them to finish */
- E1000_WRITE_FLUSH(hw);
- usleep_range(10000, 20000);
-
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_disable(&(adapter->q_vector[i]->napi));
-
- igb_irq_disable(adapter);
-
- adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
-
- del_timer_sync(&adapter->watchdog_timer);
- if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
- del_timer_sync(&adapter->dma_err_timer);
- del_timer_sync(&adapter->phy_info_timer);
-
- netif_carrier_off(netdev);
-
- /* record the stats before reset */
- igb_update_stats(adapter);
-
- adapter->link_speed = 0;
- adapter->link_duplex = 0;
-
-#ifdef HAVE_PCI_ERS
- if (!pci_channel_offline(adapter->pdev))
- igb_reset(adapter);
-#else
- igb_reset(adapter);
-#endif
- igb_clean_all_tx_rings(adapter);
- igb_clean_all_rx_rings(adapter);
-#ifdef IGB_DCA
- /* since we reset the hardware, DCA settings were cleared */
- igb_setup_dca(adapter);
-#endif
-}
-
-void igb_reinit_locked(struct igb_adapter *adapter)
-{
- WARN_ON(in_interrupt());
- while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
- igb_down(adapter);
- igb_up(adapter);
- clear_bit(__IGB_RESETTING, &adapter->state);
-}
-
-/**
- * igb_enable_mas - Media Autosense re-enable after swap
- *
- * @adapter: adapter struct
- **/
-static s32 igb_enable_mas(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 connsw;
- s32 ret_val = E1000_SUCCESS;
-
- connsw = E1000_READ_REG(hw, E1000_CONNSW);
- if (hw->phy.media_type == e1000_media_type_copper) {
- /* configure for SerDes media detect */
- if (!(connsw & E1000_CONNSW_SERDESD)) {
- connsw |= E1000_CONNSW_ENRGSRC;
- connsw |= E1000_CONNSW_AUTOSENSE_EN;
- E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
- E1000_WRITE_FLUSH(hw);
- } else if (connsw & E1000_CONNSW_SERDESD) {
- /* already SerDes, no need to enable anything */
- return ret_val;
- } else {
- dev_info(pci_dev_to_dev(adapter->pdev),
- "%s:MAS: Unable to configure feature, disabling..\n",
- adapter->netdev->name);
- adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
- }
- }
- return ret_val;
-}
-
-void igb_reset(struct igb_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_hw *hw = &adapter->hw;
- struct e1000_mac_info *mac = &hw->mac;
- struct e1000_fc_info *fc = &hw->fc;
- u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
-
- /* Repartition PBA for MTUs greater than 9k.
- * CTRL.RST is required for the change to take effect.
- */
- switch (mac->type) {
- case e1000_i350:
- case e1000_82580:
- case e1000_i354:
- pba = E1000_READ_REG(hw, E1000_RXPBS);
- pba = e1000_rxpbs_adjust_82580(pba);
- break;
- case e1000_82576:
- pba = E1000_READ_REG(hw, E1000_RXPBS);
- pba &= E1000_RXPBS_SIZE_MASK_82576;
- break;
- case e1000_82575:
- case e1000_i210:
- case e1000_i211:
- default:
- pba = E1000_PBA_34K;
- break;
- }
-
- if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
- (mac->type < e1000_82576)) {
- /* adjust PBA for jumbo frames */
- E1000_WRITE_REG(hw, E1000_PBA, pba);
-
- /* To maintain wire speed transmits, the Tx FIFO should be
- * large enough to accommodate two full transmit packets,
- * rounded up to the next 1KB and expressed in KB. Likewise,
- * the Rx FIFO should be large enough to accommodate at least
- * one full receive packet and is similarly rounded up and
- * expressed in KB. */
- pba = E1000_READ_REG(hw, E1000_PBA);
- /* upper 16 bits has Tx packet buffer allocation size in KB */
- tx_space = pba >> 16;
- /* lower 16 bits has Rx packet buffer allocation size in KB */
- pba &= 0xffff;
- /* the Tx FIFO also stores 16 bytes of information about the Tx
- * packet, but the Ethernet FCS is not included because hardware
- * appends it */
- min_tx_space = (adapter->max_frame_size +
- sizeof(union e1000_adv_tx_desc) -
- ETH_FCS_LEN) * 2;
- min_tx_space = ALIGN(min_tx_space, 1024);
- min_tx_space >>= 10;
- /* software strips receive CRC, so leave room for it */
- min_rx_space = adapter->max_frame_size;
- min_rx_space = ALIGN(min_rx_space, 1024);
- min_rx_space >>= 10;
-
- /* If current Tx allocation is less than the min Tx FIFO size,
- * and the min Tx FIFO size is less than the current Rx FIFO
- * allocation, take space away from current Rx allocation */
- if (tx_space < min_tx_space &&
- ((min_tx_space - tx_space) < pba)) {
- pba = pba - (min_tx_space - tx_space);
-
- /* if short on rx space, rx wins and must trump tx
- * adjustment */
- if (pba < min_rx_space)
- pba = min_rx_space;
- }
- E1000_WRITE_REG(hw, E1000_PBA, pba);
- }
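-
- /*
- * Worked example (annotation): assuming a 9000-byte MTU on 82575
- * (max_frame_size = 9000 + 14 + 4 = 9018), the block above computes
- * min_tx_space = (9018 + 16 - 4) * 2 = 18060 -> ALIGN to 18432
- * -> 18 KB and min_rx_space = 9018 -> ALIGN to 9216 -> 9 KB, and
- * shrinks the Rx share of the PBA only if the Tx side falls short
- * of those 18 KB.
- */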
-
- /* flow control settings */
- /* The high water mark must be low enough to fit one full frame
- * (or the size used for early receive) above it in the Rx FIFO.
- * Set it to the lower of:
- * - 90% of the Rx FIFO size, or
- * - the full Rx FIFO size minus one full frame */
- hwm = min(((pba << 10) * 9 / 10),
- ((pba << 10) - 2 * adapter->max_frame_size));
-
- fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
- fc->low_water = fc->high_water - 16;
- fc->pause_time = 0xFFFF;
- fc->send_xon = 1;
- fc->current_mode = fc->requested_mode;
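-
- /*
- * Worked example (annotation): with pba = 34 (KB, e.g. E1000_PBA_34K)
- * and max_frame_size = 1522, hwm = min(34816 * 9 / 10,
- * 34816 - 2 * 1522) = min(31334, 31772) = 31334; high_water is then
- * rounded down to 16-byte granularity (31328) and low_water sits 16
- * bytes below it (31312).
- */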
-
- /* disable receive for all VFs and wait one second */
- if (adapter->vfs_allocated_count) {
- int i;
- /*
- * Clear all flags except indication that the PF has set
- * the VF MAC addresses administratively
- */
- for (i = 0 ; i < adapter->vfs_allocated_count; i++)
- adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
-
- /* ping all the active vfs to let them know we are going down */
- igb_ping_all_vfs(adapter);
-
- /* disable transmits and receives */
- E1000_WRITE_REG(hw, E1000_VFRE, 0);
- E1000_WRITE_REG(hw, E1000_VFTE, 0);
- }
-
- /* Allow time for pending master requests to run */
- e1000_reset_hw(hw);
- E1000_WRITE_REG(hw, E1000_WUC, 0);
-
- if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
- e1000_setup_init_funcs(hw, TRUE);
- igb_check_options(adapter);
- e1000_get_bus_info(hw);
- adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
- }
- if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
- if (igb_enable_mas(adapter))
- dev_err(pci_dev_to_dev(pdev),
- "Error enabling Media Auto Sense\n");
- }
- if (e1000_init_hw(hw))
- dev_err(pci_dev_to_dev(pdev), "Hardware Error\n");
-
- /*
- * Flow control settings reset on hardware reset, so guarantee flow
- * control is off when forcing speed.
- */
- if (!hw->mac.autoneg)
- e1000_force_mac_fc(hw);
-
- igb_init_dmac(adapter, pba);
- /* Re-initialize the thermal sensor on i350 devices. */
- if (mac->type == e1000_i350 && hw->bus.func == 0) {
- /*
- * If present, re-initialize the external thermal sensor
- * interface.
- */
- if (adapter->ets)
- e1000_set_i2c_bb(hw);
- e1000_init_thermal_sensor_thresh(hw);
- }
-
- /* Re-establish EEE setting */
- if (hw->phy.media_type == e1000_media_type_copper) {
- switch (mac->type) {
- case e1000_i350:
- case e1000_i210:
- case e1000_i211:
- e1000_set_eee_i350(hw);
- break;
- case e1000_i354:
- e1000_set_eee_i354(hw);
- break;
- default:
- break;
- }
- }
-
- if (!netif_running(adapter->netdev))
- igb_power_down_link(adapter);
-
- igb_update_mng_vlan(adapter);
-
- /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
- E1000_WRITE_REG(hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
-
-
-#ifdef HAVE_PTP_1588_CLOCK
- /* Re-enable PTP, where applicable. */
- igb_ptp_reset(adapter);
-#endif /* HAVE_PTP_1588_CLOCK */
-
- e1000_get_phy_info(hw);
-
- adapter->devrc++;
-}
-
-#ifdef HAVE_NDO_SET_FEATURES
-static kni_netdev_features_t igb_fix_features(struct net_device *netdev,
- kni_netdev_features_t features)
-{
- /*
- * Since there is no support for enabling Tx VLAN acceleration
- * separately, make sure the Tx flag is cleared when the Rx flag is.
- */
-#ifdef NETIF_F_HW_VLAN_CTAG_RX
- if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
- features &= ~NETIF_F_HW_VLAN_CTAG_TX;
-#else
- if (!(features & NETIF_F_HW_VLAN_RX))
- features &= ~NETIF_F_HW_VLAN_TX;
-#endif
-
- /* If Rx checksum is disabled, then LRO should also be disabled */
- if (!(features & NETIF_F_RXCSUM))
- features &= ~NETIF_F_LRO;
-
- return features;
-}
-
-static int igb_set_features(struct net_device *netdev,
- kni_netdev_features_t features)
-{
- u32 changed = netdev->features ^ features;
-
-#ifdef NETIF_F_HW_VLAN_CTAG_RX
- if (changed & NETIF_F_HW_VLAN_CTAG_RX)
-#else
- if (changed & NETIF_F_HW_VLAN_RX)
-#endif
- igb_vlan_mode(netdev, features);
-
- return 0;
-}
-
-#ifdef NTF_SELF
-#ifdef USE_CONST_DEV_UC_CHAR
-static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr,
- u16 flags)
-#else
-static int igb_ndo_fdb_add(struct ndmsg *ndm,
- struct net_device *dev,
- unsigned char *addr,
- u16 flags)
-#endif
-{
- struct igb_adapter *adapter = netdev_priv(dev);
- struct e1000_hw *hw = &adapter->hw;
- int err;
-
- if (!(adapter->vfs_allocated_count))
- return -EOPNOTSUPP;
-
- /* Hardware does not support aging addresses, so if an
- * ndm_state is given, only allow permanent addresses.
- */
- if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
- pr_info("%s: FDB only supports static addresses\n",
- igb_driver_name);
- return -EINVAL;
- }
-
- if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
- u32 rar_uc_entries = hw->mac.rar_entry_count -
- (adapter->vfs_allocated_count + 1);
-
- if (netdev_uc_count(dev) < rar_uc_entries)
- err = dev_uc_add_excl(dev, addr);
- else
- err = -ENOMEM;
- } else if (is_multicast_ether_addr(addr)) {
- err = dev_mc_add_excl(dev, addr);
- } else {
- err = -EINVAL;
- }
-
- /* Only return duplicate errors if NLM_F_EXCL is set */
- if (err == -EEXIST && !(flags & NLM_F_EXCL))
- err = 0;
-
- return err;
-}
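-
-/*
- * Usage sketch (annotation): with SR-IOV active, a static unicast
- * entry can be installed from userspace through iproute2, e.g.
- *   bridge fdb add 52:54:00:12:34:56 dev eth0
- * The address is accepted while spare RAR slots remain
- * (rar_entry_count minus one slot per VF plus the PF default);
- * otherwise -ENOMEM is returned. The MAC and interface name are
- * illustrative.
- */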
-
-#ifndef USE_DEFAULT_FDB_DEL_DUMP
-#ifdef USE_CONST_DEV_UC_CHAR
-static int igb_ndo_fdb_del(struct ndmsg *ndm,
- struct net_device *dev,
- const unsigned char *addr)
-#else
-static int igb_ndo_fdb_del(struct ndmsg *ndm,
- struct net_device *dev,
- unsigned char *addr)
-#endif
-{
- struct igb_adapter *adapter = netdev_priv(dev);
- int err = -EOPNOTSUPP;
-
- if (ndm->ndm_state & NUD_PERMANENT) {
- pr_info("%s: FDB only supports static addresses\n",
- igb_driver_name);
- return -EINVAL;
- }
-
- if (adapter->vfs_allocated_count) {
- if (is_unicast_ether_addr(addr))
- err = dev_uc_del(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_del(dev, addr);
- else
- err = -EINVAL;
- }
-
- return err;
-}
-
-static int igb_ndo_fdb_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct net_device *dev,
- int idx)
-{
- struct igb_adapter *adapter = netdev_priv(dev);
-
- if (adapter->vfs_allocated_count)
- idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
-
- return idx;
-}
-#endif /* USE_DEFAULT_FDB_DEL_DUMP */
-
-#ifdef HAVE_BRIDGE_ATTRIBS
-static int igb_ndo_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh)
-{
- struct igb_adapter *adapter = netdev_priv(dev);
- struct e1000_hw *hw = &adapter->hw;
- struct nlattr *attr, *br_spec;
- int rem;
-
- if (!(adapter->vfs_allocated_count))
- return -EOPNOTSUPP;
-
- switch (adapter->hw.mac.type) {
- case e1000_82576:
- case e1000_i350:
- case e1000_i354:
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
-
- nla_for_each_nested(attr, br_spec, rem) {
- __u16 mode;
-
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
- mode = nla_get_u16(attr);
- if (mode == BRIDGE_MODE_VEPA) {
- e1000_vmdq_set_loopback_pf(hw, 0);
- adapter->flags &= ~IGB_FLAG_LOOPBACK_ENABLE;
- } else if (mode == BRIDGE_MODE_VEB) {
- e1000_vmdq_set_loopback_pf(hw, 1);
- adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE;
- } else
- return -EINVAL;
-
- netdev_info(adapter->netdev, "enabling bridge mode: %s\n",
- mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
- }
-
- return 0;
-}
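-
-/*
- * Usage sketch (annotation): the bridge mode handled above is driven
- * from userspace via iproute2, e.g.
- *   bridge link set dev eth0 hwmode vepa
- * which disables VMDq loopback so VF-to-VF traffic hairpins through
- * the external switch rather than the internal VEB; "eth0" is
- * illustrative.
- */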
-
-#ifdef HAVE_BRIDGE_FILTER
-static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, u32 filter_mask)
-#else
-static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev)
-#endif
-{
- struct igb_adapter *adapter = netdev_priv(dev);
- u16 mode;
-
- if (!(adapter->vfs_allocated_count))
- return -EOPNOTSUPP;
-
- if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE)
- mode = BRIDGE_MODE_VEB;
- else
- mode = BRIDGE_MODE_VEPA;
-
- return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
-}
-#endif /* HAVE_BRIDGE_ATTRIBS */
-#endif /* NTF_SELF */
-
-#endif /* HAVE_NDO_SET_FEATURES */
-#ifdef HAVE_NET_DEVICE_OPS
-static const struct net_device_ops igb_netdev_ops = {
- .ndo_open = igb_open,
- .ndo_stop = igb_close,
- .ndo_start_xmit = igb_xmit_frame,
- .ndo_get_stats = igb_get_stats,
- .ndo_set_rx_mode = igb_set_rx_mode,
- .ndo_set_mac_address = igb_set_mac,
- .ndo_change_mtu = igb_change_mtu,
- .ndo_do_ioctl = igb_ioctl,
- .ndo_tx_timeout = igb_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
-#ifdef IFLA_VF_MAX
- .ndo_set_vf_mac = igb_ndo_set_vf_mac,
- .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
-#ifdef HAVE_VF_MIN_MAX_TXRATE
- .ndo_set_vf_rate = igb_ndo_set_vf_bw,
-#else /* HAVE_VF_MIN_MAX_TXRATE */
- .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
-#endif /* HAVE_VF_MIN_MAX_TXRATE */
- .ndo_get_vf_config = igb_ndo_get_vf_config,
-#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
- .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
-#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
-#endif /* IFLA_VF_MAX */
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = igb_netpoll,
-#endif
-#ifdef HAVE_NDO_SET_FEATURES
- .ndo_fix_features = igb_fix_features,
- .ndo_set_features = igb_set_features,
-#endif
-#ifdef HAVE_VLAN_RX_REGISTER
- .ndo_vlan_rx_register = igb_vlan_mode,
-#endif
-#ifndef HAVE_RHEL6_NETDEV_OPS_EXT_FDB
-#ifdef NTF_SELF
- .ndo_fdb_add = igb_ndo_fdb_add,
-#ifndef USE_DEFAULT_FDB_DEL_DUMP
- .ndo_fdb_del = igb_ndo_fdb_del,
- .ndo_fdb_dump = igb_ndo_fdb_dump,
-#endif
-#endif /* NTF_SELF */
-#ifdef HAVE_BRIDGE_ATTRIBS
- .ndo_bridge_setlink = igb_ndo_bridge_setlink,
- .ndo_bridge_getlink = igb_ndo_bridge_getlink,
-#endif /* HAVE_BRIDGE_ATTRIBS */
-#endif /* ! HAVE_RHEL6_NETDEV_OPS_EXT_FDB */
-};
-
-#ifdef CONFIG_IGB_VMDQ_NETDEV
-static const struct net_device_ops igb_vmdq_ops = {
- .ndo_open = &igb_vmdq_open,
- .ndo_stop = &igb_vmdq_close,
- .ndo_start_xmit = &igb_vmdq_xmit_frame,
- .ndo_get_stats = &igb_vmdq_get_stats,
- .ndo_set_rx_mode = &igb_vmdq_set_rx_mode,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = &igb_vmdq_set_mac,
- .ndo_change_mtu = &igb_vmdq_change_mtu,
- .ndo_tx_timeout = &igb_vmdq_tx_timeout,
- .ndo_vlan_rx_register = &igb_vmdq_vlan_rx_register,
- .ndo_vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid,
-};
-
-#endif /* CONFIG_IGB_VMDQ_NETDEV */
-#endif /* HAVE_NET_DEVICE_OPS */
-#ifdef CONFIG_IGB_VMDQ_NETDEV
-void igb_assign_vmdq_netdev_ops(struct net_device *vnetdev)
-{
-#ifdef HAVE_NET_DEVICE_OPS
- vnetdev->netdev_ops = &igb_vmdq_ops;
-#else
- dev->open = &igb_vmdq_open;
- dev->stop = &igb_vmdq_close;
- dev->hard_start_xmit = &igb_vmdq_xmit_frame;
- dev->get_stats = &igb_vmdq_get_stats;
-#ifdef HAVE_SET_RX_MODE
- dev->set_rx_mode = &igb_vmdq_set_rx_mode;
-#endif
- dev->set_multicast_list = &igb_vmdq_set_rx_mode;
- dev->set_mac_address = &igb_vmdq_set_mac;
- dev->change_mtu = &igb_vmdq_change_mtu;
-#ifdef HAVE_TX_TIMEOUT
- dev->tx_timeout = &igb_vmdq_tx_timeout;
-#endif
-#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX)
- dev->vlan_rx_register = &igb_vmdq_vlan_rx_register;
- dev->vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid;
- dev->vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid;
-#endif
-#endif
- igb_vmdq_set_ethtool_ops(vnetdev);
- vnetdev->watchdog_timeo = 5 * HZ;
-
-}
-
-int igb_init_vmdq_netdevs(struct igb_adapter *adapter)
-{
- int pool, err = 0, base_queue;
- struct net_device *vnetdev;
- struct igb_vmdq_adapter *vmdq_adapter;
-
- for (pool = 1; pool < adapter->vmdq_pools; pool++) {
- int qpp = (!adapter->rss_queues ? 1 : adapter->rss_queues);
- base_queue = pool * qpp;
- vnetdev = alloc_etherdev(sizeof(struct igb_vmdq_adapter));
- if (!vnetdev) {
- err = -ENOMEM;
- break;
- }
- vmdq_adapter = netdev_priv(vnetdev);
- vmdq_adapter->vnetdev = vnetdev;
- vmdq_adapter->real_adapter = adapter;
- vmdq_adapter->rx_ring = adapter->rx_ring[base_queue];
- vmdq_adapter->tx_ring = adapter->tx_ring[base_queue];
- igb_assign_vmdq_netdev_ops(vnetdev);
- snprintf(vnetdev->name, IFNAMSIZ, "%sv%d",
- adapter->netdev->name, pool);
- vnetdev->features = adapter->netdev->features;
-#ifdef HAVE_NETDEV_VLAN_FEATURES
- vnetdev->vlan_features = adapter->netdev->vlan_features;
-#endif
- adapter->vmdq_netdev[pool-1] = vnetdev;
- err = register_netdev(vnetdev);
- if (err)
- break;
- }
- return err;
-}
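-
-/*
- * Naming note (annotation): the "%sv%d" format above exposes each
- * VMDq pool as its own netdev named after the parent, e.g. pools 1
- * and 2 of eth0 register as eth0v1 and eth0v2.
- */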
-
-int igb_remove_vmdq_netdevs(struct igb_adapter *adapter)
-{
- int pool, err = 0;
-
- for (pool = 1; pool < adapter->vmdq_pools; pool++) {
- unregister_netdev(adapter->vmdq_netdev[pool-1]);
- free_netdev(adapter->vmdq_netdev[pool-1]);
- adapter->vmdq_netdev[pool-1] = NULL;
- }
- return err;
-}
-#endif /* CONFIG_IGB_VMDQ_NETDEV */
-
-/**
- * igb_set_fw_version - Configure version string for ethtool
- * @adapter: adapter struct
- *
- **/
-static void igb_set_fw_version(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct e1000_fw_version fw;
-
- e1000_get_fw_version(hw, &fw);
-
- switch (hw->mac.type) {
- case e1000_i210:
- case e1000_i211:
- if (!(e1000_get_flash_presence_i210(hw))) {
- snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%2d.%2d-%d",
- fw.invm_major, fw.invm_minor, fw.invm_img_type);
- break;
- }
- /* fall through */
- default:
- /* if option ROM is valid, display its version too */
- if (fw.or_valid) {
- snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d, 0x%08x, %d.%d.%d",
- fw.eep_major, fw.eep_minor, fw.etrack_id,
- fw.or_major, fw.or_build, fw.or_patch);
- /* no option rom */
- } else {
- if (fw.etrack_id != 0x0000) {
- snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d, 0x%08x",
- fw.eep_major, fw.eep_minor, fw.etrack_id);
- } else {
- snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d.%d",
- fw.eep_major, fw.eep_minor, fw.eep_build);
- }
- }
- break;
- }
-
- return;
-}
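-
-/*
- * Example output (annotation): with a valid option ROM, ethtool -i
- * would report something like
- *   firmware-version: 1.63, 0x800009fa, 1.949.0
- * i.e. EEPROM major.minor, eTrack ID, option ROM major.build.patch;
- * the numbers are illustrative only.
- */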
-
-/**
- * igb_init_mas - init Media Autosense feature if enabled in the NVM
- *
- * @adapter: adapter struct
- **/
-static void igb_init_mas(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u16 eeprom_data;
-
- e1000_read_nvm(hw, NVM_COMPAT, 1, &eeprom_data);
- switch (hw->bus.func) {
- case E1000_FUNC_0:
- if (eeprom_data & IGB_MAS_ENABLE_0)
- adapter->flags |= IGB_FLAG_MAS_ENABLE;
- break;
- case E1000_FUNC_1:
- if (eeprom_data & IGB_MAS_ENABLE_1)
- adapter->flags |= IGB_FLAG_MAS_ENABLE;
- break;
- case E1000_FUNC_2:
- if (eeprom_data & IGB_MAS_ENABLE_2)
- adapter->flags |= IGB_FLAG_MAS_ENABLE;
- break;
- case E1000_FUNC_3:
- if (eeprom_data & IGB_MAS_ENABLE_3)
- adapter->flags |= IGB_FLAG_MAS_ENABLE;
- break;
- default:
- /* Shouldn't get here */
- dev_err(pci_dev_to_dev(adapter->pdev),
- "%s:AMS: Invalid port configuration, returning\n",
- adapter->netdev->name);
- break;
- }
-}
-
-/**
- * igb_probe - Device Initialization Routine
- * @pdev: PCI device information struct
- * @ent: entry in igb_pci_tbl
- *
- * Returns 0 on success, negative on failure
- *
- * igb_probe initializes an adapter identified by a pci_dev structure.
- * The OS initialization, configuring of the adapter private structure,
- * and a hardware reset occur.
- **/
-static int __devinit igb_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct net_device *netdev;
- struct igb_adapter *adapter;
- struct e1000_hw *hw;
- u16 eeprom_data = 0;
- u8 pba_str[E1000_PBANUM_LENGTH];
- s32 ret_val;
- static int global_quad_port_a; /* global quad port a indication */
- int i, err, pci_using_dac;
- static int cards_found;
-
- err = pci_enable_device_mem(pdev);
- if (err)
- return err;
-
- pci_using_dac = 0;
- err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64));
- if (!err) {
- err = dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
- } else {
- err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32));
- if (err) {
- err = dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32));
- if (err) {
- IGB_ERR("No usable DMA configuration, "
- "aborting\n");
- goto err_dma;
- }
- }
- }
-
-#ifndef HAVE_ASPM_QUIRKS
- /* 82575 requires that the pci-e link partner disable the L0s state */
- switch (pdev->device) {
- case E1000_DEV_ID_82575EB_COPPER:
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
- default:
- break;
- }
-
-#endif /* HAVE_ASPM_QUIRKS */
- err = pci_request_selected_regions(pdev,
- pci_select_bars(pdev,
- IORESOURCE_MEM),
- igb_driver_name);
- if (err)
- goto err_pci_reg;
-
- pci_enable_pcie_error_reporting(pdev);
-
- pci_set_master(pdev);
-
- err = -ENOMEM;
-#ifdef HAVE_TX_MQ
- netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
- IGB_MAX_TX_QUEUES);
-#else
- netdev = alloc_etherdev(sizeof(struct igb_adapter));
-#endif /* HAVE_TX_MQ */
- if (!netdev)
- goto err_alloc_etherdev;
-
- SET_MODULE_OWNER(netdev);
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- pci_set_drvdata(pdev, netdev);
- adapter = netdev_priv(netdev);
- adapter->netdev = netdev;
- adapter->pdev = pdev;
- hw = &adapter->hw;
- hw->back = adapter;
- adapter->port_num = hw->bus.func;
- adapter->msg_enable = (1 << debug) - 1;
-
-#ifdef HAVE_PCI_ERS
- err = pci_save_state(pdev);
- if (err)
- goto err_ioremap;
-#endif
- err = -EIO;
- hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (!hw->hw_addr)
- goto err_ioremap;
-
-#ifdef HAVE_NET_DEVICE_OPS
- netdev->netdev_ops = &igb_netdev_ops;
-#else /* HAVE_NET_DEVICE_OPS */
- netdev->open = &igb_open;
- netdev->stop = &igb_close;
- netdev->get_stats = &igb_get_stats;
-#ifdef HAVE_SET_RX_MODE
- netdev->set_rx_mode = &igb_set_rx_mode;
-#endif
- netdev->set_multicast_list = &igb_set_rx_mode;
- netdev->set_mac_address = &igb_set_mac;
- netdev->change_mtu = &igb_change_mtu;
- netdev->do_ioctl = &igb_ioctl;
-#ifdef HAVE_TX_TIMEOUT
- netdev->tx_timeout = &igb_tx_timeout;
-#endif
- netdev->vlan_rx_register = igb_vlan_mode;
- netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
- netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- netdev->poll_controller = igb_netpoll;
-#endif
- netdev->hard_start_xmit = &igb_xmit_frame;
-#endif /* HAVE_NET_DEVICE_OPS */
- igb_set_ethtool_ops(netdev);
-#ifdef HAVE_TX_TIMEOUT
- netdev->watchdog_timeo = 5 * HZ;
-#endif
-
- strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
-
- adapter->bd_number = cards_found;
-
- /* setup the private structure */
- err = igb_sw_init(adapter);
- if (err)
- goto err_sw_init;
-
- e1000_get_bus_info(hw);
-
- hw->phy.autoneg_wait_to_complete = FALSE;
- hw->mac.adaptive_ifs = FALSE;
-
- /* Copper options */
- if (hw->phy.media_type == e1000_media_type_copper) {
- hw->phy.mdix = AUTO_ALL_MODES;
- hw->phy.disable_polarity_correction = FALSE;
- hw->phy.ms_type = e1000_ms_hw_default;
- }
-
- if (e1000_check_reset_block(hw))
- dev_info(pci_dev_to_dev(pdev),
- "PHY reset is blocked due to SOL/IDER session.\n");
-
- /*
- * features is initialized to 0 at allocation; since it might already
- * have bits set by igb_sw_init, use an OR instead of an assignment.
- */
- netdev->features |= NETIF_F_SG |
- NETIF_F_IP_CSUM |
-#ifdef NETIF_F_IPV6_CSUM
- NETIF_F_IPV6_CSUM |
-#endif
-#ifdef NETIF_F_TSO
- NETIF_F_TSO |
-#ifdef NETIF_F_TSO6
- NETIF_F_TSO6 |
-#endif
-#endif /* NETIF_F_TSO */
-#ifdef NETIF_F_RXHASH
- NETIF_F_RXHASH |
-#endif
- NETIF_F_RXCSUM |
-#ifdef NETIF_F_HW_VLAN_CTAG_RX
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX;
-#else
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_TX;
-#endif
-
- if (hw->mac.type >= e1000_82576)
- netdev->features |= NETIF_F_SCTP_CSUM;
-
-#ifdef HAVE_NDO_SET_FEATURES
- /* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
-#ifndef IGB_NO_LRO
-
- /* give us the option of enabling LRO later */
- netdev->hw_features |= NETIF_F_LRO;
-#endif
-#else
-#ifdef NETIF_F_GRO
-
- /* this is only needed on kernels prior to 2.6.39 */
- netdev->features |= NETIF_F_GRO;
-#endif
-#endif
-
- /* set this bit last since it cannot be part of hw_features */
-#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
- netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#else
- netdev->features |= NETIF_F_HW_VLAN_FILTER;
-#endif
-
-#ifdef HAVE_NETDEV_VLAN_FEATURES
- netdev->vlan_features |= NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_SG;
-
-#endif
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
-
- adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
-#ifdef DEBUG
- if (adapter->dmac != IGB_DMAC_DISABLE)
- printk("%s: DMA Coalescing is enabled..\n", netdev->name);
-#endif
-
- /* before reading the NVM, reset the controller to put the device in a
- * known good starting state */
- e1000_reset_hw(hw);
-
- /* make sure the NVM is good */
- if (e1000_validate_nvm_checksum(hw) < 0) {
- dev_err(pci_dev_to_dev(pdev), "The NVM Checksum Is Not"
- " Valid\n");
- err = -EIO;
- goto err_eeprom;
- }
-
- /* copy the MAC address out of the NVM */
- if (e1000_read_mac_addr(hw))
- dev_err(pci_dev_to_dev(pdev), "NVM Read Error\n");
- memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
-#ifdef ETHTOOL_GPERMADDR
- memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
-
- if (!is_valid_ether_addr(netdev->perm_addr)) {
-#else
- if (!is_valid_ether_addr(netdev->dev_addr)) {
-#endif
- dev_err(pci_dev_to_dev(pdev), "Invalid MAC Address\n");
- err = -EIO;
- goto err_eeprom;
- }
-
- memcpy(&adapter->mac_table[0].addr, hw->mac.addr, netdev->addr_len);
- adapter->mac_table[0].queue = adapter->vfs_allocated_count;
- adapter->mac_table[0].state = (IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE);
- igb_rar_set(adapter, 0);
-
- /* get firmware version for ethtool -i */
- igb_set_fw_version(adapter);
-
- /* Check if Media Autosense is enabled */
- if (hw->mac.type == e1000_82580)
- igb_init_mas(adapter);
- setup_timer(&adapter->watchdog_timer, &igb_watchdog,
- (unsigned long) adapter);
- if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
- setup_timer(&adapter->dma_err_timer, &igb_dma_err_timer,
- (unsigned long) adapter);
- setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
- (unsigned long) adapter);
-
- INIT_WORK(&adapter->reset_task, igb_reset_task);
- INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
- if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
- INIT_WORK(&adapter->dma_err_task, igb_dma_err_task);
-
- /* Initialize link properties that are user-changeable */
- adapter->fc_autoneg = true;
- hw->mac.autoneg = true;
- hw->phy.autoneg_advertised = 0x2f;
-
- hw->fc.requested_mode = e1000_fc_default;
- hw->fc.current_mode = e1000_fc_default;
-
- e1000_validate_mdi_setting(hw);
-
- /* By default, support wake on port A */
- if (hw->bus.func == 0)
- adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
-
- /* Check the NVM for wake support for non-port A ports */
- if (hw->mac.type >= e1000_82580)
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
- NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
- &eeprom_data);
- else if (hw->bus.func == 1)
- e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
-
- if (eeprom_data & IGB_EEPROM_APME)
- adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
-
- /* now that we have the eeprom settings, apply the special cases where
- * the eeprom may be wrong or the board simply won't support wake on
- * lan on a particular port */
- switch (pdev->device) {
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
- break;
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82576_FIBER:
- case E1000_DEV_ID_82576_SERDES:
- /* Wake events are only supported on port A for dual fiber
- * adapters, regardless of eeprom setting */
- if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1)
- adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
- break;
- case E1000_DEV_ID_82576_QUAD_COPPER:
- case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
- /* if quad port adapter, disable WoL on all but port A */
- if (global_quad_port_a != 0)
- adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
- else
- adapter->flags |= IGB_FLAG_QUAD_PORT_A;
- /* Reset for multiple quad port adapters */
- if (++global_quad_port_a == 4)
- global_quad_port_a = 0;
- break;
- default:
- /* If the device can't wake, don't set software support */
- if (!device_can_wakeup(&adapter->pdev->dev))
- adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
- break;
- }
-
- /* initialize the wol settings based on the eeprom settings */
- if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
- adapter->wol |= E1000_WUFC_MAG;
-
- /* Some vendors want WoL disabled by default, but still supported */
- if ((hw->mac.type == e1000_i350) &&
- (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
- adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
- adapter->wol = 0;
- }
-
- device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev),
- adapter->flags & IGB_FLAG_WOL_SUPPORTED);
-
- /* reset the hardware with the new settings */
- igb_reset(adapter);
- adapter->devrc = 0;
-
-#ifdef HAVE_I2C_SUPPORT
- /* Init the I2C interface */
- err = igb_init_i2c(adapter);
- if (err) {
- dev_err(&pdev->dev, "failed to init i2c interface\n");
- goto err_eeprom;
- }
-#endif /* HAVE_I2C_SUPPORT */
-
- /* let the f/w know that the h/w is now under the control of the
- * driver. */
- igb_get_hw_control(adapter);
-
- strncpy(netdev->name, "eth%d", IFNAMSIZ);
- err = register_netdev(netdev);
- if (err)
- goto err_register;
-
-#ifdef CONFIG_IGB_VMDQ_NETDEV
- err = igb_init_vmdq_netdevs(adapter);
- if (err)
- goto err_register;
-#endif
- /* carrier off reporting is important to ethtool even BEFORE open */
- netif_carrier_off(netdev);
-
-#ifdef IGB_DCA
- if (dca_add_requester(&pdev->dev) == E1000_SUCCESS) {
- adapter->flags |= IGB_FLAG_DCA_ENABLED;
- dev_info(pci_dev_to_dev(pdev), "DCA enabled\n");
- igb_setup_dca(adapter);
- }
-
-#endif
-#ifdef HAVE_PTP_1588_CLOCK
- /* do hw tstamp init after resetting */
- igb_ptp_init(adapter);
-#endif /* HAVE_PTP_1588_CLOCK */
-
- dev_info(pci_dev_to_dev(pdev), "Intel(R) Gigabit Ethernet Network Connection\n");
- /* print bus type/speed/width info */
- dev_info(pci_dev_to_dev(pdev), "%s: (PCIe:%s:%s) ",
- netdev->name,
- ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5GT/s" :
- (hw->bus.speed == e1000_bus_speed_5000) ? "5.0GT/s" :
- (hw->mac.type == e1000_i354) ? "integrated" :
- "unknown"),
- ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
- (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
- (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
- (hw->mac.type == e1000_i354) ? "integrated" :
- "unknown"));
- dev_info(pci_dev_to_dev(pdev), "%s: MAC: ", netdev->name);
- for (i = 0; i < 6; i++)
- printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
-
- ret_val = e1000_read_pba_string(hw, pba_str, E1000_PBANUM_LENGTH);
- if (ret_val)
- strncpy(pba_str, "Unknown", sizeof(pba_str) - 1);
- dev_info(pci_dev_to_dev(pdev), "%s: PBA No: %s\n", netdev->name,
- pba_str);
-
-
- /* Initialize the thermal sensor on i350 devices. */
- if (hw->mac.type == e1000_i350) {
- if (hw->bus.func == 0) {
- u16 ets_word;
-
- /*
- * Read the NVM to determine if this i350 device
- * supports an external thermal sensor.
- */
- e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word);
- if (ets_word != 0x0000 && ets_word != 0xFFFF)
- adapter->ets = true;
- else
- adapter->ets = false;
- }
-#ifdef IGB_HWMON
-
- igb_sysfs_init(adapter);
-#else
-#ifdef IGB_PROCFS
-
- igb_procfs_init(adapter);
-#endif /* IGB_PROCFS */
-#endif /* IGB_HWMON */
- } else {
- adapter->ets = false;
- }
-
- if (hw->phy.media_type == e1000_media_type_copper) {
- switch (hw->mac.type) {
- case e1000_i350:
- case e1000_i210:
- case e1000_i211:
- /* Enable EEE for internal copper PHY devices */
- err = e1000_set_eee_i350(hw);
- if ((!err) &&
- (adapter->flags & IGB_FLAG_EEE))
- adapter->eee_advert =
- MDIO_EEE_100TX | MDIO_EEE_1000T;
- break;
- case e1000_i354:
- if ((E1000_READ_REG(hw, E1000_CTRL_EXT)) &
- (E1000_CTRL_EXT_LINK_MODE_SGMII)) {
- err = e1000_set_eee_i354(hw);
- if ((!err) &&
- (adapter->flags & IGB_FLAG_EEE))
- adapter->eee_advert =
- MDIO_EEE_100TX | MDIO_EEE_1000T;
- }
- break;
- default:
- break;
- }
- }
-
- /* send driver version info to firmware */
- if (hw->mac.type >= e1000_i350)
- igb_init_fw(adapter);
-
-#ifndef IGB_NO_LRO
- if (netdev->features & NETIF_F_LRO)
- dev_info(pci_dev_to_dev(pdev), "Internal LRO is enabled \n");
- else
- dev_info(pci_dev_to_dev(pdev), "LRO is disabled \n");
-#endif
- dev_info(pci_dev_to_dev(pdev),
- "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
- adapter->msix_entries ? "MSI-X" :
- (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
- adapter->num_rx_queues, adapter->num_tx_queues);
-
- cards_found++;
-
- pm_runtime_put_noidle(&pdev->dev);
- return 0;
-
-err_register:
- igb_release_hw_control(adapter);
-#ifdef HAVE_I2C_SUPPORT
- memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
-#endif /* HAVE_I2C_SUPPORT */
-err_eeprom:
- if (!e1000_check_reset_block(hw))
- e1000_phy_hw_reset(hw);
-
- if (hw->flash_address)
- iounmap(hw->flash_address);
-err_sw_init:
- igb_clear_interrupt_scheme(adapter);
- igb_reset_sriov_capability(adapter);
- iounmap(hw->hw_addr);
-err_ioremap:
- free_netdev(netdev);
-err_alloc_etherdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
-err_pci_reg:
-err_dma:
- pci_disable_device(pdev);
- return err;
-}
-#ifdef HAVE_I2C_SUPPORT
-/*
- * igb_remove_i2c - Cleanup I2C interface
- * @adapter: pointer to adapter structure
- *
- */
-static void igb_remove_i2c(struct igb_adapter *adapter)
-{
-
- /* free the adapter bus structure */
- i2c_del_adapter(&adapter->i2c_adap);
-}
-#endif /* HAVE_I2C_SUPPORT */
-
-/**
- * igb_remove - Device Removal Routine
- * @pdev: PCI device information struct
- *
- * igb_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device. This could be caused by a
- * Hot-Plug event, or because the driver is going to be removed from
- * memory.
- **/
-static void __devexit igb_remove(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- pm_runtime_get_noresume(&pdev->dev);
-#ifdef HAVE_I2C_SUPPORT
- igb_remove_i2c(adapter);
-#endif /* HAVE_I2C_SUPPORT */
-#ifdef HAVE_PTP_1588_CLOCK
- igb_ptp_stop(adapter);
-#endif /* HAVE_PTP_1588_CLOCK */
-
-	/* flush_scheduled_work() may reschedule our watchdog task, so
-	 * explicitly disable the watchdog tasks from being rescheduled */
- set_bit(__IGB_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
- if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
- del_timer_sync(&adapter->dma_err_timer);
- del_timer_sync(&adapter->phy_info_timer);
-
- flush_scheduled_work();
-
-#ifdef IGB_DCA
- if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
- dev_info(pci_dev_to_dev(pdev), "DCA disabled\n");
- dca_remove_requester(&pdev->dev);
- adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
- E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_DISABLE);
- }
-#endif
-
- /* Release control of h/w to f/w. If f/w is AMT enabled, this
- * would have already happened in close and is redundant. */
- igb_release_hw_control(adapter);
-
- unregister_netdev(netdev);
-#ifdef CONFIG_IGB_VMDQ_NETDEV
- igb_remove_vmdq_netdevs(adapter);
-#endif
-
- igb_clear_interrupt_scheme(adapter);
- igb_reset_sriov_capability(adapter);
-
- iounmap(hw->hw_addr);
- if (hw->flash_address)
- iounmap(hw->flash_address);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
-
-#ifdef IGB_HWMON
- igb_sysfs_exit(adapter);
-#else
-#ifdef IGB_PROCFS
- igb_procfs_exit(adapter);
-#endif /* IGB_PROCFS */
-#endif /* IGB_HWMON */
- kfree(adapter->mac_table);
- kfree(adapter->shadow_vfta);
- free_netdev(netdev);
-
- pci_disable_pcie_error_reporting(pdev);
-
- pci_disable_device(pdev);
-}
-
-/**
- * igb_sw_init - Initialize general software structures (struct igb_adapter)
- * @adapter: board private structure to initialize
- *
- * igb_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
- **/
-static int igb_sw_init(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
-
- /* PCI config space info */
-
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_device_id = pdev->subsystem_device;
-
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
-
- pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
-
- /* set default ring sizes */
- adapter->tx_ring_count = IGB_DEFAULT_TXD;
- adapter->rx_ring_count = IGB_DEFAULT_RXD;
-
- /* set default work limits */
- adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
-
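-	/* e.g. 1500 + 14 + 4 + 4 = 1522 bytes with the default 1500-byte MTU */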
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
-
- /* Initialize the hardware-specific values */
- if (e1000_setup_init_funcs(hw, TRUE)) {
- dev_err(pci_dev_to_dev(pdev), "Hardware Initialization Failure\n");
- return -EIO;
- }
-
- adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) *
- hw->mac.rar_entry_count,
- GFP_ATOMIC);
-
- /* Setup and initialize a copy of the hw vlan table array */
- adapter->shadow_vfta = (u32 *)kzalloc(sizeof(u32) * E1000_VFTA_ENTRIES,
- GFP_ATOMIC);
-#ifdef NO_KNI
- /* These calls may decrease the number of queues */
- if (hw->mac.type < e1000_i210) {
- igb_set_sriov_capability(adapter);
- }
-
- if (igb_init_interrupt_scheme(adapter, true)) {
- dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n");
- return -ENOMEM;
- }
-
- /* Explicitly disable IRQ since the NIC can be in any state. */
- igb_irq_disable(adapter);
-
- set_bit(__IGB_DOWN, &adapter->state);
-#endif
- return 0;
-}
-
-/**
- * igb_open - Called when a network interface is made active
- * @netdev: network interface device structure
- *
- * Returns 0 on success, negative value on failure
- *
- * The open entry point is called when a network interface is made
- * active by the system (IFF_UP). At this point all resources needed
- * for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the watchdog timer is started,
- * and the stack is notified that the interface is ready.
- **/
-static int __igb_open(struct net_device *netdev, bool resuming)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-#ifdef CONFIG_PM_RUNTIME
- struct pci_dev *pdev = adapter->pdev;
-#endif /* CONFIG_PM_RUNTIME */
- int err;
- int i;
-
- /* disallow open during test */
- if (test_bit(__IGB_TESTING, &adapter->state)) {
- WARN_ON(resuming);
- return -EBUSY;
- }
-
-#ifdef CONFIG_PM_RUNTIME
- if (!resuming)
- pm_runtime_get_sync(&pdev->dev);
-#endif /* CONFIG_PM_RUNTIME */
-
- netif_carrier_off(netdev);
-
- /* allocate transmit descriptors */
- err = igb_setup_all_tx_resources(adapter);
- if (err)
- goto err_setup_tx;
-
- /* allocate receive descriptors */
- err = igb_setup_all_rx_resources(adapter);
- if (err)
- goto err_setup_rx;
-
- igb_power_up_link(adapter);
-
- /* before we allocate an interrupt, we must be ready to handle it.
- * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
- * as soon as we call pci_request_irq, so we have to setup our
- * clean_rx handler before we do so. */
- igb_configure(adapter);
-
- err = igb_request_irq(adapter);
- if (err)
- goto err_req_irq;
-
- /* Notify the stack of the actual queue counts. */
- netif_set_real_num_tx_queues(netdev,
- adapter->vmdq_pools ? 1 :
- adapter->num_tx_queues);
-
- err = netif_set_real_num_rx_queues(netdev,
- adapter->vmdq_pools ? 1 :
- adapter->num_rx_queues);
- if (err)
- goto err_set_queues;
-
- /* From here on the code is the same as igb_up() */
- clear_bit(__IGB_DOWN, &adapter->state);
-
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&(adapter->q_vector[i]->napi));
- igb_configure_lli(adapter);
-
- /* Clear any pending interrupts. */
- E1000_READ_REG(hw, E1000_ICR);
-
- igb_irq_enable(adapter);
-
- /* notify VFs that reset has been completed */
- if (adapter->vfs_allocated_count) {
- u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT);
- reg_data |= E1000_CTRL_EXT_PFRSTD;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data);
- }
-
- netif_tx_start_all_queues(netdev);
-
- if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
- schedule_work(&adapter->dma_err_task);
-
- /* start the watchdog. */
- hw->mac.get_link_status = 1;
- schedule_work(&adapter->watchdog_task);
-
- return E1000_SUCCESS;
-
-err_set_queues:
- igb_free_irq(adapter);
-err_req_irq:
- igb_release_hw_control(adapter);
- igb_power_down_link(adapter);
- igb_free_all_rx_resources(adapter);
-err_setup_rx:
- igb_free_all_tx_resources(adapter);
-err_setup_tx:
- igb_reset(adapter);
-
-#ifdef CONFIG_PM_RUNTIME
- if (!resuming)
- pm_runtime_put(&pdev->dev);
-#endif /* CONFIG_PM_RUNTIME */
-
- return err;
-}
-
-static int igb_open(struct net_device *netdev)
-{
- return __igb_open(netdev, false);
-}
-
-/**
- * igb_close - Disables a network interface
- * @netdev: network interface device structure
- *
- * Returns 0, this is not allowed to fail
- *
- * The close entry point is called when an interface is de-activated
- * by the OS. The hardware is still under the driver's control, but
- * needs to be disabled. A global MAC reset is issued to stop the
- * hardware, and all transmit and receive resources are freed.
- **/
-static int __igb_close(struct net_device *netdev, bool suspending)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_PM_RUNTIME
- struct pci_dev *pdev = adapter->pdev;
-#endif /* CONFIG_PM_RUNTIME */
-
- WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
-
-#ifdef CONFIG_PM_RUNTIME
- if (!suspending)
- pm_runtime_get_sync(&pdev->dev);
-#endif /* CONFIG_PM_RUNTIME */
-
- igb_down(adapter);
-
- igb_release_hw_control(adapter);
-
- igb_free_irq(adapter);
-
- igb_free_all_tx_resources(adapter);
- igb_free_all_rx_resources(adapter);
-
-#ifdef CONFIG_PM_RUNTIME
- if (!suspending)
- pm_runtime_put_sync(&pdev->dev);
-#endif /* CONFIG_PM_RUNTIME */
-
- return 0;
-}
-
-static int igb_close(struct net_device *netdev)
-{
- return __igb_close(netdev, false);
-}
-
-/**
- * igb_setup_tx_resources - allocate Tx resources (Descriptors)
- * @tx_ring: tx descriptor ring (for a specific queue) to setup
- *
- * Return 0 on success, negative on failure
- **/
-int igb_setup_tx_resources(struct igb_ring *tx_ring)
-{
- struct device *dev = tx_ring->dev;
- int size;
-
- size = sizeof(struct igb_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vzalloc(size);
- if (!tx_ring->tx_buffer_info)
- goto err;
-
- /* round up to nearest 4K */
- tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
- tx_ring->size = ALIGN(tx_ring->size, 4096);
-
- tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
- &tx_ring->dma, GFP_KERNEL);
-
- if (!tx_ring->desc)
- goto err;
-
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
-
- return 0;
-
-err:
- vfree(tx_ring->tx_buffer_info);
- dev_err(dev,
- "Unable to allocate memory for the transmit descriptor ring\n");
- return -ENOMEM;
-}
-
-/**
- * igb_setup_all_tx_resources - wrapper to allocate Tx resources
- * (Descriptors) for all queues
- * @adapter: board private structure
- *
- * Return 0 on success, negative on failure
- **/
-static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- int i, err = 0;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- err = igb_setup_tx_resources(adapter->tx_ring[i]);
- if (err) {
- dev_err(pci_dev_to_dev(pdev),
- "Allocation for Tx Queue %u failed\n", i);
- for (i--; i >= 0; i--)
- igb_free_tx_resources(adapter->tx_ring[i]);
- break;
- }
- }
-
- return err;
-}
-
-/**
- * igb_setup_tctl - configure the transmit control registers
- * @adapter: Board private structure
- **/
-void igb_setup_tctl(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 tctl;
-
- /* disable queue 0 which is enabled by default on 82575 and 82576 */
- E1000_WRITE_REG(hw, E1000_TXDCTL(0), 0);
-
- /* Program the Transmit Control Register */
- tctl = E1000_READ_REG(hw, E1000_TCTL);
- tctl &= ~E1000_TCTL_CT;
- tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
- (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
-
- e1000_config_collision_dist(hw);
-
- /* Enable transmits */
- tctl |= E1000_TCTL_EN;
-
- E1000_WRITE_REG(hw, E1000_TCTL, tctl);
-}
-
-static u32 igb_tx_wthresh(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- switch (hw->mac.type) {
- case e1000_i354:
- return 4;
- case e1000_82576:
- if (adapter->msix_entries)
- return 1;
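-		/* fall through */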
- default:
- break;
- }
-
- return 16;
-}
-
-/**
- * igb_configure_tx_ring - Configure transmit ring after Reset
- * @adapter: board private structure
- * @ring: tx ring to configure
- *
- * Configure a transmit ring after a reset.
- **/
-void igb_configure_tx_ring(struct igb_adapter *adapter,
- struct igb_ring *ring)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 txdctl = 0;
- u64 tdba = ring->dma;
- int reg_idx = ring->reg_idx;
-
- /* disable the queue */
- E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), 0);
- E1000_WRITE_FLUSH(hw);
- mdelay(10);
-
- E1000_WRITE_REG(hw, E1000_TDLEN(reg_idx),
- ring->count * sizeof(union e1000_adv_tx_desc));
- E1000_WRITE_REG(hw, E1000_TDBAL(reg_idx),
- tdba & 0x00000000ffffffffULL);
- E1000_WRITE_REG(hw, E1000_TDBAH(reg_idx), tdba >> 32);
-
- ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
- E1000_WRITE_REG(hw, E1000_TDH(reg_idx), 0);
- writel(0, ring->tail);
-
- txdctl |= IGB_TX_PTHRESH;
- txdctl |= IGB_TX_HTHRESH << 8;
- txdctl |= igb_tx_wthresh(adapter) << 16;
-
- txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
- E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), txdctl);
-}
-
-/**
- * igb_configure_tx - Configure transmit Unit after Reset
- * @adapter: board private structure
- *
- * Configure the Tx unit of the MAC after a reset.
- **/
-static void igb_configure_tx(struct igb_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++)
- igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
-}
-
-/**
- * igb_setup_rx_resources - allocate Rx resources (Descriptors)
- * @rx_ring: rx descriptor ring (for a specific queue) to setup
- *
- * Returns 0 on success, negative on failure
- **/
-int igb_setup_rx_resources(struct igb_ring *rx_ring)
-{
- struct device *dev = rx_ring->dev;
- int size, desc_len;
-
- size = sizeof(struct igb_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vzalloc(size);
- if (!rx_ring->rx_buffer_info)
- goto err;
-
- desc_len = sizeof(union e1000_adv_rx_desc);
-
- /* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * desc_len;
- rx_ring->size = ALIGN(rx_ring->size, 4096);
-
- rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
- &rx_ring->dma, GFP_KERNEL);
-
- if (!rx_ring->desc)
- goto err;
-
- rx_ring->next_to_alloc = 0;
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
-
- return 0;
-
-err:
- vfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
- dev_err(dev, "Unable to allocate memory for the receive descriptor"
- " ring\n");
- return -ENOMEM;
-}
-
-/**
- * igb_setup_all_rx_resources - wrapper to allocate Rx resources
- * (Descriptors) for all queues
- * @adapter: board private structure
- *
- * Return 0 on success, negative on failure
- **/
-static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- int i, err = 0;
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- err = igb_setup_rx_resources(adapter->rx_ring[i]);
- if (err) {
- dev_err(pci_dev_to_dev(pdev),
- "Allocation for Rx Queue %u failed\n", i);
- for (i--; i >= 0; i--)
- igb_free_rx_resources(adapter->rx_ring[i]);
- break;
- }
- }
-
- return err;
-}
-
-/**
- * igb_setup_mrqc - configure the multiple receive queue control registers
- * @adapter: Board private structure
- **/
-static void igb_setup_mrqc(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 mrqc, rxcsum;
- u32 j, num_rx_queues, shift = 0, shift2 = 0;
- static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
- 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
- 0xA32DCB77, 0x0CF23080, 0x3BB7426A,
- 0xFA01ACBE };
-
- /* Fill out hash function seeds */
- for (j = 0; j < 10; j++)
- E1000_WRITE_REG(hw, E1000_RSSRK(j), rsskey[j]);
-
- num_rx_queues = adapter->rss_queues;
-
-	/* 82575 and 82576 support 2 RSS queues for VMDq */
- switch (hw->mac.type) {
- case e1000_82575:
- if (adapter->vmdq_pools) {
- shift = 2;
- shift2 = 6;
- break;
- }
- shift = 6;
- break;
- case e1000_82576:
- /* 82576 supports 2 RSS queues for SR-IOV */
- if (adapter->vfs_allocated_count || adapter->vmdq_pools) {
- shift = 3;
- num_rx_queues = 2;
- }
- break;
- default:
- break;
- }
-
-	/*
-	 * Populate the redirection table 4 entries at a time. To do this
-	 * we are generating the results for n and n+2 and then interleaving
-	 * those with the results for n+1 and n+3.
-	 */
- for (j = 0; j < 32; j++) {
- /* first pass generates n and n+2 */
- u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
- u32 reta = (base & 0x07800780) >> (7 - shift);
-
- /* second pass generates n+1 and n+3 */
- base += 0x00010001 * num_rx_queues;
- reta |= (base & 0x07800780) << (1 + shift);
-
- /* generate 2nd table for 82575 based parts */
- if (shift2)
- reta |= (0x01010101 * num_rx_queues) << shift2;
-
- E1000_WRITE_REG(hw, E1000_RETA(j), reta);
- }
-
- /*
- * Disable raw packet checksumming so that RSS hash is placed in
- * descriptor on writeback. No need to enable TCP/UDP/IP checksum
- * offloads as they are enabled by default
- */
- rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
- rxcsum |= E1000_RXCSUM_PCSD;
-
- if (adapter->hw.mac.type >= e1000_82576)
- /* Enable Receive Checksum Offload for SCTP */
- rxcsum |= E1000_RXCSUM_CRCOFL;
-
- /* Don't need to set TUOFL or IPOFL, they default to 1 */
- E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
-
- /* Generate RSS hash based on packet types, TCP/UDP
- * port numbers and/or IPv4/v6 src and dst addresses
- */
- mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
- E1000_MRQC_RSS_FIELD_IPV4_TCP |
- E1000_MRQC_RSS_FIELD_IPV6 |
- E1000_MRQC_RSS_FIELD_IPV6_TCP |
- E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
-
- if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
- mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
- if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
- mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
-
- /* If VMDq is enabled then we set the appropriate mode for that, else
- * we default to RSS so that an RSS hash is calculated per packet even
- * if we are only using one queue */
- if (adapter->vfs_allocated_count || adapter->vmdq_pools) {
- if (hw->mac.type > e1000_82575) {
- /* Set the default pool for the PF's first queue */
- u32 vtctl = E1000_READ_REG(hw, E1000_VT_CTL);
- vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
- E1000_VT_CTL_DISABLE_DEF_POOL);
- vtctl |= adapter->vfs_allocated_count <<
- E1000_VT_CTL_DEFAULT_POOL_SHIFT;
- E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl);
- } else if (adapter->rss_queues > 1) {
- /* set default queue for pool 1 to queue 2 */
- E1000_WRITE_REG(hw, E1000_VT_CTL,
- adapter->rss_queues << 7);
- }
- if (adapter->rss_queues > 1)
- mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
- else
- mrqc |= E1000_MRQC_ENABLE_VMDQ;
- } else {
- mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
- }
- igb_vmm_control(adapter);
-
- E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
-}
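The interleaved arithmetic in the redirection-table loop of igb_setup_mrqc() above collapses to a simple per-entry rule: entry n (0..127) selects queue (n * num_rx_queues) >> 7, so the 128 entries are split into equal consecutive blocks, one per queue; `shift` merely repositions the queue field for 82575/82576 parts. A minimal standalone sketch of the shift == 0 case (82580 and later), checking the register encoding against that closed form:

	#include <assert.h>
	#include <stdint.h>

	/* Expand the 32 RETA register values computed above into the 128
	 * per-entry queue indices they encode, and verify each one against
	 * the closed form (n * num_rx_queues) >> 7. Illustrative only. */
	static void reta_check(uint32_t num_rx_queues)
	{
		for (uint32_t j = 0; j < 32; j++) {
			/* first pass packs entries 4j and 4j+2 */
			uint32_t base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
			uint32_t reta = (base & 0x07800780) >> 7;

			/* second pass packs entries 4j+1 and 4j+3 */
			base += 0x00010001 * num_rx_queues;
			reta |= (base & 0x07800780) << 1;

			for (uint32_t b = 0; b < 4; b++) {
				uint32_t n = 4 * j + b;
				assert(((reta >> (8 * b)) & 0xF) ==
				       ((n * num_rx_queues) >> 7));
			}
		}
	}

For example, with num_rx_queues == 4, entries 0..31 map to queue 0, entries 32..63 to queue 1, and so on.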
-
-/**
- * igb_setup_rctl - configure the receive control registers
- * @adapter: Board private structure
- **/
-void igb_setup_rctl(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 rctl;
-
- rctl = E1000_READ_REG(hw, E1000_RCTL);
-
- rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
- rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
-
- rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
- (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
-
- /*
- * enable stripping of CRC. It's unlikely this will break BMC
- * redirection as it did with e1000. Newer features require
- * that the HW strips the CRC.
- */
- rctl |= E1000_RCTL_SECRC;
-
- /* disable store bad packets and clear size bits. */
- rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
-
- /* enable LPE to prevent packets larger than max_frame_size */
- rctl |= E1000_RCTL_LPE;
-
- /* disable queue 0 to prevent tail write w/o re-config */
- E1000_WRITE_REG(hw, E1000_RXDCTL(0), 0);
-
- /* Attention!!! For SR-IOV PF driver operations you must enable
- * queue drop for all VF and PF queues to prevent head of line blocking
- * if an un-trusted VF does not provide descriptors to hardware.
- */
- if (adapter->vfs_allocated_count) {
- /* set all queue drop enable bits */
- E1000_WRITE_REG(hw, E1000_QDE, ALL_QUEUES);
- }
-
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-}
-
-static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
- int vfn)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 vmolr;
-
-	/* if it isn't the PF, check to see if VFs are enabled and
-	 * increase the size to support VLAN tags */
- if (vfn < adapter->vfs_allocated_count &&
- adapter->vf_data[vfn].vlans_enabled)
- size += VLAN_HLEN;
-
-#ifdef CONFIG_IGB_VMDQ_NETDEV
- if (vfn >= adapter->vfs_allocated_count) {
- int queue = vfn - adapter->vfs_allocated_count;
- struct igb_vmdq_adapter *vadapter;
-
- vadapter = netdev_priv(adapter->vmdq_netdev[queue-1]);
- if (vadapter->vlgrp)
- size += VLAN_HLEN;
- }
-#endif
- vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn));
- vmolr &= ~E1000_VMOLR_RLPML_MASK;
- vmolr |= size | E1000_VMOLR_LPE;
- E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
-
- return 0;
-}
-
-/**
- * igb_rlpml_set - set maximum receive packet size
- * @adapter: board private structure
- *
- * Configure maximum receivable packet size.
- **/
-static void igb_rlpml_set(struct igb_adapter *adapter)
-{
- u32 max_frame_size = adapter->max_frame_size;
- struct e1000_hw *hw = &adapter->hw;
- u16 pf_id = adapter->vfs_allocated_count;
-
- if (adapter->vmdq_pools && hw->mac.type != e1000_82575) {
- int i;
- for (i = 0; i < adapter->vmdq_pools; i++)
- igb_set_vf_rlpml(adapter, max_frame_size, pf_id + i);
- /*
- * If we're in VMDQ or SR-IOV mode, then set global RLPML
- * to our max jumbo frame size, in case we need to enable
- * jumbo frames on one of the rings later.
- * This will not pass over-length frames into the default
- * queue because it's gated by the VMOLR.RLPML.
- */
- max_frame_size = MAX_JUMBO_FRAME_SIZE;
- }
- /* Set VF RLPML for the PF device. */
- if (adapter->vfs_allocated_count)
- igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
-
- E1000_WRITE_REG(hw, E1000_RLPML, max_frame_size);
-}
-
-static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
- int vfn, bool enable)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 val;
- void __iomem *reg;
-
- if (hw->mac.type < e1000_82576)
- return;
-
- if (hw->mac.type == e1000_i350)
- reg = hw->hw_addr + E1000_DVMOLR(vfn);
- else
- reg = hw->hw_addr + E1000_VMOLR(vfn);
-
- val = readl(reg);
- if (enable)
- val |= E1000_VMOLR_STRVLAN;
- else
- val &= ~(E1000_VMOLR_STRVLAN);
- writel(val, reg);
-}
-static inline void igb_set_vmolr(struct igb_adapter *adapter,
- int vfn, bool aupe)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 vmolr;
-
-	/*
-	 * This register exists only on 82576 and newer, so on older
-	 * hardware exit and do nothing
-	 */
- if (hw->mac.type < e1000_82576)
- return;
-
- vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn));
-
- if (aupe)
- vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
- else
- vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
-
- /* clear all bits that might not be set */
- vmolr &= ~E1000_VMOLR_RSSE;
-
- if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
- vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
-
- vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
- vmolr |= E1000_VMOLR_LPE; /* Accept long packets */
-
- E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
-}
-
-/**
- * igb_configure_rx_ring - Configure a receive ring after Reset
- * @adapter: board private structure
- * @ring: receive ring to be configured
- *
- * Configure the Rx unit of the MAC after a reset.
- **/
-void igb_configure_rx_ring(struct igb_adapter *adapter,
- struct igb_ring *ring)
-{
- struct e1000_hw *hw = &adapter->hw;
- u64 rdba = ring->dma;
- int reg_idx = ring->reg_idx;
- u32 srrctl = 0, rxdctl = 0;
-
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
-	/*
-	 * RLPML prevents us from receiving a frame larger than max_frame so
-	 * it is safe to just set the rx_buffer_len to max_frame without the
-	 * risk of an skb_over_panic.
-	 */
- ring->rx_buffer_len = max_t(u32, adapter->max_frame_size,
- MAXIMUM_ETHERNET_VLAN_SIZE);
-
-#endif
- /* disable the queue */
- E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), 0);
-
- /* Set DMA base address registers */
- E1000_WRITE_REG(hw, E1000_RDBAL(reg_idx),
- rdba & 0x00000000ffffffffULL);
- E1000_WRITE_REG(hw, E1000_RDBAH(reg_idx), rdba >> 32);
- E1000_WRITE_REG(hw, E1000_RDLEN(reg_idx),
- ring->count * sizeof(union e1000_adv_rx_desc));
-
- /* initialize head and tail */
- ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
- E1000_WRITE_REG(hw, E1000_RDH(reg_idx), 0);
- writel(0, ring->tail);
-
-	/* reset next-to-use/clean to place SW in sync with hardware */
- ring->next_to_clean = 0;
- ring->next_to_use = 0;
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- ring->next_to_alloc = 0;
-
-#endif
- /* set descriptor configuration */
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
- srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
- E1000_SRRCTL_BSIZEPKT_SHIFT;
-#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
- srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
-#ifdef HAVE_PTP_1588_CLOCK
- if (hw->mac.type >= e1000_82580)
- srrctl |= E1000_SRRCTL_TIMESTAMP;
-#endif /* HAVE_PTP_1588_CLOCK */
- /*
- * We should set the drop enable bit if:
- * SR-IOV is enabled
- * or
- * Flow Control is disabled and number of RX queues > 1
- *
- * This allows us to avoid head of line blocking for security
- * and performance reasons.
- */
- if (adapter->vfs_allocated_count ||
- (adapter->num_rx_queues > 1 &&
- (hw->fc.requested_mode == e1000_fc_none ||
- hw->fc.requested_mode == e1000_fc_rx_pause)))
- srrctl |= E1000_SRRCTL_DROP_EN;
-
- E1000_WRITE_REG(hw, E1000_SRRCTL(reg_idx), srrctl);
-
- /* set filtering for VMDQ pools */
- igb_set_vmolr(adapter, reg_idx & 0x7, true);
-
- rxdctl |= IGB_RX_PTHRESH;
- rxdctl |= IGB_RX_HTHRESH << 8;
- rxdctl |= IGB_RX_WTHRESH << 16;
-
- /* enable receive descriptor fetching */
- rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
- E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), rxdctl);
-}
-
-/**
- * igb_configure_rx - Configure receive Unit after Reset
- * @adapter: board private structure
- *
- * Configure the Rx unit of the MAC after a reset.
- **/
-static void igb_configure_rx(struct igb_adapter *adapter)
-{
- int i;
-
- /* set UTA to appropriate mode */
- igb_set_uta(adapter);
-
- igb_full_sync_mac_table(adapter);
- /* Setup the HW Rx Head and Tail Descriptor Pointers and
- * the Base and Length of the Rx Descriptor Ring */
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
-}
-
-/**
- * igb_free_tx_resources - Free Tx Resources per Queue
- * @tx_ring: Tx descriptor ring for a specific queue
- *
- * Free all transmit software resources
- **/
-void igb_free_tx_resources(struct igb_ring *tx_ring)
-{
- igb_clean_tx_ring(tx_ring);
-
- vfree(tx_ring->tx_buffer_info);
- tx_ring->tx_buffer_info = NULL;
-
- /* if not set, then don't free */
- if (!tx_ring->desc)
- return;
-
- dma_free_coherent(tx_ring->dev, tx_ring->size,
- tx_ring->desc, tx_ring->dma);
-
- tx_ring->desc = NULL;
-}
-
-/**
- * igb_free_all_tx_resources - Free Tx Resources for All Queues
- * @adapter: board private structure
- *
- * Free all transmit software resources
- **/
-static void igb_free_all_tx_resources(struct igb_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++)
- igb_free_tx_resources(adapter->tx_ring[i]);
-}
-
-void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
- struct igb_tx_buffer *tx_buffer)
-{
- if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
- if (dma_unmap_len(tx_buffer, len))
- dma_unmap_single(ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- } else if (dma_unmap_len(tx_buffer, len)) {
- dma_unmap_page(ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- }
- tx_buffer->next_to_watch = NULL;
- tx_buffer->skb = NULL;
- dma_unmap_len_set(tx_buffer, len, 0);
- /* buffer_info must be completely set up in the transmit path */
-}
-
-/**
- * igb_clean_tx_ring - Free Tx Buffers
- * @tx_ring: ring to be cleaned
- **/
-static void igb_clean_tx_ring(struct igb_ring *tx_ring)
-{
- struct igb_tx_buffer *buffer_info;
- unsigned long size;
- u16 i;
-
- if (!tx_ring->tx_buffer_info)
- return;
-
-	/* Free all the Tx ring sk_buffs */
- for (i = 0; i < tx_ring->count; i++) {
- buffer_info = &tx_ring->tx_buffer_info[i];
- igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
- }
-
- netdev_tx_reset_queue(txring_txq(tx_ring));
-
- size = sizeof(struct igb_tx_buffer) * tx_ring->count;
- memset(tx_ring->tx_buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
- memset(tx_ring->desc, 0, tx_ring->size);
-
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
-}
-
-/**
- * igb_clean_all_tx_rings - Free Tx Buffers for all queues
- * @adapter: board private structure
- **/
-static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++)
- igb_clean_tx_ring(adapter->tx_ring[i]);
-}
-
-/**
- * igb_free_rx_resources - Free Rx Resources
- * @rx_ring: ring to clean the resources from
- *
- * Free all receive software resources
- **/
-void igb_free_rx_resources(struct igb_ring *rx_ring)
-{
- igb_clean_rx_ring(rx_ring);
-
- vfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
-
- /* if not set, then don't free */
- if (!rx_ring->desc)
- return;
-
- dma_free_coherent(rx_ring->dev, rx_ring->size,
- rx_ring->desc, rx_ring->dma);
-
- rx_ring->desc = NULL;
-}
-
-/**
- * igb_free_all_rx_resources - Free Rx Resources for All Queues
- * @adapter: board private structure
- *
- * Free all receive software resources
- **/
-static void igb_free_all_rx_resources(struct igb_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_free_rx_resources(adapter->rx_ring[i]);
-}
-
-/**
- * igb_clean_rx_ring - Free Rx Buffers per Queue
- * @rx_ring: ring to free buffers from
- **/
-void igb_clean_rx_ring(struct igb_ring *rx_ring)
-{
- unsigned long size;
- u16 i;
-
- if (!rx_ring->rx_buffer_info)
- return;
-
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- if (rx_ring->skb)
- dev_kfree_skb(rx_ring->skb);
- rx_ring->skb = NULL;
-
-#endif
- /* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- if (buffer_info->dma) {
- dma_unmap_single(rx_ring->dev,
- buffer_info->dma,
- rx_ring->rx_buffer_len,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
- }
-
- if (buffer_info->skb) {
- dev_kfree_skb(buffer_info->skb);
- buffer_info->skb = NULL;
- }
-#else
- if (!buffer_info->page)
- continue;
-
- dma_unmap_page(rx_ring->dev,
- buffer_info->dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- __free_page(buffer_info->page);
-
- buffer_info->page = NULL;
-#endif
- }
-
- size = sizeof(struct igb_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_alloc = 0;
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
-}
-
-/**
- * igb_clean_all_rx_rings - Free Rx Buffers for all queues
- * @adapter: board private structure
- **/
-static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_clean_rx_ring(adapter->rx_ring[i]);
-}
-
-/**
- * igb_set_mac - Change the Ethernet Address of the NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- **/
-static int igb_set_mac(struct net_device *netdev, void *p)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- igb_del_mac_filter(adapter, hw->mac.addr,
- adapter->vfs_allocated_count);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
-
- /* set the correct pool for the new PF MAC address in entry 0 */
- return igb_add_mac_filter(adapter, hw->mac.addr,
- adapter->vfs_allocated_count);
-}
-
-/**
- * igb_write_mc_addr_list - write multicast addresses to MTA
- * @netdev: network interface device structure
- *
- * Writes multicast address list to the MTA hash table.
- * Returns: -ENOMEM on failure
- * 0 on no addresses written
- * X on writing X addresses to MTA
- **/
-int igb_write_mc_addr_list(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-#ifdef NETDEV_HW_ADDR_T_MULTICAST
- struct netdev_hw_addr *ha;
-#else
- struct dev_mc_list *ha;
-#endif
- u8 *mta_list;
- int i, count;
-#ifdef CONFIG_IGB_VMDQ_NETDEV
- int vm;
-#endif
- count = netdev_mc_count(netdev);
-#ifdef CONFIG_IGB_VMDQ_NETDEV
- for (vm = 1; vm < adapter->vmdq_pools; vm++) {
- if (!adapter->vmdq_netdev[vm])
- break;
- if (!netif_running(adapter->vmdq_netdev[vm]))
- continue;
- count += netdev_mc_count(adapter->vmdq_netdev[vm]);
- }
-#endif
-
- if (!count) {
- e1000_update_mc_addr_list(hw, NULL, 0);
- return 0;
- }
-	mta_list = kzalloc(count * ETH_ALEN, GFP_ATOMIC);
- if (!mta_list)
- return -ENOMEM;
-
- /* The shared function expects a packed array of only addresses. */
- i = 0;
- netdev_for_each_mc_addr(ha, netdev)
-#ifdef NETDEV_HW_ADDR_T_MULTICAST
- memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
-#else
- memcpy(mta_list + (i++ * ETH_ALEN), ha->dmi_addr, ETH_ALEN);
-#endif
-#ifdef CONFIG_IGB_VMDQ_NETDEV
- for (vm = 1; vm < adapter->vmdq_pools; vm++) {
- if (!adapter->vmdq_netdev[vm])
- break;
- if (!netif_running(adapter->vmdq_netdev[vm]) ||
- !netdev_mc_count(adapter->vmdq_netdev[vm]))
- continue;
- netdev_for_each_mc_addr(ha, adapter->vmdq_netdev[vm])
-#ifdef NETDEV_HW_ADDR_T_MULTICAST
- memcpy(mta_list + (i++ * ETH_ALEN),
- ha->addr, ETH_ALEN);
-#else
- memcpy(mta_list + (i++ * ETH_ALEN),
- ha->dmi_addr, ETH_ALEN);
-#endif
- }
-#endif
- e1000_update_mc_addr_list(hw, mta_list, i);
- kfree(mta_list);
-
- return count;
-}
-
-void igb_rar_set(struct igb_adapter *adapter, u32 index)
-{
- u32 rar_low, rar_high;
- struct e1000_hw *hw = &adapter->hw;
- u8 *addr = adapter->mac_table[index].addr;
- /* HW expects these in little endian so we reverse the byte order
- * from network order (big endian) to little endian
- */
- rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
- ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
- rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
-
- /* Indicate to hardware the Address is Valid. */
- if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE)
- rar_high |= E1000_RAH_AV;
-
- if (hw->mac.type == e1000_82575)
- rar_high |= E1000_RAH_POOL_1 * adapter->mac_table[index].queue;
- else
- rar_high |= E1000_RAH_POOL_1 << adapter->mac_table[index].queue;
-
- E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
- E1000_WRITE_FLUSH(hw);
- E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
- E1000_WRITE_FLUSH(hw);
-}
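As a concrete illustration of the little-endian packing in igb_rar_set() above (hypothetical address, sketch only): the address 00:1b:21:3c:4d:5e packs to RAL = 0x3c211b00 and, before the valid/pool bits are OR-ed in, RAH = 0x00005e4d.

	#include <stdint.h>

	/* Pack a MAC address into RAL/RAH the way igb_rar_set() does,
	 * leaving the E1000_RAH_AV and pool bits to the caller. */
	static void rar_pack(const uint8_t addr[6], uint32_t *ral, uint32_t *rah)
	{
		*ral = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
		       ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
		*rah = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
	}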
-
-void igb_full_sync_mac_table(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- int i;
- for (i = 0; i < hw->mac.rar_entry_count; i++) {
- igb_rar_set(adapter, i);
- }
-}
-
-void igb_sync_mac_table(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- int i;
- for (i = 0; i < hw->mac.rar_entry_count; i++) {
- if (adapter->mac_table[i].state & IGB_MAC_STATE_MODIFIED)
- igb_rar_set(adapter, i);
- adapter->mac_table[i].state &= ~(IGB_MAC_STATE_MODIFIED);
- }
-}
-
-int igb_available_rars(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- int i, count = 0;
-
- for (i = 0; i < hw->mac.rar_entry_count; i++) {
- if (adapter->mac_table[i].state == 0)
- count++;
- }
- return count;
-}
-
-#ifdef HAVE_SET_RX_MODE
-/**
- * igb_write_uc_addr_list - write unicast addresses to RAR table
- * @netdev: network interface device structure
- *
- * Writes unicast address list to the RAR table.
- * Returns: -ENOMEM on failure/insufficient address space
- * 0 on no addresses written
- * X on writing X addresses to the RAR table
- **/
-static int igb_write_uc_addr_list(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- unsigned int vfn = adapter->vfs_allocated_count;
- int count = 0;
-
- /* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > igb_available_rars(adapter))
- return -ENOMEM;
- if (!netdev_uc_empty(netdev)) {
-#ifdef NETDEV_HW_ADDR_T_UNICAST
- struct netdev_hw_addr *ha;
-#else
- struct dev_mc_list *ha;
-#endif
- netdev_for_each_uc_addr(ha, netdev) {
-#ifdef NETDEV_HW_ADDR_T_UNICAST
- igb_del_mac_filter(adapter, ha->addr, vfn);
- igb_add_mac_filter(adapter, ha->addr, vfn);
-#else
- igb_del_mac_filter(adapter, ha->da_addr, vfn);
- igb_add_mac_filter(adapter, ha->da_addr, vfn);
-#endif
- count++;
- }
- }
- return count;
-}
-
-#endif /* HAVE_SET_RX_MODE */
-/**
- * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
- * @netdev: network interface device structure
- *
- * The set_rx_mode entry point is called whenever the unicast or multicast
- * address lists or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper unicast, multicast,
- * promiscuous mode, and all-multi behavior.
- **/
-static void igb_set_rx_mode(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- unsigned int vfn = adapter->vfs_allocated_count;
- u32 rctl, vmolr = 0;
- int count;
-
- /* Check for Promiscuous and All Multicast modes */
- rctl = E1000_READ_REG(hw, E1000_RCTL);
-
-	/* clear the affected bits */
- rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
-
- if (netdev->flags & IFF_PROMISC) {
- rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
- vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
- /* retain VLAN HW filtering if in VT mode */
- if (adapter->vfs_allocated_count || adapter->vmdq_pools)
- rctl |= E1000_RCTL_VFE;
- } else {
- if (netdev->flags & IFF_ALLMULTI) {
- rctl |= E1000_RCTL_MPE;
- vmolr |= E1000_VMOLR_MPME;
- } else {
-			/*
-			 * Write addresses to the MTA; if the attempt fails,
-			 * turn on multicast promiscuous mode so that we can
-			 * at least receive multicast traffic
-			 */
- count = igb_write_mc_addr_list(netdev);
- if (count < 0) {
- rctl |= E1000_RCTL_MPE;
- vmolr |= E1000_VMOLR_MPME;
- } else if (count) {
- vmolr |= E1000_VMOLR_ROMPE;
- }
- }
-#ifdef HAVE_SET_RX_MODE
- /*
- * Write addresses to available RAR registers, if there is not
- * sufficient space to store all the addresses then enable
- * unicast promiscuous mode
- */
- count = igb_write_uc_addr_list(netdev);
- if (count < 0) {
- rctl |= E1000_RCTL_UPE;
- vmolr |= E1000_VMOLR_ROPE;
- }
-#endif /* HAVE_SET_RX_MODE */
- rctl |= E1000_RCTL_VFE;
- }
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-
-	/*
-	 * To support SR-IOV and eventually VMDq, the VMOLR must be set to
-	 * enable the appropriate modes. Without this workaround, VLAN tag
-	 * stripping is not done for frames that arrive only because we are
-	 * the default pool
-	 */
- if (hw->mac.type < e1000_82576)
- return;
-
- vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) &
- ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
- E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
- igb_restore_vf_multicasts(adapter);
-}
-
-static void igb_check_wvbr(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 wvbr = 0;
-
- switch (hw->mac.type) {
- case e1000_82576:
- case e1000_i350:
- if (!(wvbr = E1000_READ_REG(hw, E1000_WVBR)))
- return;
- break;
- default:
- break;
- }
-
- adapter->wvbr |= wvbr;
-}
-
-#define IGB_STAGGERED_QUEUE_OFFSET 8
-
-static void igb_spoof_check(struct igb_adapter *adapter)
-{
- int j;
-
- if (!adapter->wvbr)
- return;
-
- switch (adapter->hw.mac.type) {
- case e1000_82576:
- for (j = 0; j < adapter->vfs_allocated_count; j++) {
- if (adapter->wvbr & (1 << j) ||
- adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
- DPRINTK(DRV, WARNING,
- "Spoof event(s) detected on VF %d\n", j);
- adapter->wvbr &=
- ~((1 << j) |
- (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
- }
- }
- break;
- case e1000_i350:
- for (j = 0; j < adapter->vfs_allocated_count; j++) {
- if (adapter->wvbr & (1 << j)) {
- DPRINTK(DRV, WARNING,
- "Spoof event(s) detected on VF %d\n", j);
- adapter->wvbr &= ~(1 << j);
- }
- }
- break;
- default:
- break;
- }
-}
-
-/* Need to wait a few seconds after link up to get diagnostic information from
- * the phy */
-static void igb_update_phy_info(unsigned long data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *) data;
- e1000_get_phy_info(&adapter->hw);
-}
-
-/**
- * igb_has_link - check shared code for link and determine up/down
- * @adapter: pointer to driver private info
- **/
-bool igb_has_link(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- bool link_active = FALSE;
-
- /* get_link_status is set on LSC (link status) interrupt or
- * rx sequence error interrupt. get_link_status will stay
- * false until the e1000_check_for_link establishes link
-	 * false until e1000_check_for_link() establishes link
- */
- switch (hw->phy.media_type) {
- case e1000_media_type_copper:
- if (!hw->mac.get_link_status)
- return true;
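-		/* fall through */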
- case e1000_media_type_internal_serdes:
- e1000_check_for_link(hw);
- link_active = !hw->mac.get_link_status;
- break;
- case e1000_media_type_unknown:
- default:
- break;
- }
-
- if (((hw->mac.type == e1000_i210) ||
- (hw->mac.type == e1000_i211)) &&
- (hw->phy.id == I210_I_PHY_ID)) {
- if (!netif_carrier_ok(adapter->netdev)) {
- adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
- } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
- adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
- adapter->link_check_timeout = jiffies;
- }
- }
-
- return link_active;
-}
-
-/**
- * igb_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
- **/
-static void igb_watchdog(unsigned long data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- /* Do the rest outside of interrupt context */
- schedule_work(&adapter->watchdog_task);
-}
-
-static void igb_watchdog_task(struct work_struct *work)
-{
- struct igb_adapter *adapter = container_of(work,
- struct igb_adapter,
- watchdog_task);
- struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- u32 link;
- int i;
- u32 thstat, ctrl_ext;
- u32 connsw;
-
- link = igb_has_link(adapter);
- /* Force link down if we have fiber to swap to */
- if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
- if (hw->phy.media_type == e1000_media_type_copper) {
- connsw = E1000_READ_REG(hw, E1000_CONNSW);
- if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
- link = 0;
- }
- }
-
- if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
- if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
- adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
- else
- link = FALSE;
- }
-
- if (link) {
- /* Perform a reset if the media type changed. */
- if (hw->dev_spec._82575.media_changed) {
- hw->dev_spec._82575.media_changed = false;
- adapter->flags |= IGB_FLAG_MEDIA_RESET;
- igb_reset(adapter);
- }
-
- /* Cancel scheduled suspend requests. */
- pm_runtime_resume(netdev->dev.parent);
-
- if (!netif_carrier_ok(netdev)) {
- u32 ctrl;
- e1000_get_speed_and_duplex(hw,
- &adapter->link_speed,
- &adapter->link_duplex);
-
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
-			/* Link status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
- "Flow Control: %s\n",
- netdev->name,
- adapter->link_speed,
- adapter->link_duplex == FULL_DUPLEX ?
- "Full Duplex" : "Half Duplex",
- ((ctrl & E1000_CTRL_TFCE) &&
- (ctrl & E1000_CTRL_RFCE)) ? "RX/TX":
- ((ctrl & E1000_CTRL_RFCE) ? "RX" :
- ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
- /* adjust timeout factor according to speed/duplex */
- adapter->tx_timeout_factor = 1;
- switch (adapter->link_speed) {
- case SPEED_10:
- adapter->tx_timeout_factor = 14;
- break;
- case SPEED_100:
- /* maybe add some timeout factor ? */
- break;
- default:
- break;
- }
-
- netif_carrier_on(netdev);
- netif_tx_wake_all_queues(netdev);
-
- igb_ping_all_vfs(adapter);
-#ifdef IFLA_VF_MAX
- igb_check_vf_rate_limit(adapter);
-#endif /* IFLA_VF_MAX */
-
- /* link state has changed, schedule phy info update */
- if (!test_bit(__IGB_DOWN, &adapter->state))
- mod_timer(&adapter->phy_info_timer,
- round_jiffies(jiffies + 2 * HZ));
- }
- } else {
- if (netif_carrier_ok(netdev)) {
- adapter->link_speed = 0;
- adapter->link_duplex = 0;
- /* check for thermal sensor event on i350 */
- if (hw->mac.type == e1000_i350) {
- thstat = E1000_READ_REG(hw, E1000_THSTAT);
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- if ((hw->phy.media_type ==
- e1000_media_type_copper) &&
- !(ctrl_ext &
- E1000_CTRL_EXT_LINK_MODE_SGMII)) {
- if (thstat & E1000_THSTAT_PWR_DOWN) {
- printk(KERN_ERR "igb: %s The "
- "network adapter was stopped "
- "because it overheated.\n",
- netdev->name);
- }
- if (thstat & E1000_THSTAT_LINK_THROTTLE) {
- printk(KERN_INFO
- "igb: %s The network "
- "adapter supported "
- "link speed "
- "was downshifted "
- "because it "
- "overheated.\n",
- netdev->name);
- }
- }
- }
-
-			/* Link status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Down\n",
- netdev->name);
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
-
- igb_ping_all_vfs(adapter);
-
- /* link state has changed, schedule phy info update */
- if (!test_bit(__IGB_DOWN, &adapter->state))
- mod_timer(&adapter->phy_info_timer,
- round_jiffies(jiffies + 2 * HZ));
- /* link is down, time to check for alternate media */
- if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
- igb_check_swap_media(adapter);
- if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
- schedule_work(&adapter->reset_task);
- /* return immediately */
- return;
- }
- }
- pm_schedule_suspend(netdev->dev.parent,
- MSEC_PER_SEC * 5);
-
- /* also check for alternate media here */
- } else if (!netif_carrier_ok(netdev) &&
- (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
- hw->mac.ops.power_up_serdes(hw);
- igb_check_swap_media(adapter);
- if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
- schedule_work(&adapter->reset_task);
- /* return immediately */
- return;
- }
- }
- }
-
- igb_update_stats(adapter);
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct igb_ring *tx_ring = adapter->tx_ring[i];
- if (!netif_carrier_ok(netdev)) {
- /* We've lost link, so the controller stops DMA,
- * but we've got queued Tx work that's never going
- * to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context). */
- if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
- adapter->tx_timeout_count++;
- schedule_work(&adapter->reset_task);
- /* return immediately since reset is imminent */
- return;
- }
- }
-
- /* Force detection of hung controller every watchdog period */
- set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
- }
-
- /* Cause software interrupt to ensure rx ring is cleaned */
- if (adapter->msix_entries) {
- u32 eics = 0;
- for (i = 0; i < adapter->num_q_vectors; i++)
- eics |= adapter->q_vector[i]->eims_value;
- E1000_WRITE_REG(hw, E1000_EICS, eics);
- } else {
- E1000_WRITE_REG(hw, E1000_ICS, E1000_ICS_RXDMT0);
- }
-
- igb_spoof_check(adapter);
-
- /* Reset the timer */
- if (!test_bit(__IGB_DOWN, &adapter->state)) {
- if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + HZ));
- else
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 2 * HZ));
- }
-}
-
-static void igb_dma_err_task(struct work_struct *work)
-{
- struct igb_adapter *adapter = container_of(work,
- struct igb_adapter,
- dma_err_task);
- int vf;
- struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- u32 hgptc;
- u32 ciaa, ciad;
-
- hgptc = E1000_READ_REG(hw, E1000_HGPTC);
- if (hgptc) /* If incrementing then no need for the check below */
- goto dma_timer_reset;
- /*
- * Check to see if a bad DMA write target from an errant or
- * malicious VF has caused a PCIe error. If so then we can
- * issue a VFLR to the offending VF(s) and then resume without
- * requesting a full slot reset.
- */
-
- for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
- ciaa = (vf << 16) | 0x80000000;
-		/* 32-bit read, so align; we really want the status word at offset 6 */
- ciaa |= PCI_COMMAND;
- E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
- ciad = E1000_READ_REG(hw, E1000_CIAD);
- ciaa &= 0x7FFFFFFF;
- /* disable debug mode asap after reading data */
- E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
- /* Get the upper 16 bits which will be the PCI status reg */
- ciad >>= 16;
- if (ciad & (PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_REC_TARGET_ABORT |
- PCI_STATUS_SIG_SYSTEM_ERROR)) {
- netdev_err(netdev, "VF %d suffered error\n", vf);
- /* Issue VFLR */
- ciaa = (vf << 16) | 0x80000000;
- ciaa |= 0xA8;
- E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
- ciad = 0x00008000; /* VFLR */
- E1000_WRITE_REG(hw, E1000_CIAD, ciad);
- ciaa &= 0x7FFFFFFF;
- E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
- }
- }
-dma_timer_reset:
- /* Reset the timer */
- if (!test_bit(__IGB_DOWN, &adapter->state))
- mod_timer(&adapter->dma_err_timer,
- round_jiffies(jiffies + HZ / 10));
-}
-
-/**
- * igb_dma_err_timer - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
- **/
-static void igb_dma_err_timer(unsigned long data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- /* Do the rest outside of interrupt context */
- schedule_work(&adapter->dma_err_task);
-}
-
-enum latency_range {
- lowest_latency = 0,
- low_latency = 1,
- bulk_latency = 2,
- latency_invalid = 255
-};
-
-/**
- * igb_update_ring_itr - update the dynamic ITR value based on packet size
- * @q_vector: pointer to q_vector
- *
- * Stores a new ITR value based strictly on packet size. This
- * algorithm is less sophisticated than that used in igb_update_itr,
- * due to the difficulty of synchronizing statistics across multiple
- * receive rings. The divisors and thresholds used by this function
- * were determined based on theoretical maximum wire speed and testing
- * data, in order to minimize response time while increasing bulk
- * throughput.
- * This functionality is controlled by the InterruptThrottleRate module
- * parameter (see igb_param.c)
- * NOTE: This function is called only when operating in a multiqueue
- * receive environment.
- **/
-static void igb_update_ring_itr(struct igb_q_vector *q_vector)
-{
- int new_val = q_vector->itr_val;
- int avg_wire_size = 0;
- struct igb_adapter *adapter = q_vector->adapter;
- unsigned int packets;
-
- /* For non-gigabit speeds, just fix the interrupt rate at 4000
- * ints/sec - ITR timer value of 120 ticks.
- */
- switch (adapter->link_speed) {
- case SPEED_10:
- case SPEED_100:
- new_val = IGB_4K_ITR;
- goto set_itr_val;
- default:
- break;
- }
-
- packets = q_vector->rx.total_packets;
- if (packets)
- avg_wire_size = q_vector->rx.total_bytes / packets;
-
- packets = q_vector->tx.total_packets;
- if (packets)
- avg_wire_size = max_t(u32, avg_wire_size,
- q_vector->tx.total_bytes / packets);
-
- /* if avg_wire_size isn't set no work was done */
- if (!avg_wire_size)
- goto clear_counts;
-
- /* Add 24 bytes to size to account for CRC, preamble, and gap */
- avg_wire_size += 24;
-
- /* Don't starve jumbo frames */
- avg_wire_size = min(avg_wire_size, 3000);
-
- /* Give a little boost to mid-size frames */
- if ((avg_wire_size > 300) && (avg_wire_size < 1200))
- new_val = avg_wire_size / 3;
- else
- new_val = avg_wire_size / 2;
-
- /* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (new_val < IGB_20K_ITR &&
- ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
- (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
- new_val = IGB_20K_ITR;
-
-set_itr_val:
- if (new_val != q_vector->itr_val) {
- q_vector->itr_val = new_val;
- q_vector->set_itr = 1;
- }
-clear_counts:
- q_vector->rx.total_bytes = 0;
- q_vector->rx.total_packets = 0;
- q_vector->tx.total_bytes = 0;
- q_vector->tx.total_packets = 0;
-}
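Restated as a pure function (an illustrative sketch, not driver code), the heuristic above reduces to:

	/* Derive the next ITR interval from the average wire size seen since
	 * the last update. For example, 1000 rx packets totalling 150000
	 * bytes give avg = 150 + 24 = 174, so the result is 174 / 2 = 87;
	 * the caller separately raises anything below IGB_20K_ITR when the
	 * conservative InterruptThrottleRate setting (3) is in use. */
	static unsigned int ring_itr_next(unsigned int cur,
					  unsigned int rx_bytes, unsigned int rx_pkts,
					  unsigned int tx_bytes, unsigned int tx_pkts)
	{
		unsigned int avg = 0;

		if (rx_pkts)
			avg = rx_bytes / rx_pkts;
		if (tx_pkts && tx_bytes / tx_pkts > avg)
			avg = tx_bytes / tx_pkts;
		if (!avg)
			return cur;	/* no work was done, keep the current value */

		avg += 24;		/* account for CRC, preamble and gap */
		if (avg > 3000)
			avg = 3000;	/* don't starve jumbo frames */

		/* give a little boost to mid-size frames */
		return (avg > 300 && avg < 1200) ? avg / 3 : avg / 2;
	}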
-
-/**
- * igb_update_itr - update the dynamic ITR value based on statistics
- * @q_vector: pointer to q_vector
- * @ring_container: ring info to update the itr for
- *
- * Stores a new ITR value based on packets and byte
- * counts during the last interrupt. The advantage of per interrupt
- * computation is faster updates and more accurate ITR for the current
- * traffic pattern. Constants in this function were computed
- * based on theoretical maximum wire speed and thresholds were set based
- * on testing data as well as attempting to minimize response time
- * while increasing bulk throughput.
- * This functionality is controlled by the InterruptThrottleRate module
- * parameter (see igb_param.c)
- * NOTE: These calculations are only valid when operating in a single-
- * queue environment.
- **/
-static void igb_update_itr(struct igb_q_vector *q_vector,
- struct igb_ring_container *ring_container)
-{
- unsigned int packets = ring_container->total_packets;
- unsigned int bytes = ring_container->total_bytes;
- u8 itrval = ring_container->itr;
-
- /* no packets, exit with status unchanged */
- if (packets == 0)
- return;
-
- switch (itrval) {
- case lowest_latency:
- /* handle TSO and jumbo frames */
- if (bytes/packets > 8000)
- itrval = bulk_latency;
- else if ((packets < 5) && (bytes > 512))
- itrval = low_latency;
- break;
- case low_latency: /* 50 usec aka 20000 ints/s */
- if (bytes > 10000) {
- /* this if handles the TSO accounting */
- if (bytes/packets > 8000) {
- itrval = bulk_latency;
- } else if ((packets < 10) || ((bytes/packets) > 1200)) {
- itrval = bulk_latency;
- } else if ((packets > 35)) {
- itrval = lowest_latency;
- }
- } else if (bytes/packets > 2000) {
- itrval = bulk_latency;
- } else if (packets <= 2 && bytes < 512) {
- itrval = lowest_latency;
- }
- break;
- case bulk_latency: /* 250 usec aka 4000 ints/s */
- if (bytes > 25000) {
- if (packets > 35)
- itrval = low_latency;
- } else if (bytes < 1500) {
- itrval = low_latency;
- }
- break;
- }
-
- /* clear work counters since we have the values we need */
- ring_container->total_bytes = 0;
- ring_container->total_packets = 0;
-
- /* write updated itr to ring container */
- ring_container->itr = itrval;
-}
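Worked examples of the tiers above: at low_latency, 20 packets totalling 20000 bytes (1000 bytes/packet) stay at low_latency; 40 packets totalling 44000 bytes trip the packets > 35 test and drop to lowest_latency; 10 packets totalling 90000 bytes (9000 bytes/packet, typical of TSO) exceed the 8000 bytes/packet threshold and move to bulk_latency.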
-
-static void igb_set_itr(struct igb_q_vector *q_vector)
-{
- struct igb_adapter *adapter = q_vector->adapter;
- u32 new_itr = q_vector->itr_val;
- u8 current_itr = 0;
-
- /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
- switch (adapter->link_speed) {
- case SPEED_10:
- case SPEED_100:
- current_itr = 0;
- new_itr = IGB_4K_ITR;
- goto set_itr_now;
- default:
- break;
- }
-
- igb_update_itr(q_vector, &q_vector->tx);
- igb_update_itr(q_vector, &q_vector->rx);
-
- current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
-
- /* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (current_itr == lowest_latency &&
- ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
- (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
- current_itr = low_latency;
-
- switch (current_itr) {
- /* counts and packets in update_itr are dependent on these numbers */
- case lowest_latency:
- new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
- break;
- case low_latency:
- new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
- break;
- case bulk_latency:
- new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
- break;
- default:
- break;
- }
-
-set_itr_now:
- if (new_itr != q_vector->itr_val) {
- /* this attempts to bias the interrupt rate towards Bulk
- * by adding intermediate steps when interrupt rate is
- * increasing */
- new_itr = new_itr > q_vector->itr_val ?
- max((new_itr * q_vector->itr_val) /
- (new_itr + (q_vector->itr_val >> 2)),
- new_itr) :
- new_itr;
- /* Don't write the value here; it resets the adapter's
- * internal timer, and causes us to delay far longer than
- * we should between interrupts. Instead, we write the ITR
- * value at the beginning of the next interrupt so the timing
- * ends up being correct.
- */
- q_vector->itr_val = new_itr;
- q_vector->set_itr = 1;
- }
-}
-
-void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
- u32 type_tucmd, u32 mss_l4len_idx)
-{
- struct e1000_adv_tx_context_desc *context_desc;
- u16 i = tx_ring->next_to_use;
-
- context_desc = IGB_TX_CTXTDESC(tx_ring, i);
-
- i++;
- tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
-
- /* set bits to identify this as an advanced context descriptor */
- type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
-
- /* For 82575, context index must be unique per ring. */
- if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
- mss_l4len_idx |= tx_ring->reg_idx << 4;
-
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-}
-
-static int igb_tso(struct igb_ring *tx_ring,
- struct igb_tx_buffer *first,
- u8 *hdr_len)
-{
-#ifdef NETIF_F_TSO
- struct sk_buff *skb = first->skb;
- u32 vlan_macip_lens, type_tucmd;
- u32 mss_l4len_idx, l4len;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
-
- if (!skb_is_gso(skb))
-#endif /* NETIF_F_TSO */
- return 0;
-#ifdef NETIF_F_TSO
-
- if (skb_header_cloned(skb)) {
- int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
-
- /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
-
- if (first->protocol == __constant_htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
- type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
- first->tx_flags |= IGB_TX_FLAGS_TSO |
- IGB_TX_FLAGS_CSUM |
- IGB_TX_FLAGS_IPV4;
-#ifdef NETIF_F_TSO6
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- first->tx_flags |= IGB_TX_FLAGS_TSO |
- IGB_TX_FLAGS_CSUM;
-#endif
- }
-
- /* compute header lengths */
- l4len = tcp_hdrlen(skb);
- *hdr_len = skb_transport_offset(skb) + l4len;
-
- /* update gso size and bytecount with header size */
- first->gso_segs = skb_shinfo(skb)->gso_segs;
- first->bytecount += (first->gso_segs - 1) * *hdr_len;
-
- /* MSS L4LEN IDX */
- mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
- mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
-
- /* VLAN MACLEN IPLEN */
- vlan_macip_lens = skb_network_header_len(skb);
- vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
-
- igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
-
- return 1;
-#endif /* NETIF_F_TSO */
-}
-
-static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
-{
- struct sk_buff *skb = first->skb;
- u32 vlan_macip_lens = 0;
- u32 mss_l4len_idx = 0;
- u32 type_tucmd = 0;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL) {
- if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
- return;
- } else {
- u8 nexthdr = 0;
- switch (first->protocol) {
- case __constant_htons(ETH_P_IP):
- vlan_macip_lens |= skb_network_header_len(skb);
- type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
- nexthdr = ip_hdr(skb)->protocol;
- break;
-#ifdef NETIF_F_IPV6_CSUM
- case __constant_htons(ETH_P_IPV6):
- vlan_macip_lens |= skb_network_header_len(skb);
- nexthdr = ipv6_hdr(skb)->nexthdr;
- break;
-#endif
- default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum but proto=%x!\n",
- first->protocol);
- }
- break;
- }
-
- switch (nexthdr) {
- case IPPROTO_TCP:
- type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
- mss_l4len_idx = tcp_hdrlen(skb) <<
- E1000_ADVTXD_L4LEN_SHIFT;
- break;
-#ifdef HAVE_SCTP
- case IPPROTO_SCTP:
- type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
- mss_l4len_idx = sizeof(struct sctphdr) <<
- E1000_ADVTXD_L4LEN_SHIFT;
- break;
-#endif
- case IPPROTO_UDP:
- mss_l4len_idx = sizeof(struct udphdr) <<
- E1000_ADVTXD_L4LEN_SHIFT;
- break;
- default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum but l4 proto=%x!\n",
- nexthdr);
- }
- break;
- }
-
- /* update TX checksum flag */
- first->tx_flags |= IGB_TX_FLAGS_CSUM;
- }
-
- vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
-
- igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
-}
-
-#define IGB_SET_FLAG(_input, _flag, _result) \
- ((_flag <= _result) ? \
- ((u32)(_input & _flag) * (_result / _flag)) : \
- ((u32)(_input & _flag) / (_flag / _result)))
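-
-/*
- * IGB_SET_FLAG() rescales a flag bit into a descriptor bit without a
- * branch.  With hypothetical values _flag == 0x0001 and
- * _result == 0x40000000, _flag <= _result, so the macro evaluates
- * (_input & 0x0001) * (0x40000000 / 0x0001): an input with the flag set
- * produces 0x40000000 and anything else produces 0.  When the flag bit
- * sits above the result bit, the mirrored divide form applies instead.
- */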
-
-static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
-{
- /* set type for advanced descriptor with frame checksum insertion */
- u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
- E1000_ADVTXD_DCMD_DEXT |
- E1000_ADVTXD_DCMD_IFCS;
-
- /* set HW vlan bit if vlan is present */
- cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
- (E1000_ADVTXD_DCMD_VLE));
-
- /* set segmentation bits for TSO */
- cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
- (E1000_ADVTXD_DCMD_TSE));
-
- /* set timestamp bit if present */
- cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
- (E1000_ADVTXD_MAC_TSTAMP));
-
- return cmd_type;
-}
-
-static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
- union e1000_adv_tx_desc *tx_desc,
- u32 tx_flags, unsigned int paylen)
-{
- u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
-
- /* 82575 requires a unique index per ring */
- if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
- olinfo_status |= tx_ring->reg_idx << 4;
-
- /* insert L4 checksum */
- olinfo_status |= IGB_SET_FLAG(tx_flags,
- IGB_TX_FLAGS_CSUM,
- (E1000_TXD_POPTS_TXSM << 8));
-
- /* insert IPv4 checksum */
- olinfo_status |= IGB_SET_FLAG(tx_flags,
- IGB_TX_FLAGS_IPV4,
- (E1000_TXD_POPTS_IXSM << 8));
-
- tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-}
-
-static void igb_tx_map(struct igb_ring *tx_ring,
- struct igb_tx_buffer *first,
- const u8 hdr_len)
-{
- struct sk_buff *skb = first->skb;
- struct igb_tx_buffer *tx_buffer;
- union e1000_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag;
- dma_addr_t dma;
- unsigned int data_len, size;
- u32 tx_flags = first->tx_flags;
- u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
- u16 i = tx_ring->next_to_use;
-
- tx_desc = IGB_TX_DESC(tx_ring, i);
-
- igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
-
- size = skb_headlen(skb);
- data_len = skb->data_len;
-
- dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
-
- tx_buffer = first;
-
- for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
-
- /* record length, and DMA address */
- dma_unmap_len_set(tx_buffer, len, size);
- dma_unmap_addr_set(tx_buffer, dma, dma);
-
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
-
- while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
- tx_desc->read.cmd_type_len =
- cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
-
- i++;
- tx_desc++;
- if (i == tx_ring->count) {
- tx_desc = IGB_TX_DESC(tx_ring, 0);
- i = 0;
- }
- tx_desc->read.olinfo_status = 0;
-
- dma += IGB_MAX_DATA_PER_TXD;
- size -= IGB_MAX_DATA_PER_TXD;
-
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
- }
-
- if (likely(!data_len))
- break;
-
- tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
-
- i++;
- tx_desc++;
- if (i == tx_ring->count) {
- tx_desc = IGB_TX_DESC(tx_ring, 0);
- i = 0;
- }
- tx_desc->read.olinfo_status = 0;
-
- size = skb_frag_size(frag);
- data_len -= size;
-
- dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
- size, DMA_TO_DEVICE);
-
- tx_buffer = &tx_ring->tx_buffer_info[i];
- }
-
- /* write last descriptor with RS and EOP bits */
- cmd_type |= size | IGB_TXD_DCMD;
- tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
-
- netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
- /* set the timestamp */
- first->time_stamp = jiffies;
-
- /*
- * Force memory writes to complete before letting h/w know there
- * are new descriptors to fetch. (Only applicable for weak-ordered
- * memory model archs, such as IA-64).
- *
- * We also need this memory barrier to make certain all of the
- * status bits have been updated before next_to_watch is written.
- */
- wmb();
-
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
-
- i++;
- if (i == tx_ring->count)
- i = 0;
-
- tx_ring->next_to_use = i;
-
- writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time; it synchronizes IO on IA64/Altix systems */
- mmiowb();
-
- return;
-
-dma_error:
- dev_err(tx_ring->dev, "TX DMA map failed\n");
-
- /* clear dma mappings for failed tx_buffer_info map */
- for (;;) {
- tx_buffer = &tx_ring->tx_buffer_info[i];
- igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
- if (tx_buffer == first)
- break;
- if (i == 0)
- i = tx_ring->count;
- i--;
- }
-
- tx_ring->next_to_use = i;
-}
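-
-/*
- * Note on the "cmd_type ^ size" stores in igb_tx_map(): DTALEN lives in
- * the low 16 bits of cmd_type_len while cmd_type only carries DTYP/DCMD
- * bits above them, so the XOR behaves exactly like an OR here.  For a
- * hypothetical 40000-byte mapping, and assuming the usual
- * IGB_MAX_DATA_PER_TXD of 32768, the inner while loop emits one
- * 32768-byte descriptor and leaves size == 7232 for the descriptor that
- * closes out the buffer.
- */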
-
-static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
-{
- struct net_device *netdev = netdev_ring(tx_ring);
-
- if (netif_is_multiqueue(netdev))
- netif_stop_subqueue(netdev, ring_queue_index(tx_ring));
- else
- netif_stop_queue(netdev);
-
- /* Herbert's original patch had:
- * smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it. */
- smp_mb();
-
- /* We need to check again in case another CPU has just
- * made room available. */
- if (igb_desc_unused(tx_ring) < size)
- return -EBUSY;
-
- /* A reprieve! */
- if (netif_is_multiqueue(netdev))
- netif_wake_subqueue(netdev, ring_queue_index(tx_ring));
- else
- netif_wake_queue(netdev);
-
- tx_ring->tx_stats.restart_queue++;
-
- return 0;
-}
-
-static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
-{
- if (igb_desc_unused(tx_ring) >= size)
- return 0;
- return __igb_maybe_stop_tx(tx_ring, size);
-}
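-
-/*
- * The stop/wake handshake above pairs with igb_clean_tx_irq(): the
- * transmit path stops the queue first, issues smp_mb(), then re-reads
- * igb_desc_unused(), while the cleanup path advances next_to_clean,
- * issues its own smp_mb(), and only then tests whether the queue is
- * stopped.  Whichever side runs second is guaranteed to observe the
- * other's update, so the queue cannot be left stopped while room is
- * available.
- */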
-
-netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
- struct igb_ring *tx_ring)
-{
- struct igb_tx_buffer *first;
- int tso;
- u32 tx_flags = 0;
-#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD
- unsigned short f;
-#endif
- u16 count = TXD_USE_COUNT(skb_headlen(skb));
- __be16 protocol = vlan_get_protocol(skb);
- u8 hdr_len = 0;
-
- /*
- * need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
- * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
- * + 2 desc gap to keep tail from touching head,
- * + 1 desc for context descriptor,
- * otherwise try next time
- */
-#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
- count += skb_shinfo(skb)->nr_frags;
-#endif
- if (igb_maybe_stop_tx(tx_ring, count + 3)) {
- /* this is a hard error */
- return NETDEV_TX_BUSY;
- }
-
- /* record the location of the first descriptor for this packet */
- first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
- first->skb = skb;
- first->bytecount = skb->len;
- first->gso_segs = 1;
-
- skb_tx_timestamp(skb);
-
-#ifdef HAVE_PTP_1588_CLOCK
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
- if (!adapter->ptp_tx_skb) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- tx_flags |= IGB_TX_FLAGS_TSTAMP;
-
- adapter->ptp_tx_skb = skb_get(skb);
- adapter->ptp_tx_start = jiffies;
- if (adapter->hw.mac.type == e1000_82576)
- schedule_work(&adapter->ptp_tx_work);
- }
- }
-#endif /* HAVE_PTP_1588_CLOCK */
-
- if (vlan_tx_tag_present(skb)) {
- tx_flags |= IGB_TX_FLAGS_VLAN;
- tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
- }
-
- /* record initial flags and protocol */
- first->tx_flags = tx_flags;
- first->protocol = protocol;
-
- tso = igb_tso(tx_ring, first, &hdr_len);
- if (tso < 0)
- goto out_drop;
- else if (!tso)
- igb_tx_csum(tx_ring, first);
-
- igb_tx_map(tx_ring, first, hdr_len);
-
-#ifndef HAVE_TRANS_START_IN_QUEUE
- netdev_ring(tx_ring)->trans_start = jiffies;
-
-#endif
- /* Make sure there is space in the ring for the next send. */
- igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
-
- return NETDEV_TX_OK;
-
-out_drop:
- igb_unmap_and_free_tx_resource(tx_ring, first);
-
- return NETDEV_TX_OK;
-}
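-
-/*
- * Descriptor budgeting example for igb_xmit_frame_ring(), assuming a
- * 4 KiB PAGE_SIZE (so the per-fragment TXD_USE_COUNT split is compiled
- * out) and a typical sub-32 KiB linear area: a packet with 3 page
- * fragments needs count == 1 + 3 == 4 data descriptors, and
- * igb_maybe_stop_tx() is asked for count + 3, covering one context
- * descriptor plus the 2-descriptor gap that keeps tail from touching
- * head.
- */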
-
-#ifdef HAVE_TX_MQ
-static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
- struct sk_buff *skb)
-{
- unsigned int r_idx = skb->queue_mapping;
-
- if (r_idx >= adapter->num_tx_queues)
- r_idx = r_idx % adapter->num_tx_queues;
-
- return adapter->tx_ring[r_idx];
-}
-#else
-#define igb_tx_queue_mapping(_adapter, _skb) (_adapter)->tx_ring[0]
-#endif
-
-static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
-
- if (test_bit(__IGB_DOWN, &adapter->state)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (skb->len <= 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- /*
- * The minimum packet size with TCTL.PSP set is 17 so pad the skb
- * in order to meet this minimum size requirement.
- */
- if (skb->len < 17) {
- if (skb_padto(skb, 17))
- return NETDEV_TX_OK;
- skb->len = 17;
- }
-
- return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
-}
-
-/**
- * igb_tx_timeout - Respond to a Tx Hang
- * @netdev: network interface device structure
- **/
-static void igb_tx_timeout(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- /* Do the reset outside of interrupt context */
- adapter->tx_timeout_count++;
-
- if (hw->mac.type >= e1000_82580)
- hw->dev_spec._82575.global_device_reset = true;
-
- schedule_work(&adapter->reset_task);
- E1000_WRITE_REG(hw, E1000_EICS,
- (adapter->eims_enable_mask & ~adapter->eims_other));
-}
-
-static void igb_reset_task(struct work_struct *work)
-{
- struct igb_adapter *adapter;
- adapter = container_of(work, struct igb_adapter, reset_task);
-
- igb_reinit_locked(adapter);
-}
-
-/**
- * igb_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are updated here and also from the timer callback.
- **/
-static struct net_device_stats *igb_get_stats(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
-
- if (!test_bit(__IGB_RESETTING, &adapter->state))
- igb_update_stats(adapter);
-
-#ifdef HAVE_NETDEV_STATS_IN_NETDEV
- /* only return the current stats */
- return &netdev->stats;
-#else
- /* only return the current stats */
- return &adapter->net_stats;
-#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
-}
-
-/**
- * igb_change_mtu - Change the Maximum Transfer Unit
- * @netdev: network interface device structure
- * @new_mtu: new value for maximum frame size
- *
- * Returns 0 on success, negative on failure
- **/
-static int igb_change_mtu(struct net_device *netdev, int new_mtu)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct pci_dev *pdev = adapter->pdev;
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-
- if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
- dev_err(pci_dev_to_dev(pdev), "Invalid MTU setting\n");
- return -EINVAL;
- }
-
-#define MAX_STD_JUMBO_FRAME_SIZE 9238
- if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
- dev_err(pci_dev_to_dev(pdev), "MTU > 9216 not supported.\n");
- return -EINVAL;
- }
-
- /* adjust max frame to be at least the size of a standard frame */
- if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
- max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
-
- while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
-
- /* igb_down has a dependency on max_frame_size */
- adapter->max_frame_size = max_frame;
-
- if (netif_running(netdev))
- igb_down(adapter);
-
- dev_info(pci_dev_to_dev(pdev), "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
- hw->dev_spec._82575.mtu = new_mtu;
-
- if (netif_running(netdev))
- igb_up(adapter);
- else
- igb_reset(adapter);
-
- clear_bit(__IGB_RESETTING, &adapter->state);
-
- return 0;
-}
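-
-/*
- * max_frame arithmetic above: ETH_HLEN (14) + ETH_FCS_LEN (4) +
- * VLAN_HLEN (4) add 22 bytes to the requested MTU, so the default MTU
- * of 1500 becomes a 1522-byte max_frame and MAX_STD_JUMBO_FRAME_SIZE
- * (9238) corresponds exactly to the 9216-byte MTU quoted in the error
- * message.
- */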
-
-/**
- * igb_update_stats - Update the board statistics counters
- * @adapter: board private structure
- **/
-
-void igb_update_stats(struct igb_adapter *adapter)
-{
-#ifdef HAVE_NETDEV_STATS_IN_NETDEV
- struct net_device_stats *net_stats = &adapter->netdev->stats;
-#else
- struct net_device_stats *net_stats = &adapter->net_stats;
-#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
- struct e1000_hw *hw = &adapter->hw;
-#ifdef HAVE_PCI_ERS
- struct pci_dev *pdev = adapter->pdev;
-#endif
- u32 reg, mpc;
- u16 phy_tmp;
- int i;
- u64 bytes, packets;
-#ifndef IGB_NO_LRO
- u32 flushed = 0, coal = 0;
- struct igb_q_vector *q_vector;
-#endif
-
-#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
-
- /*
- * Prevent stats update while adapter is being reset, or if the pci
- * connection is down.
- */
- if (adapter->link_speed == 0)
- return;
-#ifdef HAVE_PCI_ERS
- if (pci_channel_offline(pdev))
- return;
-
-#endif
-#ifndef IGB_NO_LRO
- for (i = 0; i < adapter->num_q_vectors; i++) {
- q_vector = adapter->q_vector[i];
- if (!q_vector)
- continue;
- flushed += q_vector->lrolist.stats.flushed;
- coal += q_vector->lrolist.stats.coal;
- }
- adapter->lro_stats.flushed = flushed;
- adapter->lro_stats.coal = coal;
-
-#endif
- bytes = 0;
- packets = 0;
- for (i = 0; i < adapter->num_rx_queues; i++) {
- u32 rqdpc_tmp = E1000_READ_REG(hw, E1000_RQDPC(i)) & 0x0FFF;
- struct igb_ring *ring = adapter->rx_ring[i];
- ring->rx_stats.drops += rqdpc_tmp;
- net_stats->rx_fifo_errors += rqdpc_tmp;
-#ifdef CONFIG_IGB_VMDQ_NETDEV
- if (!ring->vmdq_netdev) {
- bytes += ring->rx_stats.bytes;
- packets += ring->rx_stats.packets;
- }
-#else
- bytes += ring->rx_stats.bytes;
- packets += ring->rx_stats.packets;
-#endif
- }
-
- net_stats->rx_bytes = bytes;
- net_stats->rx_packets = packets;
-
- bytes = 0;
- packets = 0;
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct igb_ring *ring = adapter->tx_ring[i];
-#ifdef CONFIG_IGB_VMDQ_NETDEV
- if (!ring->vmdq_netdev) {
- bytes += ring->tx_stats.bytes;
- packets += ring->tx_stats.packets;
- }
-#else
- bytes += ring->tx_stats.bytes;
- packets += ring->tx_stats.packets;
-#endif
- }
- net_stats->tx_bytes = bytes;
- net_stats->tx_packets = packets;
-
- /* read stats registers */
- adapter->stats.crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
- adapter->stats.gprc += E1000_READ_REG(hw, E1000_GPRC);
- adapter->stats.gorc += E1000_READ_REG(hw, E1000_GORCL);
- E1000_READ_REG(hw, E1000_GORCH); /* clear GORCL */
- adapter->stats.bprc += E1000_READ_REG(hw, E1000_BPRC);
- adapter->stats.mprc += E1000_READ_REG(hw, E1000_MPRC);
- adapter->stats.roc += E1000_READ_REG(hw, E1000_ROC);
-
- adapter->stats.prc64 += E1000_READ_REG(hw, E1000_PRC64);
- adapter->stats.prc127 += E1000_READ_REG(hw, E1000_PRC127);
- adapter->stats.prc255 += E1000_READ_REG(hw, E1000_PRC255);
- adapter->stats.prc511 += E1000_READ_REG(hw, E1000_PRC511);
- adapter->stats.prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
- adapter->stats.prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
- adapter->stats.symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
- adapter->stats.sec += E1000_READ_REG(hw, E1000_SEC);
-
- mpc = E1000_READ_REG(hw, E1000_MPC);
- adapter->stats.mpc += mpc;
- net_stats->rx_fifo_errors += mpc;
- adapter->stats.scc += E1000_READ_REG(hw, E1000_SCC);
- adapter->stats.ecol += E1000_READ_REG(hw, E1000_ECOL);
- adapter->stats.mcc += E1000_READ_REG(hw, E1000_MCC);
- adapter->stats.latecol += E1000_READ_REG(hw, E1000_LATECOL);
- adapter->stats.dc += E1000_READ_REG(hw, E1000_DC);
- adapter->stats.rlec += E1000_READ_REG(hw, E1000_RLEC);
- adapter->stats.xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
- adapter->stats.xontxc += E1000_READ_REG(hw, E1000_XONTXC);
- adapter->stats.xoffrxc += E1000_READ_REG(hw, E1000_XOFFRXC);
- adapter->stats.xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
- adapter->stats.fcruc += E1000_READ_REG(hw, E1000_FCRUC);
- adapter->stats.gptc += E1000_READ_REG(hw, E1000_GPTC);
- adapter->stats.gotc += E1000_READ_REG(hw, E1000_GOTCL);
- E1000_READ_REG(hw, E1000_GOTCH); /* clear GOTCL */
- adapter->stats.rnbc += E1000_READ_REG(hw, E1000_RNBC);
- adapter->stats.ruc += E1000_READ_REG(hw, E1000_RUC);
- adapter->stats.rfc += E1000_READ_REG(hw, E1000_RFC);
- adapter->stats.rjc += E1000_READ_REG(hw, E1000_RJC);
- adapter->stats.tor += E1000_READ_REG(hw, E1000_TORH);
- adapter->stats.tot += E1000_READ_REG(hw, E1000_TOTH);
- adapter->stats.tpr += E1000_READ_REG(hw, E1000_TPR);
-
- adapter->stats.ptc64 += E1000_READ_REG(hw, E1000_PTC64);
- adapter->stats.ptc127 += E1000_READ_REG(hw, E1000_PTC127);
- adapter->stats.ptc255 += E1000_READ_REG(hw, E1000_PTC255);
- adapter->stats.ptc511 += E1000_READ_REG(hw, E1000_PTC511);
- adapter->stats.ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
- adapter->stats.ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
-
- adapter->stats.mptc += E1000_READ_REG(hw, E1000_MPTC);
- adapter->stats.bptc += E1000_READ_REG(hw, E1000_BPTC);
-
- adapter->stats.tpt += E1000_READ_REG(hw, E1000_TPT);
- adapter->stats.colc += E1000_READ_REG(hw, E1000_COLC);
-
- adapter->stats.algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
- /* read internal PHY-specific stats */
- reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
- if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
- adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
-
- /* this stat has invalid values on i210/i211 */
- if ((hw->mac.type != e1000_i210) &&
- (hw->mac.type != e1000_i211))
- adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS);
- }
- adapter->stats.tsctc += E1000_READ_REG(hw, E1000_TSCTC);
- adapter->stats.tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
-
- adapter->stats.iac += E1000_READ_REG(hw, E1000_IAC);
- adapter->stats.icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
- adapter->stats.icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
- adapter->stats.icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
- adapter->stats.ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
- adapter->stats.ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
- adapter->stats.ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
- adapter->stats.ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
- adapter->stats.icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
-
- /* Fill out the OS statistics structure */
- net_stats->multicast = adapter->stats.mprc;
- net_stats->collisions = adapter->stats.colc;
-
- /* Rx Errors */
-
- /* RLEC on some newer hardware can be incorrect, so build
- * our own version based on RUC and ROC */
- net_stats->rx_errors = adapter->stats.rxerrc +
- adapter->stats.crcerrs + adapter->stats.algnerrc +
- adapter->stats.ruc + adapter->stats.roc +
- adapter->stats.cexterr;
- net_stats->rx_length_errors = adapter->stats.ruc +
- adapter->stats.roc;
- net_stats->rx_crc_errors = adapter->stats.crcerrs;
- net_stats->rx_frame_errors = adapter->stats.algnerrc;
- net_stats->rx_missed_errors = adapter->stats.mpc;
-
- /* Tx Errors */
- net_stats->tx_errors = adapter->stats.ecol +
- adapter->stats.latecol;
- net_stats->tx_aborted_errors = adapter->stats.ecol;
- net_stats->tx_window_errors = adapter->stats.latecol;
- net_stats->tx_carrier_errors = adapter->stats.tncrs;
-
- /* Tx Dropped needs to be maintained elsewhere */
-
- /* Phy Stats */
- if (hw->phy.media_type == e1000_media_type_copper) {
- if ((adapter->link_speed == SPEED_1000) &&
- (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
- phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
- adapter->phy_stats.idle_errors += phy_tmp;
- }
- }
-
- /* Management Stats */
- adapter->stats.mgptc += E1000_READ_REG(hw, E1000_MGTPTC);
- adapter->stats.mgprc += E1000_READ_REG(hw, E1000_MGTPRC);
- if (hw->mac.type > e1000_82580) {
- adapter->stats.o2bgptc += E1000_READ_REG(hw, E1000_O2BGPTC);
- adapter->stats.o2bspc += E1000_READ_REG(hw, E1000_O2BSPC);
- adapter->stats.b2ospc += E1000_READ_REG(hw, E1000_B2OSPC);
- adapter->stats.b2ogprc += E1000_READ_REG(hw, E1000_B2OGPRC);
- }
-}
-
-static irqreturn_t igb_msix_other(int irq, void *data)
-{
- struct igb_adapter *adapter = data;
- struct e1000_hw *hw = &adapter->hw;
- u32 icr = E1000_READ_REG(hw, E1000_ICR);
- /* reading ICR causes bit 31 of EICR to be cleared */
-
- if (icr & E1000_ICR_DRSTA)
- schedule_work(&adapter->reset_task);
-
- if (icr & E1000_ICR_DOUTSYNC) {
- /* HW is reporting DMA is out of sync */
- adapter->stats.doosync++;
-  /* The DMA Out of Sync is also an indication of a spoof event
- * in IOV mode. Check the Wrong VM Behavior register to
- * see if it is really a spoof event. */
- igb_check_wvbr(adapter);
- }
-
- /* Check for a mailbox event */
- if (icr & E1000_ICR_VMMB)
- igb_msg_task(adapter);
-
- if (icr & E1000_ICR_LSC) {
- hw->mac.get_link_status = 1;
- /* guard against interrupt when we're going down */
- if (!test_bit(__IGB_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
- }
-
-#ifdef HAVE_PTP_1588_CLOCK
- if (icr & E1000_ICR_TS) {
- u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
-
- if (tsicr & E1000_TSICR_TXTS) {
- /* acknowledge the interrupt */
- E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
- /* retrieve hardware timestamp */
- schedule_work(&adapter->ptp_tx_work);
- }
- }
-#endif /* HAVE_PTP_1588_CLOCK */
-
- /* Check for MDD event */
- if (icr & E1000_ICR_MDDET)
- igb_process_mdd_event(adapter);
-
- E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_other);
-
- return IRQ_HANDLED;
-}
-
-static void igb_write_itr(struct igb_q_vector *q_vector)
-{
- struct igb_adapter *adapter = q_vector->adapter;
- u32 itr_val = q_vector->itr_val & 0x7FFC;
-
- if (!q_vector->set_itr)
- return;
-
- if (!itr_val)
- itr_val = 0x4;
-
- if (adapter->hw.mac.type == e1000_82575)
- itr_val |= itr_val << 16;
- else
- itr_val |= E1000_EITR_CNT_IGNR;
-
- writel(itr_val, q_vector->itr_register);
- q_vector->set_itr = 0;
-}
-
-static irqreturn_t igb_msix_ring(int irq, void *data)
-{
- struct igb_q_vector *q_vector = data;
-
- /* Write the ITR value calculated from the previous interrupt. */
- igb_write_itr(q_vector);
-
- napi_schedule(&q_vector->napi);
-
- return IRQ_HANDLED;
-}
-
-#ifdef IGB_DCA
-static void igb_update_tx_dca(struct igb_adapter *adapter,
- struct igb_ring *tx_ring,
- int cpu)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
-
- if (hw->mac.type != e1000_82575)
- txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT_82576;
-
- /*
- * We can enable relaxed ordering for reads, but not writes, when
- * DCA is enabled. This is due to a known issue in some chipsets
- * which will cause the DCA tag to be cleared.
- */
- txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
- E1000_DCA_TXCTRL_DATA_RRO_EN |
- E1000_DCA_TXCTRL_DESC_DCA_EN;
-
- E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
-}
-
-static void igb_update_rx_dca(struct igb_adapter *adapter,
- struct igb_ring *rx_ring,
- int cpu)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
-
- if (hw->mac.type != e1000_82575)
- rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT_82576;
-
- /*
- * We can enable relaxed ordering for reads, but not writes, when
- * DCA is enabled. This is due to a known issue in some chipsets
- * which will cause the DCA tag to be cleared.
- */
- rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
- E1000_DCA_RXCTRL_DESC_DCA_EN;
-
- E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
-}
-
-static void igb_update_dca(struct igb_q_vector *q_vector)
-{
- struct igb_adapter *adapter = q_vector->adapter;
- int cpu = get_cpu();
-
- if (q_vector->cpu == cpu)
- goto out_no_update;
-
- if (q_vector->tx.ring)
- igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
-
- if (q_vector->rx.ring)
- igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
-
- q_vector->cpu = cpu;
-out_no_update:
- put_cpu();
-}
-
-static void igb_setup_dca(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- int i;
-
- if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
- return;
-
- /* Always use CB2 mode; the difference is masked in the CB driver. */
- E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
-
- for (i = 0; i < adapter->num_q_vectors; i++) {
- adapter->q_vector[i]->cpu = -1;
- igb_update_dca(adapter->q_vector[i]);
- }
-}
-
-static int __igb_notify_dca(struct device *dev, void *data)
-{
- struct net_device *netdev = dev_get_drvdata(dev);
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_hw *hw = &adapter->hw;
- unsigned long event = *(unsigned long *)data;
-
- switch (event) {
- case DCA_PROVIDER_ADD:
- /* if already enabled, don't do it again */
- if (adapter->flags & IGB_FLAG_DCA_ENABLED)
- break;
- if (dca_add_requester(dev) == E1000_SUCCESS) {
- adapter->flags |= IGB_FLAG_DCA_ENABLED;
- dev_info(pci_dev_to_dev(pdev), "DCA enabled\n");
- igb_setup_dca(adapter);
- break;
- }
- /* Fall Through since DCA is disabled. */
- case DCA_PROVIDER_REMOVE:
- if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
-   /* without this, a class_device is left
-   * hanging around in the sysfs model */
- dca_remove_requester(dev);
- dev_info(pci_dev_to_dev(pdev), "DCA disabled\n");
- adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
- E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_DISABLE);
- }
- break;
- }
-
- return E1000_SUCCESS;
-}
-
-static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
- void *p)
-{
- int ret_val;
-
- ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
- __igb_notify_dca);
-
- return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
-}
-#endif /* IGB_DCA */
-
-static int igb_vf_configure(struct igb_adapter *adapter, int vf)
-{
- unsigned char mac_addr[ETH_ALEN];
-
- random_ether_addr(mac_addr);
- igb_set_vf_mac(adapter, vf, mac_addr);
-
-#ifdef IFLA_VF_MAX
-#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
- /* By default spoof check is enabled for all VFs */
- adapter->vf_data[vf].spoofchk_enabled = true;
-#endif
-#endif
-
- return true;
-}
-
-static void igb_ping_all_vfs(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 ping;
- int i;
-
- for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
- ping = E1000_PF_CONTROL_MSG;
- if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
- ping |= E1000_VT_MSGTYPE_CTS;
- e1000_write_mbx(hw, &ping, 1, i);
- }
-}
-
-/**
- * igb_mta_set - Set multicast filter table address
- * @adapter: pointer to the adapter structure
- * @hash_value: determines the MTA register and bit to set
- *
- * The multicast table address is a register array of 32-bit registers.
- * The hash_value is used to determine what register the bit is in, the
- * current value is read, the new bit is OR'd in and the new value is
- * written back into the register.
- **/
-void igb_mta_set(struct igb_adapter *adapter, u32 hash_value)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 hash_bit, hash_reg, mta;
-
- /*
- * The MTA is a register array of 32-bit registers. It is
- * treated like an array of (32*mta_reg_count) bits. We want to
- * set bit BitArray[hash_value]. So we figure out what register
- * the bit is in, read it, OR in the new bit, then write
- * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
- * mask to bits 31:5 of the hash value which gives us the
- * register we're modifying. The hash bit within that register
- * is determined by the lower 5 bits of the hash value.
- */
- hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
- hash_bit = hash_value & 0x1F;
-
- mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
-
- mta |= (1 << hash_bit);
-
- E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
- E1000_WRITE_FLUSH(hw);
-}
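-
-/*
- * Worked example for igb_mta_set(), assuming the usual 128-entry MTA on
- * these parts: a hash_value of 0x2D5 (725) gives
- * hash_reg == (725 >> 5) & 127 == 22 and hash_bit == 725 & 0x1F == 21,
- * so the function read-modify-writes bit 21 of MTA register 22.
- */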
-
-static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
-{
-
- struct e1000_hw *hw = &adapter->hw;
- u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));
- struct vf_data_storage *vf_data = &adapter->vf_data[vf];
-
- vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
- IGB_VF_FLAG_MULTI_PROMISC);
- vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
-
-#ifdef IGB_ENABLE_VF_PROMISC
- if (*msgbuf & E1000_VF_SET_PROMISC_UNICAST) {
- vmolr |= E1000_VMOLR_ROPE;
- vf_data->flags |= IGB_VF_FLAG_UNI_PROMISC;
- *msgbuf &= ~E1000_VF_SET_PROMISC_UNICAST;
- }
-#endif
- if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
- vmolr |= E1000_VMOLR_MPME;
- vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
- *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
- } else {
- /*
- * if we have hashes and we are clearing a multicast promisc
- * flag we need to write the hashes to the MTA as this step
- * was previously skipped
- */
- if (vf_data->num_vf_mc_hashes > 30) {
- vmolr |= E1000_VMOLR_MPME;
- } else if (vf_data->num_vf_mc_hashes) {
- int j;
- vmolr |= E1000_VMOLR_ROMPE;
- for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
- igb_mta_set(adapter, vf_data->vf_mc_hashes[j]);
- }
- }
-
- E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr);
-
- /* there are flags left unprocessed, likely not supported */
- if (*msgbuf & E1000_VT_MSGINFO_MASK)
- return -EINVAL;
-
- return 0;
-
-}
-
-static int igb_set_vf_multicasts(struct igb_adapter *adapter,
- u32 *msgbuf, u32 vf)
-{
- int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
- u16 *hash_list = (u16 *)&msgbuf[1];
- struct vf_data_storage *vf_data = &adapter->vf_data[vf];
- int i;
-
- /* salt away the number of multicast addresses assigned
- * to this VF so they can be restored when the PF multicast
- * list changes
- */
- vf_data->num_vf_mc_hashes = n;
-
- /* only up to 30 hash values supported */
- if (n > 30)
- n = 30;
-
- /* store the hashes for later use */
- for (i = 0; i < n; i++)
- vf_data->vf_mc_hashes[i] = hash_list[i];
-
- /* Flush and reset the mta with the new values */
- igb_set_rx_mode(adapter->netdev);
-
- return 0;
-}
-
-static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct vf_data_storage *vf_data;
- int i, j;
-
- for (i = 0; i < adapter->vfs_allocated_count; i++) {
- u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
- vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
-
- vf_data = &adapter->vf_data[i];
-
- if ((vf_data->num_vf_mc_hashes > 30) ||
- (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
- vmolr |= E1000_VMOLR_MPME;
- } else if (vf_data->num_vf_mc_hashes) {
- vmolr |= E1000_VMOLR_ROMPE;
- for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
- igb_mta_set(adapter, vf_data->vf_mc_hashes[j]);
- }
- E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
- }
-}
-
-static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 pool_mask, reg, vid;
- u16 vlan_default;
- int i;
-
- pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
-
- /* Find the vlan filter for this id */
- for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = E1000_READ_REG(hw, E1000_VLVF(i));
-
- /* remove the vf from the pool */
- reg &= ~pool_mask;
-
- /* if pool is empty then remove entry from vfta */
- if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
- (reg & E1000_VLVF_VLANID_ENABLE)) {
-   /* read the VLAN ID before the register is cleared */
-   vid = reg & E1000_VLVF_VLANID_MASK;
-   reg = 0;
- igb_vfta_set(adapter, vid, FALSE);
- }
-
- E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
- }
-
- adapter->vf_data[vf].vlans_enabled = 0;
-
- vlan_default = adapter->vf_data[vf].default_vf_vlan_id;
- if (vlan_default)
- igb_vlvf_set(adapter, vlan_default, true, vf);
-}
-
-s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 reg, i;
-
- /* The vlvf table only exists on 82576 hardware and newer */
- if (hw->mac.type < e1000_82576)
- return -1;
-
- /* we only need to do this if VMDq is enabled */
- if (!adapter->vmdq_pools)
- return -1;
-
- /* Find the vlan filter for this id */
- for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = E1000_READ_REG(hw, E1000_VLVF(i));
- if ((reg & E1000_VLVF_VLANID_ENABLE) &&
- vid == (reg & E1000_VLVF_VLANID_MASK))
- break;
- }
-
- if (add) {
- if (i == E1000_VLVF_ARRAY_SIZE) {
- /* Did not find a matching VLAN ID entry that was
- * enabled. Search for a free filter entry, i.e.
- * one without the enable bit set
- */
- for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = E1000_READ_REG(hw, E1000_VLVF(i));
- if (!(reg & E1000_VLVF_VLANID_ENABLE))
- break;
- }
- }
- if (i < E1000_VLVF_ARRAY_SIZE) {
- /* Found an enabled/available entry */
- reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
-
- /* if !enabled we need to set this up in vfta */
- if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
- /* add VID to filter table */
- igb_vfta_set(adapter, vid, TRUE);
- reg |= E1000_VLVF_VLANID_ENABLE;
- }
- reg &= ~E1000_VLVF_VLANID_MASK;
- reg |= vid;
- E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
-
- /* do not modify RLPML for PF devices */
- if (vf >= adapter->vfs_allocated_count)
- return E1000_SUCCESS;
-
- if (!adapter->vf_data[vf].vlans_enabled) {
- u32 size;
- reg = E1000_READ_REG(hw, E1000_VMOLR(vf));
- size = reg & E1000_VMOLR_RLPML_MASK;
- size += 4;
- reg &= ~E1000_VMOLR_RLPML_MASK;
- reg |= size;
- E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg);
- }
-
- adapter->vf_data[vf].vlans_enabled++;
- }
- } else {
- if (i < E1000_VLVF_ARRAY_SIZE) {
- /* remove vf from the pool */
- reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
- /* if pool is empty then remove entry from vfta */
- if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
- reg = 0;
- igb_vfta_set(adapter, vid, FALSE);
- }
- E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
-
- /* do not modify RLPML for PF devices */
- if (vf >= adapter->vfs_allocated_count)
- return E1000_SUCCESS;
-
- adapter->vf_data[vf].vlans_enabled--;
- if (!adapter->vf_data[vf].vlans_enabled) {
- u32 size;
- reg = E1000_READ_REG(hw, E1000_VMOLR(vf));
- size = reg & E1000_VMOLR_RLPML_MASK;
- size -= 4;
- reg &= ~E1000_VMOLR_RLPML_MASK;
- reg |= size;
- E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg);
- }
- }
- }
- return E1000_SUCCESS;
-}
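-
-/*
- * The RLPML updates above grow or shrink the VF's receive length limit
- * in 4-byte (VLAN tag sized) steps: when the first VLAN is enabled for
- * a VF whose VMOLR.RLPML field currently reads, say, 1522, the limit
- * becomes 1526 so tagged frames still fit, and removing the last VLAN
- * takes the 4 bytes back.  The 1522 is illustrative; the field holds
- * whatever the PF programmed earlier.
- */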
-
-#ifdef IFLA_VF_MAX
-static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- if (vid)
- E1000_WRITE_REG(hw, E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
- else
- E1000_WRITE_REG(hw, E1000_VMVIR(vf), 0);
-}
-
-static int igb_ndo_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos)
-{
- int err = 0;
- struct igb_adapter *adapter = netdev_priv(netdev);
-
- /* accepted VLAN IDs are in the range 0-4094 */
- if ((vf >= adapter->vfs_allocated_count) ||
-     (vlan > VLAN_VID_MASK - 1) || (qos > 7))
- return -EINVAL;
- if (vlan || qos) {
- err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
- if (err)
- goto out;
- igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
- igb_set_vmolr(adapter, vf, !vlan);
- adapter->vf_data[vf].pf_vlan = vlan;
- adapter->vf_data[vf].pf_qos = qos;
- igb_set_vf_vlan_strip(adapter, vf, true);
- dev_info(&adapter->pdev->dev,
- "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
- if (test_bit(__IGB_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev,
- "The VF VLAN has been set,"
- " but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev,
- "Bring the PF device up before"
- " attempting to use the VF device.\n");
- }
- } else {
- if (adapter->vf_data[vf].pf_vlan)
- dev_info(&adapter->pdev->dev,
- "Clearing VLAN on VF %d\n", vf);
- igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
- false, vf);
- igb_set_vmvir(adapter, vlan, vf);
- igb_set_vmolr(adapter, vf, true);
- igb_set_vf_vlan_strip(adapter, vf, false);
- adapter->vf_data[vf].pf_vlan = 0;
- adapter->vf_data[vf].pf_qos = 0;
- }
-out:
- return err;
-}
-
-#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
-static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
- bool setting)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 dtxswc, reg_offset;
-
- if (!adapter->vfs_allocated_count)
- return -EOPNOTSUPP;
-
- if (vf >= adapter->vfs_allocated_count)
- return -EINVAL;
-
- reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
- dtxswc = E1000_READ_REG(hw, reg_offset);
- if (setting)
- dtxswc |= ((1 << vf) |
- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
- else
- dtxswc &= ~((1 << vf) |
- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
- E1000_WRITE_REG(hw, reg_offset, dtxswc);
-
- adapter->vf_data[vf].spoofchk_enabled = setting;
- return E1000_SUCCESS;
-}
-#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
-#endif /* IFLA_VF_MAX */
-
-static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid)
-{
- struct e1000_hw *hw = &adapter->hw;
- int i;
- u32 reg;
-
- /* Find the vlan filter for this id */
- for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = E1000_READ_REG(hw, E1000_VLVF(i));
- if ((reg & E1000_VLVF_VLANID_ENABLE) &&
- vid == (reg & E1000_VLVF_VLANID_MASK))
- break;
- }
-
- if (i >= E1000_VLVF_ARRAY_SIZE)
- i = -1;
-
- return i;
-}
-
-static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
-{
- struct e1000_hw *hw = &adapter->hw;
- int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
- int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
- int err = 0;
-
- if (vid)
- igb_set_vf_vlan_strip(adapter, vf, true);
- else
- igb_set_vf_vlan_strip(adapter, vf, false);
-
- /* If in promiscuous mode we need to make sure the PF also has
- * the VLAN filter set.
- */
- if (add && (adapter->netdev->flags & IFF_PROMISC))
- err = igb_vlvf_set(adapter, vid, add,
- adapter->vfs_allocated_count);
- if (err)
- goto out;
-
- err = igb_vlvf_set(adapter, vid, add, vf);
-
- if (err)
- goto out;
-
- /* Go through all the checks to see if the VLAN filter should
- * be wiped completely.
- */
- if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
- u32 vlvf, bits;
-
- int regndx = igb_find_vlvf_entry(adapter, vid);
- if (regndx < 0)
- goto out;
- /* See if any other pools are set for this VLAN filter
- * entry other than the PF.
- */
- vlvf = bits = E1000_READ_REG(hw, E1000_VLVF(regndx));
- bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT +
- adapter->vfs_allocated_count);
- /* If the filter was removed then ensure PF pool bit
- * is cleared if the PF only added itself to the pool
- * because the PF is in promiscuous mode.
- */
- if ((vlvf & VLAN_VID_MASK) == vid &&
-#ifndef HAVE_VLAN_RX_REGISTER
- !test_bit(vid, adapter->active_vlans) &&
-#endif
- !bits)
- igb_vlvf_set(adapter, vid, add,
- adapter->vfs_allocated_count);
- }
-
-out:
- return err;
-}
-
-static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- /* clear flags except flag that the PF has set the MAC */
- adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
- adapter->vf_data[vf].last_nack = jiffies;
-
- /* reset offloads to defaults */
- igb_set_vmolr(adapter, vf, true);
-
- /* reset vlans for device */
- igb_clear_vf_vfta(adapter, vf);
-#ifdef IFLA_VF_MAX
- if (adapter->vf_data[vf].pf_vlan)
- igb_ndo_set_vf_vlan(adapter->netdev, vf,
- adapter->vf_data[vf].pf_vlan,
- adapter->vf_data[vf].pf_qos);
- else
- igb_clear_vf_vfta(adapter, vf);
-#endif
-
- /* reset multicast table array for vf */
- adapter->vf_data[vf].num_vf_mc_hashes = 0;
-
- /* Flush and reset the mta with the new values */
- igb_set_rx_mode(adapter->netdev);
-
- /*
- * Reset the VFs TDWBAL and TDWBAH registers which are not
- * cleared by a VFLR
- */
- E1000_WRITE_REG(hw, E1000_TDWBAH(vf), 0);
- E1000_WRITE_REG(hw, E1000_TDWBAL(vf), 0);
- if (hw->mac.type == e1000_82576) {
- E1000_WRITE_REG(hw, E1000_TDWBAH(IGB_MAX_VF_FUNCTIONS + vf), 0);
- E1000_WRITE_REG(hw, E1000_TDWBAL(IGB_MAX_VF_FUNCTIONS + vf), 0);
- }
-}
-
-static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
-{
- unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
-
- /* generate a new mac address as we were hotplug removed/added */
- if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
- random_ether_addr(vf_mac);
-
- /* process remaining reset events */
- igb_vf_reset(adapter, vf);
-}
-
-static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
-{
- struct e1000_hw *hw = &adapter->hw;
- unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
- u32 reg, msgbuf[3];
- u8 *addr = (u8 *)(&msgbuf[1]);
-
- /* process all the same items cleared in a function level reset */
- igb_vf_reset(adapter, vf);
-
- /* set vf mac address */
- igb_del_mac_filter(adapter, vf_mac, vf);
- igb_add_mac_filter(adapter, vf_mac, vf);
-
- /* enable transmit and receive for vf */
- reg = E1000_READ_REG(hw, E1000_VFTE);
- E1000_WRITE_REG(hw, E1000_VFTE, reg | (1 << vf));
- reg = E1000_READ_REG(hw, E1000_VFRE);
- E1000_WRITE_REG(hw, E1000_VFRE, reg | (1 << vf));
-
- adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
-
- /* reply to reset with ack and vf mac address */
- msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
- memcpy(addr, vf_mac, 6);
- e1000_write_mbx(hw, msgbuf, 3, vf);
-}
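-
-/*
- * Mailbox reply layout used above: msgbuf[0] carries
- * E1000_VF_RESET | E1000_VT_MSGTYPE_ACK and the 6-byte station address
- * is packed into the words that follow, i.e. octets 0-3 land in
- * msgbuf[1] and octets 4-5 in the first two bytes of msgbuf[2].
- */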
-
-static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
-{
- /*
- * The VF MAC Address is stored in a packed array of bytes
- * starting at the second 32 bit word of the msg array
- */
- unsigned char *addr = (unsigned char *)&msg[1];
- int err = -1;
-
- if (is_valid_ether_addr(addr))
- err = igb_set_vf_mac(adapter, vf, addr);
-
- return err;
-}
-
-static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct vf_data_storage *vf_data = &adapter->vf_data[vf];
- u32 msg = E1000_VT_MSGTYPE_NACK;
-
- /* if the device isn't clear to send, it shouldn't be reading either */
- if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
- time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
- e1000_write_mbx(hw, &msg, 1, vf);
- vf_data->last_nack = jiffies;
- }
-}
-
-static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
-{
- struct pci_dev *pdev = adapter->pdev;
- u32 msgbuf[E1000_VFMAILBOX_SIZE];
- struct e1000_hw *hw = &adapter->hw;
- struct vf_data_storage *vf_data = &adapter->vf_data[vf];
- s32 retval;
-
- retval = e1000_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
-
- if (retval) {
- dev_err(pci_dev_to_dev(pdev), "Error receiving message from VF\n");
- return;
- }
-
- /* this is a message we already processed, do nothing */
- if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
- return;
-
- /*
- * until the vf completes a reset it should not be
- * allowed to start any configuration.
- */
-
- if (msgbuf[0] == E1000_VF_RESET) {
- igb_vf_reset_msg(adapter, vf);
- return;
- }
-
- if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
- msgbuf[0] = E1000_VT_MSGTYPE_NACK;
- if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
- e1000_write_mbx(hw, msgbuf, 1, vf);
- vf_data->last_nack = jiffies;
- }
- return;
- }
-
- switch (msgbuf[0] & 0xFFFF) {
- case E1000_VF_SET_MAC_ADDR:
- retval = -EINVAL;
-#ifndef IGB_DISABLE_VF_MAC_SET
- if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
- retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
- else
- DPRINTK(DRV, INFO,
- "VF %d attempted to override administratively "
- "set MAC address\nReload the VF driver to "
- "resume operations\n", vf);
-#endif
- break;
- case E1000_VF_SET_PROMISC:
- retval = igb_set_vf_promisc(adapter, msgbuf, vf);
- break;
- case E1000_VF_SET_MULTICAST:
- retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
- break;
- case E1000_VF_SET_LPE:
- retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
- break;
- case E1000_VF_SET_VLAN:
- retval = -1;
-#ifdef IFLA_VF_MAX
- if (vf_data->pf_vlan)
- DPRINTK(DRV, INFO,
- "VF %d attempted to override administratively "
- "set VLAN tag\nReload the VF driver to "
- "resume operations\n", vf);
- else
-#endif
- retval = igb_set_vf_vlan(adapter, msgbuf, vf);
- break;
- default:
- dev_err(pci_dev_to_dev(pdev), "Unhandled Msg %08x\n", msgbuf[0]);
- retval = -E1000_ERR_MBX;
- break;
- }
-
- /* notify the VF of the results of what it sent us */
- if (retval)
- msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
- else
- msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
-
- msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
-
- e1000_write_mbx(hw, msgbuf, 1, vf);
-}
-
-static void igb_msg_task(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 vf;
-
- for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
- /* process any reset requests */
- if (!e1000_check_for_rst(hw, vf))
- igb_vf_reset_event(adapter, vf);
-
- /* process any messages pending */
- if (!e1000_check_for_msg(hw, vf))
- igb_rcv_msg_from_vf(adapter, vf);
-
- /* process any acks */
- if (!e1000_check_for_ack(hw, vf))
- igb_rcv_ack_from_vf(adapter, vf);
- }
-}
-
-/**
- * igb_set_uta - Set unicast filter table address
- * @adapter: board private structure
- *
- * The unicast table address is a register array of 32-bit registers.
- * The table is meant to be used in a way similar to the MTA; however,
- * due to certain hardware limitations it is necessary to set all the
- * hash bits to 1 and use the VMOLR ROPE bit as a promiscuous enable
- * bit so that VLAN tag stripping still works in promiscuous mode.
- **/
-static void igb_set_uta(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- int i;
-
- /* The UTA table only exists on 82576 hardware and newer */
- if (hw->mac.type < e1000_82576)
- return;
-
- /* we only need to do this if VMDq is enabled */
- if (!adapter->vmdq_pools)
- return;
-
- for (i = 0; i < hw->mac.uta_reg_count; i++)
- E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, ~0);
-}
-
-/**
- * igb_intr_msi - Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-static irqreturn_t igb_intr_msi(int irq, void *data)
-{
- struct igb_adapter *adapter = data;
- struct igb_q_vector *q_vector = adapter->q_vector[0];
- struct e1000_hw *hw = &adapter->hw;
- /* reading ICR disables interrupts using IAM */
- u32 icr = E1000_READ_REG(hw, E1000_ICR);
-
- igb_write_itr(q_vector);
-
- if (icr & E1000_ICR_DRSTA)
- schedule_work(&adapter->reset_task);
-
- if (icr & E1000_ICR_DOUTSYNC) {
- /* HW is reporting DMA is out of sync */
- adapter->stats.doosync++;
- }
-
- if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
- hw->mac.get_link_status = 1;
- if (!test_bit(__IGB_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
- }
-
-#ifdef HAVE_PTP_1588_CLOCK
- if (icr & E1000_ICR_TS) {
- u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
-
- if (tsicr & E1000_TSICR_TXTS) {
- /* acknowledge the interrupt */
- E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
- /* retrieve hardware timestamp */
- schedule_work(&adapter->ptp_tx_work);
- }
- }
-#endif /* HAVE_PTP_1588_CLOCK */
-
- napi_schedule(&q_vector->napi);
-
- return IRQ_HANDLED;
-}
-
-/**
- * igb_intr - Legacy Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-static irqreturn_t igb_intr(int irq, void *data)
-{
- struct igb_adapter *adapter = data;
- struct igb_q_vector *q_vector = adapter->q_vector[0];
- struct e1000_hw *hw = &adapter->hw;
- /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
- * need for the IMC write */
- u32 icr = E1000_READ_REG(hw, E1000_ICR);
-
- /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
- * not set, then the adapter didn't send an interrupt */
- if (!(icr & E1000_ICR_INT_ASSERTED))
- return IRQ_NONE;
-
- igb_write_itr(q_vector);
-
- if (icr & E1000_ICR_DRSTA)
- schedule_work(&adapter->reset_task);
-
- if (icr & E1000_ICR_DOUTSYNC) {
- /* HW is reporting DMA is out of sync */
- adapter->stats.doosync++;
- }
-
- if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
- hw->mac.get_link_status = 1;
- /* guard against interrupt when we're going down */
- if (!test_bit(__IGB_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
- }
-
-#ifdef HAVE_PTP_1588_CLOCK
- if (icr & E1000_ICR_TS) {
- u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
-
- if (tsicr & E1000_TSICR_TXTS) {
- /* acknowledge the interrupt */
- E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
- /* retrieve hardware timestamp */
- schedule_work(&adapter->ptp_tx_work);
- }
- }
-#endif /* HAVE_PTP_1588_CLOCK */
-
- napi_schedule(&q_vector->napi);
-
- return IRQ_HANDLED;
-}
-
-void igb_ring_irq_enable(struct igb_q_vector *q_vector)
-{
- struct igb_adapter *adapter = q_vector->adapter;
- struct e1000_hw *hw = &adapter->hw;
-
- if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
- (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
- if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
- igb_set_itr(q_vector);
- else
- igb_update_ring_itr(q_vector);
- }
-
- if (!test_bit(__IGB_DOWN, &adapter->state)) {
- if (adapter->msix_entries)
- E1000_WRITE_REG(hw, E1000_EIMS, q_vector->eims_value);
- else
- igb_irq_enable(adapter);
- }
-}
-
-/**
- * igb_poll - NAPI Rx polling callback
- * @napi: napi polling structure
- * @budget: count of how many packets we should handle
- **/
-static int igb_poll(struct napi_struct *napi, int budget)
-{
- struct igb_q_vector *q_vector = container_of(napi, struct igb_q_vector, napi);
- bool clean_complete = true;
-
-#ifdef IGB_DCA
- if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
- igb_update_dca(q_vector);
-#endif
- if (q_vector->tx.ring)
- clean_complete = igb_clean_tx_irq(q_vector);
-
- if (q_vector->rx.ring)
- clean_complete &= igb_clean_rx_irq(q_vector, budget);
-
-#ifndef HAVE_NETDEV_NAPI_LIST
- /* if netdev is disabled we need to stop polling */
- if (!netif_running(q_vector->adapter->netdev))
- clean_complete = true;
-
-#endif
- /* If all work not completed, return budget and keep polling */
- if (!clean_complete)
- return budget;
-
- /* If not enough Rx work done, exit the polling mode */
- napi_complete(napi);
- igb_ring_irq_enable(q_vector);
-
- return 0;
-}
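-
-/*
- * The return values follow the NAPI contract: handing back the full
- * budget tells the core to keep polling, while returning less after
- * napi_complete() re-arms interrupts via igb_ring_irq_enable().  For a
- * hypothetical budget of 64, a burst of 64 or more Rx frames leaves
- * clean_complete false and igb_poll() returns 64; a 10-frame trickle
- * completes, returns 0, and hands the ring back to interrupt mode.
- */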
-
-/**
- * igb_clean_tx_irq - Reclaim resources after transmit completes
- * @q_vector: pointer to q_vector containing needed info
- * returns TRUE if ring is completely cleaned
- **/
-static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
-{
- struct igb_adapter *adapter = q_vector->adapter;
- struct igb_ring *tx_ring = q_vector->tx.ring;
- struct igb_tx_buffer *tx_buffer;
- union e1000_adv_tx_desc *tx_desc;
- unsigned int total_bytes = 0, total_packets = 0;
- unsigned int budget = q_vector->tx.work_limit;
- unsigned int i = tx_ring->next_to_clean;
-
- if (test_bit(__IGB_DOWN, &adapter->state))
- return true;
-
- tx_buffer = &tx_ring->tx_buffer_info[i];
- tx_desc = IGB_TX_DESC(tx_ring, i);
- i -= tx_ring->count;
-
- do {
- union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
-
- /* if next_to_watch is not set then there is no work pending */
- if (!eop_desc)
- break;
-
- /* prevent any other reads prior to eop_desc */
- read_barrier_depends();
-
- /* if DD is not set pending work has not been completed */
- if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
- break;
-
- /* clear next_to_watch to prevent false hangs */
- tx_buffer->next_to_watch = NULL;
-
- /* update the statistics for this packet */
- total_bytes += tx_buffer->bytecount;
- total_packets += tx_buffer->gso_segs;
-
- /* free the skb */
- dev_kfree_skb_any(tx_buffer->skb);
-
- /* unmap skb header data */
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
-
- /* clear tx_buffer data */
- tx_buffer->skb = NULL;
- dma_unmap_len_set(tx_buffer, len, 0);
-
- /* clear last DMA location and unmap remaining buffers */
- while (tx_desc != eop_desc) {
- tx_buffer++;
- tx_desc++;
- i++;
- if (unlikely(!i)) {
- i -= tx_ring->count;
- tx_buffer = tx_ring->tx_buffer_info;
- tx_desc = IGB_TX_DESC(tx_ring, 0);
- }
-
- /* unmap any remaining paged data */
- if (dma_unmap_len(tx_buffer, len)) {
- dma_unmap_page(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buffer, len, 0);
- }
- }
-
- /* move us one more past the eop_desc for start of next pkt */
- tx_buffer++;
- tx_desc++;
- i++;
- if (unlikely(!i)) {
- i -= tx_ring->count;
- tx_buffer = tx_ring->tx_buffer_info;
- tx_desc = IGB_TX_DESC(tx_ring, 0);
- }
-
- /* issue prefetch for next Tx descriptor */
- prefetch(tx_desc);
-
- /* update budget accounting */
- budget--;
- } while (likely(budget));
-
- netdev_tx_completed_queue(txring_txq(tx_ring),
- total_packets, total_bytes);
-
- i += tx_ring->count;
- tx_ring->next_to_clean = i;
- tx_ring->tx_stats.bytes += total_bytes;
- tx_ring->tx_stats.packets += total_packets;
- q_vector->tx.total_bytes += total_bytes;
- q_vector->tx.total_packets += total_packets;
-
-#ifdef DEBUG
- if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags) &&
- !(adapter->disable_hw_reset && adapter->tx_hang_detected)) {
-#else
- if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
-#endif
- struct e1000_hw *hw = &adapter->hw;
-
-  /* Detect a transmit hang in hardware; this serializes the
-   * check with the clearing of time_stamp and the movement of i */
- clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
- if (tx_buffer->next_to_watch &&
- time_after(jiffies, tx_buffer->time_stamp +
- (adapter->tx_timeout_factor * HZ))
- && !(E1000_READ_REG(hw, E1000_STATUS) &
- E1000_STATUS_TXOFF)) {
-
- /* detected Tx unit hang */
-#ifdef DEBUG
- adapter->tx_hang_detected = TRUE;
- if (adapter->disable_hw_reset) {
- DPRINTK(DRV, WARNING,
- "Deactivating netdev watchdog timer\n");
- if (del_timer(&netdev_ring(tx_ring)->watchdog_timer))
- dev_put(netdev_ring(tx_ring));
-#ifndef HAVE_NET_DEVICE_OPS
- netdev_ring(tx_ring)->tx_timeout = NULL;
-#endif
- }
-#endif /* DEBUG */
- dev_err(tx_ring->dev,
- "Detected Tx Unit Hang\n"
- " Tx Queue <%d>\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%p>\n"
- " jiffies <%lx>\n"
- " desc.status <%x>\n",
- tx_ring->queue_index,
- E1000_READ_REG(hw, E1000_TDH(tx_ring->reg_idx)),
- readl(tx_ring->tail),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_buffer->time_stamp,
- tx_buffer->next_to_watch,
- jiffies,
- tx_buffer->next_to_watch->wb.status);
- if (netif_is_multiqueue(netdev_ring(tx_ring)))
- netif_stop_subqueue(netdev_ring(tx_ring),
- ring_queue_index(tx_ring));
- else
- netif_stop_queue(netdev_ring(tx_ring));
-
- /* we are about to reset, no point in enabling stuff */
- return true;
- }
- }
-
-#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (unlikely(total_packets &&
- netif_carrier_ok(netdev_ring(tx_ring)) &&
- igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
- /* Make sure that anybody stopping the queue after this
- * sees the new next_to_clean.
- */
- smp_mb();
- if (netif_is_multiqueue(netdev_ring(tx_ring))) {
- if (__netif_subqueue_stopped(netdev_ring(tx_ring),
- ring_queue_index(tx_ring)) &&
- !(test_bit(__IGB_DOWN, &adapter->state))) {
- netif_wake_subqueue(netdev_ring(tx_ring),
- ring_queue_index(tx_ring));
- tx_ring->tx_stats.restart_queue++;
- }
- } else {
- if (netif_queue_stopped(netdev_ring(tx_ring)) &&
- !(test_bit(__IGB_DOWN, &adapter->state))) {
- netif_wake_queue(netdev_ring(tx_ring));
- tx_ring->tx_stats.restart_queue++;
- }
- }
- }
-
- return !!budget;
-}
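
A note on the ring walk above: igb_clean_tx_irq biases the index by subtracting tx_ring->count up front, so the wrap test collapses to if (!i) and the real index is only rebuilt at the end with i += tx_ring->count. A minimal standalone sketch of the same pattern (ring_count and the starting index are illustrative values, not the driver's structures):

/* Standalone sketch (plain C, not driver code) of the biased ring-index
 * walk used in igb_clean_tx_irq above. */
#include <stdio.h>

int main(void)
{
	const int ring_count = 8;		/* descriptors in the ring */
	int i = 5 - ring_count;			/* next_to_clean = 5, biased */
	int steps;

	for (steps = 0; steps < 6; steps++) {
		i++;
		if (!i)				/* hit the end of the ring */
			i -= ring_count;	/* wrap back to the start */
		printf("cleaning descriptor %d\n", i + ring_count);
	}
	printf("next_to_clean = %d\n", i + ring_count);	/* rebuild index */
	return 0;
}
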
-
-#ifdef HAVE_VLAN_RX_REGISTER
-/**
- * igb_receive_skb - helper function to handle rx indications
- * @q_vector: structure containing interrupt and ring information
- * @skb: packet to send up
- **/
-static void igb_receive_skb(struct igb_q_vector *q_vector,
- struct sk_buff *skb)
-{
- struct vlan_group **vlgrp = netdev_priv(skb->dev);
-
- if (IGB_CB(skb)->vid) {
- if (*vlgrp) {
- vlan_gro_receive(&q_vector->napi, *vlgrp,
- IGB_CB(skb)->vid, skb);
- } else {
- dev_kfree_skb_any(skb);
- }
- } else {
- napi_gro_receive(&q_vector->napi, skb);
- }
-}
-
-#endif /* HAVE_VLAN_RX_REGISTER */
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
-/**
- * igb_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void igb_reuse_rx_page(struct igb_ring *rx_ring,
- struct igb_rx_buffer *old_buff)
-{
- struct igb_rx_buffer *new_buff;
- u16 nta = rx_ring->next_to_alloc;
-
- new_buff = &rx_ring->rx_buffer_info[nta];
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* transfer page from old buffer to new buffer */
- memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
-
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
- old_buff->page_offset,
- IGB_RX_BUFSZ,
- DMA_FROM_DEVICE);
-}
-
-static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
- struct page *page,
- unsigned int truesize)
-{
- /* avoid re-using remote pages */
- if (unlikely(page_to_nid(page) != numa_node_id()))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are the only owner of the page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
-
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= IGB_RX_BUFSZ;
-
-#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
-
- if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
- return false;
-#endif
-
- /* bump ref count on page before it is given to the stack */
- get_page(page);
-
- return true;
-}
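
The XOR above is the half-page flip at the heart of the buffer-recycling scheme: when PAGE_SIZE < 8192, IGB_RX_BUFSZ is half a page, so toggling page_offset alternates receives between the two halves of a single page. A standalone sketch, assuming the common 2048-byte buffer size:

/* Sketch of the half-page flip; IGB_RX_BUFSZ == 2048 is an assumption
 * (the usual value when PAGE_SIZE < 8192). */
#include <stdio.h>

#define IGB_RX_BUFSZ 2048

int main(void)
{
	unsigned int page_offset = 0;
	int i;

	for (i = 0; i < 4; i++) {
		printf("rx buffer occupies bytes [%u, %u)\n",
		       page_offset, page_offset + IGB_RX_BUFSZ);
		page_offset ^= IGB_RX_BUFSZ;	/* flip to the other half */
	}
	return 0;
}
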
-
-/**
- * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
- * @skb: sk_buff to place the data into
- *
- * This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
- *
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
- **/
-static bool igb_add_rx_frag(struct igb_ring *rx_ring,
- struct igb_rx_buffer *rx_buffer,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct page *page = rx_buffer->page;
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = IGB_RX_BUFSZ;
-#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
-#endif
-
- if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
-
-#ifdef HAVE_PTP_1588_CLOCK
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
- va += IGB_TS_HDR_LEN;
- size -= IGB_TS_HDR_LEN;
- }
-#endif /* HAVE_PTP_1588_CLOCK */
-
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* we can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(page) == numa_node_id()))
- return true;
-
- /* this page cannot be reused so discard it */
- put_page(page);
- return false;
- }
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, truesize);
-
- return igb_can_reuse_rx_page(rx_buffer, page, truesize);
-}
-
-static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct igb_rx_buffer *rx_buffer;
- struct page *page;
-
- rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-
- page = rx_buffer->page;
- prefetchw(page);
-
- if (likely(!skb)) {
- void *page_addr = page_address(page) +
- rx_buffer->page_offset;
-
- /* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
-#endif
-
- /* allocate a skb to store the frags */
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- IGB_RX_HDR_LEN);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_failed++;
- return NULL;
- }
-
- /*
- * we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
- }
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- IGB_RX_BUFSZ,
- DMA_FROM_DEVICE);
-
- /* pull page into skb */
- if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
- /* hand second half of page back to the ring */
- igb_reuse_rx_page(rx_ring, rx_buffer);
- } else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
- }
-
- /* clear contents of rx_buffer */
- rx_buffer->page = NULL;
-
- return skb;
-}
-
-#endif
-static inline void igb_rx_checksum(struct igb_ring *ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- skb_checksum_none_assert(skb);
-
- /* Ignore Checksum bit is set */
- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
- return;
-
- /* Rx checksum disabled via ethtool */
- if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM))
- return;
-
- /* TCP/UDP checksum error bit is set */
- if (igb_test_staterr(rx_desc,
- E1000_RXDEXT_STATERR_TCPE |
- E1000_RXDEXT_STATERR_IPE)) {
- /*
- * work around an errata with SCTP packets where the TCPE (aka
- * L4E) bit is set incorrectly on 64 byte (60 byte w/o CRC)
- * packets; skip the counter bump and let the stack verify the
- * crc32c instead
- */
- if (!((skb->len == 60) &&
- test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags)))
- ring->rx_stats.csum_err++;
-
- /* let the stack verify checksum errors */
- return;
- }
- /* It must be a TCP or UDP packet with a valid checksum */
- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
- E1000_RXD_STAT_UDPCS))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-}
-
-#ifdef NETIF_F_RXHASH
-static inline void igb_rx_hash(struct igb_ring *ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- if (netdev_ring(ring)->features & NETIF_F_RXHASH)
- skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
- PKT_HASH_TYPE_L3);
-}
-
-#endif
-#ifndef IGB_NO_LRO
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
-/**
- * igb_merge_active_tail - merge active tail into lro skb
- * @tail: pointer to active tail in frag_list
- *
- * This function merges the length and data of an active tail into the
- * skb containing the frag_list. It resets the tail's pointer to the head,
- * but it leaves the head's pointer to the tail intact.
- **/
-static inline struct sk_buff *igb_merge_active_tail(struct sk_buff *tail)
-{
- struct sk_buff *head = IGB_CB(tail)->head;
-
- if (!head)
- return tail;
-
- head->len += tail->len;
- head->data_len += tail->len;
- head->truesize += tail->len;
-
- IGB_CB(tail)->head = NULL;
-
- return head;
-}
-
-/**
- * igb_add_active_tail - adds an active tail into the skb frag_list
- * @head: pointer to the start of the skb
- * @tail: pointer to active tail to add to frag_list
- *
- * This function adds an active tail to the end of the frag list. This tail
- * will still be receiving data so we cannot yet add its stats to the main
- * skb. That is done via igb_merge_active_tail.
- **/
-static inline void igb_add_active_tail(struct sk_buff *head, struct sk_buff *tail)
-{
- struct sk_buff *old_tail = IGB_CB(head)->tail;
-
- if (old_tail) {
- igb_merge_active_tail(old_tail);
- old_tail->next = tail;
- } else {
- skb_shinfo(head)->frag_list = tail;
- }
-
- IGB_CB(tail)->head = head;
- IGB_CB(head)->tail = tail;
-
- IGB_CB(head)->append_cnt++;
-}
-
-/**
- * igb_close_active_frag_list - cleanup pointers on a frag_list skb
- * @head: pointer to head of an active frag list
- *
- * This function clears the frag_tail_tracker pointer on an active
- * frag_list and returns true if the pointer was actually set
- **/
-static inline bool igb_close_active_frag_list(struct sk_buff *head)
-{
- struct sk_buff *tail = IGB_CB(head)->tail;
-
- if (!tail)
- return false;
-
- igb_merge_active_tail(tail);
-
- IGB_CB(head)->tail = NULL;
-
- return true;
-}
-
-#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
-/**
- * igb_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled
- * @adapter: board private structure
- * @rx_desc: pointer to the rx descriptor
- * @skb: pointer to the skb to be merged
- *
- **/
-static inline bool igb_can_lro(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct iphdr *iph = (struct iphdr *)skb->data;
- __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
-
- /* verify hardware indicates this is IPv4/TCP */
- if((!(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_TCP)) ||
- !(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4))))
- return false;
-
- /* .. and LRO is enabled */
- if (!(netdev_ring(rx_ring)->features & NETIF_F_LRO))
- return false;
-
- /* .. and we are not in promiscuous mode */
- if (netdev_ring(rx_ring)->flags & IFF_PROMISC)
- return false;
-
- /* .. and the header is large enough for us to read IP/TCP fields */
- if (!pskb_may_pull(skb, sizeof(struct igb_lrohdr)))
- return false;
-
- /* .. and there are no VLANs on the packet (protocol is plain IPv4) */
- if (skb->protocol != __constant_htons(ETH_P_IP))
- return false;
-
- /* .. and we are version 4 with no options */
- if (*(u8 *)iph != 0x45)
- return false;
-
- /* .. and the packet is not fragmented */
- if (iph->frag_off & htons(IP_MF | IP_OFFSET))
- return false;
-
- /* .. and that next header is TCP */
- if (iph->protocol != IPPROTO_TCP)
- return false;
-
- return true;
-}
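
The single-byte test in igb_can_lro above (*(u8 *)iph != 0x45) folds two checks into one compare: the high nibble of the first IPv4 header byte is the version and the low nibble is the header length in 32-bit words, so 0x45 means IPv4 with a 20-byte header and no options. A standalone sketch of the decoding:

/* Decode the IPv4 version/IHL byte; 0x45 == version 4, 5-word header. */
#include <stdio.h>

int main(void)
{
	unsigned char first_byte = 0x45;
	unsigned int version = first_byte >> 4;		/* 4 */
	unsigned int hdr_len = (first_byte & 0x0F) * 4;	/* 5 * 4 = 20 bytes */

	printf("version=%u header=%u bytes, lro eligible: %s\n",
	       version, hdr_len, first_byte == 0x45 ? "yes" : "no");
	return 0;
}
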
-
-static inline struct igb_lrohdr *igb_lro_hdr(struct sk_buff *skb)
-{
- return (struct igb_lrohdr *)skb->data;
-}
-
-/**
- * igb_lro_flush - Indicate packets to upper layer.
- *
- * Updates the IP and TCP headers of the head skb if more than one
- * skb is chained, then indicates the packet to the upper layer.
- **/
-static void igb_lro_flush(struct igb_q_vector *q_vector,
- struct sk_buff *skb)
-{
- struct igb_lro_list *lrolist = &q_vector->lrolist;
-
- __skb_unlink(skb, &lrolist->active);
-
- if (IGB_CB(skb)->append_cnt) {
- struct igb_lrohdr *lroh = igb_lro_hdr(skb);
-
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- /* close any active lro contexts */
- igb_close_active_frag_list(skb);
-
-#endif
- /* incorporate ip header and re-calculate checksum */
- lroh->iph.tot_len = ntohs(skb->len);
- lroh->iph.check = 0;
-
- /* header length is 5 since we know no options exist */
- lroh->iph.check = ip_fast_csum((u8 *)lroh, 5);
-
- /* clear TCP checksum to indicate we are an LRO frame */
- lroh->th.check = 0;
-
- /* incorporate latest timestamp into the tcp header */
- if (IGB_CB(skb)->tsecr) {
- lroh->ts[2] = IGB_CB(skb)->tsecr;
- lroh->ts[1] = htonl(IGB_CB(skb)->tsval);
- }
-#ifdef NETIF_F_GSO
-
- skb_shinfo(skb)->gso_size = IGB_CB(skb)->mss;
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
-#endif
- }
-
-#ifdef HAVE_VLAN_RX_REGISTER
- igb_receive_skb(q_vector, skb);
-#else
- napi_gro_receive(&q_vector->napi, skb);
-#endif
- lrolist->stats.flushed++;
-}
-
-static void igb_lro_flush_all(struct igb_q_vector *q_vector)
-{
- struct igb_lro_list *lrolist = &q_vector->lrolist;
- struct sk_buff *skb, *tmp;
-
- skb_queue_reverse_walk_safe(&lrolist->active, skb, tmp)
- igb_lro_flush(q_vector, skb);
-}
-
-/**
- * igb_lro_header_ok - validate headers and prime the skb for LRO
- *
- * Checks that the TCP/IP headers are LRO-eligible and initializes the
- * IGB_CB fields (mss, tsval/tsecr, next_seq) used while coalescing.
- **/
-static void igb_lro_header_ok(struct sk_buff *skb)
-{
- struct igb_lrohdr *lroh = igb_lro_hdr(skb);
- u16 opt_bytes, data_len;
-
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- IGB_CB(skb)->tail = NULL;
-#endif
- IGB_CB(skb)->tsecr = 0;
- IGB_CB(skb)->append_cnt = 0;
- IGB_CB(skb)->mss = 0;
-
- /* ensure that the checksum is valid */
- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
- return;
-
- /* If we see CE codepoint in IP header, packet is not mergeable */
- if (INET_ECN_is_ce(ipv4_get_dsfield(&lroh->iph)))
- return;
-
- /* ensure no bits set besides ack or psh */
- if (lroh->th.fin || lroh->th.syn || lroh->th.rst ||
- lroh->th.urg || lroh->th.ece || lroh->th.cwr ||
- !lroh->th.ack)
- return;
-
- /* store the total packet length */
- data_len = ntohs(lroh->iph.tot_len);
-
- /* remove any padding from the end of the skb */
- __pskb_trim(skb, data_len);
-
- /* remove header length from data length */
- data_len -= sizeof(struct igb_lrohdr);
-
- /*
- * check for timestamps. Since the only option we handle is timestamps,
- * we only have to handle the simple case of aligned timestamps
- */
- opt_bytes = (lroh->th.doff << 2) - sizeof(struct tcphdr);
- if (opt_bytes != 0) {
- if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) ||
- !pskb_may_pull(skb, sizeof(struct igb_lrohdr) +
- TCPOLEN_TSTAMP_ALIGNED) ||
- (lroh->ts[0] != htonl((TCPOPT_NOP << 24) |
- (TCPOPT_NOP << 16) |
- (TCPOPT_TIMESTAMP << 8) |
- TCPOLEN_TIMESTAMP)) ||
- (lroh->ts[2] == 0)) {
- return;
- }
-
- IGB_CB(skb)->tsval = ntohl(lroh->ts[1]);
- IGB_CB(skb)->tsecr = lroh->ts[2];
-
- data_len -= TCPOLEN_TSTAMP_ALIGNED;
- }
-
- /* record data_len as mss for the packet */
- IGB_CB(skb)->mss = data_len;
- IGB_CB(skb)->next_seq = ntohl(lroh->th.seq);
-}
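
The option check above only accepts the "aligned timestamp" layout: twelve option bytes consisting of two NOPs followed by the ten-byte timestamp option, whose first 32-bit word is the constant compared against lroh->ts[0]. A standalone sketch of how that constant is formed (the TCPOPT_*/TCPOLEN_* values are the standard TCP option codes):

/* Build the expected first word of an aligned TCP timestamp option:
 * NOP, NOP, kind=TIMESTAMP (8), length=10. */
#include <stdio.h>
#include <arpa/inet.h>

#define TCPOPT_NOP		1
#define TCPOPT_TIMESTAMP	8
#define TCPOLEN_TIMESTAMP	10

int main(void)
{
	unsigned int host = (TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			    (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP;

	/* host == 0x0101080a; the wire bytes read 01 01 08 0a, which is
	 * NOP, NOP, kind, length -- the layout igb_lro_header_ok expects */
	printf("host order 0x%08x, compared value 0x%08x\n",
	       host, htonl(host));
	return 0;
}
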
-
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
-static void igb_merge_frags(struct sk_buff *lro_skb, struct sk_buff *new_skb)
-{
- struct skb_shared_info *sh_info;
- struct skb_shared_info *new_skb_info;
- unsigned int data_len;
-
- sh_info = skb_shinfo(lro_skb);
- new_skb_info = skb_shinfo(new_skb);
-
- /* copy frags into the last skb */
- memcpy(sh_info->frags + sh_info->nr_frags,
- new_skb_info->frags,
- new_skb_info->nr_frags * sizeof(skb_frag_t));
-
- /* copy size data over */
- sh_info->nr_frags += new_skb_info->nr_frags;
- data_len = IGB_CB(new_skb)->mss;
- lro_skb->len += data_len;
- lro_skb->data_len += data_len;
- lro_skb->truesize += data_len;
-
- /* wipe record of data from new_skb */
- new_skb_info->nr_frags = 0;
- new_skb->len = new_skb->data_len = 0;
- dev_kfree_skb_any(new_skb);
-}
-
-#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
-/**
- * igb_lro_receive - if able, queue skb into lro chain
- * @q_vector: structure containing interrupt and ring information
- * @new_skb: pointer to current skb being checked
- *
- * Checks whether the given skb is eligible for LRO and, if so, chains
- * it to the existing lro_skb with a matching flow id. If no LRO context
- * exists for the flow, a new one is created.
- **/
-static void igb_lro_receive(struct igb_q_vector *q_vector,
- struct sk_buff *new_skb)
-{
- struct sk_buff *lro_skb;
- struct igb_lro_list *lrolist = &q_vector->lrolist;
- struct igb_lrohdr *lroh = igb_lro_hdr(new_skb);
- __be32 saddr = lroh->iph.saddr;
- __be32 daddr = lroh->iph.daddr;
- __be32 tcp_ports = *(__be32 *)&lroh->th;
- u16 data_len;
-#ifdef HAVE_VLAN_RX_REGISTER
- u16 vid = IGB_CB(new_skb)->vid;
-#else
- u16 vid = new_skb->vlan_tci;
-#endif
-
- igb_lro_header_ok(new_skb);
-
- /*
- * we have a packet that might be eligible for LRO,
- * so see if it matches anything we might expect
- */
- skb_queue_walk(&lrolist->active, lro_skb) {
- if (*(__be32 *)&igb_lro_hdr(lro_skb)->th != tcp_ports ||
- igb_lro_hdr(lro_skb)->iph.saddr != saddr ||
- igb_lro_hdr(lro_skb)->iph.daddr != daddr)
- continue;
-
-#ifdef HAVE_VLAN_RX_REGISTER
- if (IGB_CB(lro_skb)->vid != vid)
-#else
- if (lro_skb->vlan_tci != vid)
-#endif
- continue;
-
- /* out of order packet */
- if (IGB_CB(lro_skb)->next_seq != IGB_CB(new_skb)->next_seq) {
- igb_lro_flush(q_vector, lro_skb);
- IGB_CB(new_skb)->mss = 0;
- break;
- }
-
- /* TCP timestamp options have changed */
- if (!IGB_CB(lro_skb)->tsecr != !IGB_CB(new_skb)->tsecr) {
- igb_lro_flush(q_vector, lro_skb);
- break;
- }
-
- /* make sure timestamp values are increasing */
- if (IGB_CB(lro_skb)->tsecr &&
- IGB_CB(lro_skb)->tsval > IGB_CB(new_skb)->tsval) {
- igb_lro_flush(q_vector, lro_skb);
- IGB_CB(new_skb)->mss = 0;
- break;
- }
-
- data_len = IGB_CB(new_skb)->mss;
-
- /* Flush the context and stop coalescing if any of these hold:
- * malformed header
- * no tcp data
- * resultant packet would be too large
- * new skb is larger than our current mss
- * data would remain in header
- * we would consume more frags than the sk_buff contains
- * ack sequence numbers changed
- * window size has changed
- */
- if (data_len == 0 ||
- data_len > IGB_CB(lro_skb)->mss ||
- data_len > IGB_CB(lro_skb)->free ||
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- data_len != new_skb->data_len ||
- skb_shinfo(new_skb)->nr_frags >=
- (MAX_SKB_FRAGS - skb_shinfo(lro_skb)->nr_frags) ||
-#endif
- igb_lro_hdr(lro_skb)->th.ack_seq != lroh->th.ack_seq ||
- igb_lro_hdr(lro_skb)->th.window != lroh->th.window) {
- igb_lro_flush(q_vector, lro_skb);
- break;
- }
-
- /* Remove IP and TCP header*/
- skb_pull(new_skb, new_skb->len - data_len);
-
- /* update timestamp and timestamp echo response */
- IGB_CB(lro_skb)->tsval = IGB_CB(new_skb)->tsval;
- IGB_CB(lro_skb)->tsecr = IGB_CB(new_skb)->tsecr;
-
- /* update sequence and free space */
- IGB_CB(lro_skb)->next_seq += data_len;
- IGB_CB(lro_skb)->free -= data_len;
-
- /* update append_cnt */
- IGB_CB(lro_skb)->append_cnt++;
-
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- /* if header is empty pull pages into current skb */
- igb_merge_frags(lro_skb, new_skb);
-#else
- /* chain this new skb in frag_list */
- igb_add_active_tail(lro_skb, new_skb);
-#endif
-
- if ((data_len < IGB_CB(lro_skb)->mss) || lroh->th.psh ||
- skb_shinfo(lro_skb)->nr_frags == MAX_SKB_FRAGS) {
- igb_lro_hdr(lro_skb)->th.psh |= lroh->th.psh;
- igb_lro_flush(q_vector, lro_skb);
- }
-
- lrolist->stats.coal++;
- return;
- }
-
- if (IGB_CB(new_skb)->mss && !lroh->th.psh) {
- /* if we are at capacity flush the tail */
- if (skb_queue_len(&lrolist->active) >= IGB_LRO_MAX) {
- lro_skb = skb_peek_tail(&lrolist->active);
- if (lro_skb)
- igb_lro_flush(q_vector, lro_skb);
- }
-
- /* update sequence and free space */
- IGB_CB(new_skb)->next_seq += IGB_CB(new_skb)->mss;
- IGB_CB(new_skb)->free = 65521 - new_skb->len;
-
- /* .. and insert at the front of the active list */
- __skb_queue_head(&lrolist->active, new_skb);
-
- lrolist->stats.coal++;
- return;
- }
-
- /* packet not handled by any of the above, pass it to the stack */
-#ifdef HAVE_VLAN_RX_REGISTER
- igb_receive_skb(q_vector, new_skb);
-#else
- napi_gro_receive(&q_vector->napi, new_skb);
-#endif
-}
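
A note on the constant above: 65521 reads as 65535 (the maximum IPv4 tot_len) minus ETH_HLEN (14), so free = 65521 - skb->len bounds how much more payload can be coalesced before the merged frame's 16-bit IP length field would overflow. That reading is an inference from the constants, not something the source documents:

/* Worked arithmetic behind the LRO free-space bound (an inference from
 * the constants, not a documented formula). */
#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header length */
#define IP_MAX_TOTLEN	65535	/* largest value of the IPv4 tot_len field */

int main(void)
{
	printf("%d\n", IP_MAX_TOTLEN - ETH_HLEN);	/* prints 65521 */
	return 0;
}
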
-
-#endif /* IGB_NO_LRO */
-/**
- * igb_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being populated
- *
- * This function checks the ring, descriptor, and packet information in
- * order to populate the hash, checksum, VLAN, timestamp, protocol, and
- * other fields within the skb.
- **/
-static void igb_process_skb_fields(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct net_device *dev = rx_ring->netdev;
- __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
-
-#ifdef NETIF_F_RXHASH
- igb_rx_hash(rx_ring, rx_desc, skb);
-
-#endif
- igb_rx_checksum(rx_ring, rx_desc, skb);
-
- /* update packet type stats */
- if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4))
- rx_ring->rx_stats.ipv4_packets++;
- else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4_EX))
- rx_ring->rx_stats.ipv4e_packets++;
- else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV6))
- rx_ring->rx_stats.ipv6_packets++;
- else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV6_EX))
- rx_ring->rx_stats.ipv6e_packets++;
- else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_TCP))
- rx_ring->rx_stats.tcp_packets++;
- else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_UDP))
- rx_ring->rx_stats.udp_packets++;
- else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_SCTP))
- rx_ring->rx_stats.sctp_packets++;
- else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_NFS))
- rx_ring->rx_stats.nfs_packets++;
-
-#ifdef HAVE_PTP_1588_CLOCK
- igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
-#endif /* HAVE_PTP_1588_CLOCK */
-
-#ifdef NETIF_F_HW_VLAN_CTAG_RX
- if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
-#else
- if ((dev->features & NETIF_F_HW_VLAN_RX) &&
-#endif
- igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
- u16 vid = 0;
- if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
- test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
- vid = be16_to_cpu(rx_desc->wb.upper.vlan);
- else
- vid = le16_to_cpu(rx_desc->wb.upper.vlan);
-#ifdef HAVE_VLAN_RX_REGISTER
- IGB_CB(skb)->vid = vid;
- } else {
- IGB_CB(skb)->vid = 0;
-#else
-
-#ifdef HAVE_VLAN_PROTOCOL
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
-#else
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
-#endif
-
-
-#endif
- }
-
- skb_record_rx_queue(skb, rx_ring->queue_index);
-
- skb->protocol = eth_type_trans(skb, dev);
-}
-
-/**
- * igb_is_non_eop - process handling of non-EOP buffers
- * @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- *
- * This function updates next to clean. If the buffer is an EOP buffer
- * this function exits returning false, otherwise it returns true
- * indicating that this is in fact a non-EOP buffer and the frame
- * continues in the next descriptor.
- **/
-static bool igb_is_non_eop(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc)
-{
- u32 ntc = rx_ring->next_to_clean + 1;
-
- /* fetch, update, and store next to clean */
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
-
- prefetch(IGB_RX_DESC(rx_ring, ntc));
-
- if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
- return false;
-
- return true;
-}
-
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
-/* igb_clean_rx_irq - legacy (non packet split) version */
-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
-{
- struct igb_ring *rx_ring = q_vector->rx.ring;
- unsigned int total_bytes = 0, total_packets = 0;
- u16 cleaned_count = igb_desc_unused(rx_ring);
-
- do {
- struct igb_rx_buffer *rx_buffer;
- union e1000_adv_rx_desc *rx_desc;
- struct sk_buff *skb;
- u16 ntc;
-
- /* return some buffers to hardware; one at a time is too slow */
- if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
- igb_alloc_rx_buffers(rx_ring, cleaned_count);
- cleaned_count = 0;
- }
-
- ntc = rx_ring->next_to_clean;
- rx_desc = IGB_RX_DESC(rx_ring, ntc);
- rx_buffer = &rx_ring->rx_buffer_info[ntc];
-
- if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
- break;
-
- /*
- * This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
- */
- rmb();
-
- skb = rx_buffer->skb;
-
- prefetch(skb->data);
-
- /* pull the header of the skb in */
- __skb_put(skb, le16_to_cpu(rx_desc->wb.upper.length));
-
- /* clear skb reference in buffer info structure */
- rx_buffer->skb = NULL;
-
- cleaned_count++;
-
- BUG_ON(igb_is_non_eop(rx_ring, rx_desc));
-
- dma_unmap_single(rx_ring->dev, rx_buffer->dma,
- rx_ring->rx_buffer_len,
- DMA_FROM_DEVICE);
- rx_buffer->dma = 0;
-
- if (igb_test_staterr(rx_desc,
- E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
- dev_kfree_skb_any(skb);
- continue;
- }
-
- total_bytes += skb->len;
-
- /* populate checksum, timestamp, VLAN, and protocol */
- igb_process_skb_fields(rx_ring, rx_desc, skb);
-
-#ifndef IGB_NO_LRO
- if (igb_can_lro(rx_ring, rx_desc, skb))
- igb_lro_receive(q_vector, skb);
- else
-#endif
-#ifdef HAVE_VLAN_RX_REGISTER
- igb_receive_skb(q_vector, skb);
-#else
- napi_gro_receive(&q_vector->napi, skb);
-#endif
-
-#ifndef NETIF_F_GRO
- netdev_ring(rx_ring)->last_rx = jiffies;
-
-#endif
- /* update budget accounting */
- total_packets++;
- } while (likely(total_packets < budget));
-
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
-
- if (cleaned_count)
- igb_alloc_rx_buffers(rx_ring, cleaned_count);
-
-#ifndef IGB_NO_LRO
- igb_lro_flush_all(q_vector);
-
-#endif /* IGB_NO_LRO */
- return (total_packets < budget);
-}
-#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
-/**
- * igb_get_headlen - determine size of header for LRO/GRO
- * @data: pointer to the start of the headers
- * @max_len: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO and GRO offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- **/
-static unsigned int igb_get_headlen(unsigned char *data,
- unsigned int max_len)
-{
- union {
- unsigned char *network;
- /* l2 headers */
- struct ethhdr *eth;
- struct vlan_hdr *vlan;
- /* l3 headers */
- struct iphdr *ipv4;
- struct ipv6hdr *ipv6;
- } hdr;
- __be16 protocol;
- u8 nexthdr = 0; /* default to not TCP */
- u8 hlen;
-
- /* this should never happen, but better safe than sorry */
- if (max_len < ETH_HLEN)
- return max_len;
-
- /* initialize network frame pointer */
- hdr.network = data;
-
- /* set first protocol and move network header forward */
- protocol = hdr.eth->h_proto;
- hdr.network += ETH_HLEN;
-
- /* handle any vlan tag if present */
- if (protocol == __constant_htons(ETH_P_8021Q)) {
- if ((hdr.network - data) > (max_len - VLAN_HLEN))
- return max_len;
-
- protocol = hdr.vlan->h_vlan_encapsulated_proto;
- hdr.network += VLAN_HLEN;
- }
-
- /* handle L3 protocols */
- if (protocol == __constant_htons(ETH_P_IP)) {
- if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
- return max_len;
-
- /* access ihl as a u8 to avoid unaligned access on ia64 */
- hlen = (hdr.network[0] & 0x0F) << 2;
-
- /* verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct iphdr))
- return hdr.network - data;
-
- /* record next protocol if header is present */
- if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
- nexthdr = hdr.ipv4->protocol;
-#ifdef NETIF_F_TSO6
- } else if (protocol == __constant_htons(ETH_P_IPV6)) {
- if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
- return max_len;
-
- /* record next protocol */
- nexthdr = hdr.ipv6->nexthdr;
- hlen = sizeof(struct ipv6hdr);
-#endif /* NETIF_F_TSO6 */
- } else {
- return hdr.network - data;
- }
-
- /* relocate pointer to start of L4 header */
- hdr.network += hlen;
-
- /* finally sort out TCP */
- if (nexthdr == IPPROTO_TCP) {
- if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
- return max_len;
-
- /* access doff as a u8 to avoid unaligned access on ia64 */
- hlen = (hdr.network[12] & 0xF0) >> 2;
-
- /* verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct tcphdr))
- return hdr.network - data;
-
- hdr.network += hlen;
- } else if (nexthdr == IPPROTO_UDP) {
- if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
- return max_len;
-
- hdr.network += sizeof(struct udphdr);
- }
-
- /*
- * If everything has gone correctly hdr.network should be the
- * data section of the packet and will be the end of the header.
- * If not then it probably represents the end of the last recognized
- * header.
- */
- if ((hdr.network - data) < max_len)
- return hdr.network - data;
- else
- return max_len;
-}
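
The nibble arithmetic in igb_get_headlen avoids unaligned struct loads: the IPv4 IHL sits in the low nibble of header byte 0 and the TCP data offset in the high nibble of byte 12, both counted in 32-bit words, which is why one mask shifts left by 2 and the other right by 2. A standalone sketch of both extractions:

/* Extract IPv4 and TCP header lengths byte-wise, as igb_get_headlen
 * does, instead of dereferencing possibly misaligned struct fields. */
#include <stdio.h>

int main(void)
{
	unsigned char ip_byte0 = 0x45;		/* version 4, IHL 5 */
	unsigned char tcp_byte12 = 0x80;	/* doff 8 (32-byte header) */

	unsigned int ip_hlen = (ip_byte0 & 0x0F) << 2;		/* 5 << 2 = 20 */
	unsigned int tcp_hlen = (tcp_byte12 & 0xF0) >> 2;	/* 0x80 >> 2 = 32 */

	printf("ip header %u bytes, tcp header %u bytes\n", ip_hlen, tcp_hlen);
	return 0;
}
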
-
-/**
- * igb_pull_tail - igb specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being adjusted
- *
- * This function is an igb specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void igb_pull_tail(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
-
- /*
- * it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lowmem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
-#ifdef HAVE_PTP_1588_CLOCK
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- /* retrieve timestamp from buffer */
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-
- /* update pointers to remove timestamp header */
- skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
- frag->page_offset += IGB_TS_HDR_LEN;
- skb->data_len -= IGB_TS_HDR_LEN;
- skb->len -= IGB_TS_HDR_LEN;
-
- /* move va to start of packet data */
- va += IGB_TS_HDR_LEN;
- }
-#endif /* HAVE_PTP_1588_CLOCK */
-
- /*
- * we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60, so that skb_pad can work.
- */
- pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-}
-
-/**
- * igb_cleanup_headers - Correct corrupted or empty headers
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being fixed
- *
- * Address the case where we are pulling data in on pages only
- * and as such no data is present in the skb header.
- *
- * In addition if skb is not at least 60 bytes we need to pad it so that
- * it is large enough to qualify as a valid Ethernet frame.
- *
- * Returns true if an error was encountered and skb was freed.
- **/
-static bool igb_cleanup_headers(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
-
- if (unlikely((igb_test_staterr(rx_desc,
- E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
- struct net_device *netdev = rx_ring->netdev;
- if (!(netdev->features & NETIF_F_RXALL)) {
- dev_kfree_skb_any(skb);
- return true;
- }
- }
-
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- igb_pull_tail(rx_ring, rx_desc, skb);
-
- /* if skb_pad returns an error the skb was freed */
- if (unlikely(skb->len < 60)) {
- int pad_len = 60 - skb->len;
-
- if (skb_pad(skb, pad_len))
- return true;
- __skb_put(skb, pad_len);
- }
-
- return false;
-}
-
-/* igb_clean_rx_irq - packet split version */
-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
-{
- struct igb_ring *rx_ring = q_vector->rx.ring;
- struct sk_buff *skb = rx_ring->skb;
- unsigned int total_bytes = 0, total_packets = 0;
- u16 cleaned_count = igb_desc_unused(rx_ring);
-
- do {
- union e1000_adv_rx_desc *rx_desc;
-
- /* return some buffers to hardware; one at a time is too slow */
- if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
- igb_alloc_rx_buffers(rx_ring, cleaned_count);
- cleaned_count = 0;
- }
-
- rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
-
- if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
- break;
-
- /*
- * This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
- */
- rmb();
-
- /* retrieve a buffer from the ring */
- skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
-
- /* exit if we failed to retrieve a buffer */
- if (!skb)
- break;
-
- cleaned_count++;
-
- /* fetch next buffer in frame if non-eop */
- if (igb_is_non_eop(rx_ring, rx_desc))
- continue;
-
- /* verify the packet layout is correct */
- if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
- skb = NULL;
- continue;
- }
-
- /* probably a little skewed due to removing CRC */
- total_bytes += skb->len;
-
- /* populate checksum, timestamp, VLAN, and protocol */
- igb_process_skb_fields(rx_ring, rx_desc, skb);
-
-#ifndef IGB_NO_LRO
- if (igb_can_lro(rx_ring, rx_desc, skb))
- igb_lro_receive(q_vector, skb);
- else
-#endif
-#ifdef HAVE_VLAN_RX_REGISTER
- igb_receive_skb(q_vector, skb);
-#else
- napi_gro_receive(&q_vector->napi, skb);
-#endif
-#ifndef NETIF_F_GRO
-
- netdev_ring(rx_ring)->last_rx = jiffies;
-#endif
-
- /* reset skb pointer */
- skb = NULL;
-
- /* update budget accounting */
- total_packets++;
- } while (likely(total_packets < budget));
-
- /* place incomplete frames back on ring for completion */
- rx_ring->skb = skb;
-
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
-
- if (cleaned_count)
- igb_alloc_rx_buffers(rx_ring, cleaned_count);
-
-#ifndef IGB_NO_LRO
- igb_lro_flush_all(q_vector);
-
-#endif /* IGB_NO_LRO */
- return (total_packets < budget);
-}
-#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
-
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
-static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
- struct igb_rx_buffer *bi)
-{
- struct sk_buff *skb = bi->skb;
- dma_addr_t dma = bi->dma;
-
- if (dma)
- return true;
-
- if (likely(!skb)) {
- skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring),
- rx_ring->rx_buffer_len);
- bi->skb = skb;
- if (!skb) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
-
- /* initialize skb for ring */
- skb_record_rx_queue(skb, ring_queue_index(rx_ring));
- }
-
- dma = dma_map_single(rx_ring->dev, skb->data,
- rx_ring->rx_buffer_len, DMA_FROM_DEVICE);
-
- /* if mapping failed free memory back to system since
- * there isn't much point in holding memory we can't use
- */
- if (dma_mapping_error(rx_ring->dev, dma)) {
- dev_kfree_skb_any(skb);
- bi->skb = NULL;
-
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
-
- bi->dma = dma;
- return true;
-}
-
-#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
-static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
- struct igb_rx_buffer *bi)
-{
- struct page *page = bi->page;
- dma_addr_t dma;
-
- /* since we are recycling buffers we should seldom need to alloc */
- if (likely(page))
- return true;
-
- /* alloc new page for storage */
- page = alloc_page(GFP_ATOMIC | __GFP_COLD);
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
-
- /* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-
- /*
- * if mapping failed free memory back to system since
- * there isn't much point in holding memory we can't use
- */
- if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_page(page);
-
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
-
- bi->dma = dma;
- bi->page = page;
- bi->page_offset = 0;
-
- return true;
-}
-
-#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
-/**
- * igb_alloc_rx_buffers - Replace used receive buffers
- * @rx_ring: rx descriptor ring to place buffers on
- * @cleaned_count: number of buffers to replace
- **/
-void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
-{
- union e1000_adv_rx_desc *rx_desc;
- struct igb_rx_buffer *bi;
- u16 i = rx_ring->next_to_use;
-
- /* nothing to do */
- if (!cleaned_count)
- return;
-
- rx_desc = IGB_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_buffer_info[i];
- i -= rx_ring->count;
-
- do {
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- if (!igb_alloc_mapped_skb(rx_ring, bi))
-#else
- if (!igb_alloc_mapped_page(rx_ring, bi))
-#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
- break;
-
- /*
- * Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info.
- */
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
-#else
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
-#endif
-
- rx_desc++;
- bi++;
- i++;
- if (unlikely(!i)) {
- rx_desc = IGB_RX_DESC(rx_ring, 0);
- bi = rx_ring->rx_buffer_info;
- i -= rx_ring->count;
- }
-
- /* clear the hdr_addr for the next_to_use descriptor */
- rx_desc->read.hdr_addr = 0;
-
- cleaned_count--;
- } while (cleaned_count);
-
- i += rx_ring->count;
-
- if (rx_ring->next_to_use != i) {
- /* record the next descriptor to use */
- rx_ring->next_to_use = i;
-
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = i;
-
-#endif
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- writel(i, rx_ring->tail);
- }
-}
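
The wmb()/writel() pairing at the end of igb_alloc_rx_buffers is the producer-side doorbell pattern: every descriptor write must be globally visible before the tail register tells the hardware to fetch. A sketch of the same ordering using C11 release semantics in place of the kernel primitives (descriptor[] and tail are illustrative, not the driver's fields):

/* Producer-side doorbell ordering sketched with C11 atomics standing in
 * for the kernel's wmb() + writel(); names here are illustrative. */
#include <stdatomic.h>
#include <stdint.h>

static uint64_t descriptor[8];
static _Atomic uint32_t tail;

void publish(uint32_t next_to_use, uint64_t dma_addr)
{
	descriptor[next_to_use & 7] = dma_addr;	/* fill the descriptor */

	/* release store: the descriptor write above cannot be reordered
	 * past this, so a consumer that sees the new tail sees the data */
	atomic_store_explicit(&tail, next_to_use, memory_order_release);
}
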
-
-#ifdef SIOCGMIIPHY
-/**
- * igb_mii_ioctl - handle MII ioctls
- * @netdev: network interface device structure
- * @ifr: pointer to the interface request structure
- * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
- **/
-static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- if (adapter->hw.phy.media_type != e1000_media_type_copper)
- return -EOPNOTSUPP;
-
- switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = adapter->hw.phy.addr;
- break;
- case SIOCGMIIREG:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
- &data->val_out))
- return -EIO;
- break;
- case SIOCSMIIREG:
- default:
- return -EOPNOTSUPP;
- }
- return E1000_SUCCESS;
-}
-
-#endif
-/**
- * igb_ioctl - dispatch device-specific ioctls
- * @netdev: network interface device structure
- * @ifr: pointer to the interface request structure
- * @cmd: ioctl command to execute
- **/
-static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
-#ifdef SIOCGMIIPHY
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- return igb_mii_ioctl(netdev, ifr, cmd);
-#endif
-#ifdef HAVE_PTP_1588_CLOCK
- case SIOCSHWTSTAMP:
- return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
-#endif /* HAVE_PTP_1588_CLOCK */
-#ifdef ETHTOOL_OPS_COMPAT
- case SIOCETHTOOL:
- return ethtool_ioctl(ifr);
-#endif
- default:
- return -EOPNOTSUPP;
- }
-}
-
-s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
-{
- struct igb_adapter *adapter = hw->back;
- u16 cap_offset;
-
- cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
- if (!cap_offset)
- return -E1000_ERR_CONFIG;
-
- pci_read_config_word(adapter->pdev, cap_offset + reg, value);
-
- return E1000_SUCCESS;
-}
-
-s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
-{
- struct igb_adapter *adapter = hw->back;
- u16 cap_offset;
-
- cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
- if (!cap_offset)
- return -E1000_ERR_CONFIG;
-
- pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
-
- return E1000_SUCCESS;
-}
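
Both helpers above locate the PCI Express capability once with pci_find_capability() and then treat reg as an offset inside that capability block. A hypothetical caller, for illustration only (PCI_EXP_DEVCTL is the standard 0x08 offset of the Device Control register; the function below is not part of the driver):

/* Hypothetical usage of e1000_read_pcie_cap_reg(); assumes the driver's
 * own types (struct e1000_hw, u16, E1000_SUCCESS) are in scope. */
#ifndef PCI_EXP_DEVCTL
#define PCI_EXP_DEVCTL 0x08	/* Device Control within the PCIe cap */
#endif

static void example_read_devctl(struct e1000_hw *hw)
{
	u16 devctl = 0;

	if (e1000_read_pcie_cap_reg(hw, PCI_EXP_DEVCTL, &devctl) ==
	    E1000_SUCCESS)
		pr_info("PCIe DEVCTL = 0x%04x\n", devctl);
}
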
-
-#ifdef HAVE_VLAN_RX_REGISTER
-static void igb_vlan_mode(struct net_device *netdev, struct vlan_group *vlgrp)
-#else
-void igb_vlan_mode(struct net_device *netdev, u32 features)
-#endif
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl, rctl;
- int i;
-#ifdef HAVE_VLAN_RX_REGISTER
- bool enable = !!vlgrp;
-
- igb_irq_disable(adapter);
-
- adapter->vlgrp = vlgrp;
-
- if (!test_bit(__IGB_DOWN, &adapter->state))
- igb_irq_enable(adapter);
-#else
-#ifdef NETIF_F_HW_VLAN_CTAG_RX
- bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
-#else
- bool enable = !!(features & NETIF_F_HW_VLAN_RX);
-#endif
-#endif
-
- if (enable) {
- /* enable VLAN tag insert/strip */
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
- ctrl |= E1000_CTRL_VME;
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-
- /* Disable CFI check */
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- rctl &= ~E1000_RCTL_CFIEN;
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- } else {
- /* disable VLAN tag insert/strip */
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
- ctrl &= ~E1000_CTRL_VME;
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
- }
-
-#ifndef CONFIG_IGB_VMDQ_NETDEV
- for (i = 0; i < adapter->vmdq_pools; i++) {
- igb_set_vf_vlan_strip(adapter,
- adapter->vfs_allocated_count + i,
- enable);
- }
-
-#else
- igb_set_vf_vlan_strip(adapter,
- adapter->vfs_allocated_count,
- enable);
-
- for (i = 1; i < adapter->vmdq_pools; i++) {
-#ifdef HAVE_VLAN_RX_REGISTER
- struct igb_vmdq_adapter *vadapter;
- vadapter = netdev_priv(adapter->vmdq_netdev[i-1]);
- enable = !!vadapter->vlgrp;
-#else
- struct net_device *vnetdev;
- vnetdev = adapter->vmdq_netdev[i-1];
-#ifdef NETIF_F_HW_VLAN_CTAG_RX
- enable = !!(vnetdev->features & NETIF_F_HW_VLAN_CTAG_RX);
-#else
- enable = !!(vnetdev->features & NETIF_F_HW_VLAN_RX);
-#endif
-#endif
- igb_set_vf_vlan_strip(adapter,
- adapter->vfs_allocated_count + i,
- enable);
- }
-
-#endif
- igb_rlpml_set(adapter);
-}
-
-#ifdef HAVE_VLAN_PROTOCOL
-static int igb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
-#elif defined HAVE_INT_NDO_VLAN_RX_ADD_VID
-#ifdef NETIF_F_HW_VLAN_CTAG_RX
-static int igb_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
-#else
-static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
-#endif
-#else
-static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
-#endif
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- int pf_id = adapter->vfs_allocated_count;
-
- /* attempt to add filter to vlvf array */
- igb_vlvf_set(adapter, vid, TRUE, pf_id);
-
- /* add the filter since PF can receive vlans w/o entry in vlvf */
- igb_vfta_set(adapter, vid, TRUE);
-#ifndef HAVE_NETDEV_VLAN_FEATURES
-
- /* Copy feature flags from netdev to the vlan netdev for this vid.
- * This allows things like TSO to bubble down to our vlan device.
- * There is no need to update netdev for vlan 0 (DCB), since it
- * would not have a v_netdev.
- */
- if (adapter->vlgrp) {
- struct vlan_group *vlgrp = adapter->vlgrp;
- struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid);
- if (v_netdev) {
- v_netdev->features |= netdev->features;
- vlan_group_set_device(vlgrp, vid, v_netdev);
- }
- }
-#endif
-#ifndef HAVE_VLAN_RX_REGISTER
-
- set_bit(vid, adapter->active_vlans);
-#endif
-#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
- return 0;
-#endif
-}
-
-#ifdef HAVE_VLAN_PROTOCOL
-static int igb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
-#elif defined HAVE_INT_NDO_VLAN_RX_ADD_VID
-#ifdef NETIF_F_HW_VLAN_CTAG_RX
-static int igb_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
-#else
-static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
-#endif
-#else
-static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
-#endif
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- int pf_id = adapter->vfs_allocated_count;
- s32 err;
-
-#ifdef HAVE_VLAN_RX_REGISTER
- igb_irq_disable(adapter);
-
- vlan_group_set_device(adapter->vlgrp, vid, NULL);
-
- if (!test_bit(__IGB_DOWN, &adapter->state))
- igb_irq_enable(adapter);
-
-#endif /* HAVE_VLAN_RX_REGISTER */
- /* remove vlan from VLVF table array */
- err = igb_vlvf_set(adapter, vid, FALSE, pf_id);
-
- /* if vid was not present in VLVF just remove it from table */
- if (err)
- igb_vfta_set(adapter, vid, FALSE);
-#ifndef HAVE_VLAN_RX_REGISTER
-
- clear_bit(vid, adapter->active_vlans);
-#endif
-#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
- return 0;
-#endif
-}
-
-static void igb_restore_vlan(struct igb_adapter *adapter)
-{
-#ifdef HAVE_VLAN_RX_REGISTER
- igb_vlan_mode(adapter->netdev, adapter->vlgrp);
-
- if (adapter->vlgrp) {
- u16 vid;
- for (vid = 0; vid < VLAN_N_VID; vid++) {
- if (!vlan_group_get_device(adapter->vlgrp, vid))
- continue;
-#ifdef NETIF_F_HW_VLAN_CTAG_RX
- igb_vlan_rx_add_vid(adapter->netdev,
- htons(ETH_P_8021Q), vid);
-#else
- igb_vlan_rx_add_vid(adapter->netdev, vid);
-#endif
- }
- }
-#else
- u16 vid;
-
- igb_vlan_mode(adapter->netdev, adapter->netdev->features);
-
- for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
-#ifdef NETIF_F_HW_VLAN_CTAG_RX
- igb_vlan_rx_add_vid(adapter->netdev,
- htons(ETH_P_8021Q), vid);
-#else
- igb_vlan_rx_add_vid(adapter->netdev, vid);
-#endif
-#endif
-}
-
-int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
-{
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_mac_info *mac = &adapter->hw.mac;
-
- mac->autoneg = 0;
-
- /* SerDes devices do not support 10Mbps full/half duplex
- * or 100Mbps half duplex
- */
- if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
- switch (spddplx) {
- case SPEED_10 + DUPLEX_HALF:
- case SPEED_10 + DUPLEX_FULL:
- case SPEED_100 + DUPLEX_HALF:
- dev_err(pci_dev_to_dev(pdev),
- "Unsupported Speed/Duplex configuration\n");
- return -EINVAL;
- default:
- break;
- }
- }
-
- switch (spddplx) {
- case SPEED_10 + DUPLEX_HALF:
- mac->forced_speed_duplex = ADVERTISE_10_HALF;
- break;
- case SPEED_10 + DUPLEX_FULL:
- mac->forced_speed_duplex = ADVERTISE_10_FULL;
- break;
- case SPEED_100 + DUPLEX_HALF:
- mac->forced_speed_duplex = ADVERTISE_100_HALF;
- break;
- case SPEED_100 + DUPLEX_FULL:
- mac->forced_speed_duplex = ADVERTISE_100_FULL;
- break;
- case SPEED_1000 + DUPLEX_FULL:
- mac->autoneg = 1;
- adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
- break;
- case SPEED_1000 + DUPLEX_HALF: /* not supported */
- default:
- dev_err(pci_dev_to_dev(pdev), "Unsupported Speed/Duplex configuration\n");
- return -EINVAL;
- }
-
- /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
- adapter->hw.phy.mdix = AUTO_ALL_MODES;
-
- return 0;
-}
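
The switch in igb_set_spd_dplx works because ethtool's SPEED_* constants are the literal link speeds in Mbps and DUPLEX_HALF/DUPLEX_FULL are 0 and 1, so each speed+duplex pair sums to a unique key. A standalone sketch of the encoding:

/* The speed/duplex keys used by igb_set_spd_dplx: SPEED_* are Mbps
 * values and DUPLEX_HALF/FULL are 0/1, so the sums are unique. */
#include <stdio.h>

#define SPEED_10	10
#define SPEED_100	100
#define SPEED_1000	1000
#define DUPLEX_HALF	0
#define DUPLEX_FULL	1

int main(void)
{
	printf("10/half   -> %d\n", SPEED_10 + DUPLEX_HALF);	/* 10 */
	printf("10/full   -> %d\n", SPEED_10 + DUPLEX_FULL);	/* 11 */
	printf("100/half  -> %d\n", SPEED_100 + DUPLEX_HALF);	/* 100 */
	printf("100/full  -> %d\n", SPEED_100 + DUPLEX_FULL);	/* 101 */
	printf("1000/full -> %d\n", SPEED_1000 + DUPLEX_FULL);	/* 1001 */
	return 0;
}
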
-
-static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
- bool runtime)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl, rctl, status;
- u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
-
- netif_device_detach(netdev);
-
- status = E1000_READ_REG(hw, E1000_STATUS);
- if (status & E1000_STATUS_LU)
- wufc &= ~E1000_WUFC_LNKC;
-
- if (netif_running(netdev))
- __igb_close(netdev, true);
-
- igb_clear_interrupt_scheme(adapter);
-
-#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-#endif
-
- if (wufc) {
- igb_setup_rctl(adapter);
- igb_set_rx_mode(netdev);
-
- /* turn on all-multi mode if wake on multicast is enabled */
- if (wufc & E1000_WUFC_MC) {
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- rctl |= E1000_RCTL_MPE;
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- }
-
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
- /* phy power management enable */
- #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
- ctrl |= E1000_CTRL_ADVD3WUC;
- E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-
- /* Allow time for pending master requests to run */
- e1000_disable_pcie_master(hw);
-
- E1000_WRITE_REG(hw, E1000_WUC, E1000_WUC_PME_EN);
- E1000_WRITE_REG(hw, E1000_WUFC, wufc);
- } else {
- E1000_WRITE_REG(hw, E1000_WUC, 0);
- E1000_WRITE_REG(hw, E1000_WUFC, 0);
- }
-
- *enable_wake = wufc || adapter->en_mng_pt;
- if (!*enable_wake)
- igb_power_down_link(adapter);
- else
- igb_power_up_link(adapter);
-
- /* Release control of h/w to f/w. If f/w is AMT enabled, this
- * would have already happened in close and is redundant. */
- igb_release_hw_control(adapter);
-
- pci_disable_device(pdev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
-static int igb_suspend(struct device *dev)
-#else
-static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
-#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
-{
-#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
- struct pci_dev *pdev = to_pci_dev(dev);
-#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
- int retval;
- bool wake;
-
- retval = __igb_shutdown(pdev, &wake, 0);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return 0;
-}
-
-#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
-static int igb_resume(struct device *dev)
-#else
-static int igb_resume(struct pci_dev *pdev)
-#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
-{
-#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
- struct pci_dev *pdev = to_pci_dev(dev);
-#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 err;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_save_state(pdev);
-
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(pci_dev_to_dev(pdev),
- "igb: Cannot enable PCI device from suspend\n");
- return err;
- }
- pci_set_master(pdev);
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
- if (igb_init_interrupt_scheme(adapter, true)) {
- dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n");
- return -ENOMEM;
- }
-
- igb_reset(adapter);
-
- /* let the f/w know that the h/w is now under the control of the
- * driver. */
- igb_get_hw_control(adapter);
-
- E1000_WRITE_REG(hw, E1000_WUS, ~0);
-
- if (netdev->flags & IFF_UP) {
- rtnl_lock();
- err = __igb_open(netdev, true);
- rtnl_unlock();
- if (err)
- return err;
- }
-
- netif_device_attach(netdev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_RUNTIME
-#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
-static int igb_runtime_idle(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igb_adapter *adapter = netdev_priv(netdev);
-
- if (!igb_has_link(adapter))
- pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
-
- return -EBUSY;
-}
-
-static int igb_runtime_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int retval;
- bool wake;
-
- retval = __igb_shutdown(pdev, &wake, 1);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return 0;
-}
-
-static int igb_runtime_resume(struct device *dev)
-{
- return igb_resume(dev);
-}
-#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
-#endif /* CONFIG_PM_RUNTIME */
-#endif /* CONFIG_PM */
-
-#ifdef USE_REBOOT_NOTIFIER
-/* only want to do this for 2.4 kernels? */
-static int igb_notify_reboot(struct notifier_block *nb, unsigned long event,
- void *p)
-{
- struct pci_dev *pdev = NULL;
- bool wake;
-
- switch (event) {
- case SYS_DOWN:
- case SYS_HALT:
- case SYS_POWER_OFF:
- while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
- if (pci_dev_driver(pdev) == &igb_driver) {
- __igb_shutdown(pdev, &wake, 0);
- if (event == SYS_POWER_OFF) {
- pci_wake_from_d3(pdev, wake);
- pci_set_power_state(pdev, PCI_D3hot);
- }
- }
- }
- }
- return NOTIFY_DONE;
-}
-#else
-static void igb_shutdown(struct pci_dev *pdev)
-{
- bool wake = false;
-
- __igb_shutdown(pdev, &wake, 0);
-
- if (system_state == SYSTEM_POWER_OFF) {
- pci_wake_from_d3(pdev, wake);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-}
-#endif /* USE_REBOOT_NOTIFIER */
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void igb_netpoll(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct igb_q_vector *q_vector;
- int i;
-
- for (i = 0; i < adapter->num_q_vectors; i++) {
- q_vector = adapter->q_vector[i];
- if (adapter->msix_entries)
- E1000_WRITE_REG(hw, E1000_EIMC, q_vector->eims_value);
- else
- igb_irq_disable(adapter);
- napi_schedule(&q_vector->napi);
- }
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
-#ifdef HAVE_PCI_ERS
-#define E1000_DEV_ID_82576_VF 0x10CA
-/**
- * igb_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci connection state
- *
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
-static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igb_adapter *adapter = netdev_priv(netdev);
-
-#ifdef CONFIG_PCI_IOV__UNUSED
- struct pci_dev *bdev, *vfdev;
- u32 dw0, dw1, dw2, dw3;
- int vf, pos;
- u16 req_id, pf_func;
-
- if (!(adapter->flags & IGB_FLAG_DETECT_BAD_DMA))
- goto skip_bad_vf_detection;
-
- bdev = pdev->bus->self;
- while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
- bdev = bdev->bus->self;
-
- if (!bdev)
- goto skip_bad_vf_detection;
-
- pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
- if (!pos)
- goto skip_bad_vf_detection;
-
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);
-
- req_id = dw1 >> 16;
- /* On the 82576 if bit 7 of the requestor ID is set then it's a VF */
- if (!(req_id & 0x0080))
- goto skip_bad_vf_detection;
-
- pf_func = req_id & 0x01;
- if ((pf_func & 1) == (pdev->devfn & 1)) {
-
- vf = (req_id & 0x7F) >> 1;
- dev_err(pci_dev_to_dev(pdev),
- "VF %d has caused a PCIe error\n", vf);
- dev_err(pci_dev_to_dev(pdev),
- "TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
- "%8.8x\tdw3: %8.8x\n",
- dw0, dw1, dw2, dw3);
-
- /* Find the pci device of the offending VF */
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- E1000_DEV_ID_82576_VF, NULL);
- while (vfdev) {
- if (vfdev->devfn == (req_id & 0xFF))
- break;
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- E1000_DEV_ID_82576_VF, vfdev);
- }
- /*
- * There's a slim chance the VF could have been hot plugged,
- * so if it is no longer present we don't need to issue the
- * VFLR. Just clean up the AER in that case.
- */
- if (vfdev) {
- dev_err(pci_dev_to_dev(pdev),
- "Issuing VFLR to VF %d\n", vf);
- pci_write_config_dword(vfdev, 0xA8, 0x00008000);
- }
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
- }
-
- /*
- * Even though the error may have occurred on the other port
- * we still need to increment the vf error reference count for
- * both ports because the I/O resume function will be called
- * for both of them.
- */
- adapter->vferr_refcount++;
-
- return PCI_ERS_RESULT_RECOVERED;
-
-skip_bad_vf_detection:
-#endif /* CONFIG_PCI_IOV__UNUSED */
-
- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- if (netif_running(netdev))
- igb_down(adapter);
- pci_disable_device(pdev);
-
- /* Request a slot reset. */
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * igb_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
- *
- * Restart the card from scratch, as if from a cold boot. Implementation
- * resembles the first half of the igb_resume routine.
- */
-static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- pci_ers_result_t result;
-
- if (pci_enable_device_mem(pdev)) {
- dev_err(pci_dev_to_dev(pdev),
- "Cannot re-enable PCI device after reset.\n");
- result = PCI_ERS_RESULT_DISCONNECT;
- } else {
- pci_set_master(pdev);
- pci_restore_state(pdev);
- pci_save_state(pdev);
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
- schedule_work(&adapter->reset_task);
- E1000_WRITE_REG(hw, E1000_WUS, ~0);
- result = PCI_ERS_RESULT_RECOVERED;
- }
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
- return result;
-}
-
-/**
- * igb_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the error recovery driver tells us that
- * it's OK to resume normal operation. Implementation resembles the
- * second-half of the igb_resume routine.
- */
-static void igb_io_resume(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igb_adapter *adapter = netdev_priv(netdev);
-
- if (adapter->vferr_refcount) {
- dev_info(pci_dev_to_dev(pdev), "Resuming after VF err\n");
- adapter->vferr_refcount--;
- return;
- }
-
- if (netif_running(netdev)) {
- if (igb_up(adapter)) {
- dev_err(pci_dev_to_dev(pdev), "igb_up failed after reset\n");
- return;
- }
- }
-
- netif_device_attach(netdev);
-
- /* let the f/w know that the h/w is now under the control of the
- * driver. */
- igb_get_hw_control(adapter);
-}
-
-#endif /* HAVE_PCI_ERS */
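
Taken together, igb_io_error_detected, igb_io_slot_reset and igb_io_resume implement the kernel's three-phase PCI error-recovery contract (detect, reset, resume). A minimal sketch of how such callbacks are typically wired up (the struct variable name here is illustrative, not taken from this file):

    static const struct pci_error_handlers igb_err_handler = {
        .error_detected = igb_io_error_detected, /* bus error seen */
        .slot_reset     = igb_io_slot_reset,     /* after slot reset */
        .resume         = igb_io_resume,         /* traffic may resume */
    };

    /* referenced from the .err_handler field of the pci_driver */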
-
-int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue)
-{
- struct e1000_hw *hw = &adapter->hw;
- int i;
-
- if (is_zero_ether_addr(addr))
- return 0;
-
- for (i = 0; i < hw->mac.rar_entry_count; i++) {
- if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)
- continue;
- adapter->mac_table[i].state = (IGB_MAC_STATE_MODIFIED |
- IGB_MAC_STATE_IN_USE);
- memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN);
- adapter->mac_table[i].queue = queue;
- igb_sync_mac_table(adapter);
- return 0;
- }
- return -ENOMEM;
-}
-int igb_del_mac_filter(struct igb_adapter *adapter, u8* addr, u16 queue)
-{
- /* search table for addr, if found, set to 0 and sync */
- int i;
- struct e1000_hw *hw = &adapter->hw;
-
- if (is_zero_ether_addr(addr))
- return 0;
- for (i = 0; i < hw->mac.rar_entry_count; i++) {
- if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
- adapter->mac_table[i].queue == queue) {
- adapter->mac_table[i].state = IGB_MAC_STATE_MODIFIED;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- adapter->mac_table[i].queue = 0;
- igb_sync_mac_table(adapter);
- return 0;
- }
- }
- return -ENOMEM;
-}
-static int igb_set_vf_mac(struct igb_adapter *adapter,
- int vf, unsigned char *mac_addr)
-{
- igb_del_mac_filter(adapter, adapter->vf_data[vf].vf_mac_addresses, vf);
- memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
-
- igb_add_mac_filter(adapter, mac_addr, vf);
-
- return 0;
-}
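
A hypothetical caller of the pair above, replacing VF 2's MAC address in the PF filter table (address and VF index are illustrative):

    u8 new_mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };

    /* frees the old (addr, queue) slot, then claims a fresh one */
    if (igb_set_vf_mac(adapter, 2, new_mac))
        dev_warn(&adapter->pdev->dev, "VF MAC update failed\n");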
-
-#ifdef IFLA_VF_MAX
-static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
- return -EINVAL;
- adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
- dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
- dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
- " change effective.\n");
- if (test_bit(__IGB_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
- " but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
- " attempting to use the VF device.\n");
- }
- return igb_set_vf_mac(adapter, vf, mac);
-}
-
-static int igb_link_mbps(int internal_link_speed)
-{
- switch (internal_link_speed) {
- case SPEED_100:
- return 100;
- case SPEED_1000:
- return 1000;
- case SPEED_2500:
- return 2500;
- default:
- return 0;
- }
-}
-
-static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
- int link_speed)
-{
- int rf_dec, rf_int;
- u32 bcnrc_val;
-
- if (tx_rate != 0) {
- /* Calculate the rate factor values to set */
- rf_int = link_speed / tx_rate;
- rf_dec = (link_speed - (rf_int * tx_rate));
- rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
-
- bcnrc_val = E1000_RTTBCNRC_RS_ENA;
- bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
- E1000_RTTBCNRC_RF_INT_MASK);
- bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
- } else {
- bcnrc_val = 0;
- }
-
- E1000_WRITE_REG(hw, E1000_RTTDQSEL, vf); /* vf X uses queue X */
- /*
- * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
- * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
- */
- E1000_WRITE_REG(hw, E1000_RTTBCNRM(0), 0x14);
- E1000_WRITE_REG(hw, E1000_RTTBCNRC, bcnrc_val);
-}
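
RTTBCNRC holds a fixed-point link_speed/tx_rate ratio: rf_int is the integer part, rf_dec the remainder scaled by 2^E1000_RTTBCNRC_RF_INT_SHIFT (14 on these parts). A worked sketch with illustrative numbers:

    /* illustrative: 1000 Mbps link, 300 Mbps VF cap, 14-bit fraction */
    int link_speed = 1000, tx_rate = 300;
    int rf_int = link_speed / tx_rate;           /* 3 */
    int rf_dec = link_speed - rf_int * tx_rate;  /* 100 */

    rf_dec = (rf_dec * (1 << 14)) / tx_rate;     /* 5461 */
    /* encoded ratio: 3 + 5461/16384 ~= 3.333 = 1000/300 */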
-
-static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
-{
- int actual_link_speed, i;
- bool reset_rate = false;
-
- /* VF TX rate limit was not set */
- if ((adapter->vf_rate_link_speed == 0) ||
- (adapter->hw.mac.type != e1000_82576))
- return;
-
- actual_link_speed = igb_link_mbps(adapter->link_speed);
- if (actual_link_speed != adapter->vf_rate_link_speed) {
- reset_rate = true;
- adapter->vf_rate_link_speed = 0;
- dev_info(&adapter->pdev->dev,
- "Link speed has been changed. VF Transmit rate is disabled\n");
- }
-
- for (i = 0; i < adapter->vfs_allocated_count; i++) {
- if (reset_rate)
- adapter->vf_data[i].tx_rate = 0;
-
- igb_set_vf_rate_limit(&adapter->hw, i,
- adapter->vf_data[i].tx_rate, actual_link_speed);
- }
-}
-
-#ifdef HAVE_VF_MIN_MAX_TXRATE
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
- int tx_rate)
-#else /* HAVE_VF_MIN_MAX_TXRATE */
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
-#endif /* HAVE_VF_MIN_MAX_TXRATE */
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- int actual_link_speed;
-
- if (hw->mac.type != e1000_82576)
- return -EOPNOTSUPP;
-
-#ifdef HAVE_VF_MIN_MAX_TXRATE
- if (min_tx_rate)
- return -EINVAL;
-#endif /* HAVE_VF_MIN_MAX_TXRATE */
-
- actual_link_speed = igb_link_mbps(adapter->link_speed);
- if ((vf >= adapter->vfs_allocated_count) ||
- (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) ||
- (tx_rate < 0) || (tx_rate > actual_link_speed))
- return -EINVAL;
-
- adapter->vf_rate_link_speed = actual_link_speed;
- adapter->vf_data[vf].tx_rate = (u16)tx_rate;
- igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
-
- return 0;
-}
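
In-tree, igb_ndo_set_vf_bw is reached through the netdev ops table (.ndo_set_vf_rate on newer kernels, .ndo_set_vf_tx_rate on older ones), i.e. from userspace via "ip link set DEV vf N rate MBPS".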
-
-static int igb_ndo_get_vf_config(struct net_device *netdev,
- int vf, struct ifla_vf_info *ivi)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- if (vf >= adapter->vfs_allocated_count)
- return -EINVAL;
- ivi->vf = vf;
- memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
-#ifdef HAVE_VF_MIN_MAX_TXRATE
- ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
- ivi->min_tx_rate = 0;
-#else /* HAVE_VF_MIN_MAX_TXRATE */
- ivi->tx_rate = adapter->vf_data[vf].tx_rate;
-#endif /* HAVE_VF_MIN_MAX_TXRATE */
- ivi->vlan = adapter->vf_data[vf].pf_vlan;
- ivi->qos = adapter->vf_data[vf].pf_qos;
-#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
- ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
-#endif
- return 0;
-}
-#endif
-static void igb_vmm_control(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- int count;
- u32 reg;
-
- switch (hw->mac.type) {
- case e1000_82575:
- default:
- /* replication is not supported for 82575 */
- return;
- case e1000_82576:
- /* notify HW that the MAC is adding vlan tags */
- reg = E1000_READ_REG(hw, E1000_DTXCTL);
- reg |= (E1000_DTXCTL_VLAN_ADDED |
- E1000_DTXCTL_SPOOF_INT);
- E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
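-		/* fall through - 82576 also needs the 82580 VLAN-strip setting */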
- case e1000_82580:
- /* enable replication vlan tag stripping */
- reg = E1000_READ_REG(hw, E1000_RPLOLR);
- reg |= E1000_RPLOLR_STRVLAN;
- E1000_WRITE_REG(hw, E1000_RPLOLR, reg);
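-		/* fall through */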
- case e1000_i350:
- case e1000_i354:
- /* none of the above registers are supported by i350 */
- break;
- }
-
- /* Enable Malicious Driver Detection */
- if ((adapter->vfs_allocated_count) &&
- (adapter->mdd)) {
- if (hw->mac.type == e1000_i350)
- igb_enable_mdd(adapter);
- }
-
- /* enable replication and loopback support */
- count = adapter->vfs_allocated_count || adapter->vmdq_pools;
- if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE && count)
- e1000_vmdq_set_loopback_pf(hw, 1);
- e1000_vmdq_set_anti_spoofing_pf(hw,
- adapter->vfs_allocated_count || adapter->vmdq_pools,
- adapter->vfs_allocated_count);
- e1000_vmdq_set_replication_pf(hw, adapter->vfs_allocated_count ||
- adapter->vmdq_pools);
-}
-
-static void igb_init_fw(struct igb_adapter *adapter)
-{
- struct e1000_fw_drv_info fw_cmd;
- struct e1000_hw *hw = &adapter->hw;
- int i;
- u16 mask;
-
- if (hw->mac.type == e1000_i210)
- mask = E1000_SWFW_EEP_SM;
- else
- mask = E1000_SWFW_PHY0_SM;
- /* i211 parts do not support this feature */
- if (hw->mac.type == e1000_i211)
- hw->mac.arc_subsystem_valid = false;
-
- if (!hw->mac.ops.acquire_swfw_sync(hw, mask)) {
- for (i = 0; i <= FW_MAX_RETRIES; i++) {
- E1000_WRITE_REG(hw, E1000_FWSTS, E1000_FWSTS_FWRI);
- fw_cmd.hdr.cmd = FW_CMD_DRV_INFO;
- fw_cmd.hdr.buf_len = FW_CMD_DRV_INFO_LEN;
- fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CMD_RESERVED;
- fw_cmd.port_num = hw->bus.func;
- fw_cmd.drv_version = FW_FAMILY_DRV_VER;
- fw_cmd.hdr.checksum = 0;
- fw_cmd.hdr.checksum = e1000_calculate_checksum((u8 *)&fw_cmd,
- (FW_HDR_LEN +
- fw_cmd.hdr.buf_len));
- e1000_host_interface_command(hw, (u8*)&fw_cmd,
- sizeof(fw_cmd));
- if (fw_cmd.hdr.cmd_or_resp.ret_status == FW_STATUS_SUCCESS)
- break;
- }
- } else
- dev_warn(pci_dev_to_dev(adapter->pdev),
- "Unable to get semaphore, firmware init failed.\n");
- hw->mac.ops.release_swfw_sync(hw, mask);
-}
-
-static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 dmac_thr;
- u16 hwm;
- u32 status;
-
- if (hw->mac.type == e1000_i211)
- return;
-
- if (hw->mac.type > e1000_82580) {
- if (adapter->dmac != IGB_DMAC_DISABLE) {
- u32 reg;
-
- /* force threshold to 0. */
- E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);
-
- /*
- * DMA Coalescing high water mark needs to be greater
- * than the Rx threshold. Set hwm to PBA - max frame
- * size in 16B units, capping it at PBA - 6KB.
- */
- hwm = 64 * pba - adapter->max_frame_size / 16;
- if (hwm < 64 * (pba - 6))
- hwm = 64 * (pba - 6);
- reg = E1000_READ_REG(hw, E1000_FCRTC);
- reg &= ~E1000_FCRTC_RTH_COAL_MASK;
- reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
- & E1000_FCRTC_RTH_COAL_MASK);
- E1000_WRITE_REG(hw, E1000_FCRTC, reg);
-
- /*
- * Set the DMA Coalescing Rx threshold to PBA - 2 * max
- * frame size, capping it at PBA - 10KB.
- */
- dmac_thr = pba - adapter->max_frame_size / 512;
- if (dmac_thr < pba - 10)
- dmac_thr = pba - 10;
- reg = E1000_READ_REG(hw, E1000_DMACR);
- reg &= ~E1000_DMACR_DMACTHR_MASK;
- reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
- & E1000_DMACR_DMACTHR_MASK);
-
- /* transition to L0x or L1 if available..*/
- reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
-
-			/* Check for a 2.5Gb backplane connection before
-			 * configuring the watchdog timer: the timer counts
-			 * the msec value in 12.8 usec intervals on 2.5Gb
-			 * SKUs and in 32 usec intervals on all other
-			 * connections.
-			 */
- if (hw->mac.type == e1000_i354) {
- status = E1000_READ_REG(hw, E1000_STATUS);
- if ((status & E1000_STATUS_2P5_SKU) &&
- (!(status & E1000_STATUS_2P5_SKU_OVER)))
- reg |= ((adapter->dmac * 5) >> 6);
- else
- reg |= ((adapter->dmac) >> 5);
- } else {
- reg |= ((adapter->dmac) >> 5);
- }
-
- /*
- * Disable BMC-to-OS Watchdog enable
- * on devices that support OS-to-BMC
- */
- if (hw->mac.type != e1000_i354)
- reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
- E1000_WRITE_REG(hw, E1000_DMACR, reg);
-
-			/* no lower Rx threshold, so coalescing is never cut short (smart FIFO); DMCRTRH=0 */
- E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
-
- /* This sets the time to wait before requesting
- * transition to low power state to number of usecs
- * needed to receive 1 512 byte frame at gigabit
- * line rate. On i350 device, time to make transition
- * to Lx state is delayed by 4 usec with flush disable
- * bit set to avoid losing mailbox interrupts
- */
- reg = E1000_READ_REG(hw, E1000_DMCTLX);
- if (hw->mac.type == e1000_i350)
- reg |= IGB_DMCTLX_DCFLUSH_DIS;
-
-			/* on a 2.5Gb connection the TTLX unit is 0.4 usec,
-			 * so the same 4 usec delay is 0x4 * 2.5 = 0xA
-			 */
- if (hw->mac.type == e1000_i354) {
- status = E1000_READ_REG(hw, E1000_STATUS);
- if ((status & E1000_STATUS_2P5_SKU) &&
- (!(status & E1000_STATUS_2P5_SKU_OVER)))
- reg |= 0xA;
- else
- reg |= 0x4;
- } else {
- reg |= 0x4;
- }
- E1000_WRITE_REG(hw, E1000_DMCTLX, reg);
-
- /* free space in tx packet buffer to wake from DMA coal */
- E1000_WRITE_REG(hw, E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
- (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
-
- /* make low power state decision controlled by DMA coal */
- reg = E1000_READ_REG(hw, E1000_PCIEMISC);
- reg &= ~E1000_PCIEMISC_LX_DECISION;
- E1000_WRITE_REG(hw, E1000_PCIEMISC, reg);
- } /* endif adapter->dmac is not disabled */
- } else if (hw->mac.type == e1000_82580) {
- u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC);
- E1000_WRITE_REG(hw, E1000_PCIEMISC,
- reg & ~E1000_PCIEMISC_LX_DECISION);
- E1000_WRITE_REG(hw, E1000_DMACR, 0);
- }
-}
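
The watermark arithmetic above is easiest to read with units attached: pba is in KB, hwm in 16-byte units, dmac_thr in KB. A worked sketch under an assumed 34 KB packet buffer and 1522-byte max frame:

    /* illustrative numbers only */
    u32 pba = 34, max_frame = 1522;

    u16 hwm = 64 * pba - max_frame / 16;   /* 2176 - 95 = 2081, 16B units */
    if (hwm < 64 * (pba - 6))              /* never below PBA - 6KB */
        hwm = 64 * (pba - 6);

    u32 dmac_thr = pba - max_frame / 512;  /* 34 - 2 = 32, KB units */
    if (dmac_thr < pba - 10)               /* never below PBA - 10KB */
        dmac_thr = pba - 10;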
-
-#ifdef HAVE_I2C_SUPPORT
-/* igb_read_i2c_byte - Reads 8 bit word over I2C
- * @hw: pointer to hardware structure
- * @byte_offset: byte offset to read
- * @dev_addr: device address
- * @data: value read
- *
- * Performs byte read operation over I2C interface at
- * a specified device address.
- */
-s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data)
-{
- struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
- struct i2c_client *this_client = adapter->i2c_client;
- s32 status;
- u16 swfw_mask = 0;
-
- if (!this_client)
- return E1000_ERR_I2C;
-
- swfw_mask = E1000_SWFW_PHY0_SM;
-
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
- != E1000_SUCCESS)
- return E1000_ERR_SWFW_SYNC;
-
- status = i2c_smbus_read_byte_data(this_client, byte_offset);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-
- if (status < 0)
- return E1000_ERR_I2C;
- else {
- *data = status;
- return E1000_SUCCESS;
- }
-}
-
-/* igb_write_i2c_byte - Writes 8 bit word over I2C
- * @hw: pointer to hardware structure
- * @byte_offset: byte offset to write
- * @dev_addr: device address
- * @data: value to write
- *
- * Performs byte write operation over I2C interface at
- * a specified device address.
- */
-s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data)
-{
- struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
- struct i2c_client *this_client = adapter->i2c_client;
- s32 status;
- u16 swfw_mask = E1000_SWFW_PHY0_SM;
-
- if (!this_client)
- return E1000_ERR_I2C;
-
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
- return E1000_ERR_SWFW_SYNC;
- status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-
- if (status)
- return E1000_ERR_I2C;
- else
- return E1000_SUCCESS;
-}
-#endif /* HAVE_I2C_SUPPORT */
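
Both I2C helpers bracket the SMBus access with the PHY software/firmware semaphore, so callers never touch the bus unlocked. A hypothetical read of one sensor byte (the offset and device address below are illustrative):

    u8 val;

    /* real callers pass sensor-specific offsets/addresses */
    if (igb_read_i2c_byte(hw, 0x01, 0xF8, &val) == E1000_SUCCESS)
        dev_info(pci_dev_to_dev(adapter->pdev),
                 "i2c byte: 0x%02x\n", val);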
-/* igb_main.c */
-
-
-/**
- * igb_kni_probe - Device Initialization Routine
- * @pdev: PCI device information struct
- * @lad_dev: on success, receives the newly allocated net_device
- *
- * Returns 0 on success, negative on failure
- *
- * igb_kni_probe initializes an adapter identified by a pci_dev structure.
- * The OS initialization, configuring of the adapter private structure,
- * and a hardware reset occur.
- **/
-int igb_kni_probe(struct pci_dev *pdev,
- struct net_device **lad_dev)
-{
- struct net_device *netdev;
- struct igb_adapter *adapter;
- struct e1000_hw *hw;
- u16 eeprom_data = 0;
- u8 pba_str[E1000_PBANUM_LENGTH];
- s32 ret_val;
- static int global_quad_port_a; /* global quad port a indication */
- int i, err, pci_using_dac = 0;
- static int cards_found;
-
- err = pci_enable_device_mem(pdev);
- if (err)
- return err;
-
-#ifdef NO_KNI
- pci_using_dac = 0;
- err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64));
- if (!err) {
- err = dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
- } else {
- err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32));
- if (err) {
- err = dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32));
- if (err) {
- IGB_ERR("No usable DMA configuration, "
- "aborting\n");
- goto err_dma;
- }
- }
- }
-
-#ifndef HAVE_ASPM_QUIRKS
- /* 82575 requires that the pci-e link partner disable the L0s state */
- switch (pdev->device) {
- case E1000_DEV_ID_82575EB_COPPER:
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
- default:
- break;
- }
-
-#endif /* HAVE_ASPM_QUIRKS */
- err = pci_request_selected_regions(pdev,
- pci_select_bars(pdev,
- IORESOURCE_MEM),
- igb_driver_name);
- if (err)
- goto err_pci_reg;
-
- pci_enable_pcie_error_reporting(pdev);
-
- pci_set_master(pdev);
-
- err = -ENOMEM;
-#endif /* NO_KNI */
-#ifdef HAVE_TX_MQ
- netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
- IGB_MAX_TX_QUEUES);
-#else
- netdev = alloc_etherdev(sizeof(struct igb_adapter));
-#endif /* HAVE_TX_MQ */
- if (!netdev)
- goto err_alloc_etherdev;
-
- SET_MODULE_OWNER(netdev);
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- //pci_set_drvdata(pdev, netdev);
- adapter = netdev_priv(netdev);
- adapter->netdev = netdev;
- adapter->pdev = pdev;
- hw = &adapter->hw;
- hw->back = adapter;
- adapter->port_num = hw->bus.func;
- adapter->msg_enable = (1 << debug) - 1;
-
-#ifdef HAVE_PCI_ERS
- err = pci_save_state(pdev);
- if (err)
- goto err_ioremap;
-#endif
- err = -EIO;
- hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (!hw->hw_addr)
- goto err_ioremap;
-
-#ifdef HAVE_NET_DEVICE_OPS
- netdev->netdev_ops = &igb_netdev_ops;
-#else /* HAVE_NET_DEVICE_OPS */
- netdev->open = &igb_open;
- netdev->stop = &igb_close;
- netdev->get_stats = &igb_get_stats;
-#ifdef HAVE_SET_RX_MODE
- netdev->set_rx_mode = &igb_set_rx_mode;
-#endif
- netdev->set_multicast_list = &igb_set_rx_mode;
- netdev->set_mac_address = &igb_set_mac;
- netdev->change_mtu = &igb_change_mtu;
- netdev->do_ioctl = &igb_ioctl;
-#ifdef HAVE_TX_TIMEOUT
- netdev->tx_timeout = &igb_tx_timeout;
-#endif
- netdev->vlan_rx_register = igb_vlan_mode;
- netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
- netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- netdev->poll_controller = igb_netpoll;
-#endif
- netdev->hard_start_xmit = &igb_xmit_frame;
-#endif /* HAVE_NET_DEVICE_OPS */
- igb_set_ethtool_ops(netdev);
-#ifdef HAVE_TX_TIMEOUT
- netdev->watchdog_timeo = 5 * HZ;
-#endif
-
- strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
-
- adapter->bd_number = cards_found;
-
- /* setup the private structure */
- err = igb_sw_init(adapter);
- if (err)
- goto err_sw_init;
-
- e1000_get_bus_info(hw);
-
- hw->phy.autoneg_wait_to_complete = FALSE;
- hw->mac.adaptive_ifs = FALSE;
-
- /* Copper options */
- if (hw->phy.media_type == e1000_media_type_copper) {
- hw->phy.mdix = AUTO_ALL_MODES;
- hw->phy.disable_polarity_correction = FALSE;
- hw->phy.ms_type = e1000_ms_hw_default;
- }
-
- if (e1000_check_reset_block(hw))
- dev_info(pci_dev_to_dev(pdev),
- "PHY reset is blocked due to SOL/IDER session.\n");
-
- /*
- * features is initialized to 0 in allocation, it might have bits
- * set by igb_sw_init so we should use an or instead of an
- * assignment.
- */
- netdev->features |= NETIF_F_SG |
- NETIF_F_IP_CSUM |
-#ifdef NETIF_F_IPV6_CSUM
- NETIF_F_IPV6_CSUM |
-#endif
-#ifdef NETIF_F_TSO
- NETIF_F_TSO |
-#ifdef NETIF_F_TSO6
- NETIF_F_TSO6 |
-#endif
-#endif /* NETIF_F_TSO */
-#ifdef NETIF_F_RXHASH
- NETIF_F_RXHASH |
-#endif
- NETIF_F_RXCSUM |
-#ifdef NETIF_F_HW_VLAN_CTAG_RX
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX;
-#else
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_TX;
-#endif
-
- if (hw->mac.type >= e1000_82576)
- netdev->features |= NETIF_F_SCTP_CSUM;
-
-#ifdef HAVE_NDO_SET_FEATURES
- /* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
-#ifndef IGB_NO_LRO
-
- /* give us the option of enabling LRO later */
- netdev->hw_features |= NETIF_F_LRO;
-#endif
-#else
-#ifdef NETIF_F_GRO
-
- /* this is only needed on kernels prior to 2.6.39 */
- netdev->features |= NETIF_F_GRO;
-#endif
-#endif
-
- /* set this bit last since it cannot be part of hw_features */
-#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
- netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#else
- netdev->features |= NETIF_F_HW_VLAN_FILTER;
-#endif
-
-#ifdef HAVE_NETDEV_VLAN_FEATURES
- netdev->vlan_features |= NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_SG;
-
-#endif
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
-
-#ifdef NO_KNI
- adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
-#ifdef DEBUG
- if (adapter->dmac != IGB_DMAC_DISABLE)
-		printk("%s: DMA Coalescing is enabled\n", netdev->name);
-#endif
-
- /* before reading the NVM, reset the controller to put the device in a
- * known good starting state */
- e1000_reset_hw(hw);
-#endif /* NO_KNI */
-
- /* make sure the NVM is good */
- if (e1000_validate_nvm_checksum(hw) < 0) {
- dev_err(pci_dev_to_dev(pdev), "The NVM Checksum Is Not"
- " Valid\n");
- err = -EIO;
- goto err_eeprom;
- }
-
- /* copy the MAC address out of the NVM */
- if (e1000_read_mac_addr(hw))
- dev_err(pci_dev_to_dev(pdev), "NVM Read Error\n");
- memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
-#ifdef ETHTOOL_GPERMADDR
- memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
-
- if (!is_valid_ether_addr(netdev->perm_addr)) {
-#else
- if (!is_valid_ether_addr(netdev->dev_addr)) {
-#endif
- dev_err(pci_dev_to_dev(pdev), "Invalid MAC Address\n");
- err = -EIO;
- goto err_eeprom;
- }
-
- memcpy(&adapter->mac_table[0].addr, hw->mac.addr, netdev->addr_len);
- adapter->mac_table[0].queue = adapter->vfs_allocated_count;
- adapter->mac_table[0].state = (IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE);
- igb_rar_set(adapter, 0);
-
- /* get firmware version for ethtool -i */
- igb_set_fw_version(adapter);
-
- /* Check if Media Autosense is enabled */
- if (hw->mac.type == e1000_82580)
- igb_init_mas(adapter);
-
-#ifdef NO_KNI
- setup_timer(&adapter->watchdog_timer, &igb_watchdog,
- (unsigned long) adapter);
- if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
- setup_timer(&adapter->dma_err_timer, &igb_dma_err_timer,
- (unsigned long) adapter);
- setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
- (unsigned long) adapter);
-
- INIT_WORK(&adapter->reset_task, igb_reset_task);
- INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
- if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
- INIT_WORK(&adapter->dma_err_task, igb_dma_err_task);
-#endif
-
- /* Initialize link properties that are user-changeable */
- adapter->fc_autoneg = true;
- hw->mac.autoneg = true;
- hw->phy.autoneg_advertised = 0x2f;
-
- hw->fc.requested_mode = e1000_fc_default;
- hw->fc.current_mode = e1000_fc_default;
-
- e1000_validate_mdi_setting(hw);
-
- /* By default, support wake on port A */
- if (hw->bus.func == 0)
- adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
-
- /* Check the NVM for wake support for non-port A ports */
- if (hw->mac.type >= e1000_82580)
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
- NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
- &eeprom_data);
- else if (hw->bus.func == 1)
- e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
-
- if (eeprom_data & IGB_EEPROM_APME)
- adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
-
- /* now that we have the eeprom settings, apply the special cases where
- * the eeprom may be wrong or the board simply won't support wake on
- * lan on a particular port */
- switch (pdev->device) {
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
- break;
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82576_FIBER:
- case E1000_DEV_ID_82576_SERDES:
- /* Wake events only supported on port A for dual fiber
- * regardless of eeprom setting */
- if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1)
- adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
- break;
- case E1000_DEV_ID_82576_QUAD_COPPER:
- case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
- /* if quad port adapter, disable WoL on all but port A */
- if (global_quad_port_a != 0)
- adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
- else
- adapter->flags |= IGB_FLAG_QUAD_PORT_A;
- /* Reset for multiple quad port adapters */
- if (++global_quad_port_a == 4)
- global_quad_port_a = 0;
- break;
- default:
- /* If the device can't wake, don't set software support */
- if (!device_can_wakeup(&adapter->pdev->dev))
- adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
- break;
- }
-
- /* initialize the wol settings based on the eeprom settings */
- if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
- adapter->wol |= E1000_WUFC_MAG;
-
- /* Some vendors want WoL disabled by default, but still supported */
- if ((hw->mac.type == e1000_i350) &&
- (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
- adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
- adapter->wol = 0;
- }
-
-#ifdef NO_KNI
- device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev),
- adapter->flags & IGB_FLAG_WOL_SUPPORTED);
-
- /* reset the hardware with the new settings */
- igb_reset(adapter);
- adapter->devrc = 0;
-
-#ifdef HAVE_I2C_SUPPORT
- /* Init the I2C interface */
- err = igb_init_i2c(adapter);
- if (err) {
- dev_err(&pdev->dev, "failed to init i2c interface\n");
- goto err_eeprom;
- }
-#endif /* HAVE_I2C_SUPPORT */
-
- /* let the f/w know that the h/w is now under the control of the
- * driver. */
- igb_get_hw_control(adapter);
-
- strncpy(netdev->name, "eth%d", IFNAMSIZ);
- err = register_netdev(netdev);
- if (err)
- goto err_register;
-
-#ifdef CONFIG_IGB_VMDQ_NETDEV
- err = igb_init_vmdq_netdevs(adapter);
- if (err)
- goto err_register;
-#endif
- /* carrier off reporting is important to ethtool even BEFORE open */
- netif_carrier_off(netdev);
-
-#ifdef IGB_DCA
- if (dca_add_requester(&pdev->dev) == E1000_SUCCESS) {
- adapter->flags |= IGB_FLAG_DCA_ENABLED;
- dev_info(pci_dev_to_dev(pdev), "DCA enabled\n");
- igb_setup_dca(adapter);
- }
-
-#endif
-#ifdef HAVE_PTP_1588_CLOCK
- /* do hw tstamp init after resetting */
- igb_ptp_init(adapter);
-#endif /* HAVE_PTP_1588_CLOCK */
-
-#endif /* NO_KNI */
- dev_info(pci_dev_to_dev(pdev), "Intel(R) Gigabit Ethernet Network Connection\n");
- /* print bus type/speed/width info */
- dev_info(pci_dev_to_dev(pdev), "%s: (PCIe:%s:%s) ",
- netdev->name,
- ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5GT/s" :
- (hw->bus.speed == e1000_bus_speed_5000) ? "5.0GT/s" :
- (hw->mac.type == e1000_i354) ? "integrated" :
- "unknown"),
- ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
- (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
- (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
- (hw->mac.type == e1000_i354) ? "integrated" :
- "unknown"));
- dev_info(pci_dev_to_dev(pdev), "%s: MAC: ", netdev->name);
- for (i = 0; i < 6; i++)
- printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
-
- ret_val = e1000_read_pba_string(hw, pba_str, E1000_PBANUM_LENGTH);
- if (ret_val)
- strncpy(pba_str, "Unknown", sizeof(pba_str) - 1);
- dev_info(pci_dev_to_dev(pdev), "%s: PBA No: %s\n", netdev->name,
- pba_str);
-
-
- /* Initialize the thermal sensor on i350 devices. */
- if (hw->mac.type == e1000_i350) {
- if (hw->bus.func == 0) {
- u16 ets_word;
-
- /*
- * Read the NVM to determine if this i350 device
- * supports an external thermal sensor.
- */
- e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word);
- if (ets_word != 0x0000 && ets_word != 0xFFFF)
- adapter->ets = true;
- else
- adapter->ets = false;
- }
-#ifdef NO_KNI
-#ifdef IGB_HWMON
-
- igb_sysfs_init(adapter);
-#else
-#ifdef IGB_PROCFS
-
- igb_procfs_init(adapter);
-#endif /* IGB_PROCFS */
-#endif /* IGB_HWMON */
-#endif /* NO_KNI */
- } else {
- adapter->ets = false;
- }
-
- if (hw->phy.media_type == e1000_media_type_copper) {
- switch (hw->mac.type) {
- case e1000_i350:
- case e1000_i210:
- case e1000_i211:
- /* Enable EEE for internal copper PHY devices */
- err = e1000_set_eee_i350(hw);
- if ((!err) &&
- (adapter->flags & IGB_FLAG_EEE))
- adapter->eee_advert =
- MDIO_EEE_100TX | MDIO_EEE_1000T;
- break;
- case e1000_i354:
- if ((E1000_READ_REG(hw, E1000_CTRL_EXT)) &
- (E1000_CTRL_EXT_LINK_MODE_SGMII)) {
- err = e1000_set_eee_i354(hw);
- if ((!err) &&
- (adapter->flags & IGB_FLAG_EEE))
- adapter->eee_advert =
- MDIO_EEE_100TX | MDIO_EEE_1000T;
- }
- break;
- default:
- break;
- }
- }
-
- /* send driver version info to firmware */
- if (hw->mac.type >= e1000_i350)
- igb_init_fw(adapter);
-
-#ifndef IGB_NO_LRO
- if (netdev->features & NETIF_F_LRO)
- dev_info(pci_dev_to_dev(pdev), "Internal LRO is enabled \n");
- else
- dev_info(pci_dev_to_dev(pdev), "LRO is disabled \n");
-#endif
- dev_info(pci_dev_to_dev(pdev),
- "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
- adapter->msix_entries ? "MSI-X" :
- (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
- adapter->num_rx_queues, adapter->num_tx_queues);
-
- cards_found++;
- *lad_dev = netdev;
-
- pm_runtime_put_noidle(&pdev->dev);
- return 0;
-
-//err_register:
-// igb_release_hw_control(adapter);
-#ifdef HAVE_I2C_SUPPORT
- memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
-#endif /* HAVE_I2C_SUPPORT */
-err_eeprom:
-// if (!e1000_check_reset_block(hw))
-// e1000_phy_hw_reset(hw);
-
- if (hw->flash_address)
- iounmap(hw->flash_address);
-err_sw_init:
-// igb_clear_interrupt_scheme(adapter);
-// igb_reset_sriov_capability(adapter);
- iounmap(hw->hw_addr);
-err_ioremap:
- free_netdev(netdev);
-err_alloc_etherdev:
-// pci_release_selected_regions(pdev,
-// pci_select_bars(pdev, IORESOURCE_MEM));
-//err_pci_reg:
-//err_dma:
- pci_disable_device(pdev);
- return err;
-}
-
-
-void igb_kni_remove(struct pci_dev *pdev)
-{
- pci_disable_device(pdev);
-}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_param.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_param.c
deleted file mode 100755
index 14439ad5..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_param.c
+++ /dev/null
@@ -1,848 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-
-#include <linux/netdevice.h>
-
-#include "igb.h"
-
-/* This is the only thing that needs to be changed to adjust the
- * maximum number of ports that the driver can manage.
- */
-
-#define IGB_MAX_NIC 32
-
-#define OPTION_UNSET -1
-#define OPTION_DISABLED 0
-#define OPTION_ENABLED 1
-#define MAX_NUM_LIST_OPTS 15
-
-/* All parameters are treated the same, as an integer array of values.
- * This macro just reduces the need to repeat the same declaration code
- * over and over (plus this helps to avoid typo bugs).
- */
-
-#define IGB_PARAM_INIT { [0 ... IGB_MAX_NIC] = OPTION_UNSET }
-#ifndef module_param_array
-/* Module Parameters are always initialized to -1, so that the driver
- * can tell the difference between no user specified value or the
- * user asking for the default value.
- * The true default values are loaded in when igb_check_options is called.
- *
- * This is a GCC extension to ANSI C.
- * See the item "Labeled Elements in Initializers" in the section
- * "Extensions to the C Language Family" of the GCC documentation.
- */
-
-#define IGB_PARAM(X, desc) \
- static const int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
- MODULE_PARM(X, "1-" __MODULE_STRING(IGB_MAX_NIC) "i"); \
- MODULE_PARM_DESC(X, desc);
-#else
-#define IGB_PARAM(X, desc) \
- static int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
- static unsigned int num_##X; \
- module_param_array_named(X, X, int, &num_##X, 0); \
- MODULE_PARM_DESC(X, desc);
-#endif
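
With module_param_array available, each IGB_PARAM(X, desc) expands to a per-port value array plus a num_X element count, which is why the option blocks in igb_check_options test num_X > bd before reading X[bd]. For example, IGB_PARAM(RSS, ...) expands to roughly:

    static int RSS[IGB_MAX_NIC + 1] = IGB_PARAM_INIT;
    static unsigned int num_RSS;
    module_param_array_named(RSS, RSS, int, &num_RSS, 0);
    MODULE_PARM_DESC(RSS, "Number of Receive-Side Scaling ...");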
-
-/* Interrupt Throttle Rate (interrupts/sec)
- *
- * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
- */
-IGB_PARAM(InterruptThrottleRate,
- "Maximum interrupts per second, per vector, (max 100000), default 3=adaptive");
-#define DEFAULT_ITR 3
-#define MAX_ITR 100000
-/* #define MIN_ITR 120 */
-#define MIN_ITR 0
-/* IntMode (Interrupt Mode)
- *
- * Valid Range: 0 - 2
- *
- * Default Value: 2 (MSI-X)
- */
-IGB_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default 2");
-#define MAX_INTMODE IGB_INT_MODE_MSIX
-#define MIN_INTMODE IGB_INT_MODE_LEGACY
-
-IGB_PARAM(Node, "set the starting node to allocate memory on, default -1");
-
-/* LLIPort (Low Latency Interrupt TCP Port)
- *
- * Valid Range: 0 - 65535
- *
- * Default Value: 0 (disabled)
- */
-IGB_PARAM(LLIPort, "Low Latency Interrupt TCP Port (0-65535), default 0=off");
-
-#define DEFAULT_LLIPORT 0
-#define MAX_LLIPORT 0xFFFF
-#define MIN_LLIPORT 0
-
-/* LLIPush (Low Latency Interrupt on TCP Push flag)
- *
- * Valid Range: 0, 1
- *
- * Default Value: 0 (disabled)
- */
-IGB_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1), default 0=off");
-
-#define DEFAULT_LLIPUSH 0
-#define MAX_LLIPUSH 1
-#define MIN_LLIPUSH 0
-
-/* LLISize (Low Latency Interrupt on Packet Size)
- *
- * Valid Range: 0 - 1500
- *
- * Default Value: 0 (disabled)
- */
-IGB_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500), default 0=off");
-
-#define DEFAULT_LLISIZE 0
-#define MAX_LLISIZE 1500
-#define MIN_LLISIZE 0
-
-/* RSS (Enable RSS multiqueue receive)
- *
- * Valid Range: 0 - 8
- *
- * Default Value: 1
- */
-IGB_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1, 0=number of cpus");
-
-#define DEFAULT_RSS 1
-#define MAX_RSS 8
-#define MIN_RSS 0
-
-/* VMDQ (Enable VMDq multiqueue receive)
- *
- * Valid Range: 0 - 8
- *
- * Default Value: 0
- */
-IGB_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0-1 = disable, 2-8 enable, default 0");
-
-#define DEFAULT_VMDQ 0
-#define MAX_VMDQ MAX_RSS
-#define MIN_VMDQ 0
-
-/* max_vfs (Enable SR-IOV VF devices)
- *
- * Valid Range: 0 - 7
- *
- * Default Value: 0
- */
-IGB_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable, 1-7 enable, default 0");
-
-#define DEFAULT_SRIOV 0
-#define MAX_SRIOV 7
-#define MIN_SRIOV 0
-
-/* MDD (Enable Malicious Driver Detection)
- *
- * Only available when SR-IOV is enabled - max_vfs is greater than 0
- *
- * Valid Range: 0, 1
- *
- * Default Value: 1
- */
-IGB_PARAM(MDD, "Malicious Driver Detection (0/1), default 1 = enabled. "
- "Only available when max_vfs is greater than 0");
-
-#ifdef DEBUG
-
-/* Disable Hardware Reset on Tx Hang
- *
- * Valid Range: 0, 1
- *
- * Default Value: 0 (disabled, i.e. h/w will reset)
- */
-IGB_PARAM(DisableHwReset, "Disable reset of hardware on Tx hang");
-
-/* Dump Transmit and Receive buffers
- *
- * Valid Range: 0, 1
- *
- * Default Value: 0
- */
-IGB_PARAM(DumpBuffers, "Dump Tx/Rx buffers on Tx hang or by request");
-
-#endif /* DEBUG */
-
-/* QueuePairs (Enable TX/RX queue pairs for interrupt handling)
- *
- * Valid Range: 0 - 1
- *
- * Default Value: 1
- */
-IGB_PARAM(QueuePairs, "Enable Tx/Rx queue pairs for interrupt handling (0,1), default 1=on");
-
-#define DEFAULT_QUEUE_PAIRS 1
-#define MAX_QUEUE_PAIRS 1
-#define MIN_QUEUE_PAIRS 0
-
-/* Enable/disable EEE (a.k.a. IEEE802.3az)
- *
- * Valid Range: 0, 1
- *
- * Default Value: 1
- */
- IGB_PARAM(EEE, "Enable/disable EEE (IEEE 802.3az) on parts that support the feature");
-
-/* Enable/disable DMA Coalescing
- *
- * Valid Values: 0(off), 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000,
- * 9000, 10000(msec), 250(usec), 500(usec)
- *
- * Default Value: 0
- */
- IGB_PARAM(DMAC, "Disable or set latency for DMA Coalescing (0=off, 1000-10000 (msec), 250, 500 (usec))");
-
-#ifndef IGB_NO_LRO
-/* Enable/disable Large Receive Offload
- *
- * Valid Values: 0(off), 1(on)
- *
- * Default Value: 0
- */
- IGB_PARAM(LRO, "Large Receive Offload (0,1), default 0=off");
-
-#endif
-struct igb_opt_list {
- int i;
- char *str;
-};
-struct igb_option {
- enum { enable_option, range_option, list_option } type;
- const char *name;
- const char *err;
- int def;
- union {
- struct { /* range_option info */
- int min;
- int max;
- } r;
- struct { /* list_option info */
- int nr;
- struct igb_opt_list *p;
- } l;
- } arg;
-};
-
-static int igb_validate_option(unsigned int *value,
- struct igb_option *opt,
- struct igb_adapter *adapter)
-{
- if (*value == OPTION_UNSET) {
- *value = opt->def;
- return 0;
- }
-
- switch (opt->type) {
- case enable_option:
- switch (*value) {
- case OPTION_ENABLED:
- DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
- return 0;
- case OPTION_DISABLED:
- DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
- return 0;
- }
- break;
- case range_option:
- if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- DPRINTK(PROBE, INFO,
- "%s set to %d\n", opt->name, *value);
- return 0;
- }
- break;
- case list_option: {
- int i;
- struct igb_opt_list *ent;
-
- for (i = 0; i < opt->arg.l.nr; i++) {
- ent = &opt->arg.l.p[i];
- if (*value == ent->i) {
- if (ent->str[0] != '\0')
- DPRINTK(PROBE, INFO, "%s\n", ent->str);
- return 0;
- }
- }
- }
- break;
- default:
- BUG();
- }
-
- DPRINTK(PROBE, INFO, "Invalid %s value specified (%d) %s\n",
- opt->name, *value, opt->err);
- *value = opt->def;
- return -1;
-}
-
-/**
- * igb_check_options - Range Checking for Command Line Parameters
- * @adapter: board private structure
- *
- * This routine checks all command line parameters for valid user
- * input. If an invalid value is given, or if no user specified
- * value exists, a default value is used. The final value is stored
- * in a variable in the adapter structure.
- **/
-
-void igb_check_options(struct igb_adapter *adapter)
-{
- int bd = adapter->bd_number;
- struct e1000_hw *hw = &adapter->hw;
-
- if (bd >= IGB_MAX_NIC) {
- DPRINTK(PROBE, NOTICE,
- "Warning: no configuration for board #%d\n", bd);
- DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
-#ifndef module_param_array
- bd = IGB_MAX_NIC;
-#endif
- }
-
- { /* Interrupt Throttling Rate */
- struct igb_option opt = {
- .type = range_option,
- .name = "Interrupt Throttling Rate (ints/sec)",
- .err = "using default of " __MODULE_STRING(DEFAULT_ITR),
- .def = DEFAULT_ITR,
- .arg = { .r = { .min = MIN_ITR,
- .max = MAX_ITR } }
- };
-
-#ifdef module_param_array
- if (num_InterruptThrottleRate > bd) {
-#endif
- unsigned int itr = InterruptThrottleRate[bd];
-
- switch (itr) {
- case 0:
- DPRINTK(PROBE, INFO, "%s turned off\n",
- opt.name);
- if (hw->mac.type >= e1000_i350)
- adapter->dmac = IGB_DMAC_DISABLE;
- adapter->rx_itr_setting = itr;
- break;
- case 1:
- DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
- opt.name);
- adapter->rx_itr_setting = itr;
- break;
- case 3:
- DPRINTK(PROBE, INFO,
- "%s set to dynamic conservative mode\n",
- opt.name);
- adapter->rx_itr_setting = itr;
- break;
- default:
- igb_validate_option(&itr, &opt, adapter);
- /* Save the setting, because the dynamic bits
- * change itr. In case of invalid user value,
- * default to conservative mode, else need to
- * clear the lower two bits because they are
- * used as control */
- if (itr == 3) {
- adapter->rx_itr_setting = itr;
- } else {
- adapter->rx_itr_setting = 1000000000 /
- (itr * 256);
- adapter->rx_itr_setting &= ~3;
- }
- break;
- }
-#ifdef module_param_array
- } else {
- adapter->rx_itr_setting = opt.def;
- }
-#endif
- adapter->tx_itr_setting = adapter->rx_itr_setting;
- }
- { /* Interrupt Mode */
- struct igb_option opt = {
- .type = range_option,
- .name = "Interrupt Mode",
- .err = "defaulting to 2 (MSI-X)",
- .def = IGB_INT_MODE_MSIX,
- .arg = { .r = { .min = MIN_INTMODE,
- .max = MAX_INTMODE } }
- };
-
-#ifdef module_param_array
- if (num_IntMode > bd) {
-#endif
- unsigned int int_mode = IntMode[bd];
- igb_validate_option(&int_mode, &opt, adapter);
- adapter->int_mode = int_mode;
-#ifdef module_param_array
- } else {
- adapter->int_mode = opt.def;
- }
-#endif
- }
- { /* Low Latency Interrupt TCP Port */
- struct igb_option opt = {
- .type = range_option,
- .name = "Low Latency Interrupt TCP Port",
- .err = "using default of " __MODULE_STRING(DEFAULT_LLIPORT),
- .def = DEFAULT_LLIPORT,
- .arg = { .r = { .min = MIN_LLIPORT,
- .max = MAX_LLIPORT } }
- };
-
-#ifdef module_param_array
- if (num_LLIPort > bd) {
-#endif
- adapter->lli_port = LLIPort[bd];
- if (adapter->lli_port) {
- igb_validate_option(&adapter->lli_port, &opt,
- adapter);
- } else {
- DPRINTK(PROBE, INFO, "%s turned off\n",
- opt.name);
- }
-#ifdef module_param_array
- } else {
- adapter->lli_port = opt.def;
- }
-#endif
- }
- { /* Low Latency Interrupt on Packet Size */
- struct igb_option opt = {
- .type = range_option,
- .name = "Low Latency Interrupt on Packet Size",
- .err = "using default of " __MODULE_STRING(DEFAULT_LLISIZE),
- .def = DEFAULT_LLISIZE,
- .arg = { .r = { .min = MIN_LLISIZE,
- .max = MAX_LLISIZE } }
- };
-
-#ifdef module_param_array
- if (num_LLISize > bd) {
-#endif
- adapter->lli_size = LLISize[bd];
- if (adapter->lli_size) {
- igb_validate_option(&adapter->lli_size, &opt,
- adapter);
- } else {
- DPRINTK(PROBE, INFO, "%s turned off\n",
- opt.name);
- }
-#ifdef module_param_array
- } else {
- adapter->lli_size = opt.def;
- }
-#endif
- }
- { /* Low Latency Interrupt on TCP Push flag */
- struct igb_option opt = {
- .type = enable_option,
- .name = "Low Latency Interrupt on TCP Push flag",
- .err = "defaulting to Disabled",
- .def = OPTION_DISABLED
- };
-
-#ifdef module_param_array
- if (num_LLIPush > bd) {
-#endif
- unsigned int lli_push = LLIPush[bd];
- igb_validate_option(&lli_push, &opt, adapter);
- adapter->flags |= lli_push ? IGB_FLAG_LLI_PUSH : 0;
-#ifdef module_param_array
- } else {
- adapter->flags |= opt.def ? IGB_FLAG_LLI_PUSH : 0;
- }
-#endif
- }
- { /* SRIOV - Enable SR-IOV VF devices */
- struct igb_option opt = {
- .type = range_option,
- .name = "max_vfs - SR-IOV VF devices",
- .err = "using default of " __MODULE_STRING(DEFAULT_SRIOV),
- .def = DEFAULT_SRIOV,
- .arg = { .r = { .min = MIN_SRIOV,
- .max = MAX_SRIOV } }
- };
-
-#ifdef module_param_array
- if (num_max_vfs > bd) {
-#endif
- adapter->vfs_allocated_count = max_vfs[bd];
- igb_validate_option(&adapter->vfs_allocated_count, &opt, adapter);
-
-#ifdef module_param_array
- } else {
- adapter->vfs_allocated_count = opt.def;
- }
-#endif
- if (adapter->vfs_allocated_count) {
- switch (hw->mac.type) {
- case e1000_82575:
- case e1000_82580:
- case e1000_i210:
- case e1000_i211:
- case e1000_i354:
- adapter->vfs_allocated_count = 0;
- DPRINTK(PROBE, INFO, "SR-IOV option max_vfs not supported.\n");
- default:
- break;
- }
- }
- }
- { /* VMDQ - Enable VMDq multiqueue receive */
- struct igb_option opt = {
- .type = range_option,
- .name = "VMDQ - VMDq multiqueue queue count",
- .err = "using default of " __MODULE_STRING(DEFAULT_VMDQ),
- .def = DEFAULT_VMDQ,
- .arg = { .r = { .min = MIN_VMDQ,
- .max = (MAX_VMDQ - adapter->vfs_allocated_count) } }
- };
-		if ((hw->mac.type != e1000_i210) &&
-		    (hw->mac.type != e1000_i211)) {
-#ifdef module_param_array
- if (num_VMDQ > bd) {
-#endif
- adapter->vmdq_pools = (VMDQ[bd] == 1 ? 0 : VMDQ[bd]);
- if (adapter->vfs_allocated_count && !adapter->vmdq_pools) {
- DPRINTK(PROBE, INFO, "Enabling SR-IOV requires VMDq be set to at least 1\n");
- adapter->vmdq_pools = 1;
- }
- igb_validate_option(&adapter->vmdq_pools, &opt, adapter);
-
-#ifdef module_param_array
- } else {
- if (!adapter->vfs_allocated_count)
- adapter->vmdq_pools = (opt.def == 1 ? 0 : opt.def);
- else
- adapter->vmdq_pools = 1;
- }
-#endif
-#ifdef CONFIG_IGB_VMDQ_NETDEV
- if (hw->mac.type == e1000_82575 && adapter->vmdq_pools) {
- DPRINTK(PROBE, INFO, "VMDq not supported on this part.\n");
- adapter->vmdq_pools = 0;
- }
-#endif
-
- } else {
- DPRINTK(PROBE, INFO, "VMDq option is not supported.\n");
- adapter->vmdq_pools = opt.def;
- }
- }
- { /* RSS - Enable RSS multiqueue receives */
- struct igb_option opt = {
- .type = range_option,
- .name = "RSS - RSS multiqueue receive count",
- .err = "using default of " __MODULE_STRING(DEFAULT_RSS),
- .def = DEFAULT_RSS,
- .arg = { .r = { .min = MIN_RSS,
- .max = MAX_RSS } }
- };
-
- switch (hw->mac.type) {
- case e1000_82575:
-#ifndef CONFIG_IGB_VMDQ_NETDEV
- if (!!adapter->vmdq_pools) {
- if (adapter->vmdq_pools <= 2) {
- if (adapter->vmdq_pools == 2)
- opt.arg.r.max = 3;
- } else {
- opt.arg.r.max = 1;
- }
- } else {
- opt.arg.r.max = 4;
- }
-#else
- opt.arg.r.max = !!adapter->vmdq_pools ? 1 : 4;
-#endif /* CONFIG_IGB_VMDQ_NETDEV */
- break;
- case e1000_i210:
- opt.arg.r.max = 4;
- break;
- case e1000_i211:
- opt.arg.r.max = 2;
- break;
- case e1000_82576:
-#ifndef CONFIG_IGB_VMDQ_NETDEV
- if (!!adapter->vmdq_pools)
- opt.arg.r.max = 2;
- break;
-#endif /* CONFIG_IGB_VMDQ_NETDEV */
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- default:
- if (!!adapter->vmdq_pools)
- opt.arg.r.max = 1;
- break;
- }
-
- if (adapter->int_mode != IGB_INT_MODE_MSIX) {
- DPRINTK(PROBE, INFO, "RSS is not supported when in MSI/Legacy Interrupt mode, %s\n",
- opt.err);
- opt.arg.r.max = 1;
- }
-
-#ifdef module_param_array
- if (num_RSS > bd) {
-#endif
- adapter->rss_queues = RSS[bd];
- switch (adapter->rss_queues) {
- case 1:
- break;
- default:
- igb_validate_option(&adapter->rss_queues, &opt, adapter);
- if (adapter->rss_queues)
- break;
- case 0:
- adapter->rss_queues = min_t(u32, opt.arg.r.max, num_online_cpus());
- break;
- }
-#ifdef module_param_array
- } else {
- adapter->rss_queues = opt.def;
- }
-#endif
- }
- { /* QueuePairs - Enable Tx/Rx queue pairs for interrupt handling */
- struct igb_option opt = {
- .type = enable_option,
- .name = "QueuePairs - Tx/Rx queue pairs for interrupt handling",
- .err = "defaulting to Enabled",
- .def = OPTION_ENABLED
- };
-#ifdef module_param_array
- if (num_QueuePairs > bd) {
-#endif
- unsigned int qp = QueuePairs[bd];
- /*
- * We must enable queue pairs if the number of queues
- * exceeds the number of available interrupts. We are
- * limited to 10, or 3 per unallocated vf. On I210 and
- * I211 devices, we are limited to 5 interrupts.
- * However, since I211 only supports 2 queues, we do not
- * need to check and override the user option.
- */
- if (qp == OPTION_DISABLED) {
- if (adapter->rss_queues > 4)
- qp = OPTION_ENABLED;
-
- if (adapter->vmdq_pools > 4)
- qp = OPTION_ENABLED;
-
- if (adapter->rss_queues > 1 &&
- (adapter->vmdq_pools > 3 ||
- adapter->vfs_allocated_count > 6))
- qp = OPTION_ENABLED;
-
- if (hw->mac.type == e1000_i210 &&
- adapter->rss_queues > 2)
- qp = OPTION_ENABLED;
-
- if (qp == OPTION_ENABLED)
- DPRINTK(PROBE, INFO, "Number of queues exceeds available interrupts, %s\n",
- opt.err);
- }
- igb_validate_option(&qp, &opt, adapter);
- adapter->flags |= qp ? IGB_FLAG_QUEUE_PAIRS : 0;
-#ifdef module_param_array
- } else {
- adapter->flags |= opt.def ? IGB_FLAG_QUEUE_PAIRS : 0;
- }
-#endif
- }
- { /* EEE - Enable EEE for capable adapters */
-
- if (hw->mac.type >= e1000_i350) {
- struct igb_option opt = {
- .type = enable_option,
- .name = "EEE Support",
- .err = "defaulting to Enabled",
- .def = OPTION_ENABLED
- };
-#ifdef module_param_array
- if (num_EEE > bd) {
-#endif
- unsigned int eee = EEE[bd];
- igb_validate_option(&eee, &opt, adapter);
- adapter->flags |= eee ? IGB_FLAG_EEE : 0;
- if (eee)
- hw->dev_spec._82575.eee_disable = false;
- else
- hw->dev_spec._82575.eee_disable = true;
-
-#ifdef module_param_array
- } else {
- adapter->flags |= opt.def ? IGB_FLAG_EEE : 0;
- if (adapter->flags & IGB_FLAG_EEE)
- hw->dev_spec._82575.eee_disable = false;
- else
- hw->dev_spec._82575.eee_disable = true;
- }
-#endif
- }
- }
- { /* DMAC - Enable DMA Coalescing for capable adapters */
-
- if (hw->mac.type >= e1000_i350) {
- struct igb_opt_list list [] = {
- { IGB_DMAC_DISABLE, "DMAC Disable"},
- { IGB_DMAC_MIN, "DMAC 250 usec"},
- { IGB_DMAC_500, "DMAC 500 usec"},
- { IGB_DMAC_EN_DEFAULT, "DMAC 1000 usec"},
- { IGB_DMAC_2000, "DMAC 2000 usec"},
- { IGB_DMAC_3000, "DMAC 3000 usec"},
- { IGB_DMAC_4000, "DMAC 4000 usec"},
- { IGB_DMAC_5000, "DMAC 5000 usec"},
- { IGB_DMAC_6000, "DMAC 6000 usec"},
- { IGB_DMAC_7000, "DMAC 7000 usec"},
- { IGB_DMAC_8000, "DMAC 8000 usec"},
- { IGB_DMAC_9000, "DMAC 9000 usec"},
- { IGB_DMAC_MAX, "DMAC 10000 usec"}
- };
- struct igb_option opt = {
- .type = list_option,
- .name = "DMA Coalescing",
- .err = "using default of "__MODULE_STRING(IGB_DMAC_DISABLE),
- .def = IGB_DMAC_DISABLE,
- .arg = { .l = { .nr = 13,
- .p = list
- }
- }
- };
-#ifdef module_param_array
- if (num_DMAC > bd) {
-#endif
- unsigned int dmac = DMAC[bd];
- if (adapter->rx_itr_setting == IGB_DMAC_DISABLE)
- dmac = IGB_DMAC_DISABLE;
- igb_validate_option(&dmac, &opt, adapter);
-			switch (dmac) {
-			case IGB_DMAC_DISABLE:
-			case IGB_DMAC_MIN:
-			case IGB_DMAC_500:
-			case IGB_DMAC_EN_DEFAULT:
-			case IGB_DMAC_2000:
-			case IGB_DMAC_3000:
-			case IGB_DMAC_4000:
-			case IGB_DMAC_5000:
-			case IGB_DMAC_6000:
-			case IGB_DMAC_7000:
-			case IGB_DMAC_8000:
-			case IGB_DMAC_9000:
-			case IGB_DMAC_MAX:
-				/* every valid setting is stored as-is */
-				adapter->dmac = dmac;
-				break;
-			default:
-				adapter->dmac = opt.def;
-				DPRINTK(PROBE, INFO,
-					"Invalid DMAC setting, "
-					"resetting DMAC to %d\n", opt.def);
-			}
-#ifdef module_param_array
- } else
- adapter->dmac = opt.def;
-#endif
- }
- }
-#ifndef IGB_NO_LRO
- { /* LRO - Enable Large Receive Offload */
- struct igb_option opt = {
- .type = enable_option,
- .name = "LRO - Large Receive Offload",
- .err = "defaulting to Disabled",
- .def = OPTION_DISABLED
- };
- struct net_device *netdev = adapter->netdev;
-#ifdef module_param_array
- if (num_LRO > bd) {
-#endif
- unsigned int lro = LRO[bd];
- igb_validate_option(&lro, &opt, adapter);
- netdev->features |= lro ? NETIF_F_LRO : 0;
-#ifdef module_param_array
- } else if (opt.def == OPTION_ENABLED) {
- netdev->features |= NETIF_F_LRO;
- }
-#endif
- }
-#endif /* IGB_NO_LRO */
- { /* MDD - Enable Malicious Driver Detection. Only available when
- SR-IOV is enabled. */
- struct igb_option opt = {
- .type = enable_option,
- .name = "Malicious Driver Detection",
- .err = "defaulting to 1",
- .def = OPTION_ENABLED,
- .arg = { .r = { .min = OPTION_DISABLED,
- .max = OPTION_ENABLED } }
- };
-
-#ifdef module_param_array
- if (num_MDD > bd) {
-#endif
- adapter->mdd = MDD[bd];
- igb_validate_option((uint *)&adapter->mdd, &opt,
- adapter);
-#ifdef module_param_array
- } else {
- adapter->mdd = opt.def;
- }
-#endif
- }
-}
-
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_procfs.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_procfs.c
deleted file mode 100755
index 2e7850ca..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_procfs.c
+++ /dev/null
@@ -1,363 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "igb.h"
-#include "e1000_82575.h"
-#include "e1000_hw.h"
-
-#ifdef IGB_PROCFS
-#ifndef IGB_HWMON
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/device.h>
-#include <linux/netdevice.h>
-
-static struct proc_dir_entry *igb_top_dir = NULL;
-
-
-bool igb_thermal_present(struct igb_adapter *adapter)
-{
- s32 status;
- struct e1000_hw *hw;
-
- if (adapter == NULL)
- return false;
- hw = &adapter->hw;
-
- /*
- * Only set I2C bit-bang mode if an external thermal sensor is
- * supported on this device.
- */
- if (adapter->ets) {
- status = e1000_set_i2c_bb(hw);
- if (status != E1000_SUCCESS)
- return false;
- }
-
- status = hw->mac.ops.init_thermal_sensor_thresh(hw);
- if (status != E1000_SUCCESS)
- return false;
-
- return true;
-}
-
-
-static int igb_macburn(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(page, count, "error: no hw data\n");
-
- return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
- (unsigned int)hw->mac.perm_addr[0],
- (unsigned int)hw->mac.perm_addr[1],
- (unsigned int)hw->mac.perm_addr[2],
- (unsigned int)hw->mac.perm_addr[3],
- (unsigned int)hw->mac.perm_addr[4],
- (unsigned int)hw->mac.perm_addr[5]);
-}
-
-static int igb_macadmn(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- struct e1000_hw *hw;
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(page, count, "error: no hw data\n");
-
- return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
- (unsigned int)hw->mac.addr[0],
- (unsigned int)hw->mac.addr[1],
- (unsigned int)hw->mac.addr[2],
- (unsigned int)hw->mac.addr[3],
- (unsigned int)hw->mac.addr[4],
- (unsigned int)hw->mac.addr[5]);
-}
-
-static int igb_numeports(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct e1000_hw *hw;
- int ports;
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- hw = &adapter->hw;
- if (hw == NULL)
- return snprintf(page, count, "error: no hw data\n");
-
- ports = 4;
-
- return snprintf(page, count, "%d\n", ports);
-}
-
-static int igb_porttype(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- if (adapter == NULL)
- return snprintf(page, count, "error: no adapter\n");
-
- return snprintf(page, count, "%d\n",
- test_bit(__IGB_DOWN, &adapter->state));
-}
-
-static int igb_therm_location(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- struct igb_therm_proc_data *therm_data =
- (struct igb_therm_proc_data *)data;
-
- if (therm_data == NULL)
- return snprintf(page, count, "error: no therm_data\n");
-
- return snprintf(page, count, "%d\n", therm_data->sensor_data->location);
-}
-
-static int igb_therm_maxopthresh(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- struct igb_therm_proc_data *therm_data =
- (struct igb_therm_proc_data *)data;
-
- if (therm_data == NULL)
- return snprintf(page, count, "error: no therm_data\n");
-
- return snprintf(page, count, "%d\n",
- therm_data->sensor_data->max_op_thresh);
-}
-
-static int igb_therm_cautionthresh(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- struct igb_therm_proc_data *therm_data =
- (struct igb_therm_proc_data *)data;
-
- if (therm_data == NULL)
- return snprintf(page, count, "error: no therm_data\n");
-
- return snprintf(page, count, "%d\n",
- therm_data->sensor_data->caution_thresh);
-}
-
-static int igb_therm_temp(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- s32 status;
- struct igb_therm_proc_data *therm_data =
- (struct igb_therm_proc_data *)data;
-
- if (therm_data == NULL)
- return snprintf(page, count, "error: no therm_data\n");
-
-	status = e1000_get_thermal_sensor_data(therm_data->hw);
-	if (status != E1000_SUCCESS)
-		return snprintf(page, count, "error: status %d returned\n",
-				status);
-
- return snprintf(page, count, "%d\n", therm_data->sensor_data->temp);
-}
-
-struct igb_proc_type {
- char name[32];
- int (*read)(char*, char**, off_t, int, int*, void*);
-};
-
-struct igb_proc_type igb_proc_entries[] = {
- {"numeports", &igb_numeports},
- {"porttype", &igb_porttype},
- {"macburn", &igb_macburn},
- {"macadmn", &igb_macadmn},
- {"", NULL}
-};
-
-struct igb_proc_type igb_internal_entries[] = {
- {"location", &igb_therm_location},
- {"temp", &igb_therm_temp},
- {"cautionthresh", &igb_therm_cautionthresh},
- {"maxopthresh", &igb_therm_maxopthresh},
- {"", NULL}
-};
-
-void igb_del_proc_entries(struct igb_adapter *adapter)
-{
- int index, i;
- char buf[16]; /* much larger than the sensor number will ever be */
-
- if (igb_top_dir == NULL)
- return;
-
- for (i = 0; i < E1000_MAX_SENSORS; i++) {
- if (adapter->therm_dir[i] == NULL)
- continue;
-
- for (index = 0; ; index++) {
- if (igb_internal_entries[index].read == NULL)
- break;
-
- remove_proc_entry(igb_internal_entries[index].name,
- adapter->therm_dir[i]);
- }
- snprintf(buf, sizeof(buf), "sensor_%d", i);
- remove_proc_entry(buf, adapter->info_dir);
- }
-
- if (adapter->info_dir != NULL) {
- for (index = 0; ; index++) {
- if (igb_proc_entries[index].read == NULL)
- break;
- remove_proc_entry(igb_proc_entries[index].name,
- adapter->info_dir);
- }
- remove_proc_entry("info", adapter->eth_dir);
- }
-
- if (adapter->eth_dir != NULL)
- remove_proc_entry(pci_name(adapter->pdev), igb_top_dir);
-}
-
-/* called from igb_main.c */
-void igb_procfs_exit(struct igb_adapter *adapter)
-{
- igb_del_proc_entries(adapter);
-}
-
-int igb_procfs_topdir_init(void)
-{
- igb_top_dir = proc_mkdir("driver/igb", NULL);
- if (igb_top_dir == NULL)
- return (-ENOMEM);
-
- return 0;
-}
-
-void igb_procfs_topdir_exit(void)
-{
- remove_proc_entry("driver/igb", NULL);
-}
-
-/* called from igb_main.c */
-int igb_procfs_init(struct igb_adapter *adapter)
-{
- int rc = 0;
- int i;
- int index;
- char buf[16]; /* much larger than the sensor number will ever be */
-
- adapter->eth_dir = NULL;
- adapter->info_dir = NULL;
- for (i = 0; i < E1000_MAX_SENSORS; i++)
- adapter->therm_dir[i] = NULL;
-
-	if (igb_top_dir == NULL) {
- rc = -ENOMEM;
- goto fail;
- }
-
- adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), igb_top_dir);
- if (adapter->eth_dir == NULL) {
- rc = -ENOMEM;
- goto fail;
- }
-
- adapter->info_dir = proc_mkdir("info", adapter->eth_dir);
- if (adapter->info_dir == NULL) {
- rc = -ENOMEM;
- goto fail;
- }
- for (index = 0; ; index++) {
- if (igb_proc_entries[index].read == NULL) {
- break;
- }
- if (!(create_proc_read_entry(igb_proc_entries[index].name,
- 0444,
- adapter->info_dir,
- igb_proc_entries[index].read,
- adapter))) {
-
- rc = -ENOMEM;
- goto fail;
- }
- }
- if (igb_thermal_present(adapter) == false)
- goto exit;
-
- for (i = 0; i < E1000_MAX_SENSORS; i++) {
-
-		if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
- continue;
-
- snprintf(buf, sizeof(buf), "sensor_%d", i);
- adapter->therm_dir[i] = proc_mkdir(buf, adapter->info_dir);
- if (adapter->therm_dir[i] == NULL) {
- rc = -ENOMEM;
- goto fail;
- }
- for (index = 0; ; index++) {
- if (igb_internal_entries[index].read == NULL)
- break;
-			/*
-			 * the therm_data struct contains the pointers that
-			 * the read function will need
-			 */
- adapter->therm_data[i].hw = &adapter->hw;
- adapter->therm_data[i].sensor_data =
- &adapter->hw.mac.thermal_sensor_data.sensor[i];
-
- if (!(create_proc_read_entry(
- igb_internal_entries[index].name,
- 0444,
- adapter->therm_dir[i],
- igb_internal_entries[index].read,
- &adapter->therm_data[i]))) {
- rc = -ENOMEM;
- goto fail;
- }
- }
- }
- goto exit;
-
-fail:
- igb_del_proc_entries(adapter);
-exit:
- return rc;
-}
-
-#endif /* !IGB_HWMON */
-#endif /* IGB_PROCFS */
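The procfs code above targets the legacy read-entry interface; create_proc_read_entry() was removed from the kernel in Linux 3.10. As a rough sketch only, assuming the same igb_adapter layout, one of these read-only entries would be expressed through seq_file on a newer kernel like this (the show function mirrors igb_macadmn() above; none of this code is part of the driver):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* seq_file "show" callback replacing the old page/count read handler */
static int igb_macadmn_show(struct seq_file *m, void *v)
{
	struct igb_adapter *adapter = m->private;
	struct e1000_hw *hw = &adapter->hw;

	seq_printf(m, "0x%02X%02X%02X%02X%02X%02X\n",
		   hw->mac.addr[0], hw->mac.addr[1], hw->mac.addr[2],
		   hw->mac.addr[3], hw->mac.addr[4], hw->mac.addr[5]);
	return 0;
}

/* registration, e.g. from igb_procfs_init():
 *	proc_create_single_data("macadmn", 0444, adapter->info_dir,
 *				igb_macadmn_show, adapter);
 */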
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c
deleted file mode 100755
index 454b70ce..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c
+++ /dev/null
@@ -1,944 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-/******************************************************************************
- Copyright(c) 2011 Richard Cochran <richardcochran@gmail.com> for some of the
- 82576 and 82580 code
-******************************************************************************/
-
-#include "igb.h"
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/ptp_classify.h>
-
-#define INCVALUE_MASK 0x7fffffff
-#define ISGN 0x80000000
-
-/*
- * The 82580 timesync advances the system timer by 8 ns every 8 ns,
- * and this update value cannot be reprogrammed.
- *
- * Neither the 82576 nor the 82580 offer registers wide enough to hold
- * nanoseconds time values for very long. For the 82580, SYSTIM always
- * counts nanoseconds, but the upper 24 bits are not available. The
- * frequency is adjusted by changing the 32 bit fractional nanoseconds
- * register, TIMINCA.
- *
- * For the 82576, the SYSTIM register time unit is affected by the
- * choice of the 24 bit TIMINCA:IV (incvalue) field. Five bits of this
- * field are needed to provide the nominal 16 nanosecond period,
- * leaving 19 bits for fractional nanoseconds.
- *
- * We scale the NIC clock cycle by a large factor so that relatively
- * small clock corrections can be added or subtracted at each clock
- * tick. The drawbacks of a large factor are a) that the clock
- * register overflows more quickly (not such a big deal) and b) that
- * the increment per tick has to fit into 24 bits. As a result we
- * need to use a shift of 19 so we can fit a value of 16 into the
- * TIMINCA register.
- *
- *
- * SYSTIMH SYSTIML
- * +--------------+ +---+---+------+
- * 82576 | 32 | | 8 | 5 | 19 |
- * +--------------+ +---+---+------+
- * \________ 45 bits _______/ fract
- *
- * +----------+---+ +--------------+
- * 82580 | 24 | 8 | | 32 |
- * +----------+---+ +--------------+
- * reserved \______ 40 bits _____/
- *
- *
- * The 45 bit 82576 SYSTIM overflows every
- * 2^45 * 10^-9 / 3600 = 9.77 hours.
- *
- * The 40 bit 82580 SYSTIM overflows every
- * 2^40 * 10^-9 / 60 = 18.3 minutes.
- */
-
-#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
-#define IGB_PTP_TX_TIMEOUT (HZ * 15)
-#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
-#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
-#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
-#define IGB_NBITS_82580 40
-
-/*
- * SYSTIM read access for the 82576
- */
-
-static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
-{
- struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
- struct e1000_hw *hw = &igb->hw;
- u64 val;
- u32 lo, hi;
-
- lo = E1000_READ_REG(hw, E1000_SYSTIML);
- hi = E1000_READ_REG(hw, E1000_SYSTIMH);
-
- val = ((u64) hi) << 32;
- val |= lo;
-
- return val;
-}
-
-/*
- * SYSTIM read access for the 82580
- */
-
-static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
-{
- struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
- struct e1000_hw *hw = &igb->hw;
- u64 val;
- u32 lo, hi;
-
- /* The timestamp latches on lowest register read. For the 82580
- * the lowest register is SYSTIMR instead of SYSTIML. However we only
- * need to provide nanosecond resolution, so we just ignore it.
- */
- E1000_READ_REG(hw, E1000_SYSTIMR);
- lo = E1000_READ_REG(hw, E1000_SYSTIML);
- hi = E1000_READ_REG(hw, E1000_SYSTIMH);
-
- val = ((u64) hi) << 32;
- val |= lo;
-
- return val;
-}
-
-/*
- * SYSTIM read access for I210/I211
- */
-
-static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 sec, nsec;
-
- /* The timestamp latches on lowest register read. For I210/I211, the
- * lowest register is SYSTIMR. Since we only need to provide nanosecond
- * resolution, we can ignore it.
- */
- E1000_READ_REG(hw, E1000_SYSTIMR);
- nsec = E1000_READ_REG(hw, E1000_SYSTIML);
- sec = E1000_READ_REG(hw, E1000_SYSTIMH);
-
- ts->tv_sec = sec;
- ts->tv_nsec = nsec;
-}
-
-static void igb_ptp_write_i210(struct igb_adapter *adapter,
- const struct timespec *ts)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- /*
- * Writing the SYSTIMR register is not necessary as it only provides
- * sub-nanosecond resolution.
- */
- E1000_WRITE_REG(hw, E1000_SYSTIML, ts->tv_nsec);
- E1000_WRITE_REG(hw, E1000_SYSTIMH, ts->tv_sec);
-}
-
-/**
- * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp
- * @adapter: board private structure
- * @hwtstamps: timestamp structure to update
- * @systim: unsigned 64bit system time value.
- *
- * We need to convert the system time value stored in the RX/TXSTMP registers
- * into a hwtstamp which can be used by the upper level timestamping functions.
- *
- * The 'tmreg_lock' spinlock is used to protect the consistency of the
- * system time value. This is needed because reading the 64 bit time
- * value involves reading two (or three) 32 bit registers. The first
- * read latches the value. Ditto for writing.
- *
- * In addition, we have extended the system time with an overflow
- * counter in software.
- **/
-static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
- struct skb_shared_hwtstamps *hwtstamps,
- u64 systim)
-{
- unsigned long flags;
- u64 ns;
-
- switch (adapter->hw.mac.type) {
- case e1000_82576:
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- spin_lock_irqsave(&adapter->tmreg_lock, flags);
-
- ns = timecounter_cyc2time(&adapter->tc, systim);
-
- spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
-
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->hwtstamp = ns_to_ktime(ns);
- break;
- case e1000_i210:
- case e1000_i211:
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- /* Upper 32 bits contain s, lower 32 bits contain ns. */
- hwtstamps->hwtstamp = ktime_set(systim >> 32,
- systim & 0xFFFFFFFF);
- break;
- default:
- break;
- }
-}
-
-/*
- * PTP clock operations
- */
-
-static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
-{
- struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
- ptp_caps);
- struct e1000_hw *hw = &igb->hw;
- int neg_adj = 0;
- u64 rate;
- u32 incvalue;
-
- if (ppb < 0) {
- neg_adj = 1;
- ppb = -ppb;
- }
- rate = ppb;
- rate <<= 14;
- rate = div_u64(rate, 1953125);
-
- incvalue = 16 << IGB_82576_TSYNC_SHIFT;
-
- if (neg_adj)
- incvalue -= rate;
- else
- incvalue += rate;
-
- E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK));
-
- return 0;
-}
-
-static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb)
-{
- struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
- ptp_caps);
- struct e1000_hw *hw = &igb->hw;
- int neg_adj = 0;
- u64 rate;
- u32 inca;
-
- if (ppb < 0) {
- neg_adj = 1;
- ppb = -ppb;
- }
- rate = ppb;
- rate <<= 26;
- rate = div_u64(rate, 1953125);
-
- /* At 2.5G speeds, the TIMINCA register on I354 updates the clock 2.5x
- * as quickly. Account for this by dividing the adjustment by 2.5.
- */
- if (hw->mac.type == e1000_i354) {
- u32 status = E1000_READ_REG(hw, E1000_STATUS);
-
- if ((status & E1000_STATUS_2P5_SKU) &&
- !(status & E1000_STATUS_2P5_SKU_OVER)) {
- rate <<= 1;
- rate = div_u64(rate, 5);
- }
- }
-
- inca = rate & INCVALUE_MASK;
- if (neg_adj)
- inca |= ISGN;
-
- E1000_WRITE_REG(hw, E1000_TIMINCA, inca);
-
- return 0;
-}
-
-static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta)
-{
- struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
- ptp_caps);
- unsigned long flags;
- s64 now;
-
- spin_lock_irqsave(&igb->tmreg_lock, flags);
-
- now = timecounter_read(&igb->tc);
- now += delta;
- timecounter_init(&igb->tc, &igb->cc, now);
-
- spin_unlock_irqrestore(&igb->tmreg_lock, flags);
-
- return 0;
-}
-
-static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
-{
- struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
- ptp_caps);
- unsigned long flags;
- struct timespec now, then = ns_to_timespec(delta);
-
- spin_lock_irqsave(&igb->tmreg_lock, flags);
-
- igb_ptp_read_i210(igb, &now);
- now = timespec_add(now, then);
- igb_ptp_write_i210(igb, (const struct timespec *)&now);
-
- spin_unlock_irqrestore(&igb->tmreg_lock, flags);
-
- return 0;
-}
-
-static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
- struct timespec *ts)
-{
- struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
- ptp_caps);
- unsigned long flags;
- u64 ns;
- u32 remainder;
-
- spin_lock_irqsave(&igb->tmreg_lock, flags);
-
- ns = timecounter_read(&igb->tc);
-
- spin_unlock_irqrestore(&igb->tmreg_lock, flags);
-
- ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
- ts->tv_nsec = remainder;
-
- return 0;
-}
-
-static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
- struct timespec *ts)
-{
- struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
- ptp_caps);
- unsigned long flags;
-
- spin_lock_irqsave(&igb->tmreg_lock, flags);
-
- igb_ptp_read_i210(igb, ts);
-
- spin_unlock_irqrestore(&igb->tmreg_lock, flags);
-
- return 0;
-}
-
-static int igb_ptp_settime_82576(struct ptp_clock_info *ptp,
- const struct timespec *ts)
-{
- struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
- ptp_caps);
- unsigned long flags;
- u64 ns;
-
- ns = ts->tv_sec * 1000000000ULL;
- ns += ts->tv_nsec;
-
- spin_lock_irqsave(&igb->tmreg_lock, flags);
-
- timecounter_init(&igb->tc, &igb->cc, ns);
-
- spin_unlock_irqrestore(&igb->tmreg_lock, flags);
-
- return 0;
-}
-
-static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
- const struct timespec *ts)
-{
- struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
- ptp_caps);
- unsigned long flags;
-
- spin_lock_irqsave(&igb->tmreg_lock, flags);
-
- igb_ptp_write_i210(igb, ts);
-
- spin_unlock_irqrestore(&igb->tmreg_lock, flags);
-
- return 0;
-}
-
-static int igb_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
-{
- return -EOPNOTSUPP;
-}
-
-/**
- * igb_ptp_tx_work
- * @work: pointer to work struct
- *
- * This work function polls the TSYNCTXCTL valid bit to determine when a
- * timestamp has been taken for the current stored skb.
- */
-void igb_ptp_tx_work(struct work_struct *work)
-{
- struct igb_adapter *adapter = container_of(work, struct igb_adapter,
- ptp_tx_work);
- struct e1000_hw *hw = &adapter->hw;
- u32 tsynctxctl;
-
- if (!adapter->ptp_tx_skb)
- return;
-
- if (time_is_before_jiffies(adapter->ptp_tx_start +
- IGB_PTP_TX_TIMEOUT)) {
- dev_kfree_skb_any(adapter->ptp_tx_skb);
- adapter->ptp_tx_skb = NULL;
- adapter->tx_hwtstamp_timeouts++;
-		dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
- return;
- }
-
- tsynctxctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
- if (tsynctxctl & E1000_TSYNCTXCTL_VALID)
- igb_ptp_tx_hwtstamp(adapter);
- else
- /* reschedule to check later */
- schedule_work(&adapter->ptp_tx_work);
-}
-
-static void igb_ptp_overflow_check(struct work_struct *work)
-{
- struct igb_adapter *igb =
- container_of(work, struct igb_adapter, ptp_overflow_work.work);
- struct timespec ts;
-
- igb->ptp_caps.gettime(&igb->ptp_caps, &ts);
-
- pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
-
- schedule_delayed_work(&igb->ptp_overflow_work,
- IGB_SYSTIM_OVERFLOW_PERIOD);
-}
-
-/**
- * igb_ptp_rx_hang - detect error case when Rx timestamp registers latched
- * @adapter: private network adapter structure
- *
- * This watchdog task is scheduled to detect the error case where hardware
- * has dropped a timestamped Rx packet because the ring was full. This
- * particular error is rare but leaves the device in a state unable to
- * timestamp any future packets.
- */
-void igb_ptp_rx_hang(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct igb_ring *rx_ring;
- u32 tsyncrxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
- unsigned long rx_event;
- int n;
-
- if (hw->mac.type != e1000_82576)
- return;
-
- /* If we don't have a valid timestamp in the registers, just update the
- * timeout counter and exit
- */
- if (!(tsyncrxctl & E1000_TSYNCRXCTL_VALID)) {
- adapter->last_rx_ptp_check = jiffies;
- return;
- }
-
- /* Determine the most recent watchdog or rx_timestamp event */
- rx_event = adapter->last_rx_ptp_check;
- for (n = 0; n < adapter->num_rx_queues; n++) {
- rx_ring = adapter->rx_ring[n];
- if (time_after(rx_ring->last_rx_timestamp, rx_event))
- rx_event = rx_ring->last_rx_timestamp;
- }
-
- /* Only need to read the high RXSTMP register to clear the lock */
- if (time_is_before_jiffies(rx_event + 5 * HZ)) {
- E1000_READ_REG(hw, E1000_RXSTMPH);
- adapter->last_rx_ptp_check = jiffies;
- adapter->rx_hwtstamp_cleared++;
-		dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n");
- }
-}
-
-/**
- * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
- * @adapter: Board private structure.
- *
- * If we were asked to do hardware stamping and such a time stamp is
- * available, then it must have been for this skb here because we
- * allow only one such packet into the queue.
- */
-void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct skb_shared_hwtstamps shhwtstamps;
- u64 regval;
-
- regval = E1000_READ_REG(hw, E1000_TXSTMPL);
- regval |= (u64)E1000_READ_REG(hw, E1000_TXSTMPH) << 32;
-
- igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
- skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
- dev_kfree_skb_any(adapter->ptp_tx_skb);
- adapter->ptp_tx_skb = NULL;
-}
-
-/**
- * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
- * @q_vector: Pointer to interrupt specific structure
- * @va: Pointer to address containing Rx buffer
- * @skb: Buffer containing timestamp and packet
- *
- * This function is meant to retrieve a timestamp from the first buffer of an
- * incoming frame. The value is stored in little endian format starting on
- * byte 8.
- */
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
- unsigned char *va,
- struct sk_buff *skb)
-{
- __le64 *regval = (__le64 *)va;
-
- /*
- * The timestamp is recorded in little endian format.
- * DWORD: 0 1 2 3
- * Field: Reserved Reserved SYSTIML SYSTIMH
- */
- igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
- le64_to_cpu(regval[1]));
-}
-
-/**
- * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
- * @q_vector: Pointer to interrupt specific structure
- * @skb: Buffer containing timestamp and packet
- *
- * This function is meant to retrieve a timestamp from the internal registers
- * of the adapter and store it in the skb.
- */
-void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
- struct sk_buff *skb)
-{
- struct igb_adapter *adapter = q_vector->adapter;
- struct e1000_hw *hw = &adapter->hw;
- u64 regval;
-
- /*
- * If this bit is set, then the RX registers contain the time stamp. No
- * other packet will be time stamped until we read these registers, so
- * read the registers to make them available again. Because only one
- * packet can be time stamped at a time, we know that the register
- * values must belong to this one here and therefore we don't need to
- * compare any of the additional attributes stored for it.
- *
- * If nothing went wrong, then it should have a shared tx_flags that we
- * can turn into a skb_shared_hwtstamps.
- */
- if (!(E1000_READ_REG(hw, E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
- return;
-
- regval = E1000_READ_REG(hw, E1000_RXSTMPL);
- regval |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32;
-
- igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
-}
-
-/**
- * igb_ptp_hwtstamp_ioctl - control hardware time stamping
- * @netdev: network interface device structure
- * @ifr: interface request structure carrying the hwtstamp_config
- * @cmd: ioctl command
- *
- * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't cause any overhead
- * when no packet needs it. At most one packet in the queue may be
- * marked for time stamping, otherwise it would be impossible to tell
- * for sure to which packet the hardware time stamp belongs.
- *
- * Incoming time stamping has to be configured via the hardware
- * filters. Not all combinations are supported, in particular event
- * type has to be specified. Matching the kind of event packet is
- * not supported, with the exception of "all V2 events regardless of
- * level 2 or 4".
- *
- **/
-int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct hwtstamp_config config;
- u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
- u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
- u32 tsync_rx_cfg = 0;
- bool is_l4 = false;
- bool is_l2 = false;
- u32 regval;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- tsync_tx_ctl = 0;
- case HWTSTAMP_TX_ON:
- break;
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- tsync_rx_ctl = 0;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
- is_l4 = true;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
- is_l4 = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- is_l2 = true;
- is_l4 = true;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_ALL:
- /*
- * 82576 cannot timestamp all packets, which it needs to do to
- * support both V1 Sync and Delay_Req messages
- */
- if (hw->mac.type != e1000_82576) {
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- }
- /* fall through */
- default:
- config.rx_filter = HWTSTAMP_FILTER_NONE;
- return -ERANGE;
- }
-
- if (hw->mac.type == e1000_82575) {
- if (tsync_rx_ctl | tsync_tx_ctl)
- return -EINVAL;
- return 0;
- }
-
- /*
- * Per-packet timestamping only works if all packets are
- * timestamped, so enable timestamping in all packets as
- * long as one rx filter was configured.
- */
- if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
- tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- is_l2 = true;
- is_l4 = true;
-
- if ((hw->mac.type == e1000_i210) ||
- (hw->mac.type == e1000_i211)) {
- regval = E1000_READ_REG(hw, E1000_RXPBS);
- regval |= E1000_RXPBS_CFG_TS_EN;
- E1000_WRITE_REG(hw, E1000_RXPBS, regval);
- }
- }
-
- /* enable/disable TX */
- regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
- regval &= ~E1000_TSYNCTXCTL_ENABLED;
- regval |= tsync_tx_ctl;
- E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval);
-
- /* enable/disable RX */
- regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
- regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
- regval |= tsync_rx_ctl;
- E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval);
-
- /* define which PTP packets are time stamped */
- E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg);
-
- /* define ethertype filter for timestamped packets */
- if (is_l2)
- E1000_WRITE_REG(hw, E1000_ETQF(3),
- (E1000_ETQF_FILTER_ENABLE | /* enable filter */
- E1000_ETQF_1588 | /* enable timestamping */
- ETH_P_1588)); /* 1588 eth protocol type */
- else
- E1000_WRITE_REG(hw, E1000_ETQF(3), 0);
-
- /* L4 Queue Filter[3]: filter by destination port and protocol */
- if (is_l4) {
- u32 ftqf = (IPPROTO_UDP /* UDP */
- | E1000_FTQF_VF_BP /* VF not compared */
- | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
- | E1000_FTQF_MASK); /* mask all inputs */
- ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
-
- E1000_WRITE_REG(hw, E1000_IMIR(3), htons(PTP_EV_PORT));
- E1000_WRITE_REG(hw, E1000_IMIREXT(3),
- (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
- if (hw->mac.type == e1000_82576) {
- /* enable source port check */
- E1000_WRITE_REG(hw, E1000_SPQF(3), htons(PTP_EV_PORT));
- ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
- }
- E1000_WRITE_REG(hw, E1000_FTQF(3), ftqf);
- } else {
- E1000_WRITE_REG(hw, E1000_FTQF(3), E1000_FTQF_MASK);
- }
- E1000_WRITE_FLUSH(hw);
-
- /* clear TX/RX time stamp registers, just to be sure */
- regval = E1000_READ_REG(hw, E1000_TXSTMPL);
- regval = E1000_READ_REG(hw, E1000_TXSTMPH);
- regval = E1000_READ_REG(hw, E1000_RXSTMPL);
- regval = E1000_READ_REG(hw, E1000_RXSTMPH);
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-void igb_ptp_init(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
-
- switch (hw->mac.type) {
- case e1000_82576:
- snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
- adapter->ptp_caps.owner = THIS_MODULE;
- adapter->ptp_caps.max_adj = 999999881;
- adapter->ptp_caps.n_ext_ts = 0;
- adapter->ptp_caps.pps = 0;
- adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
- adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
- adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
- adapter->ptp_caps.settime = igb_ptp_settime_82576;
- adapter->ptp_caps.enable = igb_ptp_enable;
- adapter->cc.read = igb_ptp_read_82576;
- adapter->cc.mask = CLOCKSOURCE_MASK(64);
- adapter->cc.mult = 1;
- adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
- /* Dial the nominal frequency. */
- E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 |
- INCVALUE_82576);
- break;
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
- adapter->ptp_caps.owner = THIS_MODULE;
- adapter->ptp_caps.max_adj = 62499999;
- adapter->ptp_caps.n_ext_ts = 0;
- adapter->ptp_caps.pps = 0;
- adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
- adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
- adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
- adapter->ptp_caps.settime = igb_ptp_settime_82576;
- adapter->ptp_caps.enable = igb_ptp_enable;
- adapter->cc.read = igb_ptp_read_82580;
- adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
- adapter->cc.mult = 1;
- adapter->cc.shift = 0;
- /* Enable the timer functions by clearing bit 31. */
- E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
- break;
- case e1000_i210:
- case e1000_i211:
- snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
- adapter->ptp_caps.owner = THIS_MODULE;
- adapter->ptp_caps.max_adj = 62499999;
- adapter->ptp_caps.n_ext_ts = 0;
- adapter->ptp_caps.pps = 0;
- adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
- adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
- adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
- adapter->ptp_caps.settime = igb_ptp_settime_i210;
- adapter->ptp_caps.enable = igb_ptp_enable;
- /* Enable the timer functions by clearing bit 31. */
- E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
- break;
- default:
- adapter->ptp_clock = NULL;
- return;
- }
-
- E1000_WRITE_FLUSH(hw);
-
- spin_lock_init(&adapter->tmreg_lock);
- INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
-
- /* Initialize the clock and overflow work for devices that need it. */
- if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
- struct timespec ts = ktime_to_timespec(ktime_get_real());
-
- igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
- } else {
- timecounter_init(&adapter->tc, &adapter->cc,
- ktime_to_ns(ktime_get_real()));
-
- INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
- igb_ptp_overflow_check);
-
- schedule_delayed_work(&adapter->ptp_overflow_work,
- IGB_SYSTIM_OVERFLOW_PERIOD);
- }
-
- /* Initialize the time sync interrupts for devices that support it. */
- if (hw->mac.type >= e1000_82580) {
- E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS);
- E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS);
- }
-
- adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
- &adapter->pdev->dev);
- if (IS_ERR(adapter->ptp_clock)) {
- adapter->ptp_clock = NULL;
- dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
- } else {
- dev_info(&adapter->pdev->dev, "added PHC on %s\n",
- adapter->netdev->name);
- adapter->flags |= IGB_FLAG_PTP;
- }
-}
-
-/**
- * igb_ptp_stop - Disable PTP device and stop the overflow check.
- * @adapter: Board private structure.
- *
- * This function stops the PTP support and cancels the delayed work.
- **/
-void igb_ptp_stop(struct igb_adapter *adapter)
-{
- switch (adapter->hw.mac.type) {
- case e1000_82576:
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- cancel_delayed_work_sync(&adapter->ptp_overflow_work);
- break;
- case e1000_i210:
- case e1000_i211:
- /* No delayed work to cancel. */
- break;
- default:
- return;
- }
-
- cancel_work_sync(&adapter->ptp_tx_work);
- if (adapter->ptp_tx_skb) {
- dev_kfree_skb_any(adapter->ptp_tx_skb);
- adapter->ptp_tx_skb = NULL;
- }
-
- if (adapter->ptp_clock) {
- ptp_clock_unregister(adapter->ptp_clock);
- dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
- adapter->netdev->name);
- adapter->flags &= ~IGB_FLAG_PTP;
- }
-}
-
-/**
- * igb_ptp_reset - Re-enable the adapter for PTP following a reset.
- * @adapter: Board private structure.
- *
- * This function handles the reset work required to re-enable the PTP device.
- **/
-void igb_ptp_reset(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- if (!(adapter->flags & IGB_FLAG_PTP))
- return;
-
- switch (adapter->hw.mac.type) {
- case e1000_82576:
- /* Dial the nominal frequency. */
- E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 |
- INCVALUE_82576);
- break;
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- case e1000_i210:
- case e1000_i211:
- /* Enable the timer functions and interrupts. */
- E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
- E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS);
- E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS);
- break;
- default:
- /* No work to do. */
- return;
- }
-
- /* Re-initialize the timer. */
- if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
- struct timespec ts = ktime_to_timespec(ktime_get_real());
-
- igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
- } else {
- timecounter_init(&adapter->tc, &adapter->cc,
- ktime_to_ns(ktime_get_real()));
- }
-}
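A note on the frequency-adjust arithmetic in igb_ptp_adjfreq_82576() above: the nominal increment is 16 ns in 5.19 fixed point (16 << 19 = 2^23), and because 10^9 = 2^9 * 1953125, the expression (ppb << 14) / 1953125 is exactly ppb * 2^23 / 10^9, i.e. the nominal increment scaled by the requested parts-per-billion. A self-contained userspace sketch of the computation (illustrative only, no driver dependencies):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t ppb = 1000;            /* requested adjustment, parts per billion */
	int neg_adj = ppb < 0;
	uint64_t rate = (uint64_t)(neg_adj ? -ppb : ppb) << 14;

	rate /= 1953125;               /* == |ppb| * 2^23 / 10^9 */

	uint32_t incvalue = 16u << 19; /* nominal 16 ns period, 5.19 fixed point */
	incvalue = neg_adj ? incvalue - (uint32_t)rate
			   : incvalue + (uint32_t)rate;

	printf("TIMINCA incvalue for %+d ppb: 0x%06x\n", (int)ppb, incvalue);
	return 0;
}

For +1000 ppb this yields 0x800008: eight extra 2^-19 ns fractions per 16 ns tick, which is 1000 parts per billion of 2^23 to within rounding.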
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_regtest.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_regtest.h
deleted file mode 100755
index a6761db8..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_regtest.h
+++ /dev/null
@@ -1,251 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-/* ethtool register test data */
-struct igb_reg_test {
- u16 reg;
- u16 reg_offset;
- u16 array_len;
- u16 test_type;
- u32 mask;
- u32 write;
-};
-
-/* In the hardware, registers are laid out either singly, in arrays
- * spaced 0x100 bytes apart, or in contiguous tables. We assume
- * most tests take place on arrays or single registers (handled
- * as a single-element array) and special-case the tables.
- * Table tests are always pattern tests.
- *
- * We also make provision for some required setup steps by specifying
- * registers to be written without any read-back testing.
- */
-
-#define PATTERN_TEST 1
-#define SET_READ_TEST 2
-#define WRITE_NO_TEST 3
-#define TABLE32_TEST 4
-#define TABLE64_TEST_LO 5
-#define TABLE64_TEST_HI 6
-
-/* i210 reg test */
-static struct igb_reg_test reg_test_i210[] = {
- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- /* RDH is read-only for i210, only test RDT. */
- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 },
- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RA, 0, 16, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_HI,
- 0x900FFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128, TABLE32_TEST,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
-};
-
-/* i350 reg test */
-static struct igb_reg_test reg_test_i350[] = {
- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- /* VET is readonly on i350 */
- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- /* RDH is read-only for i350, only test RDT. */
- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RA, 0, 16, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_HI,
- 0xC3FFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 16, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 16, TABLE64_TEST_HI,
- 0xC3FFFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128, TABLE32_TEST,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
-};
-
-/* 82580 reg test */
-static struct igb_reg_test reg_test_82580[] = {
- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- /* RDH is read-only for 82580, only test RDT. */
- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RA, 0, 16, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_HI,
- 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 8, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 8, TABLE64_TEST_HI,
- 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128, TABLE32_TEST,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
-};
-
-/* 82576 reg test */
-static struct igb_reg_test reg_test_82576[] = {
- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- /* Enable all queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
- /* RDH is read-only for 82576, only test RDT. */
- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RA, 0, 16, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_HI,
- 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 8, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 8, TABLE64_TEST_HI,
- 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128, TABLE32_TEST,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
-};
-
-/* 82575 register test */
-static struct igb_reg_test reg_test_82575[] = {
- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- /* Enable all four RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
- /* RDH is read-only for 82575, only test RDT. */
- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_HI,
- 0x800FFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128, TABLE32_TEST,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
-};
-
-
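These tables are pure data; the driver's ethtool self-test walks each array and dispatches on test_type. A minimal sketch of the PATTERN_TEST case, modeled loosely on the in-kernel igb ethtool test (the helper name and pattern set here are illustrative, not the driver's exact code):

/* Write each pattern through the entry's write/mask and verify the readback. */
static bool igb_reg_pattern_ok(struct e1000_hw *hw, u32 reg, u32 mask, u32 write)
{
	static const u32 patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
	};
	u32 before, val;
	int i;

	before = readl(hw->hw_addr + reg);	/* preserve the live value */
	for (i = 0; i < ARRAY_SIZE(patterns); i++) {
		writel(patterns[i] & write, hw->hw_addr + reg);
		val = readl(hw->hw_addr + reg) & mask;
		if (val != (patterns[i] & write & mask)) {
			writel(before, hw->hw_addr + reg);
			return false;		/* stuck or shorted bits */
		}
	}
	writel(before, hw->hw_addr + reg);	/* restore */
	return true;
}

Array entries (array_len > 1) repeat this at reg + i * reg_offset for each element.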
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c
deleted file mode 100755
index 90d96e62..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c
+++ /dev/null
@@ -1,437 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-
-#include <linux/tcp.h>
-
-#include "igb.h"
-#include "igb_vmdq.h"
-#include <linux/if_vlan.h>
-
-#ifdef CONFIG_IGB_VMDQ_NETDEV
-int igb_vmdq_open(struct net_device *dev)
-{
- struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
- struct igb_adapter *adapter = vadapter->real_adapter;
- struct net_device *main_netdev = adapter->netdev;
- int hw_queue = vadapter->rx_ring->queue_index +
- adapter->vfs_allocated_count;
-
- if (test_bit(__IGB_DOWN, &adapter->state)) {
- DPRINTK(DRV, WARNING,
- "Open %s before opening this device.\n",
- main_netdev->name);
- return -EAGAIN;
- }
- netif_carrier_off(dev);
- vadapter->tx_ring->vmdq_netdev = dev;
- vadapter->rx_ring->vmdq_netdev = dev;
- if (is_valid_ether_addr(dev->dev_addr)) {
- igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
- igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
- }
- netif_carrier_on(dev);
- return 0;
-}
-
-int igb_vmdq_close(struct net_device *dev)
-{
- struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
- struct igb_adapter *adapter = vadapter->real_adapter;
- int hw_queue = vadapter->rx_ring->queue_index +
- adapter->vfs_allocated_count;
-
- netif_carrier_off(dev);
- igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
-
- vadapter->tx_ring->vmdq_netdev = NULL;
- vadapter->rx_ring->vmdq_netdev = NULL;
- return 0;
-}
-
-netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev)
-{
- struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
-
- return igb_xmit_frame_ring(skb, vadapter->tx_ring);
-}
-
-struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev)
-{
- struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
- struct igb_adapter *adapter = vadapter->real_adapter;
- struct e1000_hw *hw = &adapter->hw;
- int hw_queue = vadapter->rx_ring->queue_index +
- adapter->vfs_allocated_count;
-
- vadapter->net_stats.rx_packets +=
- E1000_READ_REG(hw, E1000_PFVFGPRC(hw_queue));
- E1000_WRITE_REG(hw, E1000_PFVFGPRC(hw_queue), 0);
- vadapter->net_stats.tx_packets +=
- E1000_READ_REG(hw, E1000_PFVFGPTC(hw_queue));
- E1000_WRITE_REG(hw, E1000_PFVFGPTC(hw_queue), 0);
- vadapter->net_stats.rx_bytes +=
- E1000_READ_REG(hw, E1000_PFVFGORC(hw_queue));
- E1000_WRITE_REG(hw, E1000_PFVFGORC(hw_queue), 0);
- vadapter->net_stats.tx_bytes +=
- E1000_READ_REG(hw, E1000_PFVFGOTC(hw_queue));
- E1000_WRITE_REG(hw, E1000_PFVFGOTC(hw_queue), 0);
- vadapter->net_stats.multicast +=
- E1000_READ_REG(hw, E1000_PFVFMPRC(hw_queue));
- E1000_WRITE_REG(hw, E1000_PFVFMPRC(hw_queue), 0);
- /* only return the current stats */
- return &vadapter->net_stats;
-}
-
-/**
- * igb_write_vm_addr_list - write unicast addresses to RAR table
- * @netdev: network interface device structure
- *
- * Writes unicast address list to the RAR table.
- * Returns: -ENOMEM on failure/insufficient address space
- * 0 on no addresses written
- * X on writing X addresses to the RAR table
- **/
-static int igb_write_vm_addr_list(struct net_device *netdev)
-{
- struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
- struct igb_adapter *adapter = vadapter->real_adapter;
- int count = 0;
- int hw_queue = vadapter->rx_ring->queue_index +
- adapter->vfs_allocated_count;
-
- /* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > igb_available_rars(adapter))
- return -ENOMEM;
-
- if (!netdev_uc_empty(netdev)) {
-#ifdef NETDEV_HW_ADDR_T_UNICAST
- struct netdev_hw_addr *ha;
-#else
- struct dev_mc_list *ha;
-#endif
- netdev_for_each_uc_addr(ha, netdev) {
-#ifdef NETDEV_HW_ADDR_T_UNICAST
- igb_del_mac_filter(adapter, ha->addr, hw_queue);
- igb_add_mac_filter(adapter, ha->addr, hw_queue);
-#else
- igb_del_mac_filter(adapter, ha->da_addr, hw_queue);
- igb_add_mac_filter(adapter, ha->da_addr, hw_queue);
-#endif
- count++;
- }
- }
- return count;
-}
-
-
-#define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous mode */
-void igb_vmdq_set_rx_mode(struct net_device *dev)
-{
- struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
- struct igb_adapter *adapter = vadapter->real_adapter;
- struct e1000_hw *hw = &adapter->hw;
- u32 vmolr, rctl;
- int hw_queue = vadapter->rx_ring->queue_index +
- adapter->vfs_allocated_count;
-
- /* Check for Promiscuous and All Multicast modes */
- vmolr = E1000_READ_REG(hw, E1000_VMOLR(hw_queue));
-
- /* clear the affected bits */
- vmolr &= ~(E1000_VMOLR_UPE | E1000_VMOLR_MPME |
- E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE);
-
- if (dev->flags & IFF_PROMISC) {
- vmolr |= E1000_VMOLR_UPE;
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- rctl |= E1000_RCTL_UPE;
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- } else {
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- rctl &= ~E1000_RCTL_UPE;
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- if (dev->flags & IFF_ALLMULTI) {
- vmolr |= E1000_VMOLR_MPME;
- } else {
-			/*
-			 * Write addresses to the MTA; if the attempt fails,
-			 * just turn on promiscuous mode so that we can at
-			 * least receive multicast traffic
-			 */
- if (igb_write_mc_addr_list(adapter->netdev) != 0)
- vmolr |= E1000_VMOLR_ROMPE;
- }
-#ifdef HAVE_SET_RX_MODE
-		/*
-		 * Write addresses to available RAR registers; if there is
-		 * not sufficient space to store all the addresses then
-		 * enable unicast promiscuous mode
-		 */
- if (igb_write_vm_addr_list(dev) < 0)
- vmolr |= E1000_VMOLR_UPE;
-#endif
- }
- E1000_WRITE_REG(hw, E1000_VMOLR(hw_queue), vmolr);
-
- return;
-}
-
-int igb_vmdq_set_mac(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
- struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
- struct igb_adapter *adapter = vadapter->real_adapter;
- int hw_queue = vadapter->rx_ring->queue_index +
- adapter->vfs_allocated_count;
-
- igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- return igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
-}
-
-int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
- struct igb_adapter *adapter = vadapter->real_adapter;
-
- if (adapter->netdev->mtu < new_mtu) {
- DPRINTK(PROBE, INFO,
- "Set MTU on %s to >= %d "
- "before changing MTU on %s\n",
- adapter->netdev->name, new_mtu, dev->name);
- return -EINVAL;
- }
- dev->mtu = new_mtu;
- return 0;
-}
-
-void igb_vmdq_tx_timeout(struct net_device *dev)
-{
- return;
-}
-
-void igb_vmdq_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
- struct igb_adapter *adapter = vadapter->real_adapter;
- struct e1000_hw *hw = &adapter->hw;
- int hw_queue = vadapter->rx_ring->queue_index +
- adapter->vfs_allocated_count;
-
- vadapter->vlgrp = grp;
-
- igb_enable_vlan_tags(adapter);
- E1000_WRITE_REG(hw, E1000_VMVIR(hw_queue), 0);
-
- return;
-}
-void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
-{
- struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
- struct igb_adapter *adapter = vadapter->real_adapter;
-#ifndef HAVE_NETDEV_VLAN_FEATURES
- struct net_device *v_netdev;
-#endif
- int hw_queue = vadapter->rx_ring->queue_index +
- adapter->vfs_allocated_count;
-
- /* attempt to add filter to vlvf array */
- igb_vlvf_set(adapter, vid, TRUE, hw_queue);
-
-#ifndef HAVE_NETDEV_VLAN_FEATURES
-
- /* Copy feature flags from netdev to the vlan netdev for this vid.
- * This allows things like TSO to bubble down to our vlan device.
- */
- v_netdev = vlan_group_get_device(vadapter->vlgrp, vid);
- v_netdev->features |= adapter->netdev->features;
- vlan_group_set_device(vadapter->vlgrp, vid, v_netdev);
-#endif
-
- return;
-}
-void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
-{
- struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
- struct igb_adapter *adapter = vadapter->real_adapter;
- int hw_queue = vadapter->rx_ring->queue_index +
- adapter->vfs_allocated_count;
-
- vlan_group_set_device(vadapter->vlgrp, vid, NULL);
- /* remove vlan from VLVF table array */
- igb_vlvf_set(adapter, vid, FALSE, hw_queue);
-
- return;
-}
-
-static int igb_vmdq_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
-{
- struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
- struct igb_adapter *adapter = vadapter->real_adapter;
- struct e1000_hw *hw = &adapter->hw;
- u32 status;
-
- if (hw->phy.media_type == e1000_media_type_copper) {
-
- ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full|
- SUPPORTED_Autoneg |
- SUPPORTED_TP);
- ecmd->advertising = ADVERTISED_TP;
-
- if (hw->mac.autoneg == 1) {
- ecmd->advertising |= ADVERTISED_Autoneg;
- /* the e1000 autoneg seems to match ethtool nicely */
- ecmd->advertising |= hw->phy.autoneg_advertised;
- }
-
- ecmd->port = PORT_TP;
- ecmd->phy_address = hw->phy.addr;
- } else {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg);
-
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg);
-
- ecmd->port = PORT_FIBRE;
- }
-
- ecmd->transceiver = XCVR_INTERNAL;
-
- status = E1000_READ_REG(hw, E1000_STATUS);
-
- if (status & E1000_STATUS_LU) {
-
- if ((status & E1000_STATUS_SPEED_1000) ||
- hw->phy.media_type != e1000_media_type_copper)
- ecmd->speed = SPEED_1000;
- else if (status & E1000_STATUS_SPEED_100)
- ecmd->speed = SPEED_100;
- else
- ecmd->speed = SPEED_10;
-
- if ((status & E1000_STATUS_FD) ||
- hw->phy.media_type != e1000_media_type_copper)
- ecmd->duplex = DUPLEX_FULL;
- else
- ecmd->duplex = DUPLEX_HALF;
- } else {
- ecmd->speed = -1;
- ecmd->duplex = -1;
- }
-
- ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
- return 0;
-}
-
-
-static u32 igb_vmdq_get_msglevel(struct net_device *netdev)
-{
- struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
- struct igb_adapter *adapter = vadapter->real_adapter;
- return adapter->msg_enable;
-}
-
-static void igb_vmdq_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
- struct igb_adapter *adapter = vadapter->real_adapter;
- struct net_device *main_netdev = adapter->netdev;
-
- strncpy(drvinfo->driver, igb_driver_name, 32);
- strncpy(drvinfo->version, igb_driver_version, 32);
-
- strncpy(drvinfo->fw_version, "N/A", 4);
- snprintf(drvinfo->bus_info, 32, "%s VMDQ %d", main_netdev->name,
- vadapter->rx_ring->queue_index);
- drvinfo->n_stats = 0;
- drvinfo->testinfo_len = 0;
- drvinfo->regdump_len = 0;
-}
-
-static void igb_vmdq_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
-
- struct igb_ring *tx_ring = vadapter->tx_ring;
- struct igb_ring *rx_ring = vadapter->rx_ring;
-
- ring->rx_max_pending = IGB_MAX_RXD;
- ring->tx_max_pending = IGB_MAX_TXD;
- ring->rx_mini_max_pending = 0;
- ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = rx_ring->count;
- ring->tx_pending = tx_ring->count;
- ring->rx_mini_pending = 0;
- ring->rx_jumbo_pending = 0;
-}
-static u32 igb_vmdq_get_rx_csum(struct net_device *netdev)
-{
- struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
- struct igb_adapter *adapter = vadapter->real_adapter;
-
- return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags);
-}
-
-
-static struct ethtool_ops igb_vmdq_ethtool_ops = {
- .get_settings = igb_vmdq_get_settings,
- .get_drvinfo = igb_vmdq_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_ringparam = igb_vmdq_get_ringparam,
- .get_rx_csum = igb_vmdq_get_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_msglevel = igb_vmdq_get_msglevel,
-#ifdef NETIF_F_TSO
- .get_tso = ethtool_op_get_tso,
-#endif
-#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
- .get_perm_addr = ethtool_op_get_perm_addr,
-#endif
-};
-
-void igb_vmdq_set_ethtool_ops(struct net_device *netdev)
-{
- SET_ETHTOOL_OPS(netdev, &igb_vmdq_ethtool_ops);
-}
-
-
-#endif /* CONFIG_IGB_VMDQ_NETDEV */
-
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.h
deleted file mode 100755
index e51e7c4e..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IGB_VMDQ_H_
-#define _IGB_VMDQ_H_
-
-#ifdef CONFIG_IGB_VMDQ_NETDEV
-int igb_vmdq_open(struct net_device *dev);
-int igb_vmdq_close(struct net_device *dev);
-netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev);
-struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev);
-void igb_vmdq_set_rx_mode(struct net_device *dev);
-int igb_vmdq_set_mac(struct net_device *dev, void *addr);
-int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu);
-void igb_vmdq_tx_timeout(struct net_device *dev);
-void igb_vmdq_vlan_rx_register(struct net_device *dev,
- struct vlan_group *grp);
-void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
-void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
-void igb_vmdq_set_ethtool_ops(struct net_device *netdev);
-#endif /* CONFIG_IGB_VMDQ_NETDEV */
-#endif /* _IGB_VMDQ_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c
deleted file mode 100755
index bde3a83c..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c
+++ /dev/null
@@ -1,1482 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "igb.h"
-#include "kcompat.h"
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
-/* From lib/vsprintf.c */
-#include <asm/div64.h>
-
-static int skip_atoi(const char **s)
-{
- int i=0;
-
- while (isdigit(**s))
- i = i*10 + *((*s)++) - '0';
- return i;
-}
-
-#define _kc_ZEROPAD 1 /* pad with zero */
-#define _kc_SIGN 2 /* unsigned/signed long */
-#define _kc_PLUS 4 /* show plus */
-#define _kc_SPACE 8 /* space if plus */
-#define _kc_LEFT 16 /* left justified */
-#define _kc_SPECIAL 32 /* 0x */
-#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
-
-static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
-{
- char c,sign,tmp[66];
- const char *digits;
- const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
- const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
- int i;
-
- digits = (type & _kc_LARGE) ? large_digits : small_digits;
- if (type & _kc_LEFT)
- type &= ~_kc_ZEROPAD;
- if (base < 2 || base > 36)
- return 0;
- c = (type & _kc_ZEROPAD) ? '0' : ' ';
- sign = 0;
- if (type & _kc_SIGN) {
- if (num < 0) {
- sign = '-';
- num = -num;
- size--;
- } else if (type & _kc_PLUS) {
- sign = '+';
- size--;
- } else if (type & _kc_SPACE) {
- sign = ' ';
- size--;
- }
- }
- if (type & _kc_SPECIAL) {
- if (base == 16)
- size -= 2;
- else if (base == 8)
- size--;
- }
- i = 0;
- if (num == 0)
- tmp[i++]='0';
- else while (num != 0)
- tmp[i++] = digits[do_div(num,base)];
- if (i > precision)
- precision = i;
- size -= precision;
- if (!(type&(_kc_ZEROPAD+_kc_LEFT))) {
- while(size-->0) {
- if (buf <= end)
- *buf = ' ';
- ++buf;
- }
- }
- if (sign) {
- if (buf <= end)
- *buf = sign;
- ++buf;
- }
- if (type & _kc_SPECIAL) {
- if (base==8) {
- if (buf <= end)
- *buf = '0';
- ++buf;
- } else if (base==16) {
- if (buf <= end)
- *buf = '0';
- ++buf;
- if (buf <= end)
- *buf = digits[33];
- ++buf;
- }
- }
- if (!(type & _kc_LEFT)) {
- while (size-- > 0) {
- if (buf <= end)
- *buf = c;
- ++buf;
- }
- }
- while (i < precision--) {
- if (buf <= end)
- *buf = '0';
- ++buf;
- }
- while (i-- > 0) {
- if (buf <= end)
- *buf = tmp[i];
- ++buf;
- }
- while (size-- > 0) {
- if (buf <= end)
- *buf = ' ';
- ++buf;
- }
- return buf;
-}
-
-int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
-{
- int len;
- unsigned long long num;
- int i, base;
- char *str, *end, c;
- const char *s;
-
- int flags; /* flags to number() */
-
- int field_width; /* width of output field */
- int precision; /* min. # of digits for integers; max
- number of chars from string */
- int qualifier; /* 'h', 'l', or 'L' for integer fields */
- /* 'z' support added 23/7/1999 S.H. */
- /* 'z' changed to 'Z' --davidm 1/25/99 */
-
- str = buf;
- end = buf + size - 1;
-
- if (end < buf - 1) {
- end = ((void *) -1);
- size = end - buf + 1;
- }
-
- for (; *fmt ; ++fmt) {
- if (*fmt != '%') {
- if (str <= end)
- *str = *fmt;
- ++str;
- continue;
- }
-
- /* process flags */
- flags = 0;
- repeat:
- ++fmt; /* this also skips first '%' */
- switch (*fmt) {
- case '-': flags |= _kc_LEFT; goto repeat;
- case '+': flags |= _kc_PLUS; goto repeat;
- case ' ': flags |= _kc_SPACE; goto repeat;
- case '#': flags |= _kc_SPECIAL; goto repeat;
- case '0': flags |= _kc_ZEROPAD; goto repeat;
- }
-
- /* get field width */
- field_width = -1;
- if (isdigit(*fmt))
- field_width = skip_atoi(&fmt);
- else if (*fmt == '*') {
- ++fmt;
- /* it's the next argument */
- field_width = va_arg(args, int);
- if (field_width < 0) {
- field_width = -field_width;
- flags |= _kc_LEFT;
- }
- }
-
- /* get the precision */
- precision = -1;
- if (*fmt == '.') {
- ++fmt;
- if (isdigit(*fmt))
- precision = skip_atoi(&fmt);
- else if (*fmt == '*') {
- ++fmt;
- /* it's the next argument */
- precision = va_arg(args, int);
- }
- if (precision < 0)
- precision = 0;
- }
-
- /* get the conversion qualifier */
- qualifier = -1;
- if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
- qualifier = *fmt;
- ++fmt;
- }
-
- /* default base */
- base = 10;
-
- switch (*fmt) {
- case 'c':
- if (!(flags & _kc_LEFT)) {
- while (--field_width > 0) {
- if (str <= end)
- *str = ' ';
- ++str;
- }
- }
- c = (unsigned char) va_arg(args, int);
- if (str <= end)
- *str = c;
- ++str;
- while (--field_width > 0) {
- if (str <= end)
- *str = ' ';
- ++str;
- }
- continue;
-
- case 's':
- s = va_arg(args, char *);
- if (!s)
- s = "<NULL>";
-
- len = strnlen(s, precision);
-
- if (!(flags & _kc_LEFT)) {
- while (len < field_width--) {
- if (str <= end)
- *str = ' ';
- ++str;
- }
- }
- for (i = 0; i < len; ++i) {
- if (str <= end)
- *str = *s;
- ++str; ++s;
- }
- while (len < field_width--) {
- if (str <= end)
- *str = ' ';
- ++str;
- }
- continue;
-
- case 'p':
- if (field_width == -1) {
- field_width = 2*sizeof(void *);
- flags |= _kc_ZEROPAD;
- }
- str = number(str, end,
- (unsigned long) va_arg(args, void *),
- 16, field_width, precision, flags);
- continue;
-
-
- case 'n':
- /* FIXME:
- * What does C99 say about the overflow case here? */
- if (qualifier == 'l') {
- long * ip = va_arg(args, long *);
- *ip = (str - buf);
- } else if (qualifier == 'Z') {
- size_t * ip = va_arg(args, size_t *);
- *ip = (str - buf);
- } else {
- int * ip = va_arg(args, int *);
- *ip = (str - buf);
- }
- continue;
-
- case '%':
- if (str <= end)
- *str = '%';
- ++str;
- continue;
-
- /* integer number formats - set up the flags and "break" */
- case 'o':
- base = 8;
- break;
-
- case 'X':
- flags |= _kc_LARGE;
- /* fall through */
- case 'x':
- base = 16;
- break;
-
- case 'd':
- case 'i':
- flags |= _kc_SIGN;
- /* fall through */
- case 'u':
- break;
-
- default:
- if (str <= end)
- *str = '%';
- ++str;
- if (*fmt) {
- if (str <= end)
- *str = *fmt;
- ++str;
- } else {
- --fmt;
- }
- continue;
- }
- if (qualifier == 'L')
- num = va_arg(args, long long);
- else if (qualifier == 'l') {
- num = va_arg(args, unsigned long);
- if (flags & _kc_SIGN)
- num = (signed long) num;
- } else if (qualifier == 'Z') {
- num = va_arg(args, size_t);
- } else if (qualifier == 'h') {
- num = (unsigned short) va_arg(args, int);
- if (flags & _kc_SIGN)
- num = (signed short) num;
- } else {
- num = va_arg(args, unsigned int);
- if (flags & _kc_SIGN)
- num = (signed int) num;
- }
- str = number(str, end, num, base,
- field_width, precision, flags);
- }
- if (str <= end)
- *str = '\0';
- else if (size > 0)
- /* don't write out a null byte if the buf size is zero */
- *end = '\0';
- /* the trailing null byte doesn't count towards the total
- * ++str;
- */
- return str-buf;
-}
-
-int _kc_snprintf(char * buf, size_t size, const char *fmt, ...)
-{
- va_list args;
- int i;
-
- va_start(args, fmt);
- i = _kc_vsnprintf(buf,size,fmt,args);
- va_end(args);
- return i;
-}
-#endif /* < 2.4.8 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
-
-/**************************************/
-/* PCI DMA MAPPING */
-
-#if defined(CONFIG_HIGHMEM)
-
-#ifndef PCI_DRAM_OFFSET
-#define PCI_DRAM_OFFSET 0
-#endif
-
-u64
-_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
- size_t size, int direction)
-{
- return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
- PCI_DRAM_OFFSET);
-}
-
-#else /* CONFIG_HIGHMEM */
-
-u64
-_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
- size_t size, int direction)
-{
- return pci_map_single(dev, (void *)page_address(page) + offset, size,
- direction);
-}
-
-#endif /* CONFIG_HIGHMEM */
-
-void
-_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
- int direction)
-{
- return pci_unmap_single(dev, dma_addr, size, direction);
-}
-
-#endif /* 2.4.13 => 2.4.3 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
-
-/**************************************/
-/* PCI DRIVER API */
-
-int
-_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
-{
- if (!pci_dma_supported(dev, mask))
- return -EIO;
- dev->dma_mask = mask;
- return 0;
-}
-
-int
-_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
-{
- int i;
-
- for (i = 0; i < 6; i++) {
- if (pci_resource_len(dev, i) == 0)
- continue;
-
- if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
- if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
- pci_release_regions(dev);
- return -EBUSY;
- }
- } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
- if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
- pci_release_regions(dev);
- return -EBUSY;
- }
- }
- }
- return 0;
-}
-
-void
-_kc_pci_release_regions(struct pci_dev *dev)
-{
- int i;
-
- for (i = 0; i < 6; i++) {
- if (pci_resource_len(dev, i) == 0)
- continue;
-
- if (pci_resource_flags(dev, i) & IORESOURCE_IO)
- release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
-
- else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
- release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
- }
-}
-
-/**************************************/
-/* NETWORK DRIVER API */
-
-struct net_device *
-_kc_alloc_etherdev(int sizeof_priv)
-{
- struct net_device *dev;
- int alloc_size;
-
- alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
- dev = kzalloc(alloc_size, GFP_KERNEL);
- if (!dev)
- return NULL;
-
- if (sizeof_priv)
- dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
- dev->name[0] = '\0';
- ether_setup(dev);
-
- return dev;
-}
-
-int
-_kc_is_valid_ether_addr(u8 *addr)
-{
- const char zaddr[6] = { 0, };
-
- return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
-}
-
-#endif /* 2.4.3 => 2.4.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
-
-int
-_kc_pci_set_power_state(struct pci_dev *dev, int state)
-{
- return 0;
-}
-
-int
-_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
-{
- return 0;
-}
-
-#endif /* 2.4.6 => 2.4.3 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
-void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
- int off, int size)
-{
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- frag->page = page;
- frag->page_offset = off;
- frag->size = size;
- skb_shinfo(skb)->nr_frags = i + 1;
-}
-
-/*
- * Original Copyright:
- * find_next_bit.c: fallback find next bit implementation
- *
- * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
-{
- const unsigned long *p = addr + BITOP_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG-1);
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset %= BITS_PER_LONG;
- if (offset) {
- tmp = *(p++);
- tmp &= (~0UL << offset);
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
- while (size & ~(BITS_PER_LONG-1)) {
- if ((tmp = *(p++)))
- goto found_middle;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + ffs(tmp);
-}
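/* A minimal usage sketch for the fallback above (editorial example;
 * walk_set_bits() and its callback are hypothetical names): visit every
 * set bit in a bitmap of nbits bits, mirroring what the upstream
 * for_each_set_bit() helper expands to.
 */
static void walk_set_bits(const unsigned long *map, unsigned long nbits,
                          void (*visit)(unsigned long))
{
        unsigned long i;

        for (i = find_next_bit(map, nbits, 0); i < nbits;
             i = find_next_bit(map, nbits, i + 1))
                visit(i);
}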
-
-size_t _kc_strlcpy(char *dest, const char *src, size_t size)
-{
- size_t ret = strlen(src);
-
- if (size) {
- size_t len = (ret >= size) ? size - 1 : ret;
- memcpy(dest, src, len);
- dest[len] = '\0';
- }
- return ret;
-}
-
-#ifndef do_div
-#if BITS_PER_LONG == 32
-uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base)
-{
- uint64_t rem = *n;
- uint64_t b = base;
- uint64_t res, d = 1;
- uint32_t high = rem >> 32;
-
- /* Reduce the thing a bit first */
- res = 0;
- if (high >= base) {
- high /= base;
- res = (uint64_t) high << 32;
- rem -= (uint64_t) (high*base) << 32;
- }
-
- while ((int64_t)b > 0 && b < rem) {
- b = b+b;
- d = d+d;
- }
-
- do {
- if (rem >= b) {
- rem -= b;
- res += d;
- }
- b >>= 1;
- d >>= 1;
- } while (d);
-
- *n = res;
- return rem;
-}
-#endif /* BITS_PER_LONG == 32 */
-#endif /* do_div */
-#endif /* 2.6.0 => 2.4.6 */
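/* Editorial sketch of the do_div() contract that the weak fallback above
 * implements for 32-bit builds: the 64-bit dividend is replaced in place
 * by the quotient, and the 32-bit remainder is returned (ns_to_secs() is
 * a hypothetical helper name).
 */
static inline u32 ns_to_secs(u64 ns, u32 *rem_ns)
{
        *rem_ns = do_div(ns, 1000000000U); /* ns now holds whole seconds */
        return (u32)ns;
}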
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
-int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...)
-{
- va_list args;
- int i;
-
- va_start(args, fmt);
- i = vsnprintf(buf, size, fmt, args);
- va_end(args);
- return (i >= size) ? (size - 1) : i;
-}
-#endif /* < 2.6.4 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
-DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
-#endif /* < 2.6.10 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
-char *_kc_kstrdup(const char *s, unsigned int gfp)
-{
- size_t len;
- char *buf;
-
- if (!s)
- return NULL;
-
- len = strlen(s) + 1;
- buf = kmalloc(len, gfp);
- if (buf)
- memcpy(buf, s, len);
- return buf;
-}
-#endif /* < 2.6.13 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
-void *_kc_kzalloc(size_t size, int flags)
-{
- void *ret = kmalloc(size, flags);
- if (ret)
- memset(ret, 0, size);
- return ret;
-}
-#endif /* <= 2.6.13 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
-int _kc_skb_pad(struct sk_buff *skb, int pad)
-{
- int ntail;
-
- /* If the skbuff is non-linear, tailroom is always zero. */
- if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
- memset(skb->data+skb->len, 0, pad);
- return 0;
- }
-
- ntail = skb->data_len + pad - (skb->end - skb->tail);
- if (likely(skb_cloned(skb) || ntail > 0)) {
- if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC))
- goto free_skb;
- }
-
-#ifdef MAX_SKB_FRAGS
- if (skb_is_nonlinear(skb) &&
- !__pskb_pull_tail(skb, skb->data_len))
- goto free_skb;
-
-#endif
- memset(skb->data + skb->len, 0, pad);
- return 0;
-
-free_skb:
- kfree_skb(skb);
- return -ENOMEM;
-}
-
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
-int _kc_pci_save_state(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct adapter_struct *adapter = netdev_priv(netdev);
- int size = PCI_CONFIG_SPACE_LEN, i;
- u16 pcie_cap_offset, pcie_link_status;
-
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
- /* no ->dev for 2.4 kernels */
- WARN_ON(pdev->dev.driver_data == NULL);
-#endif
- pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (pcie_cap_offset) {
- if (!pci_read_config_word(pdev,
- pcie_cap_offset + PCIE_LINK_STATUS,
- &pcie_link_status))
- size = PCIE_CONFIG_SPACE_LEN;
- }
- pci_config_space_ich8lan();
-#ifdef HAVE_PCI_ERS
- if (adapter->config_space == NULL)
-#else
- WARN_ON(adapter->config_space != NULL);
-#endif
- adapter->config_space = kmalloc(size, GFP_KERNEL);
- if (!adapter->config_space) {
- printk(KERN_ERR "Out of memory in pci_save_state\n");
- return -ENOMEM;
- }
- for (i = 0; i < (size / 4); i++)
- pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
- return 0;
-}
-
-void _kc_pci_restore_state(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct adapter_struct *adapter = netdev_priv(netdev);
- int size = PCI_CONFIG_SPACE_LEN, i;
- u16 pcie_cap_offset;
- u16 pcie_link_status;
-
- if (adapter->config_space != NULL) {
- pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (pcie_cap_offset &&
- !pci_read_config_word(pdev,
- pcie_cap_offset + PCIE_LINK_STATUS,
- &pcie_link_status))
- size = PCIE_CONFIG_SPACE_LEN;
-
- pci_config_space_ich8lan();
- for (i = 0; i < (size / 4); i++)
- pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
-#ifndef HAVE_PCI_ERS
- kfree(adapter->config_space);
- adapter->config_space = NULL;
-#endif
- }
-}
-#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
-
-#ifdef HAVE_PCI_ERS
-void _kc_free_netdev(struct net_device *netdev)
-{
- struct adapter_struct *adapter = netdev_priv(netdev);
-
- if (adapter->config_space != NULL)
- kfree(adapter->config_space);
-#ifdef CONFIG_SYSFS
- if (netdev->reg_state == NETREG_UNINITIALIZED) {
- kfree((char *)netdev - netdev->padded);
- } else {
- BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
- netdev->reg_state = NETREG_RELEASED;
- class_device_put(&netdev->class_dev);
- }
-#else
- kfree((char *)netdev - netdev->padded);
-#endif
-}
-#endif
-
-void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
-{
- void *p;
-
- p = kzalloc(len, gfp);
- if (p)
- memcpy(p, src, len);
- return p;
-}
-#endif /* <= 2.6.19 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
-struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev)
-{
- return ((struct adapter_struct *)netdev_priv(netdev))->pdev;
-}
-#endif /* < 2.6.21 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
-/* hexdump code taken from lib/hexdump.c */
-static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
- int groupsize, unsigned char *linebuf,
- size_t linebuflen, bool ascii)
-{
- const u8 *ptr = buf;
- u8 ch;
- int j, lx = 0;
- int ascii_column;
-
- if (rowsize != 16 && rowsize != 32)
- rowsize = 16;
-
- if (!len)
- goto nil;
- if (len > rowsize) /* limit to one line at a time */
- len = rowsize;
- if ((len % groupsize) != 0) /* no mixed size output */
- groupsize = 1;
-
- switch (groupsize) {
- case 8: {
- const u64 *ptr8 = buf;
- int ngroups = len / groupsize;
-
- for (j = 0; j < ngroups; j++)
- lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
- "%s%16.16llx", j ? " " : "",
- (unsigned long long)*(ptr8 + j));
- ascii_column = 17 * ngroups + 2;
- break;
- }
-
- case 4: {
- const u32 *ptr4 = buf;
- int ngroups = len / groupsize;
-
- for (j = 0; j < ngroups; j++)
- lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
- "%s%8.8x", j ? " " : "", *(ptr4 + j));
- ascii_column = 9 * ngroups + 2;
- break;
- }
-
- case 2: {
- const u16 *ptr2 = buf;
- int ngroups = len / groupsize;
-
- for (j = 0; j < ngroups; j++)
- lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
- "%s%4.4x", j ? " " : "", *(ptr2 + j));
- ascii_column = 5 * ngroups + 2;
- break;
- }
-
- default:
- for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
- ch = ptr[j];
- linebuf[lx++] = hex_asc(ch >> 4);
- linebuf[lx++] = hex_asc(ch & 0x0f);
- linebuf[lx++] = ' ';
- }
- if (j)
- lx--;
-
- ascii_column = 3 * rowsize + 2;
- break;
- }
- if (!ascii)
- goto nil;
-
- while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
- linebuf[lx++] = ' ';
- for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
- linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
- : '.';
-nil:
- linebuf[lx++] = '\0';
-}
-
-void _kc_print_hex_dump(const char *level,
- const char *prefix_str, int prefix_type,
- int rowsize, int groupsize,
- const void *buf, size_t len, bool ascii)
-{
- const u8 *ptr = buf;
- int i, linelen, remaining = len;
- unsigned char linebuf[200];
-
- if (rowsize != 16 && rowsize != 32)
- rowsize = 16;
-
- for (i = 0; i < len; i += rowsize) {
- linelen = min(remaining, rowsize);
- remaining -= rowsize;
- _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
- linebuf, sizeof(linebuf), ascii);
-
- switch (prefix_type) {
- case DUMP_PREFIX_ADDRESS:
- printk("%s%s%*p: %s\n", level, prefix_str,
- (int)(2 * sizeof(void *)), ptr + i, linebuf);
- break;
- case DUMP_PREFIX_OFFSET:
- printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
- break;
- default:
- printk("%s%s%s\n", level, prefix_str, linebuf);
- break;
- }
- }
-}
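/* Usage sketch for the backport above (illustrative values only): dump
 * 32 bytes at 16 bytes per row, byte-sized groups, offset prefixes and
 * an ASCII column, matching upstream print_hex_dump():
 *
 *      _kc_print_hex_dump(KERN_DEBUG, "rx buf: ", DUMP_PREFIX_OFFSET,
 *                         16, 1, skb->data, 32, true);
 */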
-
-#ifdef HAVE_I2C_SUPPORT
-struct i2c_client *
-_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
-{
- struct i2c_client *client;
- int status;
-
- client = kzalloc(sizeof *client, GFP_KERNEL);
- if (!client)
- return NULL;
-
- client->adapter = adap;
-
- client->dev.platform_data = info->platform_data;
-
- client->flags = info->flags;
- client->addr = info->addr;
-
- strlcpy(client->name, info->type, sizeof(client->name));
-
- /* Check for address business */
- status = i2c_check_addr(adap, client->addr);
- if (status)
- goto out_err;
-
- client->dev.parent = &client->adapter->dev;
- client->dev.bus = &i2c_bus_type;
-
- status = i2c_attach_client(client);
- if (status)
- goto out_err;
-
- dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
- client->name, dev_name(&client->dev));
-
- return client;
-
-out_err:
- dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x "
- "(%d)\n", client->name, client->addr, status);
- kfree(client);
- return NULL;
-}
-#endif /* HAVE_I2C_SUPPORT */
-#endif /* < 2.6.22 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
-#ifdef NAPI
-struct net_device *napi_to_poll_dev(const struct napi_struct *napi)
-{
- struct adapter_q_vector *q_vector = container_of(napi,
- struct adapter_q_vector,
- napi);
- return &q_vector->poll_dev;
-}
-
-int __kc_adapter_clean(struct net_device *netdev, int *budget)
-{
- int work_done;
- int work_to_do = min(*budget, netdev->quota);
- /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
- struct napi_struct *napi = netdev->priv;
- work_done = napi->poll(napi, work_to_do);
- *budget -= work_done;
- netdev->quota -= work_done;
- return (work_done >= work_to_do) ? 1 : 0;
-}
-#endif /* NAPI */
-#endif /* <= 2.6.24 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
-void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
-{
- struct pci_dev *parent = pdev->bus->self;
- u16 link_state;
- int pos;
-
- if (!parent)
- return;
-
- pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
- if (pos) {
- pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
- link_state &= ~state;
- pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
- }
-}
-#endif /* < 2.6.26 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
-#ifdef HAVE_TX_MQ
-void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
-{
- struct adapter_struct *adapter = netdev_priv(netdev);
- int i;
-
- netif_stop_queue(netdev);
- if (netif_is_multiqueue(netdev))
- for (i = 0; i < adapter->num_tx_queues; i++)
- netif_stop_subqueue(netdev, i);
-}
-void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
-{
- struct adapter_struct *adapter = netdev_priv(netdev);
- int i;
-
- netif_wake_queue(netdev);
- if (netif_is_multiqueue(netdev))
- for (i = 0; i < adapter->num_tx_queues; i++)
- netif_wake_subqueue(netdev, i);
-}
-void _kc_netif_tx_start_all_queues(struct net_device *netdev)
-{
- struct adapter_struct *adapter = netdev_priv(netdev);
- int i;
-
- netif_start_queue(netdev);
- if (netif_is_multiqueue(netdev))
- for (i = 0; i < adapter->num_tx_queues; i++)
- netif_start_subqueue(netdev, i);
-}
-#endif /* HAVE_TX_MQ */
-
-#ifndef __WARN_printf
-void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
-{
- va_list args;
-
- printk(KERN_WARNING "------------[ cut here ]------------\n");
- printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, line);
- va_start(args, fmt);
- vprintk(fmt, args);
- va_end(args);
-
- dump_stack();
-}
-#endif /* __WARN_printf */
-#endif /* < 2.6.27 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
-
-int
-_kc_pci_prepare_to_sleep(struct pci_dev *dev)
-{
- pci_power_t target_state;
- int error;
-
- target_state = pci_choose_state(dev, PMSG_SUSPEND);
-
- pci_enable_wake(dev, target_state, true);
-
- error = pci_set_power_state(dev, target_state);
-
- if (error)
- pci_enable_wake(dev, target_state, false);
-
- return error;
-}
-
-int
-_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
-{
- int err;
-
- err = pci_enable_wake(dev, PCI_D3cold, enable);
- if (err)
- goto out;
-
- err = pci_enable_wake(dev, PCI_D3hot, enable);
-
-out:
- return err;
-}
-#endif /* < 2.6.28 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
-static void __kc_pci_set_master(struct pci_dev *pdev, bool enable)
-{
- u16 old_cmd, cmd;
-
- pci_read_config_word(pdev, PCI_COMMAND, &old_cmd);
- if (enable)
- cmd = old_cmd | PCI_COMMAND_MASTER;
- else
- cmd = old_cmd & ~PCI_COMMAND_MASTER;
- if (cmd != old_cmd) {
- dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n",
- enable ? "enabling" : "disabling");
- pci_write_config_word(pdev, PCI_COMMAND, cmd);
- }
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) )
- pdev->is_busmaster = enable;
-#endif
-}
-
-void _kc_pci_clear_master(struct pci_dev *dev)
-{
- __kc_pci_set_master(dev, false);
-}
-#endif /* < 2.6.29 */
-
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
-#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
-int _kc_pci_num_vf(struct pci_dev *dev)
-{
- int num_vf = 0;
-#ifdef CONFIG_PCI_IOV
- struct pci_dev *vfdev;
-
- /* loop through all ethernet devices starting at PF dev */
- vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
- while (vfdev) {
- if (vfdev->is_virtfn && vfdev->physfn == dev)
- num_vf++;
-
- vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
- }
-
-#endif
- return num_vf;
-}
-#endif /* RHEL_RELEASE_CODE */
-#endif /* < 2.6.34 */
-
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
-#ifdef HAVE_TX_MQ
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
-#ifndef CONFIG_NETDEVICES_MULTIQUEUE
-void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
-{
- unsigned int real_num = dev->real_num_tx_queues;
- struct Qdisc *qdisc;
- int i;
-
- if (unlikely(txq > dev->num_tx_queues))
- ; /* out of range: do nothing */
- else if (txq > real_num)
- dev->real_num_tx_queues = txq;
- else if (txq < real_num) {
- dev->real_num_tx_queues = txq;
- for (i = txq; i < dev->num_tx_queues; i++) {
- qdisc = netdev_get_tx_queue(dev, i)->qdisc;
- if (qdisc) {
- spin_lock_bh(qdisc_lock(qdisc));
- qdisc_reset(qdisc);
- spin_unlock_bh(qdisc_lock(qdisc));
- }
- }
- }
-}
-#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
-#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
-#endif /* HAVE_TX_MQ */
-
-ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
- const void __user *from, size_t count)
-{
- loff_t pos = *ppos;
- size_t res;
-
- if (pos < 0)
- return -EINVAL;
- if (pos >= available || !count)
- return 0;
- if (count > available - pos)
- count = available - pos;
- res = copy_from_user(to + pos, from, count);
- if (res == count)
- return -EFAULT;
- count -= res;
- *ppos = pos + count;
- return count;
-}
-
-#endif /* < 2.6.35 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
-static const u32 _kc_flags_dup_features =
- (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
-
-u32 _kc_ethtool_op_get_flags(struct net_device *dev)
-{
- return dev->features & _kc_flags_dup_features;
-}
-
-int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
-{
- if (data & ~supported)
- return -EINVAL;
-
- dev->features = ((dev->features & ~_kc_flags_dup_features) |
- (data & _kc_flags_dup_features));
- return 0;
-}
-#endif /* < 2.6.36 */
-
-/******************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
-
-
-
-#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
-#endif /* < 2.6.39 */
-
-/******************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
-void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
- int off, int size, unsigned int truesize)
-{
- skb_fill_page_desc(skb, i, page, off, size);
- skb->len += size;
- skb->data_len += size;
- skb->truesize += truesize;
-}
-
-int _kc_simple_open(struct inode *inode, struct file *file)
-{
- if (inode->i_private)
- file->private_data = inode->i_private;
-
- return 0;
-}
-
-#endif /* < 3.4.0 */
-
-/******************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
-#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) && \
- !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5))
-static inline int __kc_pcie_cap_version(struct pci_dev *dev)
-{
- int pos;
- u16 reg16;
-
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
- if (!pos)
- return 0;
- pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
- return reg16 & PCI_EXP_FLAGS_VERS;
-}
-
-static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev)
-{
- return true;
-}
-
-static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev)
-{
- int type = pci_pcie_type(dev);
-
- return __kc_pcie_cap_version(dev) > 1 ||
- type == PCI_EXP_TYPE_ROOT_PORT ||
- type == PCI_EXP_TYPE_ENDPOINT ||
- type == PCI_EXP_TYPE_LEG_END;
-}
-
-static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev)
-{
- int type = pci_pcie_type(dev);
- int pos;
- u16 pcie_flags_reg;
-
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
- if (!pos)
- return false;
- pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg);
-
- return __kc_pcie_cap_version(dev) > 1 ||
- type == PCI_EXP_TYPE_ROOT_PORT ||
- (type == PCI_EXP_TYPE_DOWNSTREAM &&
- pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
-}
-
-static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev)
-{
- int type = pci_pcie_type(dev);
-
- return __kc_pcie_cap_version(dev) > 1 ||
- type == PCI_EXP_TYPE_ROOT_PORT ||
- type == PCI_EXP_TYPE_RC_EC;
-}
-
-static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
-{
- if (!pci_is_pcie(dev))
- return false;
-
- switch (pos) {
- case PCI_EXP_FLAGS_TYPE:
- return true;
- case PCI_EXP_DEVCAP:
- case PCI_EXP_DEVCTL:
- case PCI_EXP_DEVSTA:
- return __kc_pcie_cap_has_devctl(dev);
- case PCI_EXP_LNKCAP:
- case PCI_EXP_LNKCTL:
- case PCI_EXP_LNKSTA:
- return __kc_pcie_cap_has_lnkctl(dev);
- case PCI_EXP_SLTCAP:
- case PCI_EXP_SLTCTL:
- case PCI_EXP_SLTSTA:
- return __kc_pcie_cap_has_sltctl(dev);
- case PCI_EXP_RTCTL:
- case PCI_EXP_RTCAP:
- case PCI_EXP_RTSTA:
- return __kc_pcie_cap_has_rtctl(dev);
- case PCI_EXP_DEVCAP2:
- case PCI_EXP_DEVCTL2:
- case PCI_EXP_LNKCAP2:
- case PCI_EXP_LNKCTL2:
- case PCI_EXP_LNKSTA2:
- return __kc_pcie_cap_version(dev) > 1;
- default:
- return false;
- }
-}
-
-/*
- * Note that these accessor functions are only for the "PCI Express
- * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
- * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
- */
-int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
-{
- int ret;
-
- *val = 0;
- if (pos & 1)
- return -EINVAL;
-
- if (__kc_pcie_capability_reg_implemented(dev, pos)) {
- ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
- /*
- * Reset *val to 0 if pci_read_config_word() fails; it may
- * have been left as 0xFFFF if a hardware error occurred
- * during pci_read_config_word().
- */
- if (ret)
- *val = 0;
- return ret;
- }
-
- /*
- * For Functions that do not implement the Slot Capabilities,
- * Slot Status, and Slot Control registers, these spaces must
- * be hardwired to 0b, with the exception of the Presence Detect
- * State bit in the Slot Status register of Downstream Ports,
- * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
- */
- if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
- pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
- *val = PCI_EXP_SLTSTA_PDS;
- }
-
- return 0;
-}
-
-int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
-{
- if (pos & 1)
- return -EINVAL;
-
- if (!__kc_pcie_capability_reg_implemented(dev, pos))
- return 0;
-
- return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
-}
-
-int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
- u16 clear, u16 set)
-{
- int ret;
- u16 val;
-
- ret = __kc_pcie_capability_read_word(dev, pos, &val);
- if (!ret) {
- val &= ~clear;
- val |= set;
- ret = __kc_pcie_capability_write_word(dev, pos, val);
- }
-
- return ret;
-}
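/* Usage sketch for the accessors above (hypothetical caller; assumes
 * PCI_EXP_LNKCTL_ASPMC is available): mask off both ASPM enable bits
 * with a single read-modify-write of the Link Control register.
 */
static int sketch_disable_aspm(struct pci_dev *pdev)
{
        return __kc_pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
                                                       PCI_EXP_LNKCTL_ASPMC, 0);
}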
-#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) && \
- !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) */
-#endif /* < 3.7.0 */
-
-/******************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
-#endif /* 3.9.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
-#ifdef CONFIG_PCI_IOV
-int __kc_pci_vfs_assigned(struct pci_dev *dev)
-{
- unsigned int vfs_assigned = 0;
-#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
- int pos;
- struct pci_dev *vfdev;
- unsigned short dev_id;
-
- /* only search if we are a PF */
- if (!dev->is_physfn)
- return 0;
-
- /* find SR-IOV capability */
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
- if (!pos)
- return 0;
-
- /*
- * determine the device ID for the VFs; the vendor ID will be the
- * same as the PF's, so there is no need to check for it
- */
- pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
-
- /* loop through all the VFs to see if we own any that are assigned */
- vfdev = pci_get_device(dev->vendor, dev_id, NULL);
- while (vfdev) {
- /*
- * It is considered assigned if it is a virtual function with
- * our dev as the physical function and the assigned bit is set
- */
- if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
- (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
- vfs_assigned++;
-
- vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
- }
-
-#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
- return vfs_assigned;
-}
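/* Usage sketch (hypothetical PF teardown path): refuse to disable
 * SR-IOV while any VF is still assigned to a guest.
 */
static int sketch_teardown_sriov(struct pci_dev *pdev)
{
        if (__kc_pci_vfs_assigned(pdev)) {
                dev_warn(&pdev->dev,
                         "VFs are assigned to guests; leaving SR-IOV enabled\n");
                return -EBUSY;
        }
        pci_disable_sriov(pdev);
        return 0;
}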
-
-#endif /* CONFIG_PCI_IOV */
-#endif /* 3.10.0 */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
deleted file mode 100755
index 1213cc61..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
+++ /dev/null
@@ -1,3884 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _KCOMPAT_H_
-#define _KCOMPAT_H_
-
-#ifndef LINUX_VERSION_CODE
-#include <linux/version.h>
-#else
-#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
-#endif
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/mii.h>
-#include <linux/vmalloc.h>
-#include <asm/io.h>
-#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
-
-/* NAPI enable/disable flags here */
-#define NAPI
-
-#define adapter_struct igb_adapter
-#define adapter_q_vector igb_q_vector
-#define NAPI
-
-/* and finally set defines so that the code sees the changes */
-#ifdef NAPI
-#else
-#endif /* NAPI */
-
-/* packet split disable/enable */
-#ifdef DISABLE_PACKET_SPLIT
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
-#define CONFIG_IGB_DISABLE_PACKET_SPLIT
-#endif
-#endif /* DISABLE_PACKET_SPLIT */
-
-/* MSI compatibility code for all kernels and drivers */
-#ifdef DISABLE_PCI_MSI
-#undef CONFIG_PCI_MSI
-#endif
-#ifndef CONFIG_PCI_MSI
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
-struct msix_entry {
- u16 vector; /* kernel uses to write allocated vector */
- u16 entry; /* driver uses to specify entry, OS writes */
-};
-#endif
-#undef pci_enable_msi
-#define pci_enable_msi(a) -ENOTSUPP
-#undef pci_disable_msi
-#define pci_disable_msi(a) do {} while (0)
-#undef pci_enable_msix
-#define pci_enable_msix(a, b, c) -ENOTSUPP
-#undef pci_disable_msix
-#define pci_disable_msix(a) do {} while (0)
-#define msi_remove_pci_irq_vectors(a) do {} while (0)
-#endif /* CONFIG_PCI_MSI */
-#ifdef DISABLE_PM
-#undef CONFIG_PM
-#endif
-
-#ifdef DISABLE_NET_POLL_CONTROLLER
-#undef CONFIG_NET_POLL_CONTROLLER
-#endif
-
-#ifndef PMSG_SUSPEND
-#define PMSG_SUSPEND 3
-#endif
-
-/* generic boolean compatibility */
-#undef TRUE
-#undef FALSE
-#define TRUE true
-#define FALSE false
-#ifdef GCC_VERSION
-#if ( GCC_VERSION < 3000 )
-#define _Bool char
-#endif
-#else
-#define _Bool char
-#endif
-
-/* kernels less than 2.4.14 don't have this */
-#ifndef ETH_P_8021Q
-#define ETH_P_8021Q 0x8100
-#endif
-
-#ifndef module_param
-#define module_param(v,t,p) MODULE_PARM(v, "i");
-#endif
-
-#ifndef DMA_64BIT_MASK
-#define DMA_64BIT_MASK 0xffffffffffffffffULL
-#endif
-
-#ifndef DMA_32BIT_MASK
-#define DMA_32BIT_MASK 0x00000000ffffffffULL
-#endif
-
-#ifndef PCI_CAP_ID_EXP
-#define PCI_CAP_ID_EXP 0x10
-#endif
-
-#ifndef PCIE_LINK_STATE_L0S
-#define PCIE_LINK_STATE_L0S 1
-#endif
-#ifndef PCIE_LINK_STATE_L1
-#define PCIE_LINK_STATE_L1 2
-#endif
-
-#ifndef mmiowb
-#ifdef CONFIG_IA64
-#define mmiowb() asm volatile ("mf.a" ::: "memory")
-#else
-#define mmiowb()
-#endif
-#endif
-
-#ifndef SET_NETDEV_DEV
-#define SET_NETDEV_DEV(net, pdev)
-#endif
-
-#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
-#define free_netdev(x) kfree(x)
-#endif
-
-#ifdef HAVE_POLL_CONTROLLER
-#define CONFIG_NET_POLL_CONTROLLER
-#endif
-
-#ifndef SKB_DATAREF_SHIFT
-/* if we do not have the infrastructure to detect if skb_header is cloned,
- just return false in all cases */
-#define skb_header_cloned(x) 0
-#endif
-
-#ifndef NETIF_F_GSO
-#define gso_size tso_size
-#define gso_segs tso_segs
-#endif
-
-#ifndef NETIF_F_GRO
-#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
- vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
-#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
-#endif
-
-#ifndef NETIF_F_SCTP_CSUM
-#define NETIF_F_SCTP_CSUM 0
-#endif
-
-#ifndef NETIF_F_LRO
-#define NETIF_F_LRO (1 << 15)
-#endif
-
-#ifndef NETIF_F_NTUPLE
-#define NETIF_F_NTUPLE (1 << 27)
-#endif
-
-#ifndef IPPROTO_SCTP
-#define IPPROTO_SCTP 132
-#endif
-
-#ifndef CHECKSUM_PARTIAL
-#define CHECKSUM_PARTIAL CHECKSUM_HW
-#define CHECKSUM_COMPLETE CHECKSUM_HW
-#endif
-
-#ifndef __read_mostly
-#define __read_mostly
-#endif
-
-#ifndef MII_RESV1
-#define MII_RESV1 0x17 /* Reserved... */
-#endif
-
-#ifndef unlikely
-#define unlikely(_x) _x
-#define likely(_x) _x
-#endif
-
-#ifndef WARN_ON
-#define WARN_ON(x)
-#endif
-
-#ifndef PCI_DEVICE
-#define PCI_DEVICE(vend,dev) \
- .vendor = (vend), .device = (dev), \
- .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
-#endif
-
-#ifndef node_online
-#define node_online(node) ((node) == 0)
-#endif
-
-#ifndef num_online_cpus
-#define num_online_cpus() smp_num_cpus
-#endif
-
-#ifndef cpu_online
-#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
-#endif
-
-#ifndef _LINUX_RANDOM_H
-#include <linux/random.h>
-#endif
-
-#ifndef DECLARE_BITMAP
-#ifndef BITS_TO_LONGS
-#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
-#endif
-#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
-#endif
-
-#ifndef VLAN_HLEN
-#define VLAN_HLEN 4
-#endif
-
-#ifndef VLAN_ETH_HLEN
-#define VLAN_ETH_HLEN 18
-#endif
-
-#ifndef VLAN_ETH_FRAME_LEN
-#define VLAN_ETH_FRAME_LEN 1518
-#endif
-
-#if !defined(IXGBE_DCA) && !defined(IGB_DCA)
-#define dca_get_tag(b) 0
-#define dca_add_requester(a) -1
-#define dca_remove_requester(b) do { } while(0)
-#define DCA_PROVIDER_ADD 0x0001
-#define DCA_PROVIDER_REMOVE 0x0002
-#endif
-
-#ifndef DCA_GET_TAG_TWO_ARGS
-#define dca3_get_tag(a,b) dca_get_tag(b)
-#endif
-
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#if defined(__i386__) || defined(__x86_64__)
-#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#endif
-#endif
-
-/* taken from 2.6.24 definition in linux/kernel.h */
-#ifndef IS_ALIGNED
-#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0)
-#endif
-
-#ifdef IS_ENABLED
-#undef IS_ENABLED
-#undef __ARG_PLACEHOLDER_1
-#undef config_enabled
-#undef _config_enabled
-#undef __config_enabled
-#undef ___config_enabled
-#endif
-
-#define __ARG_PLACEHOLDER_1 0,
-#define config_enabled(cfg) _config_enabled(cfg)
-#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
-#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
-#define ___config_enabled(__ignored, val, ...) val
-
-#define IS_ENABLED(option) \
- (config_enabled(option) || config_enabled(option##_MODULE))
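/* Worked expansion of the placeholder trick above (editorial note).
 * With CONFIG_FOO defined to 1:
 *
 *      config_enabled(CONFIG_FOO)
 *      -> _config_enabled(1)
 *      -> __config_enabled(__ARG_PLACEHOLDER_1)   which expands to "0,"
 *      -> ___config_enabled(0, 1, 0)              second argument -> 1
 *
 * With CONFIG_FOO undefined, __ARG_PLACEHOLDER_CONFIG_FOO is not a
 * macro, so the argument list never splits:
 *
 *      -> ___config_enabled(__ARG_PLACEHOLDER_CONFIG_FOO 1, 0) -> 0
 */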
-
-#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX)
-struct _kc_vlan_ethhdr {
- unsigned char h_dest[ETH_ALEN];
- unsigned char h_source[ETH_ALEN];
- __be16 h_vlan_proto;
- __be16 h_vlan_TCI;
- __be16 h_vlan_encapsulated_proto;
-};
-#define vlan_ethhdr _kc_vlan_ethhdr
-struct _kc_vlan_hdr {
- __be16 h_vlan_TCI;
- __be16 h_vlan_encapsulated_proto;
-};
-#define vlan_hdr _kc_vlan_hdr
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
-#define vlan_tx_tag_present(_skb) 0
-#define vlan_tx_tag_get(_skb) 0
-#endif
-#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */
-
-#ifndef VLAN_PRIO_SHIFT
-#define VLAN_PRIO_SHIFT 13
-#endif
-
-
-#ifndef __GFP_COLD
-#define __GFP_COLD 0
-#endif
-
-#ifndef __GFP_COMP
-#define __GFP_COMP 0
-#endif
-
-/*****************************************************************************/
-/* Installations with an ethtool version lacking eeprom, adapter id, or
- * statistics support */
-
-#ifndef ETH_GSTRING_LEN
-#define ETH_GSTRING_LEN 32
-#endif
-
-#ifndef ETHTOOL_GSTATS
-#define ETHTOOL_GSTATS 0x1d
-#undef ethtool_drvinfo
-#define ethtool_drvinfo k_ethtool_drvinfo
-struct k_ethtool_drvinfo {
- u32 cmd;
- char driver[32];
- char version[32];
- char fw_version[32];
- char bus_info[32];
- char reserved1[32];
- char reserved2[16];
- u32 n_stats;
- u32 testinfo_len;
- u32 eedump_len;
- u32 regdump_len;
-};
-
-struct ethtool_stats {
- u32 cmd;
- u32 n_stats;
- u64 data[0];
-};
-#endif /* ETHTOOL_GSTATS */
-
-#ifndef ETHTOOL_PHYS_ID
-#define ETHTOOL_PHYS_ID 0x1c
-#endif /* ETHTOOL_PHYS_ID */
-
-#ifndef ETHTOOL_GSTRINGS
-#define ETHTOOL_GSTRINGS 0x1b
-enum ethtool_stringset {
- ETH_SS_TEST = 0,
- ETH_SS_STATS,
-};
-struct ethtool_gstrings {
- u32 cmd; /* ETHTOOL_GSTRINGS */
- u32 string_set; /* string set id, e.g. ETH_SS_TEST, etc. */
- u32 len; /* number of strings in the string set */
- u8 data[0];
-};
-#endif /* ETHTOOL_GSTRINGS */
-
-#ifndef ETHTOOL_TEST
-#define ETHTOOL_TEST 0x1a
-enum ethtool_test_flags {
- ETH_TEST_FL_OFFLINE = (1 << 0),
- ETH_TEST_FL_FAILED = (1 << 1),
-};
-struct ethtool_test {
- u32 cmd;
- u32 flags;
- u32 reserved;
- u32 len;
- u64 data[0];
-};
-#endif /* ETHTOOL_TEST */
-
-#ifndef ETHTOOL_GEEPROM
-#define ETHTOOL_GEEPROM 0xb
-#undef ETHTOOL_GREGS
-struct ethtool_eeprom {
- u32 cmd;
- u32 magic;
- u32 offset;
- u32 len;
- u8 data[0];
-};
-
-struct ethtool_value {
- u32 cmd;
- u32 data;
-};
-#endif /* ETHTOOL_GEEPROM */
-
-#ifndef ETHTOOL_GLINK
-#define ETHTOOL_GLINK 0xa
-#endif /* ETHTOOL_GLINK */
-
-#ifndef ETHTOOL_GWOL
-#define ETHTOOL_GWOL 0x5
-#define ETHTOOL_SWOL 0x6
-#define SOPASS_MAX 6
-struct ethtool_wolinfo {
- u32 cmd;
- u32 supported;
- u32 wolopts;
- u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
-};
-#endif /* ETHTOOL_GWOL */
-
-#ifndef ETHTOOL_GREGS
-#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */
-#define ethtool_regs _kc_ethtool_regs
-/* for passing big chunks of data */
-struct _kc_ethtool_regs {
- u32 cmd;
- u32 version; /* driver-specific, indicates different chips/revs */
- u32 len; /* bytes */
- u8 data[0];
-};
-#endif /* ETHTOOL_GREGS */
-
-#ifndef ETHTOOL_GMSGLVL
-#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
-#endif
-#ifndef ETHTOOL_SMSGLVL
-#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
-#endif
-#ifndef ETHTOOL_NWAY_RST
-#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */
-#endif
-#ifndef ETHTOOL_GLINK
-#define ETHTOOL_GLINK 0x0000000a /* Get link status */
-#endif
-#ifndef ETHTOOL_GEEPROM
-#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
-#endif
-#ifndef ETHTOOL_SEEPROM
-#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
-#endif
-#ifndef ETHTOOL_GCOALESCE
-#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
-/* for configuring coalescing parameters of chip */
-#define ethtool_coalesce _kc_ethtool_coalesce
-struct _kc_ethtool_coalesce {
- u32 cmd; /* ETHTOOL_{G,S}COALESCE */
-
- /* How many usecs to delay an RX interrupt after
- * a packet arrives. If 0, only rx_max_coalesced_frames
- * is used.
- */
- u32 rx_coalesce_usecs;
-
- /* How many packets to delay an RX interrupt after
- * a packet arrives. If 0, only rx_coalesce_usecs is
- * used. It is illegal to set both usecs and max frames
- * to zero as this would cause RX interrupts to never be
- * generated.
- */
- u32 rx_max_coalesced_frames;
-
- /* Same as above two parameters, except that these values
- * apply while an IRQ is being serviced by the host. Not
- * all cards support this feature and the values are ignored
- * in that case.
- */
- u32 rx_coalesce_usecs_irq;
- u32 rx_max_coalesced_frames_irq;
-
- /* How many usecs to delay a TX interrupt after
- * a packet is sent. If 0, only tx_max_coalesced_frames
- * is used.
- */
- u32 tx_coalesce_usecs;
-
- /* How many packets to delay a TX interrupt after
- * a packet is sent. If 0, only tx_coalesce_usecs is
- * used. It is illegal to set both usecs and max frames
- * to zero as this would cause TX interrupts to never be
- * generated.
- */
- u32 tx_max_coalesced_frames;
-
- /* Same as above two parameters, except that these values
- * apply while an IRQ is being serviced by the host. Not
- * all cards support this feature and the values are ignored
- * in that case.
- */
- u32 tx_coalesce_usecs_irq;
- u32 tx_max_coalesced_frames_irq;
-
- /* How many usecs to delay in-memory statistics
- * block updates. Some drivers do not have an in-memory
- * statistic block, and in such cases this value is ignored.
- * This value must not be zero.
- */
- u32 stats_block_coalesce_usecs;
-
- /* Adaptive RX/TX coalescing is an algorithm implemented by
- * some drivers to improve latency under low packet rates and
- * improve throughput under high packet rates. Some drivers
- * only implement one of RX or TX adaptive coalescing. Anything
- * not implemented by the driver causes these values to be
- * silently ignored.
- */
- u32 use_adaptive_rx_coalesce;
- u32 use_adaptive_tx_coalesce;
-
- /* When the packet rate (measured in packets per second)
- * is below pkt_rate_low, the {rx,tx}_*_low parameters are
- * used.
- */
- u32 pkt_rate_low;
- u32 rx_coalesce_usecs_low;
- u32 rx_max_coalesced_frames_low;
- u32 tx_coalesce_usecs_low;
- u32 tx_max_coalesced_frames_low;
-
- /* When the packet rate is below pkt_rate_high but above
- * pkt_rate_low (both measured in packets per second) the
- * normal {rx,tx}_* coalescing parameters are used.
- */
-
- /* When the packet rate (measured in packets per second)
- * is above pkt_rate_high, the {rx,tx}_*_high parameters are
- * used.
- */
- u32 pkt_rate_high;
- u32 rx_coalesce_usecs_high;
- u32 rx_max_coalesced_frames_high;
- u32 tx_coalesce_usecs_high;
- u32 tx_max_coalesced_frames_high;
-
- /* How often to do adaptive coalescing packet rate sampling,
- * measured in seconds. Must not be zero.
- */
- u32 rate_sample_interval;
-};
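/* Example with illustrative values: request an RX interrupt after 50us
 * or 16 received frames, whichever comes first.
 *
 *      struct ethtool_coalesce ec = { 0 };
 *
 *      ec.cmd = ETHTOOL_SCOALESCE;
 *      ec.rx_coalesce_usecs = 50;
 *      ec.rx_max_coalesced_frames = 16;
 */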
-#endif /* ETHTOOL_GCOALESCE */
-
-#ifndef ETHTOOL_SCOALESCE
-#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
-#endif
-#ifndef ETHTOOL_GRINGPARAM
-#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
-/* for configuring RX/TX ring parameters */
-#define ethtool_ringparam _kc_ethtool_ringparam
-struct _kc_ethtool_ringparam {
- u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
-
- /* Read only attributes. These indicate the maximum number
- * of pending RX/TX ring entries the driver will allow the
- * user to set.
- */
- u32 rx_max_pending;
- u32 rx_mini_max_pending;
- u32 rx_jumbo_max_pending;
- u32 tx_max_pending;
-
- /* Values changeable by the user. The valid values are
- * in the range 1 to the "*_max_pending" counterpart above.
- */
- u32 rx_pending;
- u32 rx_mini_pending;
- u32 rx_jumbo_pending;
- u32 tx_pending;
-};
-#endif /* ETHTOOL_GRINGPARAM */
-
-#ifndef ETHTOOL_SRINGPARAM
-#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
-#endif
-#ifndef ETHTOOL_GPAUSEPARAM
-#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
-/* for configuring link flow control parameters */
-#define ethtool_pauseparam _kc_ethtool_pauseparam
-struct _kc_ethtool_pauseparam {
- u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
-
- /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
- * being true) the user may set 'autoneg' here non-zero to have the
- * pause parameters be auto-negotiated too. In such a case, the
- * {rx,tx}_pause values below determine what capabilities are
- * advertised.
- *
- * If 'autoneg' is zero or the link is not being auto-negotiated,
- * then {rx,tx}_pause force the driver to use/not-use pause
- * flow control.
- */
- u32 autoneg;
- u32 rx_pause;
- u32 tx_pause;
-};
-#endif /* ETHTOOL_GPAUSEPARAM */
-
-#ifndef ETHTOOL_SPAUSEPARAM
-#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
-#endif
-#ifndef ETHTOOL_GRXCSUM
-#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
-#endif
-#ifndef ETHTOOL_SRXCSUM
-#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
-#endif
-#ifndef ETHTOOL_GTXCSUM
-#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
-#endif
-#ifndef ETHTOOL_STXCSUM
-#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
-#endif
-#ifndef ETHTOOL_GSG
-#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
- * (ethtool_value) */
-#endif
-#ifndef ETHTOOL_SSG
-#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
- * (ethtool_value). */
-#endif
-#ifndef ETHTOOL_TEST
-#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */
-#endif
-#ifndef ETHTOOL_GSTRINGS
-#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
-#endif
-#ifndef ETHTOOL_PHYS_ID
-#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
-#endif
-#ifndef ETHTOOL_GSTATS
-#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
-#endif
-#ifndef ETHTOOL_GTSO
-#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
-#endif
-#ifndef ETHTOOL_STSO
-#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
-#endif
-
-#ifndef ETHTOOL_BUSINFO_LEN
-#define ETHTOOL_BUSINFO_LEN 32
-#endif
-
-#ifndef RHEL_RELEASE_VERSION
-#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
-#endif
-#ifndef AX_RELEASE_VERSION
-#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b))
-#endif
-
-#ifndef AX_RELEASE_CODE
-#define AX_RELEASE_CODE 0
-#endif
-
-#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0))
-#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0)
-#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1))
-#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1)
-#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2))
-#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3)
-#endif
-
-#ifndef RHEL_RELEASE_CODE
-/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
-#define RHEL_RELEASE_CODE 0
-#endif
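
RHEL_RELEASE_VERSION packs major and minor into one integer ((5 << 8) + 4 =
0x0504), so ordinary comparisons order releases correctly as long as the
minor number stays below 256. A standalone illustration:

    #include <stdio.h>

    #define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))

    int main(void)
    {
            /* 5.4 -> 0x0504, 5.10 -> 0x050a; comparisons follow release order */
            printf("5.4  = 0x%04x\n", RHEL_RELEASE_VERSION(5,4));
            printf("5.10 = 0x%04x\n", RHEL_RELEASE_VERSION(5,10));
            printf("5.10 > 5.4: %d\n",
                   RHEL_RELEASE_VERSION(5,10) > RHEL_RELEASE_VERSION(5,4));
            return 0;
    }
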
-
-/* SuSE version macro is the same as Linux kernel version */
-#ifndef SLE_VERSION
-#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
-#endif
-#ifdef CONFIG_SUSE_KERNEL
-#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
-/* SLES11 GA is 2.6.27 based */
-#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
-#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
-/* SLES11 SP1 is 2.6.32 based */
-#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
-#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,61)) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)))
-/* SLES11 SP3 is at least 3.0.61+ based */
-#define SLE_VERSION_CODE SLE_VERSION(11,3,0)
-#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
-#endif /* CONFIG_SUSE_KERNEL */
-#ifndef SLE_VERSION_CODE
-#define SLE_VERSION_CODE 0
-#endif /* SLE_VERSION_CODE */
-
-/* Ubuntu release and kernel codes must be specified from Makefile */
-#ifndef UBUNTU_RELEASE_VERSION
-#define UBUNTU_RELEASE_VERSION(a,b) (((a) * 100) + (b))
-#endif
-#ifndef UBUNTU_KERNEL_VERSION
-#define UBUNTU_KERNEL_VERSION(a,b,c,abi,upload) (((a) << 40) + ((b) << 32) + ((c) << 24) + ((abi) << 8) + (upload))
-#endif
-#ifndef UBUNTU_RELEASE_CODE
-#define UBUNTU_RELEASE_CODE 0
-#endif
-#ifndef UBUNTU_KERNEL_CODE
-#define UBUNTU_KERNEL_CODE 0
-#endif
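
UBUNTU_KERNEL_VERSION shifts its first field up by 40 bits, so the operands
must be wider than a 32-bit int when real values are substituted. A sketch
using unsigned long long constants (the version numbers here are made up):

    #include <stdio.h>

    #define UBUNTU_KERNEL_VERSION(a,b,c,abi,upload) \
            (((a) << 40) + ((b) << 32) + ((c) << 24) + ((abi) << 8) + (upload))

    int main(void)
    {
            /* pass ULL constants: a plain "3 << 40" would overflow an int */
            unsigned long long code =
                    UBUNTU_KERNEL_VERSION(3ULL, 13ULL, 0ULL, 24ULL, 46ULL);
            printf("0x%llx\n", code);
            return 0;
    }
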
-
-#ifdef __KLOCWORK__
-#ifdef ARRAY_SIZE
-#undef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-#endif /* __KLOCWORK__ */
-
-/*****************************************************************************/
-/* 2.4.3 => 2.4.0 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
-
-/**************************************/
-/* PCI DRIVER API */
-
-#ifndef pci_set_dma_mask
-#define pci_set_dma_mask _kc_pci_set_dma_mask
-extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
-#endif
-
-#ifndef pci_request_regions
-#define pci_request_regions _kc_pci_request_regions
-extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
-#endif
-
-#ifndef pci_release_regions
-#define pci_release_regions _kc_pci_release_regions
-extern void _kc_pci_release_regions(struct pci_dev *pdev);
-#endif
-
-/**************************************/
-/* NETWORK DRIVER API */
-
-#ifndef alloc_etherdev
-#define alloc_etherdev _kc_alloc_etherdev
-extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
-#endif
-
-#ifndef is_valid_ether_addr
-#define is_valid_ether_addr _kc_is_valid_ether_addr
-extern int _kc_is_valid_ether_addr(u8 *addr);
-#endif
-
-/**************************************/
-/* MISCELLANEOUS */
-
-#ifndef INIT_TQUEUE
-#define INIT_TQUEUE(_tq, _routine, _data) \
- do { \
- INIT_LIST_HEAD(&(_tq)->list); \
- (_tq)->sync = 0; \
- (_tq)->routine = _routine; \
- (_tq)->data = _data; \
- } while (0)
-#endif
-
-#endif /* 2.4.3 => 2.4.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
-/* Generic MII registers. */
-#define MII_BMCR 0x00 /* Basic mode control register */
-#define MII_BMSR 0x01 /* Basic mode status register */
-#define MII_PHYSID1 0x02 /* PHYS ID 1 */
-#define MII_PHYSID2 0x03 /* PHYS ID 2 */
-#define MII_ADVERTISE 0x04 /* Advertisement control reg */
-#define MII_LPA 0x05 /* Link partner ability reg */
-#define MII_EXPANSION 0x06 /* Expansion register */
-/* Basic mode control register. */
-#define BMCR_FULLDPLX 0x0100 /* Full duplex */
-#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
-/* Basic mode status register. */
-#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
-#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
-#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
-#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
-#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
-#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
-/* Advertisement control register. */
-#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
-#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
-#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
-#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
-#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
-#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
- ADVERTISE_100HALF | ADVERTISE_100FULL)
-/* Expansion register for auto-negotiation. */
-#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */
-#endif
-
-/*****************************************************************************/
-/* 2.4.6 => 2.4.3 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
-
-#ifndef pci_set_power_state
-#define pci_set_power_state _kc_pci_set_power_state
-extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
-#endif
-
-#ifndef pci_enable_wake
-#define pci_enable_wake _kc_pci_enable_wake
-extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
-#endif
-
-#ifndef pci_disable_device
-#define pci_disable_device _kc_pci_disable_device
-extern void _kc_pci_disable_device(struct pci_dev *pdev);
-#endif
-
-/* PCI PM entry point syntax changed, so don't support suspend/resume */
-#undef CONFIG_PM
-
-#endif /* 2.4.6 => 2.4.3 */
-
-#ifndef HAVE_PCI_SET_MWI
-#define pci_set_mwi(X) pci_write_config_word(X, \
- PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
- PCI_COMMAND_INVALIDATE);
-#define pci_clear_mwi(X) pci_write_config_word(X, \
- PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
- ~PCI_COMMAND_INVALIDATE);
-#endif
-
-/*****************************************************************************/
-/* 2.4.10 => 2.4.9 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
-
-/**************************************/
-/* MODULE API */
-
-#ifndef MODULE_LICENSE
- #define MODULE_LICENSE(X)
-#endif
-
-/**************************************/
-/* OTHER */
-
-#undef min
-#define min(x,y) ({ \
- const typeof(x) _x = (x); \
- const typeof(y) _y = (y); \
- (void) (&_x == &_y); \
- _x < _y ? _x : _y; })
-
-#undef max
-#define max(x,y) ({ \
- const typeof(x) _x = (x); \
- const typeof(y) _y = (y); \
- (void) (&_x == &_y); \
- _x > _y ? _x : _y; })
-
-#define min_t(type,x,y) ({ \
- type _x = (x); \
- type _y = (y); \
- _x < _y ? _x : _y; })
-
-#define max_t(type,x,y) ({ \
- type _x = (x); \
- type _y = (y); \
- _x > _y ? _x : _y; })
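
The otherwise dead "(void) (&_x == &_y)" in min/max above exists only to make
the compiler warn when x and y have different types (comparing pointers to
distinct types draws a diagnostic), while min_t/max_t instead cast both sides
to a caller-chosen type. A standalone demonstration, assuming the GCC/Clang
statement-expression extension the macros themselves rely on:

    #include <stdio.h>

    #define min(x,y) ({ \
            const __typeof__(x) _x = (x); \
            const __typeof__(y) _y = (y); \
            (void) (&_x == &_y);    /* warns if x and y differ in type */ \
            _x < _y ? _x : _y; })

    #define min_t(type,x,y) ({ \
            type _x = (x); \
            type _y = (y); \
            _x < _y ? _x : _y; })   /* casts both sides; no type check */

    int main(void)
    {
            int a = 3;
            long b = 5;
            printf("%d\n", min(a, 4));              /* same types: silent */
            printf("%ld\n", min_t(long, a, b));     /* mixed types: use min_t */
            return 0;
    }
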
-
-#ifndef list_for_each_safe
-#define list_for_each_safe(pos, n, head) \
- for (pos = (head)->next, n = pos->next; pos != (head); \
- pos = n, n = pos->next)
-#endif
-
-#ifndef ____cacheline_aligned_in_smp
-#ifdef CONFIG_SMP
-#define ____cacheline_aligned_in_smp ____cacheline_aligned
-#else
-#define ____cacheline_aligned_in_smp
-#endif /* CONFIG_SMP */
-#endif
-
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
-extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...);
-#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
-extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
-#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
-#else /* 2.4.8 => 2.4.9 */
-extern int snprintf(char * buf, size_t size, const char *fmt, ...);
-extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
-#endif
-#endif /* 2.4.10 => 2.4.9 */
-
-
-/*****************************************************************************/
-/* 2.4.12 => 2.4.10 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) )
-#ifndef HAVE_NETIF_MSG
-#define HAVE_NETIF_MSG 1
-enum {
- NETIF_MSG_DRV = 0x0001,
- NETIF_MSG_PROBE = 0x0002,
- NETIF_MSG_LINK = 0x0004,
- NETIF_MSG_TIMER = 0x0008,
- NETIF_MSG_IFDOWN = 0x0010,
- NETIF_MSG_IFUP = 0x0020,
- NETIF_MSG_RX_ERR = 0x0040,
- NETIF_MSG_TX_ERR = 0x0080,
- NETIF_MSG_TX_QUEUED = 0x0100,
- NETIF_MSG_INTR = 0x0200,
- NETIF_MSG_TX_DONE = 0x0400,
- NETIF_MSG_RX_STATUS = 0x0800,
- NETIF_MSG_PKTDATA = 0x1000,
- NETIF_MSG_HW = 0x2000,
- NETIF_MSG_WOL = 0x4000,
-};
-
-#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
-#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
-#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
-#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
-#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
-#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
-#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
-#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
-#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
-#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
-#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
-#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
-#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
-#endif /* !HAVE_NETIF_MSG */
-#endif /* 2.4.12 => 2.4.10 */
-
-/*****************************************************************************/
-/* 2.4.13 => 2.4.12 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
-
-/**************************************/
-/* PCI DMA MAPPING */
-
-#ifndef virt_to_page
- #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
-#endif
-
-#ifndef pci_map_page
-#define pci_map_page _kc_pci_map_page
-extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
-#endif
-
-#ifndef pci_unmap_page
-#define pci_unmap_page _kc_pci_unmap_page
-extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
-#endif
-
-/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
-
-#undef DMA_32BIT_MASK
-#define DMA_32BIT_MASK 0xffffffff
-#undef DMA_64BIT_MASK
-#define DMA_64BIT_MASK 0xffffffff
-
-/**************************************/
-/* OTHER */
-
-#ifndef cpu_relax
-#define cpu_relax() rep_nop()
-#endif
-
-struct vlan_ethhdr {
- unsigned char h_dest[ETH_ALEN];
- unsigned char h_source[ETH_ALEN];
- unsigned short h_vlan_proto;
- unsigned short h_vlan_TCI;
- unsigned short h_vlan_encapsulated_proto;
-};
-#endif /* 2.4.13 => 2.4.12 */
-
-/*****************************************************************************/
-/* 2.4.17 => 2.4.12 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
-
-#ifndef __devexit_p
- #define __devexit_p(x) &(x)
-#endif
-
-#else
-	/* As of kernel 3.8 these are no longer defined, so undefine them all and provide empty replacements */
- #undef __devexit_p
- #undef __devexit
- #undef __devinit
- #undef __devinitdata
- #define __devexit_p(x) &(x)
- #define __devexit
- #define __devinit
- #define __devinitdata
-
-#endif /* 2.4.17 => 2.4.12 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) )
-#define NETIF_MSG_HW 0x2000
-#define NETIF_MSG_WOL 0x4000
-
-#ifndef netif_msg_hw
-#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
-#endif
-#ifndef netif_msg_wol
-#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
-#endif
-#endif /* 2.4.18 */
-
-/*****************************************************************************/
-
-/*****************************************************************************/
-/* 2.4.20 => 2.4.19 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
-
-/* we won't support NAPI on less than 2.4.20 */
-#ifdef NAPI
-#undef NAPI
-#endif
-
-#endif /* 2.4.20 => 2.4.19 */
-
-/*****************************************************************************/
-/* 2.4.22 => 2.4.17 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
-#define pci_name(x) ((x)->slot_name)
-
-#ifndef SUPPORTED_10000baseT_Full
-#define SUPPORTED_10000baseT_Full (1 << 12)
-#endif
-#ifndef ADVERTISED_10000baseT_Full
-#define ADVERTISED_10000baseT_Full (1 << 12)
-#endif
-#endif
-
-/*****************************************************************************/
-/* 2.4.22 => 2.4.17 */
-
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
-#ifndef IGB_NO_LRO
-#define IGB_NO_LRO
-#endif
-#endif
-
-/*****************************************************************************/
-/* 2.4.23 => 2.4.22 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
-/*****************************************************************************/
-#ifdef NAPI
-#ifndef netif_poll_disable
-#define netif_poll_disable(x) _kc_netif_poll_disable(x)
-static inline void _kc_netif_poll_disable(struct net_device *netdev)
-{
- while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
- /* No hurry */
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(1);
- }
-}
-#endif
-#ifndef netif_poll_enable
-#define netif_poll_enable(x) _kc_netif_poll_enable(x)
-static inline void _kc_netif_poll_enable(struct net_device *netdev)
-{
- clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
-}
-#endif
-#endif /* NAPI */
-#ifndef netif_tx_disable
-#define netif_tx_disable(x) _kc_netif_tx_disable(x)
-static inline void _kc_netif_tx_disable(struct net_device *dev)
-{
- spin_lock_bh(&dev->xmit_lock);
- netif_stop_queue(dev);
- spin_unlock_bh(&dev->xmit_lock);
-}
-#endif
-#else /* 2.4.23 => 2.4.22 */
-#define HAVE_SCTP
-#endif /* 2.4.23 => 2.4.22 */
-
-/*****************************************************************************/
-/* < 2.4.25 or 2.6.0 => 2.6.4 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
- ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
- LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
-#define ETHTOOL_OPS_COMPAT
-#endif /* 2.6.4 => 2.6.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) )
-#define __user
-#endif /* < 2.4.27 */
-
-/*****************************************************************************/
-/* 2.5.71 => 2.4.x */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
-#define sk_protocol protocol
-#define pci_get_device pci_find_device
-#endif /* 2.5.71 => 2.4.x */
-
-/*****************************************************************************/
-/* < 2.4.27 or 2.6.0 <= 2.6.5 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
- ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
- LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
-
-#ifndef netif_msg_init
-#define netif_msg_init _kc_netif_msg_init
-static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
-{
- /* use default */
- if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
- return default_msg_enable_bits;
- if (debug_value == 0) /* no output */
- return 0;
- /* set low N bits */
-	return (1 << debug_value) - 1;
-}
-#endif
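
So a module debug parameter of N enables the lowest N message classes:
debug_value 3 yields 0x7 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK).
A quick standalone check of that mapping:

    #include <stdio.h>

    static unsigned int netif_msg_init(int debug_value, int default_bits)
    {
            if (debug_value < 0 || debug_value >= (int)(sizeof(unsigned int) * 8))
                    return default_bits;    /* out of range: use the defaults */
            if (debug_value == 0)
                    return 0;               /* explicit 0 silences everything */
            return (1u << debug_value) - 1; /* set the low N bits */
    }

    int main(void)
    {
            printf("0x%x 0x%x 0x%x\n",
                   netif_msg_init(3, 0x7),          /* 0x7 */
                   netif_msg_init(0, 0x7),          /* 0x0 */
                   netif_msg_init(-1, 0x7));        /* default 0x7 */
            return 0;
    }
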
-
-#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
-/*****************************************************************************/
-#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
- (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
- ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
-#define netdev_priv(x) x->priv
-#endif
-
-/*****************************************************************************/
-/* <= 2.5.0 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
-#include <linux/rtnetlink.h>
-#undef pci_register_driver
-#define pci_register_driver pci_module_init
-
-/*
- * Most of the dma compat code is copied/modified from the 2.4.37
- * /include/linux/libata-compat.h header file
- */
-/* These definitions mirror those in pci.h, so they can be used
- * interchangeably with their PCI_ counterparts */
-enum dma_data_direction {
- DMA_BIDIRECTIONAL = 0,
- DMA_TO_DEVICE = 1,
- DMA_FROM_DEVICE = 2,
- DMA_NONE = 3,
-};
-
-struct device {
- struct pci_dev pdev;
-};
-
-static inline struct pci_dev *to_pci_dev (struct device *dev)
-{
- return (struct pci_dev *) dev;
-}
-static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
-{
- return (struct device *) pdev;
-}
-
-#define pdev_printk(lvl, pdev, fmt, args...) \
- printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
-#define dev_err(dev, fmt, args...) \
- pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
-#define dev_info(dev, fmt, args...) \
- pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
-#define dev_warn(dev, fmt, args...) \
- pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
-#define dev_notice(dev, fmt, args...) \
- pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
-#define dev_dbg(dev, fmt, args...) \
- pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args)
-
-/* NOTE: dangerous! we ignore the 'gfp' argument */
-#define dma_alloc_coherent(dev,sz,dma,gfp) \
- pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
-#define dma_free_coherent(dev,sz,addr,dma_addr) \
- pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
-
-#define dma_map_page(dev,a,b,c,d) \
- pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
-#define dma_unmap_page(dev,a,b,c) \
- pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
-
-#define dma_map_single(dev,a,b,c) \
- pci_map_single(to_pci_dev(dev),(a),(b),(c))
-#define dma_unmap_single(dev,a,b,c) \
- pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
-
-#define dma_map_sg(dev, sg, nents, dir) \
-	pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
-#define dma_unmap_sg(dev, sg, nents, dir) \
-	pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
-
-#define dma_sync_single(dev,a,b,c) \
- pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
-
-/* for range just sync everything, that's all the pci API can do */
-#define dma_sync_single_range(dev,addr,off,sz,dir) \
- pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
-
-#define dma_set_mask(dev,mask) \
- pci_set_dma_mask(to_pci_dev(dev),(mask))
-
-/* hlist_* code - doubly linked lists */
-struct hlist_head {
- struct hlist_node *first;
-};
-
-struct hlist_node {
- struct hlist_node *next, **pprev;
-};
-
-static inline void __hlist_del(struct hlist_node *n)
-{
- struct hlist_node *next = n->next;
- struct hlist_node **pprev = n->pprev;
- *pprev = next;
- if (next)
- next->pprev = pprev;
-}
-
-static inline void hlist_del(struct hlist_node *n)
-{
- __hlist_del(n);
- n->next = NULL;
- n->pprev = NULL;
-}
-
-static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
-{
- struct hlist_node *first = h->first;
- n->next = first;
- if (first)
- first->pprev = &n->next;
- h->first = n;
- n->pprev = &h->first;
-}
-
-static inline int hlist_empty(const struct hlist_head *h)
-{
- return !h->first;
-}
-#define HLIST_HEAD_INIT { .first = NULL }
-#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
-#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
-static inline void INIT_HLIST_NODE(struct hlist_node *h)
-{
- h->next = NULL;
- h->pprev = NULL;
-}
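
The pprev double-pointer is what lets hlist_del unlink a node without a
reference to the head. A freestanding usage sketch of the compat hlist above:

    #include <stdio.h>
    #include <stddef.h>

    struct hlist_head { struct hlist_node *first; };
    struct hlist_node { struct hlist_node *next, **pprev; };

    static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
    {
            struct hlist_node *first = h->first;
            n->next = first;
            if (first)
                    first->pprev = &n->next;
            h->first = n;
            n->pprev = &h->first;
    }

    static void hlist_del(struct hlist_node *n)
    {
            /* *pprev is either head->first or the previous node's ->next */
            *n->pprev = n->next;
            if (n->next)
                    n->next->pprev = n->pprev;
    }

    int main(void)
    {
            struct hlist_head head = { NULL };
            struct hlist_node a, b;

            hlist_add_head(&a, &head);
            hlist_add_head(&b, &head);      /* list is now: b -> a */
            hlist_del(&b);                  /* no head pointer needed */
            printf("first == &a: %d\n", head.first == &a);  /* prints 1 */
            return 0;
    }
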
-
-#ifndef might_sleep
-#define might_sleep()
-#endif
-#else
-static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
-{
- return &pdev->dev;
-}
-#endif /* <= 2.5.0 */
-
-/*****************************************************************************/
-/* 2.5.28 => 2.4.23 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
-
-#include <linux/tqueue.h>
-#define work_struct tq_struct
-#undef INIT_WORK
-#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
-#undef container_of
-#define container_of list_entry
-#define schedule_work schedule_task
-#define flush_scheduled_work flush_scheduled_tasks
-#define cancel_work_sync(x) flush_scheduled_work()
-
-#endif /* 2.5.28 => 2.4.23 */
-
-/*****************************************************************************/
-/* 2.6.0 => 2.5.28 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
-#ifndef read_barrier_depends
-#define read_barrier_depends() rmb()
-#endif
-
-#undef get_cpu
-#define get_cpu() smp_processor_id()
-#undef put_cpu
-#define put_cpu() do { } while(0)
-#define MODULE_INFO(version, _version)
-#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
-#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
-#endif
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
-#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
-#endif
-
-#define dma_set_coherent_mask(dev,mask) 1
-
-#undef dev_put
-#define dev_put(dev) __dev_put(dev)
-
-#ifndef skb_fill_page_desc
-#define skb_fill_page_desc _kc_skb_fill_page_desc
-extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
-#endif
-
-#undef ALIGN
-#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
-
-#ifndef page_count
-#define page_count(p) atomic_read(&(p)->count)
-#endif
-
-#ifdef MAX_NUMNODES
-#undef MAX_NUMNODES
-#endif
-#define MAX_NUMNODES 1
-
-/* find_first_bit and find_next_bit are not defined for most
- * 2.4 kernels (except for the redhat 2.4.21 kernels)
- */
-#include <linux/bitops.h>
-#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
-#undef find_next_bit
-#define find_next_bit _kc_find_next_bit
-extern unsigned long _kc_find_next_bit(const unsigned long *addr,
- unsigned long size,
- unsigned long offset);
-#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
-
-
-#ifndef netdev_name
-static inline const char *_kc_netdev_name(const struct net_device *dev)
-{
- if (strchr(dev->name, '%'))
- return "(unregistered net_device)";
- return dev->name;
-}
-#define netdev_name(netdev) _kc_netdev_name(netdev)
-#endif /* netdev_name */
-
-#ifndef strlcpy
-#define strlcpy _kc_strlcpy
-extern size_t _kc_strlcpy(char *dest, const char *src, size_t size);
-#endif /* strlcpy */
-
-#ifndef do_div
-#if BITS_PER_LONG == 64
-# define do_div(n,base) ({ \
- uint32_t __base = (base); \
- uint32_t __rem; \
- __rem = ((uint64_t)(n)) % __base; \
- (n) = ((uint64_t)(n)) / __base; \
- __rem; \
- })
-#elif BITS_PER_LONG == 32
-extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor);
-# define do_div(n,base) ({ \
- uint32_t __base = (base); \
- uint32_t __rem; \
- if (likely(((n) >> 32) == 0)) { \
- __rem = (uint32_t)(n) % __base; \
- (n) = (uint32_t)(n) / __base; \
- } else \
- __rem = _kc__div64_32(&(n), __base); \
- __rem; \
- })
-#else /* BITS_PER_LONG == ?? */
-# error do_div() does not yet support the C64
-#endif /* BITS_PER_LONG */
-#endif /* do_div */
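
Note the calling convention: do_div() divides in place, replacing n with the
quotient and evaluating to the remainder, which avoids pulling in a 64-by-32
libgcc division on 32-bit builds. A userspace sketch of the 64-bit branch
above (GCC/Clang statement expressions assumed):

    #include <stdio.h>
    #include <stdint.h>

    #define do_div(n,base) ({ \
            uint32_t __base = (base); \
            uint32_t __rem; \
            __rem = ((uint64_t)(n)) % __base; \
            (n) = ((uint64_t)(n)) / __base; \
            __rem; })

    int main(void)
    {
            uint64_t ns = 3123456789ULL;
            uint32_t rem = do_div(ns, 1000000000u); /* ns becomes the quotient */
            printf("%llu s, %u ns\n", (unsigned long long)ns, rem);
            return 0;       /* prints: 3 s, 123456789 ns */
    }
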
-
-#ifndef NSEC_PER_SEC
-#define NSEC_PER_SEC 1000000000L
-#endif
-
-#undef HAVE_I2C_SUPPORT
-#else /* 2.6.0 */
-#if IS_ENABLED(CONFIG_I2C_ALGOBIT) && \
- (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,9)))
-#define HAVE_I2C_SUPPORT
-#endif /* IS_ENABLED(CONFIG_I2C_ALGOBIT) */
-
-#endif /* 2.6.0 => 2.5.28 */
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
-#define dma_pool pci_pool
-#define dma_pool_destroy pci_pool_destroy
-#define dma_pool_alloc pci_pool_alloc
-#define dma_pool_free pci_pool_free
-
-#define dma_pool_create(name,dev,size,align,allocation) \
- pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
-#endif /* < 2.6.3 */
-
-/*****************************************************************************/
-/* 2.6.4 => 2.6.0 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
-#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
-#endif /* 2.6.4 => 2.6.0 */
-
-/*****************************************************************************/
-/* 2.6.5 => 2.6.0 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
-#define dma_sync_single_for_cpu dma_sync_single
-#define dma_sync_single_for_device dma_sync_single
-#define dma_sync_single_range_for_cpu dma_sync_single_range
-#define dma_sync_single_range_for_device dma_sync_single_range
-#ifndef pci_dma_mapping_error
-#define pci_dma_mapping_error _kc_pci_dma_mapping_error
-static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
-{
- return dma_addr == 0;
-}
-#endif
-#endif /* 2.6.5 => 2.6.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
-extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
-#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
-#endif /* < 2.6.4 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
-/* taken from 2.6 include/linux/bitmap.h */
-#undef bitmap_zero
-#define bitmap_zero _kc_bitmap_zero
-static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- *dst = 0UL;
- else {
- int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0, len);
- }
-}
-#define random_ether_addr _kc_random_ether_addr
-static inline void _kc_random_ether_addr(u8 *addr)
-{
- get_random_bytes(addr, ETH_ALEN);
- addr[0] &= 0xfe; /* clear multicast */
- addr[0] |= 0x02; /* set local assignment */
-}
-#define page_to_nid(x) 0
-
-#endif /* < 2.6.6 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
-#undef if_mii
-#define if_mii _kc_if_mii
-static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
-{
- return (struct mii_ioctl_data *) &rq->ifr_ifru;
-}
-
-#ifndef __force
-#define __force
-#endif
-#endif /* < 2.6.7 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
-#ifndef PCI_EXP_DEVCTL
-#define PCI_EXP_DEVCTL 8
-#endif
-#ifndef PCI_EXP_DEVCTL_CERE
-#define PCI_EXP_DEVCTL_CERE 0x0001
-#endif
-#define PCI_EXP_FLAGS 2 /* Capabilities register */
-#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
-#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
-#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
-#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
-#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
-#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
-#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
-#define PCI_EXP_DEVCAP 4 /* Device capabilities */
-#define PCI_EXP_DEVSTA 10 /* Device Status */
-#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
- schedule_timeout((x * HZ)/1000 + 2); \
- } while (0)
-
-#endif /* < 2.6.8 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
-#include <net/dsfield.h>
-#define __iomem
-
-#ifndef kcalloc
-#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
-extern void *_kc_kzalloc(size_t size, int flags);
-#endif
-#define MSEC_PER_SEC 1000L
-static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
-{
-#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
- return (MSEC_PER_SEC / HZ) * j;
-#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
- return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
-#else
- return (j * MSEC_PER_SEC) / HZ;
-#endif
-}
-static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
-{
- if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
- return MAX_JIFFY_OFFSET;
-#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
- return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
-#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
- return m * (HZ / MSEC_PER_SEC);
-#else
- return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
-#endif
-}
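
The preprocessor branches above merely pick the cheapest exact arithmetic for
the build-time HZ; msecs_to_jiffies also rounds up so a sleep is never
shorter than requested. A quick check with HZ pinned to 100 (10 ms ticks):

    #include <stdio.h>

    #define HZ 100
    #define MSEC_PER_SEC 1000L

    static unsigned int jiffies_to_msecs(unsigned long j)
    {
            return (MSEC_PER_SEC / HZ) * j;         /* HZ divides 1000: exact */
    }

    static unsigned long msecs_to_jiffies(unsigned int m)
    {
            /* round up: never sleep for less than the caller asked */
            return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
    }

    int main(void)
    {
            printf("%u ms, %lu jiffies\n",
                   jiffies_to_msecs(5), msecs_to_jiffies(25));
            return 0;       /* prints: 50 ms, 3 jiffies (25 ms rounds up) */
    }
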
-
-#define msleep_interruptible _kc_msleep_interruptible
-static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
-{
- unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
-
- while (timeout && !signal_pending(current)) {
- __set_current_state(TASK_INTERRUPTIBLE);
- timeout = schedule_timeout(timeout);
- }
- return _kc_jiffies_to_msecs(timeout);
-}
-
-/* Basic mode control register. */
-#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
-
-#ifndef __le16
-#define __le16 u16
-#endif
-#ifndef __le32
-#define __le32 u32
-#endif
-#ifndef __le64
-#define __le64 u64
-#endif
-#ifndef __be16
-#define __be16 u16
-#endif
-#ifndef __be32
-#define __be32 u32
-#endif
-#ifndef __be64
-#define __be64 u64
-#endif
-
-static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
-{
- return (struct vlan_ethhdr *)skb->mac.raw;
-}
-
-/* Wake-On-Lan options. */
-#define WAKE_PHY (1 << 0)
-#define WAKE_UCAST (1 << 1)
-#define WAKE_MCAST (1 << 2)
-#define WAKE_BCAST (1 << 3)
-#define WAKE_ARP (1 << 4)
-#define WAKE_MAGIC (1 << 5)
-#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
-
-#define skb_header_pointer _kc_skb_header_pointer
-static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
- int offset, int len, void *buffer)
-{
- int hlen = skb_headlen(skb);
-
- if (hlen - offset >= len)
- return skb->data + offset;
-
-#ifdef MAX_SKB_FRAGS
- if (skb_copy_bits(skb, offset, buffer, len) < 0)
- return NULL;
-
- return buffer;
-#else
- return NULL;
-#endif
-}
-
-#ifndef NETDEV_TX_OK
-#define NETDEV_TX_OK 0
-#endif
-#ifndef NETDEV_TX_BUSY
-#define NETDEV_TX_BUSY 1
-#endif
-#ifndef NETDEV_TX_LOCKED
-#define NETDEV_TX_LOCKED -1
-#endif
-
-#ifndef __bitwise
-#define __bitwise
-#endif
-#endif /* < 2.6.9 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
-#ifdef module_param_array_named
-#undef module_param_array_named
-#define module_param_array_named(name, array, type, nump, perm) \
- static struct kparam_array __param_arr_##name \
- = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
- sizeof(array[0]), array }; \
- module_param_call(name, param_array_set, param_array_get, \
- &__param_arr_##name, perm)
-#endif /* module_param_array_named */
-/*
- * num_online_nodes() is broken for all < 2.6.10 kernels.  This is needed
- * to support the Node module parameter of ixgbe.
- */
-#undef num_online_nodes
-#define num_online_nodes(n) 1
-extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
-#undef node_online_map
-#define node_online_map _kcompat_node_online_map
-#define pci_get_class pci_find_class
-#endif /* < 2.6.10 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
-#define PCI_D0 0
-#define PCI_D1 1
-#define PCI_D2 2
-#define PCI_D3hot 3
-#define PCI_D3cold 4
-typedef int pci_power_t;
-#define pci_choose_state(pdev,state) state
-#define PMSG_SUSPEND 3
-#define PCI_EXP_LNKCTL 16
-
-#undef NETIF_F_LLTX
-
-#ifndef ARCH_HAS_PREFETCH
-#define prefetch(X)
-#endif
-
-#ifndef NET_IP_ALIGN
-#define NET_IP_ALIGN 2
-#endif
-
-#define KC_USEC_PER_SEC 1000000L
-#define usecs_to_jiffies _kc_usecs_to_jiffies
-static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
-{
-#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
- return (KC_USEC_PER_SEC / HZ) * j;
-#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
- return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
-#else
- return (j * KC_USEC_PER_SEC) / HZ;
-#endif
-}
-static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
-{
- if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
- return MAX_JIFFY_OFFSET;
-#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
- return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
-#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
- return m * (HZ / KC_USEC_PER_SEC);
-#else
- return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
-#endif
-}
-
-#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
-#define PCI_EXP_LNKSTA 18 /* Link Status */
-#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
-#define PCI_EXP_SLTCTL 24 /* Slot Control */
-#define PCI_EXP_SLTSTA 26 /* Slot Status */
-#define PCI_EXP_RTCTL 28 /* Root Control */
-#define PCI_EXP_RTCAP 30 /* Root Capabilities */
-#define PCI_EXP_RTSTA 32 /* Root Status */
-#endif /* < 2.6.11 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
-#include <linux/reboot.h>
-#define USE_REBOOT_NOTIFIER
-
-/* Generic MII registers. */
-#define MII_CTRL1000 0x09 /* 1000BASE-T control */
-#define MII_STAT1000 0x0a /* 1000BASE-T status */
-/* Advertisement control register. */
-#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
-#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
-/* Link partner ability register. */
-#define LPA_PAUSE_CAP 0x0400 /* Can pause */
-#define LPA_PAUSE_ASYM		0x0800	/* Can pause asymmetrically */
-/* 1000BASE-T Control register */
-#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
-#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
-/* 1000BASE-T Status register */
-#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
-#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
-
-#ifndef is_zero_ether_addr
-#define is_zero_ether_addr _kc_is_zero_ether_addr
-static inline int _kc_is_zero_ether_addr(const u8 *addr)
-{
- return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
-}
-#endif /* is_zero_ether_addr */
-#ifndef is_multicast_ether_addr
-#define is_multicast_ether_addr _kc_is_multicast_ether_addr
-static inline int _kc_is_multicast_ether_addr(const u8 *addr)
-{
- return addr[0] & 0x01;
-}
-#endif /* is_multicast_ether_addr */
-#endif /* < 2.6.12 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
-#ifndef kstrdup
-#define kstrdup _kc_kstrdup
-extern char *_kc_kstrdup(const char *s, unsigned int gfp);
-#endif
-#endif /* < 2.6.13 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
-#define pm_message_t u32
-#ifndef kzalloc
-#define kzalloc _kc_kzalloc
-extern void *_kc_kzalloc(size_t size, int flags);
-#endif
-
-/* Generic MII registers. */
-#define MII_ESTATUS 0x0f /* Extended Status */
-/* Basic mode status register. */
-#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
-/* Extended status register. */
-#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
-#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
-
-#define SUPPORTED_Pause (1 << 13)
-#define SUPPORTED_Asym_Pause (1 << 14)
-#define ADVERTISED_Pause (1 << 13)
-#define ADVERTISED_Asym_Pause (1 << 14)
-
-#if (!(RHEL_RELEASE_CODE && \
- (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \
- (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))))
-#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t))
-#define gfp_t unsigned
-#else
-typedef unsigned gfp_t;
-#endif
-#endif /* !RHEL4.3->RHEL5.0 */
-
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
-#ifdef CONFIG_X86_64
-#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \
- dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
-#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \
- dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
-#endif
-#endif
-#endif /* < 2.6.14 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
-#ifndef vmalloc_node
-#define vmalloc_node(a,b) vmalloc(a)
-#endif /* vmalloc_node*/
-
-#define setup_timer(_timer, _function, _data) \
-do { \
- (_timer)->function = _function; \
- (_timer)->data = _data; \
- init_timer(_timer); \
-} while (0)
-#ifndef device_can_wakeup
-#define device_can_wakeup(dev) (1)
-#endif
-#ifndef device_set_wakeup_enable
-#define device_set_wakeup_enable(dev, val) do{}while(0)
-#endif
-#ifndef device_init_wakeup
-#define device_init_wakeup(dev,val) do {} while (0)
-#endif
-static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
-{
- const u16 *a = (const u16 *) addr1;
- const u16 *b = (const u16 *) addr2;
-
- return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
-}
-#undef compare_ether_addr
-#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2)
-#endif /* < 2.6.15 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
-#undef DEFINE_MUTEX
-#define DEFINE_MUTEX(x) DECLARE_MUTEX(x)
-#define mutex_lock(x) down_interruptible(x)
-#define mutex_unlock(x) up(x)
-
-#ifndef ____cacheline_internodealigned_in_smp
-#ifdef CONFIG_SMP
-#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp
-#else
-#define ____cacheline_internodealigned_in_smp
-#endif /* CONFIG_SMP */
-#endif /* ____cacheline_internodealigned_in_smp */
-#undef HAVE_PCI_ERS
-#else /* 2.6.16 and above */
-#undef HAVE_PCI_ERS
-#define HAVE_PCI_ERS
-#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) )
-#ifdef device_can_wakeup
-#undef device_can_wakeup
-#endif /* device_can_wakeup */
-#define device_can_wakeup(dev) 1
-#endif /* SLE_VERSION(10,4,0) */
-#endif /* < 2.6.16 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
-#ifndef dev_notice
-#define dev_notice(dev, fmt, args...) \
- dev_printk(KERN_NOTICE, dev, fmt, ## args)
-#endif
-
-#ifndef first_online_node
-#define first_online_node 0
-#endif
-#ifndef NET_SKB_PAD
-#define NET_SKB_PAD 16
-#endif
-#endif /* < 2.6.17 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
-
-#ifndef IRQ_HANDLED
-#define irqreturn_t void
-#define IRQ_HANDLED
-#define IRQ_NONE
-#endif
-
-#ifndef IRQF_PROBE_SHARED
-#ifdef SA_PROBEIRQ
-#define IRQF_PROBE_SHARED SA_PROBEIRQ
-#else
-#define IRQF_PROBE_SHARED 0
-#endif
-#endif
-
-#ifndef IRQF_SHARED
-#define IRQF_SHARED SA_SHIRQ
-#endif
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
-#ifndef FIELD_SIZEOF
-#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
-#endif
-
-#ifndef skb_is_gso
-#ifdef NETIF_F_TSO
-#define skb_is_gso _kc_skb_is_gso
-static inline int _kc_skb_is_gso(const struct sk_buff *skb)
-{
- return skb_shinfo(skb)->gso_size;
-}
-#else
-#define skb_is_gso(a) 0
-#endif
-#endif
-
-#ifndef resource_size_t
-#define resource_size_t unsigned long
-#endif
-
-#ifdef skb_pad
-#undef skb_pad
-#endif
-#define skb_pad(x,y) _kc_skb_pad(x, y)
-int _kc_skb_pad(struct sk_buff *skb, int pad);
-#ifdef skb_padto
-#undef skb_padto
-#endif
-#define skb_padto(x,y) _kc_skb_padto(x, y)
-static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
-{
- unsigned int size = skb->len;
- if(likely(size >= len))
- return 0;
- return _kc_skb_pad(skb, len - size);
-}
-
-#ifndef DECLARE_PCI_UNMAP_ADDR
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
- dma_addr_t ADDR_NAME
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
- u32 LEN_NAME
-#define pci_unmap_addr(PTR, ADDR_NAME) \
- ((PTR)->ADDR_NAME)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
- (((PTR)->ADDR_NAME) = (VAL))
-#define pci_unmap_len(PTR, LEN_NAME) \
- ((PTR)->LEN_NAME)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
- (((PTR)->LEN_NAME) = (VAL))
-#endif /* DECLARE_PCI_UNMAP_ADDR */
-#endif /* < 2.6.18 */
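
These macros exist so that configurations without an IOMMU can define them
away and drop the bookkeeping fields entirely, which is why drivers never
touch the fields directly. A self-contained usage sketch with stand-in types
(tx_buffer and the constants are invented for illustration):

    #include <stdio.h>

    typedef unsigned long long dma_addr_t;  /* stand-in for the kernel type */
    typedef unsigned int u32;

    #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)       dma_addr_t ADDR_NAME
    #define DECLARE_PCI_UNMAP_LEN(LEN_NAME)         u32 LEN_NAME
    #define pci_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
    #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
    #define pci_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
    #define pci_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))

    /* buffer-tracking struct, declared the way a driver would */
    struct tx_buffer {
            DECLARE_PCI_UNMAP_ADDR(mapping);
            DECLARE_PCI_UNMAP_LEN(len);
    };

    int main(void)
    {
            struct tx_buffer buf;
            pci_unmap_addr_set(&buf, mapping, 0x1000);      /* saved at map time */
            pci_unmap_len_set(&buf, len, 256);
            printf("addr=%#llx len=%u\n",
                   pci_unmap_addr(&buf, mapping), pci_unmap_len(&buf, len));
            return 0;
    }
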
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
-
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0)))
-#define i_private u.generic_ip
-#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.0) */
-
-#ifndef DIV_ROUND_UP
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-#endif
-#ifndef __ALIGN_MASK
-#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
-#endif
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
-#if (!((RHEL_RELEASE_CODE && \
- ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
- RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
- (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0))))))
-typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
-#endif
-#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
-#undef CONFIG_INET_LRO
-#undef CONFIG_INET_LRO_MODULE
-#ifdef IXGBE_FCOE
-#undef CONFIG_FCOE
-#undef CONFIG_FCOE_MODULE
-#endif /* IXGBE_FCOE */
-#endif
-typedef irqreturn_t (*new_handler_t)(int, void*);
-static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
-#else /* 2.4.x */
-typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
-typedef void (*new_handler_t)(int, void*);
-static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
-#endif /* >= 2.5.x */
-{
- irq_handler_t new_handler = (irq_handler_t) handler;
- return request_irq(irq, new_handler, flags, devname, dev_id);
-}
-
-#undef request_irq
-#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
-
-#define irq_handler_t new_handler_t
-/* pci_restore_state and pci_save_state handle MSI/PCIe from 2.6.19 */
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
-#define PCIE_CONFIG_SPACE_LEN 256
-#define PCI_CONFIG_SPACE_LEN 64
-#define PCIE_LINK_STATUS 0x12
-#define pci_config_space_ich8lan() do {} while(0)
-#undef pci_save_state
-extern int _kc_pci_save_state(struct pci_dev *);
-#define pci_save_state(pdev) _kc_pci_save_state(pdev)
-#undef pci_restore_state
-extern void _kc_pci_restore_state(struct pci_dev *);
-#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
-#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
-
-#ifdef HAVE_PCI_ERS
-#undef free_netdev
-extern void _kc_free_netdev(struct net_device *);
-#define free_netdev(netdev) _kc_free_netdev(netdev)
-#endif
-static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
-{
- return 0;
-}
-#define pci_disable_pcie_error_reporting(dev) do {} while (0)
-#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
-
-extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
-#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
-#ifndef bool
-#define bool _Bool
-#define true 1
-#define false 0
-#endif
-#else /* 2.6.19 */
-#include <linux/aer.h>
-#include <linux/string.h>
-#endif /* < 2.6.19 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
-#undef INIT_WORK
-#define INIT_WORK(_work, _func) \
-do { \
- INIT_LIST_HEAD(&(_work)->entry); \
- (_work)->pending = 0; \
- (_work)->func = (void (*)(void *))_func; \
- (_work)->data = _work; \
- init_timer(&(_work)->timer); \
-} while (0)
-#endif
-
-#ifndef PCI_VDEVICE
-#define PCI_VDEVICE(ven, dev) \
- PCI_VENDOR_ID_##ven, (dev), \
- PCI_ANY_ID, PCI_ANY_ID, 0, 0
-#endif
-
-#ifndef PCI_VENDOR_ID_INTEL
-#define PCI_VENDOR_ID_INTEL 0x8086
-#endif
-
-#ifndef round_jiffies
-#define round_jiffies(x) x
-#endif
-
-#define csum_offset csum
-
-#define HAVE_EARLY_VMALLOC_NODE
-#define dev_to_node(dev) -1
-#undef set_dev_node
-/* self-assign b to silence the compiler warning for the unused variable */
-#define set_dev_node(a, b) do { (b) = (b); } while(0)
-
-#if (!(RHEL_RELEASE_CODE && \
- (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
- (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
- (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \
- !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
-typedef __u16 __bitwise __sum16;
-typedef __u32 __bitwise __wsum;
-#endif
-
-#if (!(RHEL_RELEASE_CODE && \
- (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
- (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
- (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \
- !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
-static inline __wsum csum_unfold(__sum16 n)
-{
- return (__force __wsum)n;
-}
-#endif
-
-#else /* < 2.6.20 */
-#define HAVE_DEVICE_NUMA_NODE
-#endif /* < 2.6.20 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
-#define to_net_dev(class) container_of(class, struct net_device, class_dev)
-#define NETDEV_CLASS_DEV
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
-#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
-#define vlan_group_set_device(vg, id, dev) \
- do { \
- if (vg) vg->vlan_devices[id] = dev; \
- } while (0)
-#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
-#define pci_channel_offline(pdev) (pdev->error_state && \
- pdev->error_state != pci_channel_io_normal)
-#define pci_request_selected_regions(pdev, bars, name) \
- pci_request_regions(pdev, name)
-#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev)
-
-#ifndef __aligned
-#define __aligned(x) __attribute__((aligned(x)))
-#endif
-
-extern struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev);
-#define netdev_to_dev(netdev) \
- pci_dev_to_dev(_kc_netdev_to_pdev(netdev))
-#else
-static inline struct device *netdev_to_dev(struct net_device *netdev)
-{
- return &netdev->dev;
-}
-
-#endif /* < 2.6.21 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
-#define tcp_hdr(skb) (skb->h.th)
-#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
-#define skb_transport_offset(skb) (skb->h.raw - skb->data)
-#define skb_transport_header(skb) (skb->h.raw)
-#define ipv6_hdr(skb) (skb->nh.ipv6h)
-#define ip_hdr(skb) (skb->nh.iph)
-#define skb_network_offset(skb) (skb->nh.raw - skb->data)
-#define skb_network_header(skb) (skb->nh.raw)
-#define skb_tail_pointer(skb) skb->tail
-#define skb_reset_tail_pointer(skb) \
- do { \
- skb->tail = skb->data; \
- } while (0)
-#define skb_set_tail_pointer(skb, offset) \
- do { \
- skb->tail = skb->data + offset; \
- } while (0)
-#define skb_copy_to_linear_data(skb, from, len) \
- memcpy(skb->data, from, len)
-#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
- memcpy(skb->data + offset, from, len)
-#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
-#define pci_register_driver pci_module_init
-#define skb_mac_header(skb) skb->mac.raw
-
-#ifdef NETIF_F_MULTI_QUEUE
-#ifndef alloc_etherdev_mq
-#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
-#endif
-#endif /* NETIF_F_MULTI_QUEUE */
-
-#ifndef ETH_FCS_LEN
-#define ETH_FCS_LEN 4
-#endif
-#define cancel_work_sync(x) flush_scheduled_work()
-#ifndef udp_hdr
-#define udp_hdr _udp_hdr
-static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
-{
- return (struct udphdr *)skb_transport_header(skb);
-}
-#endif
-
-#ifdef cpu_to_be16
-#undef cpu_to_be16
-#endif
-#define cpu_to_be16(x) __constant_htons(x)
-
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
-enum {
- DUMP_PREFIX_NONE,
- DUMP_PREFIX_ADDRESS,
- DUMP_PREFIX_OFFSET
-};
-#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
-#ifndef hex_asc
-#define hex_asc(x) "0123456789abcdef"[x]
-#endif
-#include <linux/ctype.h>
-extern void _kc_print_hex_dump(const char *level, const char *prefix_str,
- int prefix_type, int rowsize, int groupsize,
- const void *buf, size_t len, bool ascii);
-#define print_hex_dump(lvl, s, t, r, g, b, l, a) \
- _kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
-#ifndef ADVERTISED_2500baseX_Full
-#define ADVERTISED_2500baseX_Full (1 << 15)
-#endif
-#ifndef SUPPORTED_2500baseX_Full
-#define SUPPORTED_2500baseX_Full (1 << 15)
-#endif
-
-#ifdef HAVE_I2C_SUPPORT
-#include <linux/i2c.h>
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
-struct i2c_board_info {
- char driver_name[KOBJ_NAME_LEN];
- char type[I2C_NAME_SIZE];
- unsigned short flags;
- unsigned short addr;
- void *platform_data;
-};
-#define I2C_BOARD_INFO(driver, dev_addr) .driver_name = (driver),\
- .addr = (dev_addr)
-#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
-#define i2c_new_device(adap, info) _kc_i2c_new_device(adap, info)
-extern struct i2c_client *
-_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
-#endif /* HAVE_I2C_SUPPORT */
-
-#else /* 2.6.22 */
-#define ETH_TYPE_TRANS_SETS_DEV
-#define HAVE_NETDEV_STATS_IN_NETDEV
-#endif /* < 2.6.22 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
-#undef SET_MODULE_OWNER
-#define SET_MODULE_OWNER(dev) do { } while (0)
-#endif /* > 2.6.22 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
-#define netif_subqueue_stopped(_a, _b) 0
-#ifndef PTR_ALIGN
-#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
-#endif
-
-#ifndef CONFIG_PM_SLEEP
-#define CONFIG_PM_SLEEP CONFIG_PM
-#endif
-
-#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
-#define HAVE_ETHTOOL_GET_PERM_ADDR
-#endif /* 2.6.14 through 2.6.22 */
-#endif /* < 2.6.23 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
-#ifndef ETH_FLAG_LRO
-#define ETH_FLAG_LRO NETIF_F_LRO
-#endif
-
-/* if GRO is supported then the napi struct must already exist */
-#ifndef NETIF_F_GRO
-/* NAPI API changes in 2.6.24 break everything */
-struct napi_struct {
- /* used to look up the real NAPI polling routine */
- int (*poll)(struct napi_struct *, int);
- struct net_device *dev;
- int weight;
-};
-#endif
-
-#ifdef NAPI
-extern int __kc_adapter_clean(struct net_device *, int *);
-extern struct net_device *napi_to_poll_dev(const struct napi_struct *napi);
-#define netif_napi_add(_netdev, _napi, _poll, _weight) \
- do { \
- struct napi_struct *__napi = (_napi); \
- struct net_device *poll_dev = napi_to_poll_dev(__napi); \
- poll_dev->poll = &(__kc_adapter_clean); \
- poll_dev->priv = (_napi); \
- poll_dev->weight = (_weight); \
- set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); \
- set_bit(__LINK_STATE_START, &poll_dev->state);\
- dev_hold(poll_dev); \
- __napi->poll = &(_poll); \
- __napi->weight = (_weight); \
- __napi->dev = (_netdev); \
- } while (0)
-#define netif_napi_del(_napi) \
- do { \
- struct net_device *poll_dev = napi_to_poll_dev(_napi); \
- WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); \
- dev_put(poll_dev); \
- memset(poll_dev, 0, sizeof(struct net_device));\
- } while (0)
-#define napi_schedule_prep(_napi) \
- (netif_running((_napi)->dev) && netif_rx_schedule_prep(napi_to_poll_dev(_napi)))
-#define napi_schedule(_napi) \
- do { \
- if (napi_schedule_prep(_napi)) \
- __netif_rx_schedule(napi_to_poll_dev(_napi)); \
- } while (0)
-#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
-#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
-#ifdef CONFIG_SMP
-static inline void napi_synchronize(const struct napi_struct *n)
-{
- struct net_device *dev = napi_to_poll_dev(n);
-
- while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
- /* No hurry. */
- msleep(1);
- }
-}
-#else
-#define napi_synchronize(n) barrier()
-#endif /* CONFIG_SMP */
-#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
-#ifndef NETIF_F_GRO
-#define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi))
-#else
-#define napi_complete(_napi) \
- do { \
- napi_gro_flush(_napi); \
- netif_rx_complete(napi_to_poll_dev(_napi)); \
- } while (0)
-#endif /* NETIF_F_GRO */
-#else /* NAPI */
-#define netif_napi_add(_netdev, _napi, _poll, _weight) \
- do { \
- struct napi_struct *__napi = _napi; \
- _netdev->poll = &(_poll); \
- _netdev->weight = (_weight); \
- __napi->poll = &(_poll); \
- __napi->weight = (_weight); \
- __napi->dev = (_netdev); \
- } while (0)
-#define netif_napi_del(_a) do {} while (0)
-#endif /* NAPI */
-
-#undef dev_get_by_name
-#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
-#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
-#ifndef DMA_BIT_MASK
-#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
-#endif
-
-#ifdef NETIF_F_TSO6
-#define skb_is_gso_v6 _kc_skb_is_gso_v6
-static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
-{
- return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
-}
-#endif /* NETIF_F_TSO6 */
-
-#ifndef KERN_CONT
-#define KERN_CONT ""
-#endif
-#ifndef pr_err
-#define pr_err(fmt, arg...) \
- printk(KERN_ERR fmt, ##arg)
-#endif
-#else /* < 2.6.24 */
-#define HAVE_ETHTOOL_GET_SSET_COUNT
-#define HAVE_NETDEV_NAPI_LIST
-#endif /* < 2.6.24 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
-#include <linux/pm_qos_params.h>
-#else /* >= 3.2.0 */
-#include <linux/pm_qos.h>
-#endif /* else >= 3.2.0 */
-#endif /* > 2.6.24 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
-#define PM_QOS_CPU_DMA_LATENCY 1
-
-#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
-#include <linux/latency.h>
-#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
-#define pm_qos_add_requirement(pm_qos_class, name, value) \
- set_acceptable_latency(name, value)
-#define pm_qos_remove_requirement(pm_qos_class, name) \
- remove_acceptable_latency(name)
-#define pm_qos_update_requirement(pm_qos_class, name, value) \
- modify_acceptable_latency(name, value)
-#else
-#define PM_QOS_DEFAULT_VALUE -1
-#define pm_qos_add_requirement(pm_qos_class, name, value)
-#define pm_qos_remove_requirement(pm_qos_class, name)
-#define pm_qos_update_requirement(pm_qos_class, name, value) { \
- if (value != PM_QOS_DEFAULT_VALUE) { \
- printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
- pci_name(adapter->pdev)); \
- } \
-}
-
-#endif /* > 2.6.18 */
-
-#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
-
-#ifndef DEFINE_PCI_DEVICE_TABLE
-#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
-#endif /* DEFINE_PCI_DEVICE_TABLE */
-
-
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
-#ifndef IGB_PROCFS
-#define IGB_PROCFS
-#endif /* IGB_PROCFS */
-#endif /* >= 2.6.0 */
-
-#else /* < 2.6.25 */
-
-
-#if IS_ENABLED(CONFIG_HWMON)
-#ifndef IGB_HWMON
-#define IGB_HWMON
-#endif /* IGB_HWMON */
-#endif /* CONFIG_HWMON */
-
-#endif /* < 2.6.25 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
-#ifndef clamp_t
-#define clamp_t(type, val, min, max) ({ \
- type __val = (val); \
- type __min = (min); \
- type __max = (max); \
- __val = __val < __min ? __min : __val; \
- __val > __max ? __max : __val; })
-#endif /* clamp_t */
-#undef kzalloc_node
-#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
-
-extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
-#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
-#else /* < 2.6.26 */
-#include <linux/pci-aspm.h>
-#define HAVE_NETDEV_VLAN_FEATURES
-#ifndef PCI_EXP_LNKCAP_ASPMS
-#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
-#endif /* PCI_EXP_LNKCAP_ASPMS */
-#endif /* < 2.6.26 */
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
-static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
- __u32 speed)
-{
- ep->speed = (__u16)speed;
- /* ep->speed_hi = (__u16)(speed >> 16); */
-}
-#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
-
-static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
-{
- /* no speed_hi before 2.6.27, and probably no need for it yet */
- return (__u32)ep->speed;
-}
-#define ethtool_cmd_speed _kc_ethtool_cmd_speed
-
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
-#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM))
-#define ANCIENT_PM 1
-#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \
- defined(CONFIG_PM_SLEEP))
-#define NEWER_PM 1
-#endif
-#if defined(ANCIENT_PM) || defined(NEWER_PM)
-#undef device_set_wakeup_enable
-#define device_set_wakeup_enable(dev, val) \
- do { \
- u16 pmc = 0; \
- int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
- if (pm) { \
- pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
- &pmc); \
- } \
- (dev)->power.can_wakeup = !!(pmc >> 11); \
- (dev)->power.should_wakeup = (val && (pmc >> 11)); \
- } while (0)
-#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
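-/* Note on the fallback above (illustrative, not in the original header):
- * bits 15:11 of the PCI PM Capabilities (PMC) register are the PME_Support
- * field, so (pmc >> 11) is non-zero exactly when the function can assert
- * PME# from at least one power state. The macro also assumes an 'adapter'
- * with a 'pdev' member is in scope at every call site. */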
-#endif /* 2.6.15 through 2.6.27 */
-#ifndef netif_napi_del
-#define netif_napi_del(_a) do {} while (0)
-#ifdef NAPI
-#ifdef CONFIG_NETPOLL
-#undef netif_napi_del
-#define netif_napi_del(_a) list_del(&(_a)->dev_list);
-#endif
-#endif
-#endif /* netif_napi_del */
-#ifdef dma_mapping_error
-#undef dma_mapping_error
-#endif
-#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
-
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
-#define HAVE_TX_MQ
-#endif
-
-#ifdef HAVE_TX_MQ
-extern void _kc_netif_tx_stop_all_queues(struct net_device *);
-extern void _kc_netif_tx_wake_all_queues(struct net_device *);
-extern void _kc_netif_tx_start_all_queues(struct net_device *);
-#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
-#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
-#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
-#undef netif_stop_subqueue
-#define netif_stop_subqueue(_ndev,_qi) do { \
- if (netif_is_multiqueue((_ndev))) \
- netif_stop_subqueue((_ndev), (_qi)); \
- else \
- netif_stop_queue((_ndev)); \
- } while (0)
-#undef netif_start_subqueue
-#define netif_start_subqueue(_ndev,_qi) do { \
- if (netif_is_multiqueue((_ndev))) \
- netif_start_subqueue((_ndev), (_qi)); \
- else \
- netif_start_queue((_ndev)); \
- } while (0)
-#else /* HAVE_TX_MQ */
-#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
-#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
-#define netif_tx_start_all_queues(a) netif_start_queue(a)
-#else
-#define netif_tx_start_all_queues(a) do {} while (0)
-#endif
-#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
-#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
-#endif /* HAVE_TX_MQ */
-#ifndef NETIF_F_MULTI_QUEUE
-#define NETIF_F_MULTI_QUEUE 0
-#define netif_is_multiqueue(a) 0
-#define netif_wake_subqueue(a, b)
-#endif /* NETIF_F_MULTI_QUEUE */
-
-#ifndef __WARN_printf
-extern void __kc_warn_slowpath(const char *file, const int line,
- const char *fmt, ...) __attribute__((format(printf, 3, 4)));
-#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg)
-#endif /* __WARN_printf */
-
-#ifndef WARN
-#define WARN(condition, format...) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) \
- __WARN_printf(format); \
- unlikely(__ret_warn_on); \
-})
-#endif /* WARN */
-#undef HAVE_IXGBE_DEBUG_FS
-#undef HAVE_IGB_DEBUG_FS
-#else /* < 2.6.27 */
-#define HAVE_TX_MQ
-#define HAVE_NETDEV_SELECT_QUEUE
-#ifdef CONFIG_DEBUG_FS
-#define HAVE_IXGBE_DEBUG_FS
-#define HAVE_IGB_DEBUG_FS
-#endif /* CONFIG_DEBUG_FS */
-#endif /* < 2.6.27 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
-#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \
- pci_resource_len(pdev, bar))
-#define pci_wake_from_d3 _kc_pci_wake_from_d3
-#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
-extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
-extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
-#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
-#ifndef __skb_queue_head_init
-static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
-{
- list->prev = list->next = (struct sk_buff *)list;
- list->qlen = 0;
-}
-#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
-#endif
-
-#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
-#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
-
-#endif /* < 2.6.28 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
-#ifndef swap
-#define swap(a, b) \
- do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
-#endif
-#define pci_request_selected_regions_exclusive(pdev, bars, name) \
- pci_request_selected_regions(pdev, bars, name)
-#ifndef CONFIG_NR_CPUS
-#define CONFIG_NR_CPUS 1
-#endif /* CONFIG_NR_CPUS */
-#ifndef pcie_aspm_enabled
-#define pcie_aspm_enabled() (1)
-#endif /* pcie_aspm_enabled */
-
-#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */
-
-#ifndef pci_clear_master
-extern void _kc_pci_clear_master(struct pci_dev *dev);
-#define pci_clear_master(dev) _kc_pci_clear_master(dev)
-#endif
-
-#ifndef PCI_EXP_LNKCTL_ASPMC
-#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
-#endif
-#else /* < 2.6.29 */
-#ifndef HAVE_NET_DEVICE_OPS
-#define HAVE_NET_DEVICE_OPS
-#endif
-#ifdef CONFIG_DCB
-#define HAVE_PFC_MODE_ENABLE
-#endif /* CONFIG_DCB */
-#endif /* < 2.6.29 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
-#define skb_rx_queue_recorded(a) false
-#define skb_get_rx_queue(a) 0
-#define skb_record_rx_queue(a, b) do {} while (0)
-#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues)
-#ifndef CONFIG_PCI_IOV
-#undef pci_enable_sriov
-#define pci_enable_sriov(a, b) -ENOTSUPP
-#undef pci_disable_sriov
-#define pci_disable_sriov(a) do {} while (0)
-#endif /* CONFIG_PCI_IOV */
-#ifndef pr_cont
-#define pr_cont(fmt, ...) \
- printk(KERN_CONT fmt, ##__VA_ARGS__)
-#endif /* pr_cont */
-static inline void _kc_synchronize_irq(unsigned int a)
-{
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
- synchronize_irq();
-#else /* < 2.5.28 */
- synchronize_irq(a);
-#endif /* < 2.5.28 */
-}
-#undef synchronize_irq
-#define synchronize_irq(a) _kc_synchronize_irq(a)
-
-#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
-
-#else /* < 2.6.30 */
-#define HAVE_ASPM_QUIRKS
-#endif /* < 2.6.30 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
-#define ETH_P_1588 0x88F7
-#define ETH_P_FIP 0x8914
-#ifndef netdev_uc_count
-#define netdev_uc_count(dev) ((dev)->uc_count)
-#endif
-#ifndef netdev_for_each_uc_addr
-#define netdev_for_each_uc_addr(uclist, dev) \
- for (uclist = dev->uc_list; uclist; uclist = uclist->next)
-#endif
-#ifndef PORT_OTHER
-#define PORT_OTHER 0xff
-#endif
-#ifndef MDIO_PHY_ID_PRTAD
-#define MDIO_PHY_ID_PRTAD 0x03e0
-#endif
-#ifndef MDIO_PHY_ID_DEVAD
-#define MDIO_PHY_ID_DEVAD 0x001f
-#endif
-#ifndef skb_dst
-#define skb_dst(s) ((s)->dst)
-#endif
-
-#ifndef SUPPORTED_1000baseKX_Full
-#define SUPPORTED_1000baseKX_Full (1 << 17)
-#endif
-#ifndef SUPPORTED_10000baseKX4_Full
-#define SUPPORTED_10000baseKX4_Full (1 << 18)
-#endif
-#ifndef SUPPORTED_10000baseKR_Full
-#define SUPPORTED_10000baseKR_Full (1 << 19)
-#endif
-
-#ifndef ADVERTISED_1000baseKX_Full
-#define ADVERTISED_1000baseKX_Full (1 << 17)
-#endif
-#ifndef ADVERTISED_10000baseKX4_Full
-#define ADVERTISED_10000baseKX4_Full (1 << 18)
-#endif
-#ifndef ADVERTISED_10000baseKR_Full
-#define ADVERTISED_10000baseKR_Full (1 << 19)
-#endif
-
-#else /* < 2.6.31 */
-#ifndef HAVE_NETDEV_STORAGE_ADDRESS
-#define HAVE_NETDEV_STORAGE_ADDRESS
-#endif
-#ifndef HAVE_NETDEV_HW_ADDR
-#define HAVE_NETDEV_HW_ADDR
-#endif
-#ifndef HAVE_TRANS_START_IN_QUEUE
-#define HAVE_TRANS_START_IN_QUEUE
-#endif
-#ifndef HAVE_INCLUDE_LINUX_MDIO_H
-#define HAVE_INCLUDE_LINUX_MDIO_H
-#endif
-#endif /* < 2.6.31 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
-#undef netdev_tx_t
-#define netdev_tx_t int
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-#ifndef NETIF_F_FCOE_MTU
-#define NETIF_F_FCOE_MTU (1 << 26)
-#endif
-#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
-
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
-static inline int _kc_pm_runtime_get_sync()
-{
- return 1;
-}
-#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync()
-#else /* 2.6.0 => 2.6.32 */
-static inline int _kc_pm_runtime_get_sync(struct device *dev)
-{
- return 1;
-}
-#ifndef pm_runtime_get_sync
-#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev)
-#endif
-#endif /* 2.6.0 => 2.6.32 */
-#ifndef pm_runtime_put
-#define pm_runtime_put(dev) do {} while (0)
-#endif
-#ifndef pm_runtime_put_sync
-#define pm_runtime_put_sync(dev) do {} while (0)
-#endif
-#ifndef pm_runtime_resume
-#define pm_runtime_resume(dev) do {} while (0)
-#endif
-#ifndef pm_schedule_suspend
-#define pm_schedule_suspend(dev, t) do {} while (0)
-#endif
-#ifndef pm_runtime_set_suspended
-#define pm_runtime_set_suspended(dev) do {} while (0)
-#endif
-#ifndef pm_runtime_disable
-#define pm_runtime_disable(dev) do {} while (0)
-#endif
-#ifndef pm_runtime_put_noidle
-#define pm_runtime_put_noidle(dev) do {} while (0)
-#endif
-#ifndef pm_runtime_set_active
-#define pm_runtime_set_active(dev) do {} while (0)
-#endif
-#ifndef pm_runtime_enable
-#define pm_runtime_enable(dev) do {} while (0)
-#endif
-#ifndef pm_runtime_get_noresume
-#define pm_runtime_get_noresume(dev) do {} while (0)
-#endif
-#else /* < 2.6.32 */
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
-#define HAVE_NETDEV_OPS_FCOE_ENABLE
-#endif
-#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
-#ifdef CONFIG_DCB
-#ifndef HAVE_DCBNL_OPS_GETAPP
-#define HAVE_DCBNL_OPS_GETAPP
-#endif
-#endif /* CONFIG_DCB */
-#include <linux/pm_runtime.h>
-/* IOV bad DMA target workarounds require at least this kernel rev support */
-#define HAVE_PCIE_TYPE
-#endif /* < 2.6.32 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
-#ifndef pci_pcie_cap
-#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
-#endif
-#ifndef IPV4_FLOW
-#define IPV4_FLOW 0x10
-#endif /* IPV4_FLOW */
-#ifndef IPV6_FLOW
-#define IPV6_FLOW 0x11
-#endif /* IPV6_FLOW */
-/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
-#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \
- (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) )
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
-#define HAVE_NETDEV_OPS_FCOE_GETWWN
-#endif
-#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
-#endif /* RHEL6 or SLES11 SP1 */
-#ifndef __percpu
-#define __percpu
-#endif /* __percpu */
-#ifndef PORT_DA
-#define PORT_DA PORT_OTHER
-#endif
-#ifndef PORT_NONE
-#define PORT_NONE PORT_OTHER
-#endif
-
-#if ((RHEL_RELEASE_CODE && \
- (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \
- (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))
-#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE)
-#undef DEFINE_DMA_UNMAP_ADDR
-#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
-#undef DEFINE_DMA_UNMAP_LEN
-#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
-#undef dma_unmap_addr
-#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
-#undef dma_unmap_addr_set
-#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
-#undef dma_unmap_len
-#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
-#undef dma_unmap_len_set
-#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
-#endif /* !CONFIG_X86_32 && !CONFIG_NEED_DMA_MAP_STATE */
-#endif /* RHEL_RELEASE_CODE */
-
-#if (!(RHEL_RELEASE_CODE && \
- (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \
- (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \
- ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \
- (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))))
-static inline bool pci_is_pcie(struct pci_dev *dev)
-{
- return !!pci_pcie_cap(dev);
-}
-#endif /* RHEL_RELEASE_CODE */
-
-#ifndef __always_unused
-#define __always_unused __attribute__((__unused__))
-#endif
-#ifndef __maybe_unused
-#define __maybe_unused __attribute__((__unused__))
-#endif
-
-#if (!(RHEL_RELEASE_CODE && \
- (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))))
-#define sk_tx_queue_get(_sk) (-1)
-#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0)
-#endif /* !(RHEL >= 6.2) */
-
-#if (RHEL_RELEASE_CODE && \
- (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
- (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
-#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
-#define HAVE_ETHTOOL_SET_PHYS_ID
-#define HAVE_ETHTOOL_GET_TS_INFO
-#endif /* RHEL >= 6.4 && RHEL < 7.0 */
-
-#if (RHEL_RELEASE_CODE && \
- (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \
- (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
-#define HAVE_RHEL6_NETDEV_OPS_EXT_FDB
-#endif /* RHEL >= 6.5 && RHEL < 7.0 */
-
-#else /* < 2.6.33 */
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
-#define HAVE_NETDEV_OPS_FCOE_GETWWN
-#endif
-#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
-#endif /* < 2.6.33 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
-#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
-#ifndef pci_num_vf
-#define pci_num_vf(pdev) _kc_pci_num_vf(pdev)
-extern int _kc_pci_num_vf(struct pci_dev *dev);
-#endif
-#endif /* RHEL_RELEASE_CODE */
-
-#ifndef ETH_FLAG_NTUPLE
-#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
-#endif
-
-#ifndef netdev_mc_count
-#define netdev_mc_count(dev) ((dev)->mc_count)
-#endif
-#ifndef netdev_mc_empty
-#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
-#endif
-#ifndef netdev_for_each_mc_addr
-#define netdev_for_each_mc_addr(mclist, dev) \
- for (mclist = dev->mc_list; mclist; mclist = mclist->next)
-#endif
-#ifndef netdev_uc_count
-#define netdev_uc_count(dev) ((dev)->uc.count)
-#endif
-#ifndef netdev_uc_empty
-#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0)
-#endif
-#ifndef netdev_for_each_uc_addr
-#define netdev_for_each_uc_addr(ha, dev) \
- list_for_each_entry(ha, &dev->uc.list, list)
-#endif
-#ifndef dma_set_coherent_mask
-#define dma_set_coherent_mask(dev,mask) \
- pci_set_consistent_dma_mask(to_pci_dev(dev),(mask))
-#endif
-#ifndef pci_dev_run_wake
-#define pci_dev_run_wake(pdev) (0)
-#endif
-
-/* netdev logging taken from include/linux/netdevice.h */
-#ifndef netdev_name
-static inline const char *_kc_netdev_name(const struct net_device *dev)
-{
- if (dev->reg_state != NETREG_REGISTERED)
- return "(unregistered net_device)";
- return dev->name;
-}
-#define netdev_name(netdev) _kc_netdev_name(netdev)
-#endif /* netdev_name */
-
-#undef netdev_printk
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
-#define netdev_printk(level, netdev, format, args...) \
-do { \
- struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \
- printk(level "%s: " format, pci_name(pdev), ##args); \
-} while(0)
-#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
-#define netdev_printk(level, netdev, format, args...) \
-do { \
- struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \
- struct device *dev = pci_dev_to_dev(pdev); \
- dev_printk(level, dev, "%s: " format, \
- netdev_name(netdev), ##args); \
-} while(0)
-#else /* 2.6.21 => 2.6.34 */
-#define netdev_printk(level, netdev, format, args...) \
- dev_printk(level, (netdev)->dev.parent, \
- "%s: " format, \
- netdev_name(netdev), ##args)
-#endif /* <2.6.0 <2.6.21 <2.6.34 */
-#undef netdev_emerg
-#define netdev_emerg(dev, format, args...) \
- netdev_printk(KERN_EMERG, dev, format, ##args)
-#undef netdev_alert
-#define netdev_alert(dev, format, args...) \
- netdev_printk(KERN_ALERT, dev, format, ##args)
-#undef netdev_crit
-#define netdev_crit(dev, format, args...) \
- netdev_printk(KERN_CRIT, dev, format, ##args)
-#undef netdev_err
-#define netdev_err(dev, format, args...) \
- netdev_printk(KERN_ERR, dev, format, ##args)
-#undef netdev_warn
-#define netdev_warn(dev, format, args...) \
- netdev_printk(KERN_WARNING, dev, format, ##args)
-#undef netdev_notice
-#define netdev_notice(dev, format, args...) \
- netdev_printk(KERN_NOTICE, dev, format, ##args)
-#undef netdev_info
-#define netdev_info(dev, format, args...) \
- netdev_printk(KERN_INFO, dev, format, ##args)
-#undef netdev_dbg
-#if defined(DEBUG)
-#define netdev_dbg(__dev, format, args...) \
- netdev_printk(KERN_DEBUG, __dev, format, ##args)
-#elif defined(CONFIG_DYNAMIC_DEBUG)
-#define netdev_dbg(__dev, format, args...) \
-do { \
- dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
- netdev_name(__dev), ##args); \
-} while (0)
-#else /* DEBUG */
-#define netdev_dbg(__dev, format, args...) \
-({ \
- if (0) \
- netdev_printk(KERN_DEBUG, __dev, format, ##args); \
- 0; \
-})
-#endif /* DEBUG */
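-/* Note (illustrative): the 'if (0)' form above makes netdev_dbg() compile
- * to nothing when neither DEBUG nor CONFIG_DYNAMIC_DEBUG is set, while
- * still letting the compiler type-check the format string and arguments. */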
-
-#undef netif_printk
-#define netif_printk(priv, type, level, dev, fmt, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- netdev_printk(level, (dev), fmt, ##args); \
-} while (0)
-
-#undef netif_emerg
-#define netif_emerg(priv, type, dev, fmt, args...) \
- netif_level(emerg, priv, type, dev, fmt, ##args)
-#undef netif_alert
-#define netif_alert(priv, type, dev, fmt, args...) \
- netif_level(alert, priv, type, dev, fmt, ##args)
-#undef netif_crit
-#define netif_crit(priv, type, dev, fmt, args...) \
- netif_level(crit, priv, type, dev, fmt, ##args)
-#undef netif_err
-#define netif_err(priv, type, dev, fmt, args...) \
- netif_level(err, priv, type, dev, fmt, ##args)
-#undef netif_warn
-#define netif_warn(priv, type, dev, fmt, args...) \
- netif_level(warn, priv, type, dev, fmt, ##args)
-#undef netif_notice
-#define netif_notice(priv, type, dev, fmt, args...) \
- netif_level(notice, priv, type, dev, fmt, ##args)
-#undef netif_info
-#define netif_info(priv, type, dev, fmt, args...) \
- netif_level(info, priv, type, dev, fmt, ##args)
-#undef netif_dbg
-#define netif_dbg(priv, type, dev, fmt, args...) \
- netif_level(dbg, priv, type, dev, fmt, ##args)
-
-#ifdef SET_SYSTEM_SLEEP_PM_OPS
-#define HAVE_SYSTEM_SLEEP_PM_OPS
-#endif
-
-#ifndef for_each_set_bit
-#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
-#endif /* for_each_set_bit */
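-/* Usage sketch (illustrative, not part of the original header):
- *   unsigned long map = 0x5;        bits 0 and 2 set
- *   int bit;
- *   for_each_set_bit(bit, &map, BITS_PER_LONG)
- *           do_something(bit);      visits bit == 0, then bit == 2
- */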
-
-#ifndef DEFINE_DMA_UNMAP_ADDR
-#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
-#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
-#define dma_unmap_addr pci_unmap_addr
-#define dma_unmap_addr_set pci_unmap_addr_set
-#define dma_unmap_len pci_unmap_len
-#define dma_unmap_len_set pci_unmap_len_set
-#endif /* DEFINE_DMA_UNMAP_ADDR */
-
-#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3))
-#ifdef IGB_HWMON
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define sysfs_attr_init(attr) \
- do { \
- static struct lock_class_key __key; \
- (attr)->key = &__key; \
- } while (0)
-#else
-#define sysfs_attr_init(attr) do {} while (0)
-#endif /* CONFIG_DEBUG_LOCK_ALLOC */
-#endif /* IGB_HWMON */
-#endif /* RHEL_RELEASE_CODE */
-
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
-static inline bool _kc_pm_runtime_suspended()
-{
- return false;
-}
-#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended()
-#else /* 2.6.0 => 2.6.34 */
-static inline bool _kc_pm_runtime_suspended(struct device *dev)
-{
- return false;
-}
-#ifndef pm_runtime_suspended
-#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev)
-#endif
-#endif /* 2.6.0 => 2.6.34 */
-
-#else /* < 2.6.34 */
-#define HAVE_SYSTEM_SLEEP_PM_OPS
-#ifndef HAVE_SET_RX_MODE
-#define HAVE_SET_RX_MODE
-#endif
-
-#endif /* < 2.6.34 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
-
-ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
- const void __user *from, size_t count);
-#define simple_write_to_buffer _kc_simple_write_to_buffer
-
-#ifndef numa_node_id
-#define numa_node_id() 0
-#endif
-#ifdef HAVE_TX_MQ
-#include <net/sch_generic.h>
-#ifndef CONFIG_NETDEVICES_MULTIQUEUE
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
-void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
-#define netif_set_real_num_tx_queues _kc_netif_set_real_num_tx_queues
-#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
-#else /* CONFIG_NETDEVICES_MULTIQUEUE */
-#define netif_set_real_num_tx_queues(_netdev, _count) \
- do { \
- (_netdev)->egress_subqueue_count = _count; \
- } while (0)
-#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
-#else /* HAVE_TX_MQ */
-#define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0)
-#endif /* HAVE_TX_MQ */
-#ifndef ETH_FLAG_RXHASH
-#define ETH_FLAG_RXHASH (1<<28)
-#endif /* ETH_FLAG_RXHASH */
-#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))
-#define HAVE_IRQ_AFFINITY_HINT
-#endif
-#else /* < 2.6.35 */
-#define HAVE_PM_QOS_REQUEST_LIST
-#define HAVE_IRQ_AFFINITY_HINT
-#endif /* < 2.6.35 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
-extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
-#define ethtool_op_set_flags _kc_ethtool_op_set_flags
-extern u32 _kc_ethtool_op_get_flags(struct net_device *);
-#define ethtool_op_get_flags _kc_ethtool_op_get_flags
-
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#ifdef NET_IP_ALIGN
-#undef NET_IP_ALIGN
-#endif
-#define NET_IP_ALIGN 0
-#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
-
-#ifdef NET_SKB_PAD
-#undef NET_SKB_PAD
-#endif
-
-#if (L1_CACHE_BYTES > 32)
-#define NET_SKB_PAD L1_CACHE_BYTES
-#else
-#define NET_SKB_PAD 32
-#endif
-
-static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
- unsigned int length)
-{
- struct sk_buff *skb;
-
- skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
- if (skb) {
-#if (NET_IP_ALIGN + NET_SKB_PAD)
- skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
-#endif
- skb->dev = dev;
- }
- return skb;
-}
-
-#ifdef netdev_alloc_skb_ip_align
-#undef netdev_alloc_skb_ip_align
-#endif
-#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
-
-#undef netif_level
-#define netif_level(level, priv, type, dev, fmt, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- netdev_##level(dev, fmt, ##args); \
-} while (0)
-
-#undef usleep_range
-#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
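-/* Note (illustrative): pre-2.6.36 kernels have no usleep_range(), so the
- * fallback sleeps for the requested minimum rounded up to whole
- * milliseconds and ignores 'max'; e.g. usleep_range(100, 200) becomes
- * msleep(1), since msleep() only has millisecond granularity. */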
-
-#define u64_stats_update_begin(a) do { } while(0)
-#define u64_stats_update_end(a) do { } while(0)
-#define u64_stats_fetch_begin(a) do { } while(0)
-#define u64_stats_fetch_retry_bh(a) (0)
-#define u64_stats_fetch_begin_bh(a) (0)
-
-#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1))
-#define HAVE_8021P_SUPPORT
-#endif
-
-#else /* < 2.6.36 */
-
-
-#define HAVE_PM_QOS_REQUEST_ACTIVE
-#define HAVE_8021P_SUPPORT
-#define HAVE_NDO_GET_STATS64
-#endif /* < 2.6.36 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
-#ifndef netif_set_real_num_rx_queues
-static inline int __kc_netif_set_real_num_rx_queues(struct net_device *dev,
- unsigned int rxq)
-{
- return 0;
-}
-#define netif_set_real_num_rx_queues(dev, rxq) \
- __kc_netif_set_real_num_rx_queues((dev), (rxq))
-#endif
-#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
-#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
-#endif
-#ifndef VLAN_N_VID
-#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN
-#endif /* VLAN_N_VID */
-#ifndef ETH_FLAG_TXVLAN
-#define ETH_FLAG_TXVLAN (1 << 7)
-#endif /* ETH_FLAG_TXVLAN */
-#ifndef ETH_FLAG_RXVLAN
-#define ETH_FLAG_RXVLAN (1 << 8)
-#endif /* ETH_FLAG_RXVLAN */
-
-static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb)
-{
- WARN_ON(skb->ip_summed != CHECKSUM_NONE);
-}
-#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb)
-
-static inline void *_kc_vzalloc_node(unsigned long size, int node)
-{
- void *addr = vmalloc_node(size, node);
- if (addr)
- memset(addr, 0, size);
- return addr;
-}
-#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node)
-
-static inline void *_kc_vzalloc(unsigned long size)
-{
- void *addr = vmalloc(size);
- if (addr)
- memset(addr, 0, size);
- return addr;
-}
-#define vzalloc(_size) _kc_vzalloc(_size)
-
-#ifndef vlan_get_protocol
-static inline __be16 __kc_vlan_get_protocol(const struct sk_buff *skb)
-{
- if (vlan_tx_tag_present(skb) ||
- skb->protocol != cpu_to_be16(ETH_P_8021Q))
- return skb->protocol;
-
- if (skb_headlen(skb) < sizeof(struct vlan_ethhdr))
- return 0;
-
- return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto;
-}
-#define vlan_get_protocol(_skb) __kc_vlan_get_protocol(_skb)
-#endif
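-/* Note (illustrative): the backport above returns skb->protocol directly
- * when the VLAN tag already sits in skb metadata or the frame is not
- * 802.1Q; otherwise it reads the encapsulated EtherType from the in-line
- * VLAN header, returning 0 if that header is not in the linear data. */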
-#ifdef HAVE_HW_TIME_STAMP
-#define SKBTX_HW_TSTAMP (1 << 0)
-#define SKBTX_IN_PROGRESS (1 << 2)
-#define SKB_SHARED_TX_IS_UNION
-#endif
-
-#ifndef device_wakeup_enable
-#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true)
-#endif
-
-#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) )
-#ifndef HAVE_VLAN_RX_REGISTER
-#define HAVE_VLAN_RX_REGISTER
-#endif
-#endif /* > 2.4.18 */
-#endif /* < 2.6.37 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
-#define skb_checksum_start_offset(skb) skb_transport_offset(skb)
-#else /* 2.6.22 -> 2.6.37 */
-static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
-{
- return skb->csum_start - skb_headroom(skb);
-}
-#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb)
-#endif /* 2.6.22 -> 2.6.37 */
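-/* Note (illustrative): skb->csum_start is an offset from skb->head, so
- * subtracting skb_headroom(skb) (i.e. skb->data - skb->head) converts it
- * into an offset from skb->data, matching the 2.6.38+ helper. */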
-#ifdef CONFIG_DCB
-#ifndef IEEE_8021QAZ_MAX_TCS
-#define IEEE_8021QAZ_MAX_TCS 8
-#endif
-#ifndef DCB_CAP_DCBX_HOST
-#define DCB_CAP_DCBX_HOST 0x01
-#endif
-#ifndef DCB_CAP_DCBX_LLD_MANAGED
-#define DCB_CAP_DCBX_LLD_MANAGED 0x02
-#endif
-#ifndef DCB_CAP_DCBX_VER_CEE
-#define DCB_CAP_DCBX_VER_CEE 0x04
-#endif
-#ifndef DCB_CAP_DCBX_VER_IEEE
-#define DCB_CAP_DCBX_VER_IEEE 0x08
-#endif
-#ifndef DCB_CAP_DCBX_STATIC
-#define DCB_CAP_DCBX_STATIC 0x10
-#endif
-#endif /* CONFIG_DCB */
-#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))
-#define CONFIG_XPS
-#endif /* RHEL_RELEASE_VERSION(6,2) */
-#endif /* < 2.6.38 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
-#ifndef NETIF_F_RXCSUM
-#define NETIF_F_RXCSUM (1 << 29)
-#endif
-#ifndef skb_queue_reverse_walk_safe
-#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
- for (skb = (queue)->prev, tmp = skb->prev; \
- skb != (struct sk_buff *)(queue); \
- skb = tmp, tmp = skb->prev)
-#endif
-#else /* < 2.6.39 */
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
-#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET
-#endif
-#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
-#ifndef HAVE_MQPRIO
-#define HAVE_MQPRIO
-#endif
-#ifndef HAVE_SETUP_TC
-#define HAVE_SETUP_TC
-#endif
-#ifdef CONFIG_DCB
-#ifndef HAVE_DCBNL_IEEE
-#define HAVE_DCBNL_IEEE
-#endif
-#endif /* CONFIG_DCB */
-#ifndef HAVE_NDO_SET_FEATURES
-#define HAVE_NDO_SET_FEATURES
-#endif
-#endif /* < 2.6.39 */
-
-/*****************************************************************************/
-/* use < 2.6.40 because a Fedora 15 kernel update bumped the kernel version
- * to 2.6.40.x and back-ported 3.0 features such as set_phys_id for ethtool.
- */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) )
-#ifdef ETHTOOL_GRXRINGS
-#ifndef FLOW_EXT
-#define FLOW_EXT 0x80000000
-union _kc_ethtool_flow_union {
- struct ethtool_tcpip4_spec tcp_ip4_spec;
- struct ethtool_usrip4_spec usr_ip4_spec;
- __u8 hdata[60];
-};
-struct _kc_ethtool_flow_ext {
- __be16 vlan_etype;
- __be16 vlan_tci;
- __be32 data[2];
-};
-struct _kc_ethtool_rx_flow_spec {
- __u32 flow_type;
- union _kc_ethtool_flow_union h_u;
- struct _kc_ethtool_flow_ext h_ext;
- union _kc_ethtool_flow_union m_u;
- struct _kc_ethtool_flow_ext m_ext;
- __u64 ring_cookie;
- __u32 location;
-};
-#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec
-#endif /* FLOW_EXT */
-#endif
-
-#define pci_disable_link_state_locked pci_disable_link_state
-
-#ifndef PCI_LTR_VALUE_MASK
-#define PCI_LTR_VALUE_MASK 0x000003ff
-#endif
-#ifndef PCI_LTR_SCALE_MASK
-#define PCI_LTR_SCALE_MASK 0x00001c00
-#endif
-#ifndef PCI_LTR_SCALE_SHIFT
-#define PCI_LTR_SCALE_SHIFT 10
-#endif
-
-#else /* < 2.6.40 */
-#define HAVE_ETHTOOL_SET_PHYS_ID
-#endif /* < 2.6.40 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) )
-#define USE_LEGACY_PM_SUPPORT
-#endif /* < 3.0.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
-#ifndef __netdev_alloc_skb_ip_align
-#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l)
-#endif /* __netdev_alloc_skb_ip_align */
-#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app)
-#define dcb_ieee_delapp(dev, app) 0
-#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority)
-
-/* 1000BASE-T Control register */
-#define CTL1000_AS_MASTER 0x0800
-#define CTL1000_ENABLE_MASTER 0x1000
-
-#else /* < 3.1.0 */
-#ifndef HAVE_DCBNL_IEEE_DELAPP
-#define HAVE_DCBNL_IEEE_DELAPP
-#endif
-#endif /* < 3.1.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
-#ifdef ETHTOOL_GRXRINGS
-#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
-#endif /* ETHTOOL_GRXRINGS */
-
-#ifndef skb_frag_size
-#define skb_frag_size(frag) _kc_skb_frag_size(frag)
-static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag)
-{
- return frag->size;
-}
-#endif /* skb_frag_size */
-
-#ifndef skb_frag_size_sub
-#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta)
-static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta)
-{
- frag->size -= delta;
-}
-#endif /* skb_frag_size_sub */
-
-#ifndef skb_frag_page
-#define skb_frag_page(frag) _kc_skb_frag_page(frag)
-static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag)
-{
- return frag->page;
-}
-#endif /* skb_frag_page */
-
-#ifndef skb_frag_address
-#define skb_frag_address(frag) _kc_skb_frag_address(frag)
-static inline void *_kc_skb_frag_address(const skb_frag_t *frag)
-{
- return page_address(skb_frag_page(frag)) + frag->page_offset;
-}
-#endif /* skb_frag_address */
-
-#ifndef skb_frag_dma_map
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
-#include <linux/dma-mapping.h>
-#endif
-#define skb_frag_dma_map(dev,frag,offset,size,dir) \
- _kc_skb_frag_dma_map(dev,frag,offset,size,dir)
-static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
- const skb_frag_t *frag,
- size_t offset, size_t size,
- enum dma_data_direction dir)
-{
- return dma_map_page(dev, skb_frag_page(frag),
- frag->page_offset + offset, size, dir);
-}
-#endif /* skb_frag_dma_map */
-
-#ifndef __skb_frag_unref
-#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag)
-static inline void __kc_skb_frag_unref(skb_frag_t *frag)
-{
- put_page(skb_frag_page(frag));
-}
-#endif /* __skb_frag_unref */
-
-#ifndef SPEED_UNKNOWN
-#define SPEED_UNKNOWN -1
-#endif
-#ifndef DUPLEX_UNKNOWN
-#define DUPLEX_UNKNOWN 0xff
-#endif
-#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))
-#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
-#define HAVE_PCI_DEV_FLAGS_ASSIGNED
-#endif
-#endif
-#else /* < 3.2.0 */
-#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
-#define HAVE_PCI_DEV_FLAGS_ASSIGNED
-#define HAVE_VF_SPOOFCHK_CONFIGURE
-#endif
-#endif /* < 3.2.0 */
-
-#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2))
-#undef ixgbe_get_netdev_tc_txq
-#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc])
-#endif
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) )
-typedef u32 kni_netdev_features_t;
-#undef PCI_EXP_TYPE_RC_EC
-#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
-#ifndef CONFIG_BQL
-#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0)
-#define netdev_completed_queue(_n, _p, _b) do {} while (0)
-#define netdev_tx_sent_queue(_q, _b) do {} while (0)
-#define netdev_sent_queue(_n, _b) do {} while (0)
-#define netdev_tx_reset_queue(_q) do {} while (0)
-#define netdev_reset_queue(_n) do {} while (0)
-#endif
-#else /* ! < 3.3.0 */
-typedef netdev_features_t kni_netdev_features_t;
-#define HAVE_INT_NDO_VLAN_RX_ADD_VID
-#ifdef ETHTOOL_SRXNTUPLE
-#undef ETHTOOL_SRXNTUPLE
-#endif
-#endif /* < 3.3.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
-#ifndef NETIF_F_RXFCS
-#define NETIF_F_RXFCS 0
-#endif /* NETIF_F_RXFCS */
-#ifndef NETIF_F_RXALL
-#define NETIF_F_RXALL 0
-#endif /* NETIF_F_RXALL */
-
-#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
-#define NUMTCS_RETURNS_U8
-
-int _kc_simple_open(struct inode *inode, struct file *file);
-#define simple_open _kc_simple_open
-#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
-
-
-#ifndef skb_add_rx_frag
-#define skb_add_rx_frag _kc_skb_add_rx_frag
-extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *,
- int, int, unsigned int);
-#endif
-#ifdef NET_ADDR_RANDOM
-#define eth_hw_addr_random(N) do { \
- random_ether_addr(N->dev_addr); \
- N->addr_assign_type |= NET_ADDR_RANDOM; \
- } while (0)
-#else /* NET_ADDR_RANDOM */
-#define eth_hw_addr_random(N) random_ether_addr(N->dev_addr)
-#endif /* NET_ADDR_RANDOM */
-#else /* < 3.4.0 */
-#include <linux/kconfig.h>
-#endif /* >= 3.4.0 */
-
-/*****************************************************************************/
-#if defined(E1000E_PTP) || defined(IGB_PTP) || defined(IXGBE_PTP) || defined(I40E_PTP)
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) && IS_ENABLED(CONFIG_PTP_1588_CLOCK)
-#define HAVE_PTP_1588_CLOCK
-#else
-#error Cannot enable PTP Hardware Clock support due to a pre-3.0 kernel version or CONFIG_PTP_1588_CLOCK not enabled in the kernel
-#endif /* > 3.0.0 && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
-#endif /* E1000E_PTP || IGB_PTP || IXGBE_PTP || I40E_PTP */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
-#define skb_tx_timestamp(skb) do {} while (0)
-static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
-{
- return !compare_ether_addr(addr1, addr2);
-}
-#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2))
-#else
-#define HAVE_FDB_OPS
-#define HAVE_ETHTOOL_GET_TS_INFO
-#endif /* < 3.5.0 */
-
-/*****************************************************************************/
-#include <linux/mdio.h>
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) )
-#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */
-
-#ifndef MDIO_EEE_100TX
-#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */
-#endif
-#ifndef MDIO_EEE_1000T
-#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */
-#endif
-#ifndef MDIO_EEE_10GT
-#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */
-#endif
-#ifndef MDIO_EEE_1000KX
-#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */
-#endif
-#ifndef MDIO_EEE_10GKX4
-#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */
-#endif
-#ifndef MDIO_EEE_10GKR
-#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */
-#endif
-#endif /* < 3.6.0 */
-
-/******************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
-#ifndef ADVERTISED_40000baseKR4_Full
-/* these defines were all added in one commit, so it should be safe
- * to trigger activation on one define
- */
-#define SUPPORTED_40000baseKR4_Full (1 << 23)
-#define SUPPORTED_40000baseCR4_Full (1 << 24)
-#define SUPPORTED_40000baseSR4_Full (1 << 25)
-#define SUPPORTED_40000baseLR4_Full (1 << 26)
-#define ADVERTISED_40000baseKR4_Full (1 << 23)
-#define ADVERTISED_40000baseCR4_Full (1 << 24)
-#define ADVERTISED_40000baseSR4_Full (1 << 25)
-#define ADVERTISED_40000baseLR4_Full (1 << 26)
-#endif
-
-/**
- * mmd_eee_cap_to_ethtool_sup_t
- * @eee_cap: value of the MMD EEE Capability register
- *
- * A small helper function that translates MMD EEE Capability (3.20) bits
- * to ethtool supported settings.
- */
-static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
-{
- u32 supported = 0;
-
- if (eee_cap & MDIO_EEE_100TX)
- supported |= SUPPORTED_100baseT_Full;
- if (eee_cap & MDIO_EEE_1000T)
- supported |= SUPPORTED_1000baseT_Full;
- if (eee_cap & MDIO_EEE_10GT)
- supported |= SUPPORTED_10000baseT_Full;
- if (eee_cap & MDIO_EEE_1000KX)
- supported |= SUPPORTED_1000baseKX_Full;
- if (eee_cap & MDIO_EEE_10GKX4)
- supported |= SUPPORTED_10000baseKX4_Full;
- if (eee_cap & MDIO_EEE_10GKR)
- supported |= SUPPORTED_10000baseKR_Full;
-
- return supported;
-}
-#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \
- __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap)
-
-/**
- * mmd_eee_adv_to_ethtool_adv_t
- * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
- *
- * A small helper function that translates the MMD EEE Advertisement (7.60)
- * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
- * settings.
- */
-static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
-{
- u32 adv = 0;
-
- if (eee_adv & MDIO_EEE_100TX)
- adv |= ADVERTISED_100baseT_Full;
- if (eee_adv & MDIO_EEE_1000T)
- adv |= ADVERTISED_1000baseT_Full;
- if (eee_adv & MDIO_EEE_10GT)
- adv |= ADVERTISED_10000baseT_Full;
- if (eee_adv & MDIO_EEE_1000KX)
- adv |= ADVERTISED_1000baseKX_Full;
- if (eee_adv & MDIO_EEE_10GKX4)
- adv |= ADVERTISED_10000baseKX4_Full;
- if (eee_adv & MDIO_EEE_10GKR)
- adv |= ADVERTISED_10000baseKR_Full;
-
- return adv;
-}
-#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \
- __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv)
-
-/**
- * ethtool_adv_to_mmd_eee_adv_t
- * @adv: the ethtool advertisement settings
- *
- * A small helper function that translates ethtool advertisement settings
- * to EEE advertisements for the MMD EEE Advertisement (7.60) and
- * MMD EEE Link Partner Ability (7.61) registers.
- */
-static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv)
-{
- u16 reg = 0;
-
- if (adv & ADVERTISED_100baseT_Full)
- reg |= MDIO_EEE_100TX;
- if (adv & ADVERTISED_1000baseT_Full)
- reg |= MDIO_EEE_1000T;
- if (adv & ADVERTISED_10000baseT_Full)
- reg |= MDIO_EEE_10GT;
- if (adv & ADVERTISED_1000baseKX_Full)
- reg |= MDIO_EEE_1000KX;
- if (adv & ADVERTISED_10000baseKX4_Full)
- reg |= MDIO_EEE_10GKX4;
- if (adv & ADVERTISED_10000baseKR_Full)
- reg |= MDIO_EEE_10GKR;
-
- return reg;
-}
-#define ethtool_adv_to_mmd_eee_adv_t(adv) \
- __kc_ethtool_adv_to_mmd_eee_adv_t(adv)
-
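-/* Usage sketch (illustrative, not part of the original header): for the
- * six bits handled above the two translations invert each other, e.g.
- *   u16 out = ethtool_adv_to_mmd_eee_adv_t(
- *                     mmd_eee_adv_to_ethtool_adv_t(reg));
- * returns the MDIO_EEE_* bits of 'reg' unchanged. */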
-#ifndef pci_pcie_type
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
-static inline u8 pci_pcie_type(struct pci_dev *pdev)
-{
- int pos;
- u16 reg16;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (!pos)
- BUG();
- pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
- return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
-}
-#else /* < 2.6.24 */
-#define pci_pcie_type(x) (x)->pcie_type
-#endif /* < 2.6.24 */
-#endif /* pci_pcie_type */
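-/* Note (illustrative): on pre-2.6.24 kernels the helper above reads the
- * PCI Express Capabilities register and extracts the Device/Port Type
- * field (PCI_EXP_FLAGS_TYPE, bits 7:4), the same value that newer kernels
- * cache in pdev->pcie_type. */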
-
-#define ptp_clock_register(caps, args...) ptp_clock_register(caps)
-
-#ifndef PCI_EXP_LNKSTA2
-int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
-#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v)
-int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
-#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v)
-int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
- u16 clear, u16 set);
-#define pcie_capability_clear_and_set_word(d,p,c,s) \
- __kc_pcie_capability_clear_and_set_word(d,p,c,s)
-
-#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
-
-static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
- u16 clear)
-{
- return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0);
-}
-#endif /* !PCI_EXP_LNKSTA2 */
-
-#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
-#define USE_CONST_DEV_UC_CHAR
-#endif
-
-#else /* >= 3.7.0 */
-#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
-#define USE_CONST_DEV_UC_CHAR
-#endif /* >= 3.7.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
-#ifndef PCI_EXP_LNKCTL_ASPM_L0S
-#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */
-#endif
-#ifndef PCI_EXP_LNKCTL_ASPM_L1
-#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */
-#endif
-#define HAVE_CONFIG_HOTPLUG
-/* Reserved Ethernet Addresses per IEEE 802.1Q */
-static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = {
- 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
-#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) &&\
- !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5))
-static inline bool is_link_local_ether_addr(const u8 *addr)
-{
- __be16 *a = (__be16 *)addr;
- static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
- static const __be16 m = cpu_to_be16(0xfff0);
-
- return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
-}
-#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
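-/* Note (illustrative): the expression above compares the first 44 bits of
- * the address against 01:80:C2:00:00:0X; the cpu_to_be16(0xfff0) mask lets
- * the low nibble of the last octet vary, matching the reserved range
- * 01:80:C2:00:00:00 through 01:80:C2:00:00:0F. */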
-#else /* >= 3.8.0 */
-#ifndef __devinit
-#define __devinit
-#define HAVE_ENCAP_CSUM_OFFLOAD
-#endif
-
-#ifndef __devinitdata
-#define __devinitdata
-#endif
-
-#ifndef __devexit
-#define __devexit
-#endif
-
-#ifndef __devexit_p
-#define __devexit_p
-#endif
-
-#ifndef HAVE_SRIOV_CONFIGURE
-#define HAVE_SRIOV_CONFIGURE
-#endif
-
-#define HAVE_BRIDGE_ATTRIBS
-#ifndef BRIDGE_MODE_VEB
-#define BRIDGE_MODE_VEB 0 /* Default loopback mode */
-#endif /* BRIDGE_MODE_VEB */
-#ifndef BRIDGE_MODE_VEPA
-#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */
-#endif /* BRIDGE_MODE_VEPA */
-#endif /* >= 3.8.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
-
-#undef hlist_entry
-#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
-
-#undef hlist_entry_safe
-#define hlist_entry_safe(ptr, type, member) \
- (ptr) ? hlist_entry(ptr, type, member) : NULL
-
-#undef hlist_for_each_entry
-#define hlist_for_each_entry(pos, head, member) \
- for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
- pos; \
- pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
-
-#undef hlist_for_each_entry_safe
-#define hlist_for_each_entry_safe(pos, n, head, member) \
- for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \
- pos && ({ n = pos->member.next; 1; }); \
- pos = hlist_entry_safe(n, typeof(*pos), member))
-
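-/* Note (illustrative): kernel 3.9 changed the hlist_for_each_entry*()
- * iterators to take the typed entry cursor directly instead of a separate
- * struct hlist_node pointer; the redefinitions above give pre-3.9 kernels
- * that newer calling convention via hlist_entry_safe(). */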
-#ifdef CONFIG_XPS
-extern int __kc_netif_set_xps_queue(struct net_device *, struct cpumask *, u16);
-#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx))
-#else /* CONFIG_XPS */
-#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0)
-#endif /* CONFIG_XPS */
-
-#ifdef HAVE_NETDEV_SELECT_QUEUE
-#define _kc_hashrnd 0xd631614b /* not so random hash salt */
-extern u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
-#define __netdev_pick_tx __kc_netdev_pick_tx
-#endif /* HAVE_NETDEV_SELECT_QUEUE */
-#else
-#define HAVE_BRIDGE_FILTER
-#define USE_DEFAULT_FDB_DEL_DUMP
-#endif /* < 3.9.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
-#ifdef CONFIG_PCI_IOV
-extern int __kc_pci_vfs_assigned(struct pci_dev *dev);
-#else
-static inline int __kc_pci_vfs_assigned(struct pci_dev *dev)
-{
- return 0;
-}
-#endif
-#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev)
-
-#ifndef VLAN_TX_COOKIE_MAGIC
-static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb,
- u16 vlan_tci)
-{
-#ifdef VLAN_TAG_PRESENT
- vlan_tci |= VLAN_TAG_PRESENT;
-#endif
- skb->vlan_tci = vlan_tci;
- return skb;
-}
-#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \
- __kc__vlan_hwaccel_put_tag(skb, vlan_tci)
-#endif
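-/* Note (illustrative): kernel 3.10 added a vlan_proto argument to
- * __vlan_hwaccel_put_tag(); the shim above accepts and drops it, since
- * pre-3.10 kernels only hardware-accelerate 802.1Q (ETH_P_8021Q) tags. */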
-
-#else /* >= 3.10.0 */
-#define HAVE_ENCAP_TSO_OFFLOAD
-#endif /* >= 3.10.0 */
-
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)))
-#if (!(UBUNTU_KERNEL_CODE >= UBUNTU_KERNEL_VERSION(3,13,0,30,54) \
- && (UBUNTU_RELEASE_CODE == UBUNTU_RELEASE_VERSION(12,4) \
- || UBUNTU_RELEASE_CODE == UBUNTU_RELEASE_VERSION(14,4))))
-#ifdef NETIF_F_RXHASH
-#define PKT_HASH_TYPE_L3 0
-static inline void
-skb_set_hash(struct sk_buff *skb, __u32 hash, __always_unused int type)
-{
- skb->rxhash = hash;
-}
-#endif /* NETIF_F_RXHASH */
-#endif /* < 3.13.0-30.54 (Ubuntu 12.04/14.04) */
-#endif /* < RHEL7 */
-#endif /* < 3.14.0 */
-
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0) )
-#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))
-#define HAVE_VF_MIN_MAX_TXRATE 1
-#endif /* >= 3.16.0 */
-
-#endif /* _KCOMPAT_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c
deleted file mode 100755
index 3adf8696..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c
+++ /dev/null
@@ -1,1172 +0,0 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-/*
- * net/core/ethtool.c - Ethtool ioctl handler
- * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
- *
- * This file is where we call all the ethtool_ops commands to get
- * the information ethtool needs. We fall back to calling do_ioctl()
- * for drivers which haven't been converted to ethtool_ops yet.
- *
- * It's GPL, stupid.
- *
- * Modification by sfeldma@pobox.com to work as backward compat
- * solution for pre-ethtool_ops kernels.
- * - copied struct ethtool_ops from ethtool.h
- * - defined SET_ETHTOOL_OPS
- * - put in some #ifndef NETIF_F_xxx wrappers
- * - changes refs to dev->ethtool_ops to ethtool_ops
- * - changed dev_ethtool to ethtool_ioctl
- * - remove EXPORT_SYMBOL()s
- * - added _kc_ prefix in built-in ethtool_op_xxx ops.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/netdevice.h>
-#include <asm/uaccess.h>
-
-#include "kcompat.h"
-
-#undef SUPPORTED_10000baseT_Full
-#define SUPPORTED_10000baseT_Full (1 << 12)
-#undef ADVERTISED_10000baseT_Full
-#define ADVERTISED_10000baseT_Full (1 << 12)
-#undef SPEED_10000
-#define SPEED_10000 10000
-
-#undef ethtool_ops
-#define ethtool_ops _kc_ethtool_ops
-
-struct _kc_ethtool_ops {
- int (*get_settings)(struct net_device *, struct ethtool_cmd *);
- int (*set_settings)(struct net_device *, struct ethtool_cmd *);
- void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
- int (*get_regs_len)(struct net_device *);
- void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
- void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);
- int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);
- u32 (*get_msglevel)(struct net_device *);
- void (*set_msglevel)(struct net_device *, u32);
- int (*nway_reset)(struct net_device *);
- u32 (*get_link)(struct net_device *);
- int (*get_eeprom_len)(struct net_device *);
- int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
- int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
- int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
- int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
- void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);
- int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);
- void (*get_pauseparam)(struct net_device *,
- struct ethtool_pauseparam*);
- int (*set_pauseparam)(struct net_device *,
- struct ethtool_pauseparam*);
- u32 (*get_rx_csum)(struct net_device *);
- int (*set_rx_csum)(struct net_device *, u32);
- u32 (*get_tx_csum)(struct net_device *);
- int (*set_tx_csum)(struct net_device *, u32);
- u32 (*get_sg)(struct net_device *);
- int (*set_sg)(struct net_device *, u32);
- u32 (*get_tso)(struct net_device *);
- int (*set_tso)(struct net_device *, u32);
- int (*self_test_count)(struct net_device *);
- void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
- void (*get_strings)(struct net_device *, u32 stringset, u8 *);
- int (*phys_id)(struct net_device *, u32);
- int (*get_stats_count)(struct net_device *);
- void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *,
- u64 *);
-} *ethtool_ops = NULL;
-
-#undef SET_ETHTOOL_OPS
-#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops))
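-/*
- * Note (illustrative): on pre-ethtool_ops kernels every handler in this
- * file goes through the single file-scope 'ethtool_ops' pointer set above,
- * so this compat layer supports one set of ethtool operations per module
- * rather than per net_device.
- */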
-
-/*
- * Some useful ethtool_ops methods that are device independent. If we find that
- * all drivers want to do the same thing here, we can turn these into dev_()
- * function calls.
- */
-
-#undef ethtool_op_get_link
-#define ethtool_op_get_link _kc_ethtool_op_get_link
-u32 _kc_ethtool_op_get_link(struct net_device *dev)
-{
- return netif_carrier_ok(dev) ? 1 : 0;
-}
-
-#undef ethtool_op_get_tx_csum
-#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum
-u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev)
-{
-#ifdef NETIF_F_IP_CSUM
- return (dev->features & NETIF_F_IP_CSUM) != 0;
-#else
- return 0;
-#endif
-}
-
-#undef ethtool_op_set_tx_csum
-#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum
-int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
-{
-#ifdef NETIF_F_IP_CSUM
- if (data)
-#ifdef NETIF_F_IPV6_CSUM
- dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- else
- dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-#else
- dev->features |= NETIF_F_IP_CSUM;
- else
- dev->features &= ~NETIF_F_IP_CSUM;
-#endif
-#endif
-
- return 0;
-}
-
-#undef ethtool_op_get_sg
-#define ethtool_op_get_sg _kc_ethtool_op_get_sg
-u32 _kc_ethtool_op_get_sg(struct net_device *dev)
-{
-#ifdef NETIF_F_SG
- return (dev->features & NETIF_F_SG) != 0;
-#else
- return 0;
-#endif
-}
-
-#undef ethtool_op_set_sg
-#define ethtool_op_set_sg _kc_ethtool_op_set_sg
-int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data)
-{
-#ifdef NETIF_F_SG
- if (data)
- dev->features |= NETIF_F_SG;
- else
- dev->features &= ~NETIF_F_SG;
-#endif
-
- return 0;
-}
-
-#undef ethtool_op_get_tso
-#define ethtool_op_get_tso _kc_ethtool_op_get_tso
-u32 _kc_ethtool_op_get_tso(struct net_device *dev)
-{
-#ifdef NETIF_F_TSO
- return (dev->features & NETIF_F_TSO) != 0;
-#else
- return 0;
-#endif
-}
-
-#undef ethtool_op_set_tso
-#define ethtool_op_set_tso _kc_ethtool_op_set_tso
-int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data)
-{
-#ifdef NETIF_F_TSO
- if (data)
- dev->features |= NETIF_F_TSO;
- else
- dev->features &= ~NETIF_F_TSO;
-#endif
-
- return 0;
-}
-
-/* Handlers for each ethtool command */
-
-static int ethtool_get_settings(struct net_device *dev, void *useraddr)
-{
- struct ethtool_cmd cmd = { ETHTOOL_GSET };
- int err;
-
- if (!ethtool_ops->get_settings)
- return -EOPNOTSUPP;
-
- err = ethtool_ops->get_settings(dev, &cmd);
- if (err < 0)
- return err;
-
- if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_set_settings(struct net_device *dev, void *useraddr)
-{
- struct ethtool_cmd cmd;
-
- if (!ethtool_ops->set_settings)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
- return -EFAULT;
-
- return ethtool_ops->set_settings(dev, &cmd);
-}
-
-static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr)
-{
- struct ethtool_drvinfo info;
- struct ethtool_ops *ops = ethtool_ops;
-
- if (!ops->get_drvinfo)
- return -EOPNOTSUPP;
-
- memset(&info, 0, sizeof(info));
- info.cmd = ETHTOOL_GDRVINFO;
- ops->get_drvinfo(dev, &info);
-
- if (ops->self_test_count)
- info.testinfo_len = ops->self_test_count(dev);
- if (ops->get_stats_count)
- info.n_stats = ops->get_stats_count(dev);
- if (ops->get_regs_len)
- info.regdump_len = ops->get_regs_len(dev);
- if (ops->get_eeprom_len)
- info.eedump_len = ops->get_eeprom_len(dev);
-
- if (copy_to_user(useraddr, &info, sizeof(info)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_get_regs(struct net_device *dev, char *useraddr)
-{
- struct ethtool_regs regs;
- struct ethtool_ops *ops = ethtool_ops;
- void *regbuf;
- int reglen, ret;
-
- if (!ops->get_regs || !ops->get_regs_len)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&regs, useraddr, sizeof(regs)))
- return -EFAULT;
-
- reglen = ops->get_regs_len(dev);
- if (regs.len > reglen)
- regs.len = reglen;
-
- regbuf = kmalloc(reglen, GFP_USER);
- if (!regbuf)
- return -ENOMEM;
-
- ops->get_regs(dev, &regs, regbuf);
-
- ret = -EFAULT;
- if (copy_to_user(useraddr, &regs, sizeof(regs)))
- goto out;
- useraddr += offsetof(struct ethtool_regs, data);
- if (copy_to_user(useraddr, regbuf, reglen))
- goto out;
- ret = 0;
-
-out:
- kfree(regbuf);
- return ret;
-}
-
-static int ethtool_get_wol(struct net_device *dev, char *useraddr)
-{
- struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
-
- if (!ethtool_ops->get_wol)
- return -EOPNOTSUPP;
-
- ethtool_ops->get_wol(dev, &wol);
-
- if (copy_to_user(useraddr, &wol, sizeof(wol)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_set_wol(struct net_device *dev, char *useraddr)
-{
- struct ethtool_wolinfo wol;
-
- if (!ethtool_ops->set_wol)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&wol, useraddr, sizeof(wol)))
- return -EFAULT;
-
- return ethtool_ops->set_wol(dev, &wol);
-}
-
-static int ethtool_get_msglevel(struct net_device *dev, char *useraddr)
-{
- struct ethtool_value edata = { ETHTOOL_GMSGLVL };
-
- if (!ethtool_ops->get_msglevel)
- return -EOPNOTSUPP;
-
- edata.data = ethtool_ops->get_msglevel(dev);
-
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_set_msglevel(struct net_device *dev, char *useraddr)
-{
- struct ethtool_value edata;
-
- if (!ethtool_ops->set_msglevel)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
-
- ethtool_ops->set_msglevel(dev, edata.data);
- return 0;
-}
-
-static int ethtool_nway_reset(struct net_device *dev)
-{
- if (!ethtool_ops->nway_reset)
- return -EOPNOTSUPP;
-
- return ethtool_ops->nway_reset(dev);
-}
-
-static int ethtool_get_link(struct net_device *dev, void *useraddr)
-{
- struct ethtool_value edata = { ETHTOOL_GLINK };
-
- if (!ethtool_ops->get_link)
- return -EOPNOTSUPP;
-
- edata.data = ethtool_ops->get_link(dev);
-
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_get_eeprom(struct net_device *dev, void *useraddr)
-{
- struct ethtool_eeprom eeprom;
- struct ethtool_ops *ops = ethtool_ops;
- u8 *data;
- int ret;
-
- if (!ops->get_eeprom || !ops->get_eeprom_len)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
- return -EFAULT;
-
- /* Check for wrap and zero */
- if (eeprom.offset + eeprom.len <= eeprom.offset)
- return -EINVAL;
-
- /* Check for exceeding total eeprom len */
- if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
- return -EINVAL;
-
- data = kmalloc(eeprom.len, GFP_USER);
- if (!data)
- return -ENOMEM;
-
- ret = -EFAULT;
- if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
- goto out;
-
- ret = ops->get_eeprom(dev, &eeprom, data);
- if (ret)
- goto out;
-
- ret = -EFAULT;
- if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
- goto out;
- if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
- goto out;
- ret = 0;
-
-out:
- kfree(data);
- return ret;
-}
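/*
 * Editorial note on the "Check for wrap and zero" test above: with u32
 * fields, offset + len <= offset is true exactly when len == 0 or when
 * the unsigned addition wrapped, e.g. offset = 0xFFFFFF00, len = 0x200
 * yields 0x100 <= 0xFFFFFF00, so a request that would run past the end
 * of the 32-bit range is rejected with a single comparison.
 */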
-
-static int ethtool_set_eeprom(struct net_device *dev, void *useraddr)
-{
- struct ethtool_eeprom eeprom;
- struct ethtool_ops *ops = ethtool_ops;
- u8 *data;
- int ret;
-
- if (!ops->set_eeprom || !ops->get_eeprom_len)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
- return -EFAULT;
-
- /* Check for wrap and zero */
- if (eeprom.offset + eeprom.len <= eeprom.offset)
- return -EINVAL;
-
- /* Check for exceeding total eeprom len */
- if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
- return -EINVAL;
-
- data = kmalloc(eeprom.len, GFP_USER);
- if (!data)
- return -ENOMEM;
-
- ret = -EFAULT;
- if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
- goto out;
-
- ret = ops->set_eeprom(dev, &eeprom, data);
- if (ret)
- goto out;
-
- if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
- ret = -EFAULT;
-
-out:
- kfree(data);
- return ret;
-}
-
-static int ethtool_get_coalesce(struct net_device *dev, void *useraddr)
-{
- struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
-
- if (!ethtool_ops->get_coalesce)
- return -EOPNOTSUPP;
-
- ethtool_ops->get_coalesce(dev, &coalesce);
-
- if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_set_coalesce(struct net_device *dev, void *useraddr)
-{
-	struct ethtool_coalesce coalesce;
-
-	if (!ethtool_ops->set_coalesce)
-		return -EOPNOTSUPP;
-
- if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
- return -EFAULT;
-
- return ethtool_ops->set_coalesce(dev, &coalesce);
-}
-
-static int ethtool_get_ringparam(struct net_device *dev, void *useraddr)
-{
- struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
-
- if (!ethtool_ops->get_ringparam)
- return -EOPNOTSUPP;
-
- ethtool_ops->get_ringparam(dev, &ringparam);
-
- if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_set_ringparam(struct net_device *dev, void *useraddr)
-{
-	struct ethtool_ringparam ringparam;
-
-	if (!ethtool_ops->set_ringparam)
-		return -EOPNOTSUPP;
-
- if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
- return -EFAULT;
-
- return ethtool_ops->set_ringparam(dev, &ringparam);
-}
-
-static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr)
-{
- struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
-
- if (!ethtool_ops->get_pauseparam)
- return -EOPNOTSUPP;
-
- ethtool_ops->get_pauseparam(dev, &pauseparam);
-
- if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr)
-{
-	struct ethtool_pauseparam pauseparam;
-
-	if (!ethtool_ops->set_pauseparam)
-		return -EOPNOTSUPP;
-
- if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
- return -EFAULT;
-
- return ethtool_ops->set_pauseparam(dev, &pauseparam);
-}
-
-static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr)
-{
- struct ethtool_value edata = { ETHTOOL_GRXCSUM };
-
- if (!ethtool_ops->get_rx_csum)
- return -EOPNOTSUPP;
-
- edata.data = ethtool_ops->get_rx_csum(dev);
-
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr)
-{
- struct ethtool_value edata;
-
- if (!ethtool_ops->set_rx_csum)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
-
- ethtool_ops->set_rx_csum(dev, edata.data);
- return 0;
-}
-
-static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr)
-{
- struct ethtool_value edata = { ETHTOOL_GTXCSUM };
-
- if (!ethtool_ops->get_tx_csum)
- return -EOPNOTSUPP;
-
- edata.data = ethtool_ops->get_tx_csum(dev);
-
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr)
-{
- struct ethtool_value edata;
-
- if (!ethtool_ops->set_tx_csum)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
-
- return ethtool_ops->set_tx_csum(dev, edata.data);
-}
-
-static int ethtool_get_sg(struct net_device *dev, char *useraddr)
-{
- struct ethtool_value edata = { ETHTOOL_GSG };
-
- if (!ethtool_ops->get_sg)
- return -EOPNOTSUPP;
-
- edata.data = ethtool_ops->get_sg(dev);
-
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_set_sg(struct net_device *dev, char *useraddr)
-{
- struct ethtool_value edata;
-
- if (!ethtool_ops->set_sg)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
-
- return ethtool_ops->set_sg(dev, edata.data);
-}
-
-static int ethtool_get_tso(struct net_device *dev, char *useraddr)
-{
- struct ethtool_value edata = { ETHTOOL_GTSO };
-
- if (!ethtool_ops->get_tso)
- return -EOPNOTSUPP;
-
- edata.data = ethtool_ops->get_tso(dev);
-
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_set_tso(struct net_device *dev, char *useraddr)
-{
- struct ethtool_value edata;
-
- if (!ethtool_ops->set_tso)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
-
- return ethtool_ops->set_tso(dev, edata.data);
-}
-
-static int ethtool_self_test(struct net_device *dev, char *useraddr)
-{
- struct ethtool_test test;
- struct ethtool_ops *ops = ethtool_ops;
- u64 *data;
- int ret;
-
- if (!ops->self_test || !ops->self_test_count)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&test, useraddr, sizeof(test)))
- return -EFAULT;
-
- test.len = ops->self_test_count(dev);
- data = kmalloc(test.len * sizeof(u64), GFP_USER);
- if (!data)
- return -ENOMEM;
-
- ops->self_test(dev, &test, data);
-
- ret = -EFAULT;
- if (copy_to_user(useraddr, &test, sizeof(test)))
- goto out;
- useraddr += sizeof(test);
- if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
- goto out;
- ret = 0;
-
-out:
- kfree(data);
- return ret;
-}
-
-static int ethtool_get_strings(struct net_device *dev, void *useraddr)
-{
- struct ethtool_gstrings gstrings;
- struct ethtool_ops *ops = ethtool_ops;
- u8 *data;
- int ret;
-
- if (!ops->get_strings)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
- return -EFAULT;
-
- switch (gstrings.string_set) {
- case ETH_SS_TEST:
- if (!ops->self_test_count)
- return -EOPNOTSUPP;
- gstrings.len = ops->self_test_count(dev);
- break;
- case ETH_SS_STATS:
- if (!ops->get_stats_count)
- return -EOPNOTSUPP;
- gstrings.len = ops->get_stats_count(dev);
- break;
- default:
- return -EINVAL;
- }
-
- data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
- if (!data)
- return -ENOMEM;
-
- ops->get_strings(dev, gstrings.string_set, data);
-
- ret = -EFAULT;
- if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
- goto out;
- useraddr += sizeof(gstrings);
- if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
- goto out;
- ret = 0;
-
-out:
- kfree(data);
- return ret;
-}
-
-static int ethtool_phys_id(struct net_device *dev, void *useraddr)
-{
- struct ethtool_value id;
-
- if (!ethtool_ops->phys_id)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&id, useraddr, sizeof(id)))
- return -EFAULT;
-
- return ethtool_ops->phys_id(dev, id.data);
-}
-
-static int ethtool_get_stats(struct net_device *dev, void *useraddr)
-{
- struct ethtool_stats stats;
- struct ethtool_ops *ops = ethtool_ops;
- u64 *data;
- int ret;
-
- if (!ops->get_ethtool_stats || !ops->get_stats_count)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&stats, useraddr, sizeof(stats)))
- return -EFAULT;
-
- stats.n_stats = ops->get_stats_count(dev);
- data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER);
- if (!data)
- return -ENOMEM;
-
- ops->get_ethtool_stats(dev, &stats, data);
-
- ret = -EFAULT;
- if (copy_to_user(useraddr, &stats, sizeof(stats)))
- goto out;
- useraddr += sizeof(stats);
- if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
- goto out;
- ret = 0;
-
-out:
- kfree(data);
- return ret;
-}
-
-/* The main entry point in this file. Called from net/core/dev.c */
-
-#define ETHTOOL_OPS_COMPAT
-int ethtool_ioctl(struct ifreq *ifr)
-{
- struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
- void *useraddr = (void *) ifr->ifr_data;
- u32 ethcmd;
-
- /*
- * XXX: This can be pushed down into the ethtool_* handlers that
- * need it. Keep existing behavior for the moment.
- */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (!dev || !netif_device_present(dev))
- return -ENODEV;
-
- if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
- return -EFAULT;
-
- switch (ethcmd) {
- case ETHTOOL_GSET:
- return ethtool_get_settings(dev, useraddr);
- case ETHTOOL_SSET:
- return ethtool_set_settings(dev, useraddr);
- case ETHTOOL_GDRVINFO:
- return ethtool_get_drvinfo(dev, useraddr);
- case ETHTOOL_GREGS:
- return ethtool_get_regs(dev, useraddr);
- case ETHTOOL_GWOL:
- return ethtool_get_wol(dev, useraddr);
- case ETHTOOL_SWOL:
- return ethtool_set_wol(dev, useraddr);
- case ETHTOOL_GMSGLVL:
- return ethtool_get_msglevel(dev, useraddr);
- case ETHTOOL_SMSGLVL:
- return ethtool_set_msglevel(dev, useraddr);
- case ETHTOOL_NWAY_RST:
- return ethtool_nway_reset(dev);
- case ETHTOOL_GLINK:
- return ethtool_get_link(dev, useraddr);
- case ETHTOOL_GEEPROM:
- return ethtool_get_eeprom(dev, useraddr);
- case ETHTOOL_SEEPROM:
- return ethtool_set_eeprom(dev, useraddr);
- case ETHTOOL_GCOALESCE:
- return ethtool_get_coalesce(dev, useraddr);
- case ETHTOOL_SCOALESCE:
- return ethtool_set_coalesce(dev, useraddr);
- case ETHTOOL_GRINGPARAM:
- return ethtool_get_ringparam(dev, useraddr);
- case ETHTOOL_SRINGPARAM:
- return ethtool_set_ringparam(dev, useraddr);
- case ETHTOOL_GPAUSEPARAM:
- return ethtool_get_pauseparam(dev, useraddr);
- case ETHTOOL_SPAUSEPARAM:
- return ethtool_set_pauseparam(dev, useraddr);
- case ETHTOOL_GRXCSUM:
- return ethtool_get_rx_csum(dev, useraddr);
- case ETHTOOL_SRXCSUM:
- return ethtool_set_rx_csum(dev, useraddr);
- case ETHTOOL_GTXCSUM:
- return ethtool_get_tx_csum(dev, useraddr);
- case ETHTOOL_STXCSUM:
- return ethtool_set_tx_csum(dev, useraddr);
- case ETHTOOL_GSG:
- return ethtool_get_sg(dev, useraddr);
- case ETHTOOL_SSG:
- return ethtool_set_sg(dev, useraddr);
- case ETHTOOL_GTSO:
- return ethtool_get_tso(dev, useraddr);
- case ETHTOOL_STSO:
- return ethtool_set_tso(dev, useraddr);
- case ETHTOOL_TEST:
- return ethtool_self_test(dev, useraddr);
- case ETHTOOL_GSTRINGS:
- return ethtool_get_strings(dev, useraddr);
- case ETHTOOL_PHYS_ID:
- return ethtool_phys_id(dev, useraddr);
- case ETHTOOL_GSTATS:
- return ethtool_get_stats(dev, useraddr);
- default:
- return -EOPNOTSUPP;
- }
-
- return -EOPNOTSUPP;
-}
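/*
 * Editorial sketch, not part of the deleted file: how the dispatch above
 * is reached from user space via the standard SIOCETHTOOL ioctl. The
 * interface name "eth0" is a placeholder; error handling is elided.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&edata;	/* kernel reads cmd, writes data back */

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("link is %s\n", edata.data ? "up" : "down");
	close(fd);
	return 0;
}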
-
-#define mii_if_info _kc_mii_if_info
-struct _kc_mii_if_info {
- int phy_id;
- int advertising;
- int phy_id_mask;
- int reg_num_mask;
-
- unsigned int full_duplex : 1; /* is full duplex? */
- unsigned int force_media : 1; /* is autoneg. disabled? */
-
- struct net_device *dev;
- int (*mdio_read) (struct net_device *dev, int phy_id, int location);
- void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
-};
-
-struct ethtool_cmd;
-struct mii_ioctl_data;
-
-#undef mii_link_ok
-#define mii_link_ok _kc_mii_link_ok
-#undef mii_nway_restart
-#define mii_nway_restart _kc_mii_nway_restart
-#undef mii_ethtool_gset
-#define mii_ethtool_gset _kc_mii_ethtool_gset
-#undef mii_ethtool_sset
-#define mii_ethtool_sset _kc_mii_ethtool_sset
-#undef mii_check_link
-#define mii_check_link _kc_mii_check_link
-extern int _kc_mii_link_ok (struct mii_if_info *mii);
-extern int _kc_mii_nway_restart (struct mii_if_info *mii);
-extern int _kc_mii_ethtool_gset(struct mii_if_info *mii,
- struct ethtool_cmd *ecmd);
-extern int _kc_mii_ethtool_sset(struct mii_if_info *mii,
- struct ethtool_cmd *ecmd);
-extern void _kc_mii_check_link (struct mii_if_info *mii);
-#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) )
-#undef generic_mii_ioctl
-#define generic_mii_ioctl _kc_generic_mii_ioctl
-extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
- struct mii_ioctl_data *mii_data, int cmd,
- unsigned int *duplex_changed);
-#endif /* > 2.4.6 */
-
-
-struct _kc_pci_dev_ext {
- struct pci_dev *dev;
- void *pci_drvdata;
- struct pci_driver *driver;
-};
-
-struct _kc_net_dev_ext {
- struct net_device *dev;
- unsigned int carrier;
-};
-
-
-/**************************************/
-/* mii support */
-
-int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
-{
- struct net_device *dev = mii->dev;
- u32 advert, bmcr, lpa, nego;
-
- ecmd->supported =
- (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
- SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
-
- /* only supports twisted-pair */
- ecmd->port = PORT_MII;
-
- /* only supports internal transceiver */
- ecmd->transceiver = XCVR_INTERNAL;
-
- /* this isn't fully supported at higher layers */
- ecmd->phy_address = mii->phy_id;
-
- ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
- advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
- if (advert & ADVERTISE_10HALF)
- ecmd->advertising |= ADVERTISED_10baseT_Half;
- if (advert & ADVERTISE_10FULL)
- ecmd->advertising |= ADVERTISED_10baseT_Full;
- if (advert & ADVERTISE_100HALF)
- ecmd->advertising |= ADVERTISED_100baseT_Half;
- if (advert & ADVERTISE_100FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
-
- bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
- lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
- if (bmcr & BMCR_ANENABLE) {
- ecmd->advertising |= ADVERTISED_Autoneg;
- ecmd->autoneg = AUTONEG_ENABLE;
-
- nego = mii_nway_result(advert & lpa);
- if (nego == LPA_100FULL || nego == LPA_100HALF)
- ecmd->speed = SPEED_100;
- else
- ecmd->speed = SPEED_10;
- if (nego == LPA_100FULL || nego == LPA_10FULL) {
- ecmd->duplex = DUPLEX_FULL;
- mii->full_duplex = 1;
- } else {
- ecmd->duplex = DUPLEX_HALF;
- mii->full_duplex = 0;
- }
- } else {
- ecmd->autoneg = AUTONEG_DISABLE;
-
- ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
- ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
- }
-
- /* ignore maxtxpkt, maxrxpkt for now */
-
- return 0;
-}
-
-int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
-{
- struct net_device *dev = mii->dev;
-
- if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
- return -EINVAL;
- if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
- return -EINVAL;
- if (ecmd->port != PORT_MII)
- return -EINVAL;
- if (ecmd->transceiver != XCVR_INTERNAL)
- return -EINVAL;
- if (ecmd->phy_address != mii->phy_id)
- return -EINVAL;
- if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
- return -EINVAL;
-
- /* ignore supported, maxtxpkt, maxrxpkt */
-
- if (ecmd->autoneg == AUTONEG_ENABLE) {
- u32 bmcr, advert, tmp;
-
- if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full)) == 0)
- return -EINVAL;
-
- /* advertise only what has been requested */
- advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
- tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
-		if (ecmd->advertising & ADVERTISED_10baseT_Half)
-			tmp |= ADVERTISE_10HALF;
-		if (ecmd->advertising & ADVERTISED_10baseT_Full)
-			tmp |= ADVERTISE_10FULL;
-		if (ecmd->advertising & ADVERTISED_100baseT_Half)
-			tmp |= ADVERTISE_100HALF;
-		if (ecmd->advertising & ADVERTISED_100baseT_Full)
-			tmp |= ADVERTISE_100FULL;
- if (advert != tmp) {
- mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
- mii->advertising = tmp;
- }
-
- /* turn on autonegotiation, and force a renegotiate */
- bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
- mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
-
- mii->force_media = 0;
- } else {
- u32 bmcr, tmp;
-
-		/* turn off autonegotiation, set speed and duplex */
- bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
- tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
- if (ecmd->speed == SPEED_100)
- tmp |= BMCR_SPEED100;
- if (ecmd->duplex == DUPLEX_FULL) {
- tmp |= BMCR_FULLDPLX;
- mii->full_duplex = 1;
- } else
- mii->full_duplex = 0;
- if (bmcr != tmp)
- mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
-
- mii->force_media = 1;
- }
- return 0;
-}
-
-int _kc_mii_link_ok (struct mii_if_info *mii)
-{
- /* first, a dummy read, needed to latch some MII phys */
- mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
- if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS)
- return 1;
- return 0;
-}
-
-int _kc_mii_nway_restart (struct mii_if_info *mii)
-{
- int bmcr;
- int r = -EINVAL;
-
- /* if autoneg is off, it's an error */
- bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
-
- if (bmcr & BMCR_ANENABLE) {
- bmcr |= BMCR_ANRESTART;
- mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr);
- r = 0;
- }
-
- return r;
-}
-
-void _kc_mii_check_link (struct mii_if_info *mii)
-{
- int cur_link = mii_link_ok(mii);
- int prev_link = netif_carrier_ok(mii->dev);
-
- if (cur_link && !prev_link)
- netif_carrier_on(mii->dev);
- else if (prev_link && !cur_link)
- netif_carrier_off(mii->dev);
-}
-
-#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) )
-int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
- struct mii_ioctl_data *mii_data, int cmd,
- unsigned int *duplex_chg_out)
-{
- int rc = 0;
- unsigned int duplex_changed = 0;
-
- if (duplex_chg_out)
- *duplex_chg_out = 0;
-
- mii_data->phy_id &= mii_if->phy_id_mask;
- mii_data->reg_num &= mii_if->reg_num_mask;
-
- switch(cmd) {
- case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */
- case SIOCGMIIPHY:
- mii_data->phy_id = mii_if->phy_id;
- /* fall through */
-
- case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */
- case SIOCGMIIREG:
- mii_data->val_out =
- mii_if->mdio_read(mii_if->dev, mii_data->phy_id,
- mii_data->reg_num);
- break;
-
- case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */
- case SIOCSMIIREG: {
- u16 val = mii_data->val_in;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (mii_data->phy_id == mii_if->phy_id) {
- switch(mii_data->reg_num) {
- case MII_BMCR: {
- unsigned int new_duplex = 0;
- if (val & (BMCR_RESET|BMCR_ANENABLE))
- mii_if->force_media = 0;
- else
- mii_if->force_media = 1;
- if (mii_if->force_media &&
- (val & BMCR_FULLDPLX))
- new_duplex = 1;
- if (mii_if->full_duplex != new_duplex) {
- duplex_changed = 1;
- mii_if->full_duplex = new_duplex;
- }
- break;
- }
- case MII_ADVERTISE:
- mii_if->advertising = val;
- break;
- default:
- /* do nothing */
- break;
- }
- }
-
- mii_if->mdio_write(mii_if->dev, mii_data->phy_id,
- mii_data->reg_num, val);
- break;
- }
-
- default:
- rc = -EOPNOTSUPP;
- break;
- }
-
- if ((rc == 0) && (duplex_chg_out) && (duplex_changed))
- *duplex_chg_out = 1;
-
- return rc;
-}
-#endif /* > 2.4.6 */
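/*
 * Editorial sketch of how a driver typically consumes the compat MII
 * helpers above; mydrv_priv, mydrv_mdio_read and mydrv_mdio_write are
 * hypothetical, everything else is the standard kernel MII idiom.
 */
static void mydrv_mii_init(struct net_device *dev, struct mydrv_priv *priv)
{
	priv->mii.dev = dev;
	priv->mii.phy_id = 1;		/* PHY address on the MDIO bus */
	priv->mii.phy_id_mask = 0x1f;	/* 5-bit PHY addresses */
	priv->mii.reg_num_mask = 0x1f;	/* 5-bit register numbers */
	priv->mii.mdio_read = mydrv_mdio_read;
	priv->mii.mdio_write = mydrv_mdio_write;
}

/* SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG can then be delegated wholesale: */
static int mydrv_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	return generic_mii_ioctl(&priv->mii, if_mii(ifr), cmd, NULL);
}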
-
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/COPYING b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/COPYING
deleted file mode 100755
index 5f297e5b..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/COPYING
+++ /dev/null
@@ -1,339 +0,0 @@
-
-"This software program is licensed subject to the GNU General Public License
-(GPL). Version 2, June 1991, available at
-<http://www.fsf.org/copyleft/gpl.html>"
-
-GNU General Public License
-
-Version 2, June 1991
-
-Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
-
-Everyone is permitted to copy and distribute verbatim copies of this license
-document, but changing it is not allowed.
-
-Preamble
-
-The licenses for most software are designed to take away your freedom to
-share and change it. By contrast, the GNU General Public License is intended
-to guarantee your freedom to share and change free software--to make sure
-the software is free for all its users. This General Public License applies
-to most of the Free Software Foundation's software and to any other program
-whose authors commit to using it. (Some other Free Software Foundation
-software is covered by the GNU Library General Public License instead.) You
-can apply it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not price. Our
-General Public Licenses are designed to make sure that you have the freedom
-to distribute copies of free software (and charge for this service if you
-wish), that you receive source code or can get it if you want it, that you
-can change the software or use pieces of it in new free programs; and that
-you know you can do these things.
-
-To protect your rights, we need to make restrictions that forbid anyone to
-deny you these rights or to ask you to surrender the rights. These
-restrictions translate to certain responsibilities for you if you distribute
-copies of the software, or if you modify it.
-
-For example, if you distribute copies of such a program, whether gratis or
-for a fee, you must give the recipients all the rights that you have. You
-must make sure that they, too, receive or can get the source code. And you
-must show them these terms so they know their rights.
-
-We protect your rights with two steps: (1) copyright the software, and (2)
-offer you this license which gives you legal permission to copy, distribute
-and/or modify the software.
-
-Also, for each author's protection and ours, we want to make certain that
-everyone understands that there is no warranty for this free software. If
-the software is modified by someone else and passed on, we want its
-recipients to know that what they have is not the original, so that any
-problems introduced by others will not reflect on the original authors'
-reputations.
-
-Finally, any free program is threatened constantly by software patents. We
-wish to avoid the danger that redistributors of a free program will
-individually obtain patent licenses, in effect making the program
-proprietary. To prevent this, we have made it clear that any patent must be
-licensed for everyone's free use or not licensed at all.
-
-The precise terms and conditions for copying, distribution and modification
-follow.
-
-TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-0. This License applies to any program or other work which contains a notice
- placed by the copyright holder saying it may be distributed under the
- terms of this General Public License. The "Program", below, refers to any
- such program or work, and a "work based on the Program" means either the
- Program or any derivative work under copyright law: that is to say, a
- work containing the Program or a portion of it, either verbatim or with
- modifications and/or translated into another language. (Hereinafter,
- translation is included without limitation in the term "modification".)
- Each licensee is addressed as "you".
-
- Activities other than copying, distribution and modification are not
- covered by this License; they are outside its scope. The act of running
- the Program is not restricted, and the output from the Program is covered
- only if its contents constitute a work based on the Program (independent
- of having been made by running the Program). Whether that is true depends
- on what the Program does.
-
-1. You may copy and distribute verbatim copies of the Program's source code
- as you receive it, in any medium, provided that you conspicuously and
- appropriately publish on each copy an appropriate copyright notice and
- disclaimer of warranty; keep intact all the notices that refer to this
- License and to the absence of any warranty; and give any other recipients
- of the Program a copy of this License along with the Program.
-
- You may charge a fee for the physical act of transferring a copy, and you
- may at your option offer warranty protection in exchange for a fee.
-
-2. You may modify your copy or copies of the Program or any portion of it,
- thus forming a work based on the Program, and copy and distribute such
- modifications or work under the terms of Section 1 above, provided that
- you also meet all of these conditions:
-
- * a) You must cause the modified files to carry prominent notices stating
- that you changed the files and the date of any change.
-
- * b) You must cause any work that you distribute or publish, that in
- whole or in part contains or is derived from the Program or any part
- thereof, to be licensed as a whole at no charge to all third parties
- under the terms of this License.
-
- * c) If the modified program normally reads commands interactively when
- run, you must cause it, when started running for such interactive
- use in the most ordinary way, to print or display an announcement
- including an appropriate copyright notice and a notice that there is
- no warranty (or else, saying that you provide a warranty) and that
- users may redistribute the program under these conditions, and
- telling the user how to view a copy of this License. (Exception: if
- the Program itself is interactive but does not normally print such
- an announcement, your work based on the Program is not required to
- print an announcement.)
-
- These requirements apply to the modified work as a whole. If identifiable
- sections of that work are not derived from the Program, and can be
- reasonably considered independent and separate works in themselves, then
- this License, and its terms, do not apply to those sections when you
- distribute them as separate works. But when you distribute the same
- sections as part of a whole which is a work based on the Program, the
- distribution of the whole must be on the terms of this License, whose
- permissions for other licensees extend to the entire whole, and thus to
- each and every part regardless of who wrote it.
-
- Thus, it is not the intent of this section to claim rights or contest
- your rights to work written entirely by you; rather, the intent is to
- exercise the right to control the distribution of derivative or
- collective works based on the Program.
-
- In addition, mere aggregation of another work not based on the Program
- with the Program (or with a work based on the Program) on a volume of a
- storage or distribution medium does not bring the other work under the
- scope of this License.
-
-3. You may copy and distribute the Program (or a work based on it, under
- Section 2) in object code or executable form under the terms of Sections
- 1 and 2 above provided that you also do one of the following:
-
- * a) Accompany it with the complete corresponding machine-readable source
- code, which must be distributed under the terms of Sections 1 and 2
- above on a medium customarily used for software interchange; or,
-
- * b) Accompany it with a written offer, valid for at least three years,
- to give any third party, for a charge no more than your cost of
- physically performing source distribution, a complete machine-
- readable copy of the corresponding source code, to be distributed
- under the terms of Sections 1 and 2 above on a medium customarily
- used for software interchange; or,
-
- * c) Accompany it with the information you received as to the offer to
- distribute corresponding source code. (This alternative is allowed
- only for noncommercial distribution and only if you received the
- program in object code or executable form with such an offer, in
- accord with Subsection b above.)
-
- The source code for a work means the preferred form of the work for
- making modifications to it. For an executable work, complete source code
- means all the source code for all modules it contains, plus any
- associated interface definition files, plus the scripts used to control
- compilation and installation of the executable. However, as a special
- exception, the source code distributed need not include anything that is
- normally distributed (in either source or binary form) with the major
- components (compiler, kernel, and so on) of the operating system on which
- the executable runs, unless that component itself accompanies the
- executable.
-
- If distribution of executable or object code is made by offering access
- to copy from a designated place, then offering equivalent access to copy
- the source code from the same place counts as distribution of the source
- code, even though third parties are not compelled to copy the source
- along with the object code.
-
-4. You may not copy, modify, sublicense, or distribute the Program except as
- expressly provided under this License. Any attempt otherwise to copy,
- modify, sublicense or distribute the Program is void, and will
- automatically terminate your rights under this License. However, parties
- who have received copies, or rights, from you under this License will not
- have their licenses terminated so long as such parties remain in full
- compliance.
-
-5. You are not required to accept this License, since you have not signed
- it. However, nothing else grants you permission to modify or distribute
- the Program or its derivative works. These actions are prohibited by law
- if you do not accept this License. Therefore, by modifying or
- distributing the Program (or any work based on the Program), you
- indicate your acceptance of this License to do so, and all its terms and
- conditions for copying, distributing or modifying the Program or works
- based on it.
-
-6. Each time you redistribute the Program (or any work based on the
- Program), the recipient automatically receives a license from the
- original licensor to copy, distribute or modify the Program subject to
- these terms and conditions. You may not impose any further restrictions
- on the recipients' exercise of the rights granted herein. You are not
- responsible for enforcing compliance by third parties to this License.
-
-7. If, as a consequence of a court judgment or allegation of patent
- infringement or for any other reason (not limited to patent issues),
- conditions are imposed on you (whether by court order, agreement or
- otherwise) that contradict the conditions of this License, they do not
- excuse you from the conditions of this License. If you cannot distribute
- so as to satisfy simultaneously your obligations under this License and
- any other pertinent obligations, then as a consequence you may not
- distribute the Program at all. For example, if a patent license would
- not permit royalty-free redistribution of the Program by all those who
- receive copies directly or indirectly through you, then the only way you
- could satisfy both it and this License would be to refrain entirely from
- distribution of the Program.
-
- If any portion of this section is held invalid or unenforceable under any
- particular circumstance, the balance of the section is intended to apply
- and the section as a whole is intended to apply in other circumstances.
-
- It is not the purpose of this section to induce you to infringe any
- patents or other property right claims or to contest validity of any
- such claims; this section has the sole purpose of protecting the
- integrity of the free software distribution system, which is implemented
- by public license practices. Many people have made generous contributions
- to the wide range of software distributed through that system in
- reliance on consistent application of that system; it is up to the
- author/donor to decide if he or she is willing to distribute software
- through any other system and a licensee cannot impose that choice.
-
- This section is intended to make thoroughly clear what is believed to be
- a consequence of the rest of this License.
-
-8. If the distribution and/or use of the Program is restricted in certain
- countries either by patents or by copyrighted interfaces, the original
- copyright holder who places the Program under this License may add an
- explicit geographical distribution limitation excluding those countries,
- so that distribution is permitted only in or among countries not thus
- excluded. In such case, this License incorporates the limitation as if
- written in the body of this License.
-
-9. The Free Software Foundation may publish revised and/or new versions of
- the General Public License from time to time. Such new versions will be
- similar in spirit to the present version, but may differ in detail to
- address new problems or concerns.
-
- Each version is given a distinguishing version number. If the Program
- specifies a version number of this License which applies to it and "any
- later version", you have the option of following the terms and
- conditions either of that version or of any later version published by
- the Free Software Foundation. If the Program does not specify a version
- number of this License, you may choose any version ever published by the
- Free Software Foundation.
-
-10. If you wish to incorporate parts of the Program into other free programs
- whose distribution conditions are different, write to the author to ask
- for permission. For software which is copyrighted by the Free Software
- Foundation, write to the Free Software Foundation; we sometimes make
- exceptions for this. Our decision will be guided by the two goals of
- preserving the free status of all derivatives of our free software and
- of promoting the sharing and reuse of software generally.
-
- NO WARRANTY
-
-11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
- FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
- OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
- PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER
- EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
- ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
- YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
- NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
- WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
- REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
- DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
- DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
- (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
- INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
- THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
- OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-END OF TERMS AND CONDITIONS
-
-How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it free
-software which everyone can redistribute and change under these terms.
-
-To do so, attach the following notices to the program. It is safest to
-attach them to the start of each source file to most effectively convey the
-exclusion of warranty; and each file should have at least the "copyright"
-line and a pointer to where the full notice is found.
-
-one line to give the program's name and an idea of what it does.
-Copyright (C) yyyy name of author
-
-This program is free software; you can redistribute it and/or modify it
-under the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2 of the License, or (at your option)
-any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-more details.
-
-You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc., 59
-Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this when
-it starts in an interactive mode:
-
-Gnomovision version 69, Copyright (C) year name of author Gnomovision comes
-with ABSOLUTELY NO WARRANTY; for details type 'show w'. This is free
-software, and you are welcome to redistribute it under certain conditions;
-type 'show c' for details.
-
-The hypothetical commands 'show w' and 'show c' should show the appropriate
-parts of the General Public License. Of course, the commands you use may be
-called something other than 'show w' and 'show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
-Yoyodyne, Inc., hereby disclaims all copyright interest in the program
-'Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-signature of Ty Coon, 1 April 1989
-Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs. If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library. If this is what you want to do, use the GNU Library General Public
-License instead of this License.
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h
deleted file mode 100755
index 222c2c71..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h
+++ /dev/null
@@ -1,925 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IXGBE_H_
-#define _IXGBE_H_
-
-#ifndef IXGBE_NO_LRO
-#include <net/tcp.h>
-#endif
-
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#ifdef HAVE_IRQ_AFFINITY_HINT
-#include <linux/cpumask.h>
-#endif /* HAVE_IRQ_AFFINITY_HINT */
-#include <linux/vmalloc.h>
-
-#ifdef SIOCETHTOOL
-#include <linux/ethtool.h>
-#endif
-#ifdef NETIF_F_HW_VLAN_TX
-#include <linux/if_vlan.h>
-#endif
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
-#define IXGBE_DCA
-#include <linux/dca.h>
-#endif
-#include "ixgbe_dcb.h"
-
-#include "kcompat.h"
-
-#ifdef HAVE_SCTP
-#include <linux/sctp.h>
-#endif
-
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-#define IXGBE_FCOE
-#include "ixgbe_fcoe.h"
-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
-
-#if defined(CONFIG_PTP_1588_CLOCK) || defined(CONFIG_PTP_1588_CLOCK_MODULE)
-#define HAVE_IXGBE_PTP
-#endif
-
-#include "ixgbe_api.h"
-
-#define PFX "ixgbe: "
-#define DPRINTK(nlevel, klevel, fmt, args...) \
- ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
- printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
- __func__ , ## args)))
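/*
 * Editorial usage note for DPRINTK above: inside a function with a local
 * "adapter" pointer, DPRINTK(LINK, INFO, "NIC Link is Up\n") expands to a
 * printk(KERN_INFO "ixgbe: ...") that only fires when the NETIF_MSG_LINK
 * bit is set in adapter->msg_enable.
 */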
-
-/* TX/RX descriptor defines */
-#define IXGBE_DEFAULT_TXD 512
-#define IXGBE_DEFAULT_TX_WORK 256
-#define IXGBE_MAX_TXD 4096
-#define IXGBE_MIN_TXD 64
-
-#define IXGBE_DEFAULT_RXD 512
-#define IXGBE_DEFAULT_RX_WORK 256
-#define IXGBE_MAX_RXD 4096
-#define IXGBE_MIN_RXD 64
-
-
-/* flow control */
-#define IXGBE_MIN_FCRTL 0x40
-#define IXGBE_MAX_FCRTL 0x7FF80
-#define IXGBE_MIN_FCRTH 0x600
-#define IXGBE_MAX_FCRTH 0x7FFF0
-#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
-#define IXGBE_MIN_FCPAUSE 0
-#define IXGBE_MAX_FCPAUSE 0xFFFF
-
-/* Supported Rx Buffer Sizes */
-#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
-#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
-#define IXGBE_RXBUFFER_1536 1536
-#define IXGBE_RXBUFFER_2K 2048
-#define IXGBE_RXBUFFER_3K 3072
-#define IXGBE_RXBUFFER_4K 4096
-#define IXGBE_RXBUFFER_7K 7168
-#define IXGBE_RXBUFFER_8K 8192
-#define IXGBE_RXBUFFER_15K 15360
-#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */
-#define IXGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */
-
-/*
- * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
- * reserve 2 more, and skb_shared_info adds another 384 bytes;
- * this adds up to roughly 512 bytes of overhead, so the smallest
- * allocation we can get is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
- */
-#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
-
-#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
-
-/* How many Rx buffers do we bundle into one write to the hardware? */
-#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-
-#define IXGBE_TX_FLAGS_CSUM (u32)(1)
-#define IXGBE_TX_FLAGS_HW_VLAN (u32)(1 << 1)
-#define IXGBE_TX_FLAGS_SW_VLAN (u32)(1 << 2)
-#define IXGBE_TX_FLAGS_TSO (u32)(1 << 3)
-#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
-#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
-#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
-#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8)
-#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
-#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
-#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
-#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
-
-#define IXGBE_MAX_RX_DESC_POLL 10
-
-#define IXGBE_MAX_VF_MC_ENTRIES 30
-#define IXGBE_MAX_VF_FUNCTIONS 64
-#define IXGBE_MAX_VFTA_ENTRIES 128
-#define MAX_EMULATION_MAC_ADDRS 16
-#define IXGBE_MAX_PF_MACVLANS 15
-#define IXGBE_82599_VF_DEVICE_ID 0x10ED
-#define IXGBE_X540_VF_DEVICE_ID 0x1515
-
-#ifdef CONFIG_PCI_IOV
-#define VMDQ_P(p) ((p) + adapter->num_vfs)
-#else
-#define VMDQ_P(p) (p)
-#endif
-
-#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
- { \
- u32 current_counter = IXGBE_READ_REG(hw, reg); \
- if (current_counter < last_counter) \
- counter += 0x100000000LL; \
- last_counter = current_counter; \
- counter &= 0xFFFFFFFF00000000LL; \
- counter |= current_counter; \
- }
-
-#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
- { \
- u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
- u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
- u64 current_counter = (current_counter_msb << 32) | \
- current_counter_lsb; \
- if (current_counter < last_counter) \
- counter += 0x1000000000LL; \
- last_counter = current_counter; \
- counter &= 0xFFFFFFF000000000LL; \
- counter |= current_counter; \
- }
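/*
 * Editorial worked example of the 32-bit wrap handling above: suppose
 * last_counter = 0xFFFFFFF0 and the 64-bit software counter holds
 * 0x1FFFFFFF0. The hardware register wraps and now reads 0x00000010:
 *   current (0x10) < last (0xFFFFFFF0)  ->  counter += 1ULL << 32
 *   counter = 0x2FFFFFFF0; keep the high half, splice in the low half:
 *   counter = (counter & 0xFFFFFFFF00000000) | 0x10 = 0x200000010
 * i.e. the 0x20 events that straddled the wrap are all accounted for.
 */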
-
-struct vf_stats {
- u64 gprc;
- u64 gorc;
- u64 gptc;
- u64 gotc;
- u64 mprc;
-};
-
-struct vf_data_storage {
- unsigned char vf_mac_addresses[ETH_ALEN];
- u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
- u16 num_vf_mc_hashes;
- u16 default_vf_vlan_id;
- u16 vlans_enabled;
- bool clear_to_send;
- struct vf_stats vfstats;
- struct vf_stats last_vfstats;
- struct vf_stats saved_rst_vfstats;
- bool pf_set_mac;
- u16 pf_vlan; /* When set, guest VLAN config not allowed. */
- u16 pf_qos;
- u16 tx_rate;
- u16 vlan_count;
- u8 spoofchk_enabled;
- struct pci_dev *vfdev;
-};
-
-struct vf_macvlans {
- struct list_head l;
- int vf;
- bool free;
- bool is_macvlan;
- u8 vf_macvlan[ETH_ALEN];
-};
-
-#ifndef IXGBE_NO_LRO
-#define IXGBE_LRO_MAX 32	/* Maximum number of LRO descriptors */
-#define IXGBE_LRO_GLOBAL 10
-
-struct ixgbe_lro_stats {
- u32 flushed;
- u32 coal;
-};
-
-/*
- * ixgbe_lro_header - header format to be aggregated by LRO
- * @iph: IP header without options
- * @tcp: TCP header
- * @ts: Optional TCP timestamp data in TCP options
- *
- * This structure relies on the check above that verifies that the header
- * is IPv4 and does not contain any options.
- */
-struct ixgbe_lrohdr {
- struct iphdr iph;
- struct tcphdr th;
- __be32 ts[0];
-};
-
-struct ixgbe_lro_list {
- struct sk_buff_head active;
- struct ixgbe_lro_stats stats;
-};
-
-#endif /* IXGBE_NO_LRO */
-#define IXGBE_MAX_TXD_PWR 14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
-#ifdef MAX_SKB_FRAGS
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
-#else
-#define DESC_NEEDED 4
-#endif
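/*
 * Editorial worked example: on a 4 KiB-page kernel, TXD_USE_COUNT(PAGE_SIZE)
 * = DIV_ROUND_UP(4096, 16384) = 1, and MAX_SKB_FRAGS is commonly 18
 * (65536/PAGE_SIZE + 2), so DESC_NEEDED = 18 * 1 + 4 = 22: one descriptor
 * per 16 KiB chunk of every fragment, plus slack for the linear skb head
 * and a context descriptor.
 */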
-
-/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
-struct ixgbe_tx_buffer {
- union ixgbe_adv_tx_desc *next_to_watch;
- unsigned long time_stamp;
- struct sk_buff *skb;
- unsigned int bytecount;
- unsigned short gso_segs;
- __be16 protocol;
- DEFINE_DMA_UNMAP_ADDR(dma);
- DEFINE_DMA_UNMAP_LEN(len);
- u32 tx_flags;
-};
-
-struct ixgbe_rx_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
-#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
- struct page *page;
- unsigned int page_offset;
-#endif
-};
-
-struct ixgbe_queue_stats {
- u64 packets;
- u64 bytes;
-};
-
-struct ixgbe_tx_queue_stats {
- u64 restart_queue;
- u64 tx_busy;
- u64 tx_done_old;
-};
-
-struct ixgbe_rx_queue_stats {
- u64 rsc_count;
- u64 rsc_flush;
- u64 non_eop_descs;
- u64 alloc_rx_page_failed;
- u64 alloc_rx_buff_failed;
- u64 csum_err;
-};
-
-enum ixgbe_ring_state_t {
- __IXGBE_TX_FDIR_INIT_DONE,
- __IXGBE_TX_DETECT_HANG,
- __IXGBE_HANG_CHECK_ARMED,
- __IXGBE_RX_RSC_ENABLED,
-#ifndef HAVE_NDO_SET_FEATURES
- __IXGBE_RX_CSUM_ENABLED,
-#endif
- __IXGBE_RX_CSUM_UDP_ZERO_ERR,
-#ifdef IXGBE_FCOE
- __IXGBE_RX_FCOE_BUFSZ,
-#endif
-};
-
-#define check_for_tx_hang(ring) \
- test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
- set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
- clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
-#ifndef IXGBE_NO_HW_RSC
-#define ring_is_rsc_enabled(ring) \
- test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
-#else
-#define ring_is_rsc_enabled(ring) false
-#endif
-#define set_ring_rsc_enabled(ring) \
- set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
-#define clear_ring_rsc_enabled(ring) \
- clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
-#define netdev_ring(ring) (ring->netdev)
-#define ring_queue_index(ring) (ring->queue_index)
-
-
-struct ixgbe_ring {
- struct ixgbe_ring *next; /* pointer to next ring in q_vector */
- struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
- struct net_device *netdev; /* netdev ring belongs to */
- struct device *dev; /* device for DMA mapping */
- void *desc; /* descriptor ring memory */
- union {
- struct ixgbe_tx_buffer *tx_buffer_info;
- struct ixgbe_rx_buffer *rx_buffer_info;
- };
- unsigned long state;
- u8 __iomem *tail;
- dma_addr_t dma; /* phys. address of descriptor ring */
- unsigned int size; /* length in bytes */
-
-	u16 count;			/* number of descriptors */
-
- u8 queue_index; /* needed for multiqueue queue management */
- u8 reg_idx; /* holds the special value that gets
- * the hardware register offset
- * associated with this ring, which is
- * different for DCB and RSS modes
- */
- u16 next_to_use;
- u16 next_to_clean;
-
- union {
-#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
- u16 rx_buf_len;
-#else
- u16 next_to_alloc;
-#endif
- struct {
- u8 atr_sample_rate;
- u8 atr_count;
- };
- };
-
- u8 dcb_tc;
- struct ixgbe_queue_stats stats;
- union {
- struct ixgbe_tx_queue_stats tx_stats;
- struct ixgbe_rx_queue_stats rx_stats;
- };
-} ____cacheline_internodealigned_in_smp;
-
-enum ixgbe_ring_f_enum {
- RING_F_NONE = 0,
- RING_F_VMDQ, /* SR-IOV uses the same ring feature */
- RING_F_RSS,
- RING_F_FDIR,
-#ifdef IXGBE_FCOE
- RING_F_FCOE,
-#endif /* IXGBE_FCOE */
- RING_F_ARRAY_SIZE /* must be last in enum set */
-};
-
-#define IXGBE_MAX_DCB_INDICES 8
-#define IXGBE_MAX_RSS_INDICES 16
-#define IXGBE_MAX_VMDQ_INDICES 64
-#define IXGBE_MAX_FDIR_INDICES 64
-#ifdef IXGBE_FCOE
-#define IXGBE_MAX_FCOE_INDICES 8
-#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#else
-#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
-#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
-#endif /* IXGBE_FCOE */
-struct ixgbe_ring_feature {
- int indices;
- int mask;
-};
-
-#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
-/*
- * FCoE requires that all Rx buffers be over 2200 bytes in length. Since
- * half of a 4 KiB page is only 2048 bytes, the page order is doubled
- * for FCoE-enabled Rx queues.
- */
-#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
-static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
-{
- return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
-}
-#else
-#define ixgbe_rx_pg_order(_ring) 0
-#endif
-#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
-#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
-
-#endif
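/*
 * Editorial example: on a 4 KiB-page system with FCoE active on a ring
 * (__IXGBE_RX_FCOE_BUFSZ set), ixgbe_rx_pg_order() returns 1, so
 * ixgbe_rx_pg_size() = 4096 << 1 = 8192 (an order-1 page pair) and
 * ixgbe_rx_bufsz()   = 2048 << 1 = 4096, comfortably above the 2200-byte
 * FCoE minimum; non-FCoE rings stay at order 0 with 2 KiB buffers.
 */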
-struct ixgbe_ring_container {
- struct ixgbe_ring *ring; /* pointer to linked list of rings */
- unsigned int total_bytes; /* total bytes processed this int */
- unsigned int total_packets; /* total packets processed this int */
- u16 work_limit; /* total work allowed per interrupt */
- u8 count; /* total number of rings in vector */
- u8 itr; /* current ITR setting for ring */
-};
-
-/* iterator for handling rings in ring container */
-#define ixgbe_for_each_ring(pos, head) \
- for (pos = (head).ring; pos != NULL; pos = pos->next)
-
-#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
- ? 8 : 1)
-#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
-
-/* MAX_MSIX_Q_VECTORS of these are allocated,
- * but we only use one per queue-specific vector.
- */
-struct ixgbe_q_vector {
- struct ixgbe_adapter *adapter;
- int cpu; /* CPU for DCA */
- u16 v_idx; /* index of q_vector within array, also used for
- * finding the bit in EICR and friends that
- * represents the vector for this ring */
- u16 itr; /* Interrupt throttle rate written to EITR */
- struct ixgbe_ring_container rx, tx;
-
-#ifdef CONFIG_IXGBE_NAPI
- struct napi_struct napi;
-#endif
-#ifndef HAVE_NETDEV_NAPI_LIST
- struct net_device poll_dev;
-#endif
-#ifdef HAVE_IRQ_AFFINITY_HINT
- cpumask_t affinity_mask;
-#endif
-#ifndef IXGBE_NO_LRO
-	struct ixgbe_lro_list lrolist;   /* LRO list for queue vector */
-#endif
- int numa_node;
- char name[IFNAMSIZ + 9];
-
- /* for dynamic allocation of rings associated with this q_vector */
- struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
-};
-
-/*
- * microsecond values for various ITR rates shifted by 2 to fit itr register
- * with the first 3 bits reserved 0
- */
-#define IXGBE_MIN_RSC_ITR 24
-#define IXGBE_100K_ITR 40
-#define IXGBE_20K_ITR 200
-#define IXGBE_16K_ITR 248
-#define IXGBE_10K_ITR 400
-#define IXGBE_8K_ITR 500
-
-/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
-static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
- const u32 stat_err_bits)
-{
- return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
-}
-
-/* ixgbe_desc_unused - calculate if we have unused descriptors */
-static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
-{
- u16 ntc = ring->next_to_clean;
- u16 ntu = ring->next_to_use;
-
- return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
-}
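/*
 * Editorial worked example for ixgbe_desc_unused() on a 512-entry ring:
 * with next_to_clean = 10 and next_to_use = 500 (ntc <= ntu) the result
 * is 512 + 10 - 500 - 1 = 21 free slots; after the indices wrap, say
 * next_to_clean = 500 and next_to_use = 10 (ntc > ntu), the result is
 * 0 + 500 - 10 - 1 = 489. The "- 1" keeps one slot permanently unused so
 * a full ring can be told apart from an empty one.
 */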
-
-#define IXGBE_RX_DESC(R, i) \
- (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
-#define IXGBE_TX_DESC(R, i) \
- (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
-#define IXGBE_TX_CTXTDESC(R, i) \
- (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
-
-#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
-#ifdef IXGBE_FCOE
-/* use 3K as the baby jumbo frame size for FCoE */
-#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072
-#endif /* IXGBE_FCOE */
-
-#define TCP_TIMER_VECTOR 0
-#define OTHER_VECTOR 1
-#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR)
-
-#define IXGBE_MAX_MSIX_Q_VECTORS_82599 64
-#define IXGBE_MAX_MSIX_Q_VECTORS_82598 16
-
-struct ixgbe_mac_addr {
- u8 addr[ETH_ALEN];
- u16 queue;
- u16 state; /* bitmask */
-};
-#define IXGBE_MAC_STATE_DEFAULT 0x1
-#define IXGBE_MAC_STATE_MODIFIED 0x2
-#define IXGBE_MAC_STATE_IN_USE 0x4
-
-#ifdef IXGBE_PROCFS
-struct ixgbe_therm_proc_data {
- struct ixgbe_hw *hw;
- struct ixgbe_thermal_diode_data *sensor_data;
-};
-
-#endif /* IXGBE_PROCFS */
-
-/*
- * Only for array allocations in our adapter struct. On 82598, there will be
- * unused entries in the array, but that's not a big deal. Also, in 82599,
- * we can actually assign 64 queue vectors based on our extended-extended
- * interrupt registers. This is different than 82598, which is limited to 16.
- */
-#define MAX_MSIX_Q_VECTORS IXGBE_MAX_MSIX_Q_VECTORS_82599
-#define MAX_MSIX_COUNT IXGBE_MAX_MSIX_VECTORS_82599
-
-#define MIN_MSIX_Q_VECTORS 1
-#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
-
-/* default to trying for four seconds */
-#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
-
-/* board specific private data structure */
-struct ixgbe_adapter {
-#ifdef NETIF_F_HW_VLAN_TX
-#ifdef HAVE_VLAN_RX_REGISTER
- struct vlan_group *vlgrp; /* must be first, see ixgbe_receive_skb */
-#else
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-#endif
-#endif /* NETIF_F_HW_VLAN_TX */
- /* OS defined structs */
- struct net_device *netdev;
- struct pci_dev *pdev;
-
- unsigned long state;
-
- /* Some features need tri-state capability,
- * thus the additional *_CAPABLE flags.
- */
- u32 flags;
-#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0)
-#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
-#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2)
-#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
-#ifndef IXGBE_NO_LLI
-#define IXGBE_FLAG_LLI_PUSH (u32)(1 << 4)
-#endif
-#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 8)
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
-#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 9)
-#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 10)
-#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)(1 << 11)
-#else
-#define IXGBE_FLAG_DCA_ENABLED (u32)0
-#define IXGBE_FLAG_DCA_CAPABLE (u32)0
-#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)0
-#endif
-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 12)
-#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 13)
-#define IXGBE_FLAG_DCB_CAPABLE (u32)(1 << 14)
-#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 15)
-#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 16)
-#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 18)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 19)
-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 20)
-#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 21)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 22)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 23)
-#ifdef IXGBE_FCOE
-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 24)
-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 25)
-#endif /* IXGBE_FCOE */
-#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 26)
-#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 27)
-#define IXGBE_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 28)
-#define IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 29)
-#define IXGBE_FLAG_SRIOV_L2LOOPBACK_ENABLE (u32)(1 << 30)
-#define IXGBE_FLAG_RX_BB_CAPABLE (u32)(1 << 31)
-
- u32 flags2;
-#ifndef IXGBE_NO_HW_RSC
-#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
-#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
-#else
-#define IXGBE_FLAG2_RSC_CAPABLE 0
-#define IXGBE_FLAG2_RSC_ENABLED 0
-#endif
-#define IXGBE_FLAG2_VMDQ_DEFAULT_OVERRIDE (u32)(1 << 2)
-#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 4)
-#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 5)
-#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 6)
-#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 7)
-#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 8)
-#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 9)
-#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 10)
-#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 11)
-#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED (u32)(1 << 12)
-
- /* Tx fast path data */
- int num_tx_queues;
- u16 tx_itr_setting;
- u16 tx_work_limit;
-
- /* Rx fast path data */
- int num_rx_queues;
- u16 rx_itr_setting;
- u16 rx_work_limit;
-
- /* TX */
- struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
-
- u64 restart_queue;
- u64 lsc_int;
- u32 tx_timeout_count;
-
- /* RX */
- struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
- int num_rx_pools; /* == num_rx_queues in 82598 */
- int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
- u64 hw_csum_rx_error;
- u64 hw_rx_no_dma_resources;
- u64 rsc_total_count;
- u64 rsc_total_flush;
- u64 non_eop_descs;
-#ifndef CONFIG_IXGBE_NAPI
- u64 rx_dropped_backlog; /* count drops from rx intr handler */
-#endif
- u32 alloc_rx_page_failed;
- u32 alloc_rx_buff_failed;
-
- struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
-
-#ifdef HAVE_DCBNL_IEEE
- struct ieee_pfc *ixgbe_ieee_pfc;
- struct ieee_ets *ixgbe_ieee_ets;
-#endif
- struct ixgbe_dcb_config dcb_cfg;
- struct ixgbe_dcb_config temp_dcb_cfg;
- u8 dcb_set_bitmap;
- u8 dcbx_cap;
-#ifndef HAVE_MQPRIO
- u8 tc;
-#endif
- enum ixgbe_fc_mode last_lfc_mode;
-
- int num_msix_vectors;
- int max_msix_q_vectors; /* true count of q_vectors for device */
- struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
- struct msix_entry *msix_entries;
-
-#ifndef HAVE_NETDEV_STATS_IN_NETDEV
- struct net_device_stats net_stats;
-#endif
-#ifndef IXGBE_NO_LRO
- struct ixgbe_lro_stats lro_stats;
-#endif
-
-#ifdef ETHTOOL_TEST
- u32 test_icr;
- struct ixgbe_ring test_tx_ring;
- struct ixgbe_ring test_rx_ring;
-#endif
-
- /* structs defined in ixgbe_hw.h */
- struct ixgbe_hw hw;
- u16 msg_enable;
- struct ixgbe_hw_stats stats;
-#ifndef IXGBE_NO_LLI
- u32 lli_port;
- u32 lli_size;
- u32 lli_etype;
- u32 lli_vlan_pri;
-#endif /* IXGBE_NO_LLI */
-
- u32 *config_space;
- u64 tx_busy;
- unsigned int tx_ring_count;
- unsigned int rx_ring_count;
-
- u32 link_speed;
- bool link_up;
- unsigned long link_check_timeout;
-
- struct timer_list service_timer;
- struct work_struct service_task;
-
- struct hlist_head fdir_filter_list;
- unsigned long fdir_overflow; /* number of times ATR was backed off */
- union ixgbe_atr_input fdir_mask;
- int fdir_filter_count;
- u32 fdir_pballoc;
- u32 atr_sample_rate;
- spinlock_t fdir_perfect_lock;
-
-#ifdef IXGBE_FCOE
- struct ixgbe_fcoe fcoe;
-#endif /* IXGBE_FCOE */
- u32 wol;
-
- u16 bd_number;
-
- char eeprom_id[32];
- u16 eeprom_cap;
- bool netdev_registered;
- u32 interrupt_event;
-#ifdef HAVE_ETHTOOL_SET_PHYS_ID
- u32 led_reg;
-#endif
-
- DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
- unsigned int num_vfs;
- struct vf_data_storage *vfinfo;
- int vf_rate_link_speed;
- struct vf_macvlans vf_mvs;
- struct vf_macvlans *mv_list;
-#ifdef CONFIG_PCI_IOV
- u32 timer_event_accumulator;
- u32 vferr_refcount;
-#endif
- struct ixgbe_mac_addr *mac_table;
-#ifdef IXGBE_SYSFS
- struct kobject *info_kobj;
- struct kobject *therm_kobj[IXGBE_MAX_SENSORS];
-#else /* IXGBE_SYSFS */
-#ifdef IXGBE_PROCFS
- struct proc_dir_entry *eth_dir;
- struct proc_dir_entry *info_dir;
- struct proc_dir_entry *therm_dir[IXGBE_MAX_SENSORS];
- struct ixgbe_therm_proc_data therm_data[IXGBE_MAX_SENSORS];
-#endif /* IXGBE_PROCFS */
-#endif /* IXGBE_SYSFS */
-};
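The `flags` and `flags2` fields above are plain u32 bit masks, so feature state is tested and toggled with ordinary bitwise idioms. A minimal sketch using the flag names defined in the struct; the helper itself is illustrative, not part of the driver:

    /* Illustrative only: test an adapter feature bit. */
    static inline bool ixgbe_msix_enabled(const struct ixgbe_adapter *adapter)
    {
    	return !!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED);
    }

    /* Setting and clearing follow the same pattern, e.g. during probe/reset:
     *   adapter->flags  |=  IXGBE_FLAG_RSS_ENABLED;
     *   adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
     */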
-
-struct ixgbe_fdir_filter {
- struct hlist_node fdir_node;
- union ixgbe_atr_input filter;
- u16 sw_idx;
- u16 action;
-};
-
-enum ixgbe_state_t {
- __IXGBE_TESTING,
- __IXGBE_RESETTING,
- __IXGBE_DOWN,
- __IXGBE_SERVICE_SCHED,
- __IXGBE_IN_SFP_INIT,
-};
-
-struct ixgbe_cb {
-#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
- union { /* Union defining head/tail partner */
- struct sk_buff *head;
- struct sk_buff *tail;
- };
-#endif
- dma_addr_t dma;
-#ifndef IXGBE_NO_LRO
- __be32 tsecr; /* timestamp echo response */
- u32 tsval; /* timestamp value in host order */
- u32 next_seq; /* next expected sequence number */
- u16 free; /* 65521 minus total size */
- u16 mss; /* size of data portion of packet */
-#endif /* IXGBE_NO_LRO */
-#ifdef HAVE_VLAN_RX_REGISTER
- u16 vid; /* VLAN tag */
-#endif
- u16 append_cnt; /* number of skb's appended */
-#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
- bool page_released;
-#endif
-};
-#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
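IXGBE_CB() reinterprets the skb control buffer (`skb->cb`, a 48-byte scratch area in mainline kernels) as the driver-private struct ixgbe_cb declared above, which is why that struct has to stay small. A hypothetical usage sketch; `dev`, `dma_addr`, and `len` are stand-ins, not names from this driver:

    /* Hypothetical: stash per-packet DMA state in skb->cb via IXGBE_CB().
     * Valid only while sizeof(struct ixgbe_cb) <= sizeof(skb->cb). */
    IXGBE_CB(skb)->dma = dma_addr;            /* record the mapping        */
    /* ... later, when the packet is consumed ... */
    dma_unmap_single(dev, IXGBE_CB(skb)->dma, /* read it back to undo it   */
    		 len, DMA_FROM_DEVICE);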
-
-#ifdef IXGBE_SYSFS
-void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
-int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
-#endif /* IXGBE_SYSFS */
-#ifdef IXGBE_PROCFS
-void ixgbe_procfs_exit(struct ixgbe_adapter *adapter);
-int ixgbe_procfs_init(struct ixgbe_adapter *adapter);
-int ixgbe_procfs_topdir_init(void);
-void ixgbe_procfs_topdir_exit(void);
-#endif /* IXGBE_PROCFS */
-
-extern struct dcbnl_rtnl_ops dcbnl_ops;
-extern int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max);
-
-extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
-
-/* needed by ixgbe_main.c */
-extern int ixgbe_validate_mac_addr(u8 *mc_addr);
-extern void ixgbe_check_options(struct ixgbe_adapter *adapter);
-extern void ixgbe_assign_netdev_ops(struct net_device *netdev);
-
-/* needed by ixgbe_ethtool.c */
-extern char ixgbe_driver_name[];
-extern const char ixgbe_driver_version[];
-
-extern void ixgbe_up(struct ixgbe_adapter *adapter);
-extern void ixgbe_down(struct ixgbe_adapter *adapter);
-extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
-extern void ixgbe_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,
- struct ixgbe_ring *);
-extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,
- struct ixgbe_ring *);
-extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
-extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern bool ixgbe_is_ixgbe(struct pci_dev *pcidev);
-extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
- struct ixgbe_adapter *,
- struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
- struct ixgbe_tx_buffer *);
-extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
-extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *);
-extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *);
-extern void ixgbe_set_rx_mode(struct net_device *netdev);
-extern int ixgbe_write_mc_addr_list(struct net_device *netdev);
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
-#ifdef IXGBE_FCOE
-extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
-#endif /* IXGBE_FCOE */
-extern void ixgbe_do_reset(struct net_device *netdev);
-extern void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector);
-extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *);
-extern void ixgbe_vlan_stripping_enable(struct ixgbe_adapter *adapter);
-extern void ixgbe_vlan_stripping_disable(struct ixgbe_adapter *adapter);
-#ifdef ETHTOOL_OPS_COMPAT
-extern int ethtool_ioctl(struct ifreq *ifr);
-#endif
-
-#ifdef IXGBE_FCOE
-extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
- struct ixgbe_tx_buffer *first,
- u8 *hdr_len);
-extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb);
-extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
- struct scatterlist *sgl, unsigned int sgc);
-#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
-extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
- struct scatterlist *sgl, unsigned int sgc);
-#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */
-extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
-#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE
-extern int ixgbe_fcoe_enable(struct net_device *netdev);
-extern int ixgbe_fcoe_disable(struct net_device *netdev);
-#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */
-#ifdef CONFIG_DCB
-#ifdef HAVE_DCBNL_OPS_GETAPP
-extern u8 ixgbe_fcoe_getapp(struct net_device *netdev);
-#endif /* HAVE_DCBNL_OPS_GETAPP */
-extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
-#endif /* CONFIG_DCB */
-#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN
-extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
-#endif
-#endif /* IXGBE_FCOE */
-
-#ifdef CONFIG_DCB
-#ifdef HAVE_DCBNL_IEEE
-s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame);
-#endif /* HAVE_DCBNL_IEEE */
-#endif /* CONFIG_DCB */
-
-extern void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring);
-extern int ixgbe_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd);
-extern int ixgbe_write_uc_addr_list(struct ixgbe_adapter *adapter,
- struct net_device *netdev, unsigned int vfn);
-extern void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
-extern int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
- u8 *addr, u16 queue);
-extern int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
- u8 *addr, u16 queue);
-extern int ixgbe_available_rars(struct ixgbe_adapter *adapter);
-#ifndef HAVE_VLAN_RX_REGISTER
-extern void ixgbe_vlan_mode(struct net_device *, u32);
-#endif
-#ifndef ixgbe_get_netdev_tc_txq
-#define ixgbe_get_netdev_tc_txq(dev, tc) (&dev->tc_to_txq[tc])
-#endif
-extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
-#endif /* _IXGBE_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c
deleted file mode 100755
index 24015844..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c
+++ /dev/null
@@ -1,1296 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "ixgbe_type.h"
-#include "ixgbe_82598.h"
-#include "ixgbe_api.h"
-#include "ixgbe_common.h"
-#include "ixgbe_phy.h"
-
-static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
-static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
-static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete);
-static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed, bool *link_up,
- bool link_up_wait_to_complete);
-static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
-static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
-static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
-static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
- u32 headroom, int strategy);
-
-/**
- * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
- * @hw: pointer to the HW structure
- *
- * The defaults for 82598 should be in the range of 50us to 50ms;
- * however, the hardware default for these parts is 500us to 1ms, which is
- * less than the 10ms recommended by the PCIe spec. To address this we need
- * to increase the value to either 10ms to 250ms for capability version 1
- * config, or 16ms to 55ms for version 2.
- **/
-void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
-{
- u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
- u16 pcie_devctl2;
-
- /* only take action if timeout value is defaulted to 0 */
- if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
- goto out;
-
- /*
- * if the capabilities version is type 1 we can write the
- * timeout of 10ms to 250ms through the GCR register
- */
- if (!(gcr & IXGBE_GCR_CAP_VER2)) {
- gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
- goto out;
- }
-
- /*
- * for version 2 capabilities we need to write the config space
- * directly in order to set the completion timeout value to the
- * 16ms to 55ms range
- */
- pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
- pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
- IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
-out:
- /* disable completion timeout resend */
- gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
- IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
-}
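In summary, the function takes one of three paths and always clears the resend bit (a reading aid only; the behavior is exactly the code above):

    GCR timeout field already non-zero    -> leave the configured timeout alone
    capability version 1 (CAP_VER2 clear) -> set the 10ms-250ms range via GCR
    capability version 2 (CAP_VER2 set)   -> set the 16ms-55ms range via PCIe
                                             config space (Device Control 2)
    all paths                             -> clear IXGBE_GCR_CMPL_TMOUT_RESEND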
-
-/**
- * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
- * @hw: pointer to hardware structure
- *
- * Initialize the function pointers and assign the MAC type for 82598.
- * Does not touch the hardware.
- **/
-s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
-{
- struct ixgbe_mac_info *mac = &hw->mac;
- struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
-
- ret_val = ixgbe_init_phy_ops_generic(hw);
- ret_val = ixgbe_init_ops_generic(hw);
-
- /* PHY */
- phy->ops.init = &ixgbe_init_phy_ops_82598;
-
- /* MAC */
- mac->ops.start_hw = &ixgbe_start_hw_82598;
- mac->ops.reset_hw = &ixgbe_reset_hw_82598;
- mac->ops.get_media_type = &ixgbe_get_media_type_82598;
- mac->ops.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_82598;
- mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
- mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
- mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
-
- /* RAR, Multicast, VLAN */
- mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
- mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
- mac->ops.set_vfta = &ixgbe_set_vfta_82598;
- mac->ops.set_vlvf = NULL;
- mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
-
- /* Flow Control */
- mac->ops.fc_enable = &ixgbe_fc_enable_82598;
-
- mac->mcft_size = 128;
- mac->vft_size = 128;
- mac->num_rar_entries = 16;
- mac->rx_pb_size = 512;
- mac->max_tx_queues = 32;
- mac->max_rx_queues = 64;
- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
-
- /* SFP+ Module */
- phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
-
- /* Link */
- mac->ops.check_link = &ixgbe_check_mac_link_82598;
- mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
- mac->ops.flap_tx_laser = NULL;
- mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
- mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
-
- /* Manageability interface */
- mac->ops.set_fw_drv_ver = NULL;
-
- return ret_val;
-}
-
-/**
- * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
- * @hw: pointer to hardware structure
- *
- * Initialize any function pointers that were not able to be
- * set during init_shared_code because the PHY/SFP type was
- * not known. Perform the SFP init if necessary.
- *
- **/
-s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
-{
- struct ixgbe_mac_info *mac = &hw->mac;
- struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
- u16 list_offset, data_offset;
-
- /* Identify the PHY */
- phy->ops.identify(hw);
-
- /* Overwrite the link function pointers if copper PHY */
- if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
- mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
- mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_generic;
- }
-
- switch (hw->phy.type) {
- case ixgbe_phy_tn:
- phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
- phy->ops.check_link = &ixgbe_check_phy_link_tnx;
- phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
- break;
- case ixgbe_phy_nl:
- phy->ops.reset = &ixgbe_reset_phy_nl;
-
- /* Call SFP+ identify routine to get the SFP+ module type */
- ret_val = phy->ops.identify_sfp(hw);
- if (ret_val != 0)
- goto out;
- else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
- ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
- goto out;
- }
-
- /* Check to see if SFP+ module is supported */
- ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
- &list_offset,
- &data_offset);
- if (ret_val != 0) {
- ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
- goto out;
- }
- break;
- default:
- break;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
- * @hw: pointer to hardware structure
- *
- * Starts the hardware using the generic start_hw function.
- * Disables relaxed ordering, then sets the PCIe completion timeout.
- *
- **/
-s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
-{
- u32 regval;
- u32 i;
- s32 ret_val = 0;
-
- ret_val = ixgbe_start_hw_generic(hw);
-
- /* Disable relaxed ordering */
- for (i = 0; ((i < hw->mac.max_tx_queues) &&
- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
- }
-
- for (i = 0; ((i < hw->mac.max_rx_queues) &&
- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
- IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
- }
-
- /* set the completion timeout for interface */
- if (ret_val == 0)
- ixgbe_set_pcie_completion_timeout(hw);
-
- return ret_val;
-}
-
-/**
- * ixgbe_get_link_capabilities_82598 - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: boolean auto-negotiation value
- *
- * Determines the link capabilities by reading the AUTOC register.
- **/
-static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
-{
- s32 status = 0;
- u32 autoc = 0;
-
- /*
- * Determine link capabilities based on the stored value of AUTOC,
- * which represents EEPROM defaults. If AUTOC value has not been
- * stored, use the current register value.
- */
- if (hw->mac.orig_link_settings_stored)
- autoc = hw->mac.orig_autoc;
- else
- autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-
- switch (autoc & IXGBE_AUTOC_LMS_MASK) {
- case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- *autoneg = false;
- break;
-
- case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- *autoneg = false;
- break;
-
- case IXGBE_AUTOC_LMS_1G_AN:
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- *autoneg = true;
- break;
-
- case IXGBE_AUTOC_LMS_KX4_AN:
- case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
- *speed = IXGBE_LINK_SPEED_UNKNOWN;
- if (autoc & IXGBE_AUTOC_KX4_SUPP)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (autoc & IXGBE_AUTOC_KX_SUPP)
- *speed |= IXGBE_LINK_SPEED_1GB_FULL;
- *autoneg = true;
- break;
-
- default:
- status = IXGBE_ERR_LINK_SETUP;
- break;
- }
-
- return status;
-}
-
-/**
- * ixgbe_get_media_type_82598 - Determines media type
- * @hw: pointer to hardware structure
- *
- * Returns the media type (fiber, copper, backplane)
- **/
-static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
-{
- enum ixgbe_media_type media_type;
-
- /* Detect if there is a copper PHY attached. */
- switch (hw->phy.type) {
- case ixgbe_phy_cu_unknown:
- case ixgbe_phy_tn:
- media_type = ixgbe_media_type_copper;
- goto out;
- default:
- break;
- }
-
- /* Media type for I82598 is based on device ID */
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82598:
- case IXGBE_DEV_ID_82598_BX:
- /* Default device ID is mezzanine card KX/KX4 */
- media_type = ixgbe_media_type_backplane;
- break;
- case IXGBE_DEV_ID_82598AF_DUAL_PORT:
- case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
- case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
- case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
- case IXGBE_DEV_ID_82598EB_XF_LR:
- case IXGBE_DEV_ID_82598EB_SFP_LOM:
- media_type = ixgbe_media_type_fiber;
- break;
- case IXGBE_DEV_ID_82598EB_CX4:
- case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
- media_type = ixgbe_media_type_cx4;
- break;
- case IXGBE_DEV_ID_82598AT:
- case IXGBE_DEV_ID_82598AT2:
- media_type = ixgbe_media_type_copper;
- break;
- default:
- media_type = ixgbe_media_type_unknown;
- break;
- }
-out:
- return media_type;
-}
-
-/**
- * ixgbe_fc_enable_82598 - Enable flow control
- * @hw: pointer to hardware structure
- *
- * Enable flow control according to the current settings.
- **/
-s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
-{
- s32 ret_val = 0;
- u32 fctrl_reg;
- u32 rmcs_reg;
- u32 reg;
- u32 fcrtl, fcrth;
- u32 link_speed = 0;
- int i;
- bool link_up;
-
- /* Validate the water mark configuration */
- if (!hw->fc.pause_time) {
- ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
- goto out;
- }
-
- /* Low water mark of zero causes XOFF floods */
- for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
- if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
- hw->fc.high_water[i]) {
- if (!hw->fc.low_water[i] ||
- hw->fc.low_water[i] >= hw->fc.high_water[i]) {
- hw_dbg(hw, "Invalid water mark configuration\n");
- ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
- goto out;
- }
- }
- }
-
- /*
- * On 82598, having Rx FC on causes resets while doing 1G,
- * so if it's on, turn it off once we know link_speed. For
- * more details see the 82598 Specification Update.
- */
- hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
- if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
- switch (hw->fc.requested_mode) {
- case ixgbe_fc_full:
- hw->fc.requested_mode = ixgbe_fc_tx_pause;
- break;
- case ixgbe_fc_rx_pause:
- hw->fc.requested_mode = ixgbe_fc_none;
- break;
- default:
- /* no change */
- break;
- }
- }
-
- /* Negotiate the fc mode to use */
- ixgbe_fc_autoneg(hw);
-
- /* Disable any previous flow control settings */
- fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
-
- rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
- rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
-
- /*
- * The possible values of fc.current_mode are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames,
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but
- * we do not support receiving pause frames).
- * 3: Both Rx and Tx flow control (symmetric) are enabled.
- * other: Invalid.
- */
- switch (hw->fc.current_mode) {
- case ixgbe_fc_none:
- /*
- * Flow control is disabled by software override or autoneg.
- * The code below will actually disable it in the HW.
- */
- break;
- case ixgbe_fc_rx_pause:
- /*
- * Rx Flow control is enabled and Tx Flow control is
- * disabled by software override. Since there really
- * isn't a way to advertise that we are capable of RX
- * Pause ONLY, we will advertise that we support both
- * symmetric and asymmetric Rx PAUSE. Later, we will
- * disable the adapter's ability to send PAUSE frames.
- */
- fctrl_reg |= IXGBE_FCTRL_RFCE;
- break;
- case ixgbe_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is
- * disabled by software override.
- */
- rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
- break;
- case ixgbe_fc_full:
- /* Flow control (both Rx and Tx) is enabled by SW override. */
- fctrl_reg |= IXGBE_FCTRL_RFCE;
- rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
- break;
- default:
- hw_dbg(hw, "Flow control param set incorrectly\n");
- ret_val = IXGBE_ERR_CONFIG;
- goto out;
- }
-
- /* Set 802.3x based flow control settings. */
- fctrl_reg |= IXGBE_FCTRL_DPF;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
- IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
-
- /* Set up and enable Rx high/low water mark thresholds, enable XON. */
- for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
- if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
- hw->fc.high_water[i]) {
- fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
- fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
- } else {
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
- }
- }
-
- /* Configure pause time (2 TCs per register) */
- reg = hw->fc.pause_time * 0x00010001;
- for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
- IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
-
- /* Configure flow control refresh threshold value */
- IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
-
-out:
- return ret_val;
-}
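Two of the encodings above are worth spelling out. FCTTV holds the pause time for two traffic classes per 32-bit register, which is what the multiply by 0x00010001 achieves: pause_time = 0x0680, for example, yields 0x0680 * 0x00010001 = 0x06800680, the same 16-bit value replicated into both halves, so four writes cover all eight traffic classes. The water marks are shifted left by 10 (multiplied by 1024) before the XON/FC-enable bits are OR'd in; assuming the marks are kept in KB as in the generic ixgbe code, this converts them to the byte units FCRTL/FCRTH expect, e.g. low_water = 64 gives 64 << 10 = 0x10000 bytes.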
-
-/**
- * ixgbe_start_mac_link_82598 - Configures MAC link settings
- * @hw: pointer to hardware structure
- *
- * Configures link settings based on values in the ixgbe_hw struct.
- * Restarts the link. Performs autonegotiation if needed.
- **/
-static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
-{
- u32 autoc_reg;
- u32 links_reg;
- u32 i;
- s32 status = 0;
-
- /* Restart link */
- autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- autoc_reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
-
- /* Only poll for autoneg to complete if specified to do so */
- if (autoneg_wait_to_complete) {
- if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
- IXGBE_AUTOC_LMS_KX4_AN ||
- (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
- IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
- links_reg = 0; /* Just in case Autoneg time = 0 */
- for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
- links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
- if (links_reg & IXGBE_LINKS_KX_AN_COMP)
- break;
- msleep(100);
- }
- if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
- status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
- hw_dbg(hw, "Autonegotiation did not complete.\n");
- }
- }
- }
-
- /* Add delay to filter out noise during initial link setup */
- msleep(50);
-
- return status;
-}
-
-/**
- * ixgbe_validate_link_ready - Function looks for phy link
- * @hw: pointer to hardware structure
- *
- * Function indicates success when phy link is available. If phy is not ready
- * within 5 seconds of MAC indicating link, the function returns an error.
- **/
-static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
-{
- u32 timeout;
- u16 an_reg;
-
- if (hw->device_id != IXGBE_DEV_ID_82598AT2)
- return 0;
-
- for (timeout = 0;
- timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
-
- if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
- (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
- break;
-
- msleep(100);
- }
-
- if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
- hw_dbg(hw, "Link was indicated but link is down\n");
- return IXGBE_ERR_LINK_SETUP;
- }
-
- return 0;
-}
-
-/**
- * ixgbe_check_mac_link_82598 - Get link/speed status
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @link_up: true if link is up, false otherwise
- * @link_up_wait_to_complete: bool used to wait for link up or not
- *
- * Reads the links register to determine if link is up and the current speed
- **/
-static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed, bool *link_up,
- bool link_up_wait_to_complete)
-{
- u32 links_reg;
- u32 i;
- u16 link_reg, adapt_comp_reg;
-
- /*
- * SERDES PHY requires us to read link status from undocumented
- * register 0xC79F. Bit 0 set indicates link is up/ready; clear
- * indicates link down. 0xC00C is read to check that the XAUI lanes
- * are active. Bit 0 clear indicates active; set indicates inactive.
- */
- if (hw->phy.type == ixgbe_phy_nl) {
- hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
- hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
- hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
- &adapt_comp_reg);
- if (link_up_wait_to_complete) {
- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
- if ((link_reg & 1) &&
- ((adapt_comp_reg & 1) == 0)) {
- *link_up = true;
- break;
- } else {
- *link_up = false;
- }
- msleep(100);
- hw->phy.ops.read_reg(hw, 0xC79F,
- IXGBE_TWINAX_DEV,
- &link_reg);
- hw->phy.ops.read_reg(hw, 0xC00C,
- IXGBE_TWINAX_DEV,
- &adapt_comp_reg);
- }
- } else {
- if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
- *link_up = true;
- else
- *link_up = false;
- }
-
- if (*link_up == false)
- goto out;
- }
-
- links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
- if (link_up_wait_to_complete) {
- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
- if (links_reg & IXGBE_LINKS_UP) {
- *link_up = true;
- break;
- } else {
- *link_up = false;
- }
- msleep(100);
- links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
- }
- } else {
- if (links_reg & IXGBE_LINKS_UP)
- *link_up = true;
- else
- *link_up = false;
- }
-
- if (links_reg & IXGBE_LINKS_SPEED)
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- else
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
-
- if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
- (ixgbe_validate_link_ready(hw) != 0))
- *link_up = false;
-
-out:
- return 0;
-}
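A note on the duplicated read of register 0xC79F above: link-status bits of this kind are typically latched, so the first read returns (and clears) the stale latched value and the second read reflects the current state. Since the register is undocumented, that is an inference from common MDIO practice, not a vendor statement.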
-
-/**
- * ixgbe_setup_mac_link_82598 - Set MAC link speed
- * @hw: pointer to hardware structure
- * @speed: new link speed
- * @autoneg: true if autonegotiation enabled
- * @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- * Set the link speed in the AUTOC register and restarts link.
- **/
-static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
-{
- s32 status = 0;
- ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
- u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- u32 autoc = curr_autoc;
- u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
-
- /* Check to see if speed passed in is supported. */
- ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
- speed &= link_capabilities;
-
- if (speed == IXGBE_LINK_SPEED_UNKNOWN)
- status = IXGBE_ERR_LINK_SETUP;
-
- /* Set KX4/KX support according to speed requested */
- else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
- link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
- autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- autoc |= IXGBE_AUTOC_KX4_SUPP;
- if (speed & IXGBE_LINK_SPEED_1GB_FULL)
- autoc |= IXGBE_AUTOC_KX_SUPP;
- if (autoc != curr_autoc)
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
- }
-
- if (status == 0) {
- /*
- * Set up and restart the link based on the new values in
- * ixgbe_hw. This will write the AUTOC register based on the
- * new stored values.
- */
- status = ixgbe_start_mac_link_82598(hw,
- autoneg_wait_to_complete);
- }
-
- return status;
-}
-
-
-/**
- * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
- * @hw: pointer to hardware structure
- * @speed: new link speed
- * @autoneg: true if autonegotiation enabled
- * @autoneg_wait_to_complete: true if waiting is needed to complete
- *
- * Sets the link speed in the AUTOC register in the MAC and restarts link.
- **/
-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete)
-{
- s32 status;
-
- /* Setup the PHY according to input speed */
- status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
- autoneg_wait_to_complete);
- /* Set up MAC */
- ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
-
- return status;
-}
-
-/**
- * ixgbe_reset_hw_82598 - Performs hardware reset
- * @hw: pointer to hardware structure
- *
- * Resets the hardware by resetting the transmit and receive units, masking
- * and clearing all interrupts, performing a PHY reset, and performing a link
- * (MAC) reset.
- **/
-static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
-{
- s32 status = 0;
- s32 phy_status = 0;
- u32 ctrl;
- u32 gheccr;
- u32 i;
- u32 autoc;
- u8 analog_val;
-
- /* Call adapter stop to disable tx/rx and clear interrupts */
- status = hw->mac.ops.stop_adapter(hw);
- if (status != 0)
- goto reset_hw_out;
-
- /*
- * Power up the Atlas Tx lanes if they are currently powered down.
- * Atlas Tx lanes are powered down for MAC loopback tests, but
- * they are not automatically restored on reset.
- */
- hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
- if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
- /* Enable Tx Atlas so packets can be transmitted again */
- hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
- &analog_val);
- analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
- hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
- analog_val);
-
- hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
- &analog_val);
- analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
- hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
- analog_val);
-
- hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
- &analog_val);
- analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
- hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
- analog_val);
-
- hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
- &analog_val);
- analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
- hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
- analog_val);
- }
-
- /* Reset PHY */
- if (hw->phy.reset_disable == false) {
- /* PHY ops must be identified and initialized prior to reset */
-
- /* Init PHY and function pointers, perform SFP setup */
- phy_status = hw->phy.ops.init(hw);
- if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
- goto reset_hw_out;
- if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
- goto mac_reset_top;
-
- hw->phy.ops.reset(hw);
- }
-
-mac_reset_top:
- /*
- * Issue global reset to the MAC. This needs to be a SW reset.
- * If link reset is used, it might reset the MAC when mng is using it
- */
- ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
- IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
- IXGBE_WRITE_FLUSH(hw);
-
- /* Poll for reset bit to self-clear indicating reset is complete */
- for (i = 0; i < 10; i++) {
- udelay(1);
- ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- if (!(ctrl & IXGBE_CTRL_RST))
- break;
- }
- if (ctrl & IXGBE_CTRL_RST) {
- status = IXGBE_ERR_RESET_FAILED;
- hw_dbg(hw, "Reset polling failed to complete.\n");
- }
-
- msleep(50);
-
- /*
- * Double resets are required for recovery from certain error
- * conditions. Between resets, it is necessary to stall to allow time
- * for any pending HW events to complete.
- */
- if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
- hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
- goto mac_reset_top;
- }
-
- gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
- gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
- IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
-
- /*
- * Store the original AUTOC value if it has not been
- * stored off yet. Otherwise restore the stored original
- * AUTOC value, since the reset operation sets it back to defaults.
- */
- autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- if (hw->mac.orig_link_settings_stored == false) {
- hw->mac.orig_autoc = autoc;
- hw->mac.orig_link_settings_stored = true;
- } else if (autoc != hw->mac.orig_autoc) {
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
- }
-
- /* Store the permanent mac address */
- hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
- /*
- * Store MAC address from RAR0, clear receive address registers, and
- * clear the multicast table
- */
- hw->mac.ops.init_rx_addrs(hw);
-
-reset_hw_out:
- if (phy_status != 0)
- status = phy_status;
-
- return status;
-}
-
-/**
- * ixgbe_set_vmdq_82598 - Associate a VMDq set index with an rx address
- * @hw: pointer to hardware struct
- * @rar: receive address register index to associate with a VMDq index
- * @vmdq: VMDq set index
- **/
-s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
- u32 rar_high;
- u32 rar_entries = hw->mac.num_rar_entries;
-
- /* Make sure we are using a valid rar index range */
- if (rar >= rar_entries) {
- hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
- }
-
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
- rar_high &= ~IXGBE_RAH_VIND_MASK;
- rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
- return 0;
-}
-
-/**
- * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
- * @hw: pointer to hardware struct
- * @rar: receive address register index to associate with a VMDq index
- * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
- **/
-static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
- u32 rar_high;
- u32 rar_entries = hw->mac.num_rar_entries;
-
- /* Make sure we are using a valid rar index range */
- if (rar >= rar_entries) {
- hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
- }
-
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
- if (rar_high & IXGBE_RAH_VIND_MASK) {
- rar_high &= ~IXGBE_RAH_VIND_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
- }
-
- return 0;
-}
-
-/**
- * ixgbe_set_vfta_82598 - Set VLAN filter table
- * @hw: pointer to hardware structure
- * @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFTA
- * @vlan_on: boolean flag to turn on/off VLAN in VFTA
- *
- * Turn on/off specified VLAN in the VLAN filter table.
- **/
-s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
-{
- u32 regindex;
- u32 bitindex;
- u32 bits;
- u32 vftabyte;
-
- if (vlan > 4095)
- return IXGBE_ERR_PARAM;
-
- /* Determine 32-bit word position in array */
- regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
-
- /* Determine the location of the (VMD) queue index */
- vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
- bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
-
- /* Set the nibble for VMD queue index */
- bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
- bits &= (~(0x0F << bitindex));
- bits |= (vind << bitindex);
- IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
-
- /* Determine the location of the bit for this VLAN id */
- bitindex = vlan & 0x1F; /* lower five bits */
-
- bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
- if (vlan_on)
- /* Turn on this VLAN id */
- bits |= (1 << bitindex);
- else
- /* Turn off this VLAN id */
- bits &= ~(1 << bitindex);
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
-
- return 0;
-}
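A worked example of the indexing above, for VLAN id 1234 (0x4D2):

    regindex = (1234 >> 5) & 0x7F  = 38   /* 32 VLAN ids per VFTA register */
    bitindex =  1234 & 0x1F        = 18   /* valid bit 18 of VFTA(38)      */
    vftabyte = (1234 >> 3) & 0x3   = 2    /* which VFTAVIND byte array     */
    nibble   = (1234 & 0x7) << 2   = 8    /* vind nibble starts at bit 8   */

so the 4-bit VMDq index lands in bits 11:8 of VFTAVIND(2, 38), and the VLAN-valid bit in bit 18 of VFTA(38).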
-
-/**
- * ixgbe_clear_vfta_82598 - Clear VLAN filter table
- * @hw: pointer to hardware structure
- *
- * Clears the VLAN filter table, and the VMDq index associated with the filter
- **/
-static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
-{
- u32 offset;
- u32 vlanbyte;
-
- for (offset = 0; offset < hw->mac.vft_size; offset++)
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
-
- for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
- for (offset = 0; offset < hw->mac.vft_size; offset++)
- IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
- 0);
-
- return 0;
-}
-
-/**
- * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
- * @hw: pointer to hardware structure
- * @reg: analog register to read
- * @val: read value
- *
- * Performs read operation to Atlas analog register specified.
- **/
-s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
-{
- u32 atlas_ctl;
-
- IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
- IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
- IXGBE_WRITE_FLUSH(hw);
- udelay(10);
- atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
- *val = (u8)atlas_ctl;
-
- return 0;
-}
-
-/**
- * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
- * @hw: pointer to hardware structure
- * @reg: atlas register to write
- * @val: value to write
- *
- * Performs write operation to Atlas analog register specified.
- **/
-s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
-{
- u32 atlas_ctl;
-
- atlas_ctl = (reg << 8) | val;
- IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
- IXGBE_WRITE_FLUSH(hw);
- udelay(10);
-
- return 0;
-}
-
-/**
- * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
- * @hw: pointer to hardware structure
- * @byte_offset: EEPROM byte offset to read
- * @eeprom_data: value read
- *
- * Performs an 8 bit read operation to the SFP module's EEPROM over the I2C
- * interface.
- **/
-s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data)
-{
- s32 status = 0;
- u16 sfp_addr = 0;
- u16 sfp_data = 0;
- u16 sfp_stat = 0;
- u32 i;
-
- if (hw->phy.type == ixgbe_phy_nl) {
- /*
- * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
- * 0xC30D. These registers are used to talk to the SFP+
- * module's EEPROM through the SDA/SCL (I2C) interface.
- */
- sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
- sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
- hw->phy.ops.write_reg(hw,
- IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- sfp_addr);
-
- /* Poll status */
- for (i = 0; i < 100; i++) {
- hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &sfp_stat);
- sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
- if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
- break;
- msleep(10);
- }
-
- if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
- hw_dbg(hw, "EEPROM read did not pass.\n");
- status = IXGBE_ERR_SFP_NOT_PRESENT;
- goto out;
- }
-
- /* Read data */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
-
- *eeprom_data = (u8)(sfp_data >> 8);
- } else {
- status = IXGBE_ERR_PHY;
- goto out;
- }
-
-out:
- return status;
-}
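The address word handed to the PHY is a straightforward composition: the SFP EEPROM's I2C device address in the high byte, the byte offset in the low byte, plus the read flag. Assuming the conventional SFP EEPROM address 0xA0 for IXGBE_I2C_EEPROM_DEV_ADDR, a read of byte_offset 3 builds sfp_addr = (0xA0 << 8) + 3 = 0xA003 before IXGBE_I2C_EEPROM_READ_MASK is OR'd in; the byte read back arrives in the upper half of the 16-bit data register, hence the final `>> 8`.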
-
-/**
- * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
- * @hw: pointer to hardware structure
- *
- * Determines physical layer capabilities of the current configuration.
- **/
-u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
-{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
- u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
- u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
- u16 ext_ability = 0;
-
- hw->phy.ops.identify(hw);
-
- /* Copper PHY must be checked before AUTOC LMS to determine correct
- * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
- switch (hw->phy.type) {
- case ixgbe_phy_tn:
- case ixgbe_phy_cu_unknown:
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
- if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
- physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
- if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
- physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
- if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
- physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
- goto out;
- default:
- break;
- }
-
- switch (autoc & IXGBE_AUTOC_LMS_MASK) {
- case IXGBE_AUTOC_LMS_1G_AN:
- case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
- if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
- physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
- else
- physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
- break;
- case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
- if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
- else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
- else /* XAUI */
- physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
- break;
- case IXGBE_AUTOC_LMS_KX4_AN:
- case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
- if (autoc & IXGBE_AUTOC_KX_SUPP)
- physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
- if (autoc & IXGBE_AUTOC_KX4_SUPP)
- physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
- break;
- default:
- break;
- }
-
- if (hw->phy.type == ixgbe_phy_nl) {
- hw->phy.ops.identify_sfp(hw);
-
- switch (hw->phy.sfp_type) {
- case ixgbe_sfp_type_da_cu:
- physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
- break;
- case ixgbe_sfp_type_sr:
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
- break;
- case ixgbe_sfp_type_lr:
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
- break;
- default:
- physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
- break;
- }
- }
-
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
- physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
- break;
- case IXGBE_DEV_ID_82598AF_DUAL_PORT:
- case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
- case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
- break;
- case IXGBE_DEV_ID_82598EB_XF_LR:
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
- break;
- default:
- break;
- }
-
-out:
- return physical_layer;
-}
-
-/**
- * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
- * port devices.
- * @hw: pointer to the HW structure
- *
- * Calls common function and corrects issue with some single port devices
- * that enable LAN1 but not LAN0.
- **/
-void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
-{
- struct ixgbe_bus_info *bus = &hw->bus;
- u16 pci_gen = 0;
- u16 pci_ctrl2 = 0;
-
- ixgbe_set_lan_id_multi_port_pcie(hw);
-
- /* check if LAN0 is disabled */
- hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
- if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
-
- hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
-
- /* if LAN0 is completely disabled force function to 0 */
- if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
- !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
- !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
-
- bus->func = 0;
- }
- }
-}
-
-/**
- * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
- * @hw: pointer to hardware structure
- * @num_pb: number of packet buffers to allocate
- * @headroom: reserve n KB of headroom
- * @strategy: packet buffer allocation strategy
- **/
-static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
- u32 headroom, int strategy)
-{
- u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
- u8 i = 0;
-
- if (!num_pb)
- return;
-
- /* Setup Rx packet buffer sizes */
- switch (strategy) {
- case PBA_STRATEGY_WEIGHTED:
- /* Setup the first four at 80KB */
- rxpktsize = IXGBE_RXPBSIZE_80KB;
- for (; i < 4; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
- /* Setup the last four at 48KB...don't re-init i */
- rxpktsize = IXGBE_RXPBSIZE_48KB;
- /* Fall Through */
- case PBA_STRATEGY_EQUAL:
- default:
- /* Divide the remaining Rx packet buffer evenly among the TCs */
- for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
- break;
- }
-
- /* Setup Tx packet buffer sizes */
- for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
- IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
-
- return;
-}
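The arithmetic behind the two strategies: PBA_STRATEGY_WEIGHTED gives the first four of the eight packet buffers 80 KB each and the remaining four 48 KB each, so 4 * 80 + 4 * 48 = 512 KB; PBA_STRATEGY_EQUAL gives all eight 64 KB, so 8 * 64 = 512 KB. Either way the total matches the 512 KB rx_pb_size advertised by ixgbe_init_ops_82598 above, while the Tx side is always sized at 8 * 40 KB.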
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.h
deleted file mode 100755
index c6abb020..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IXGBE_82598_H_
-#define _IXGBE_82598_H_
-
-u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
-s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
-s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
-s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
-s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data);
-u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
-s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
-void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
-void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
-#endif /* _IXGBE_82598_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c
deleted file mode 100755
index 1ad4b769..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c
+++ /dev/null
@@ -1,2314 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "ixgbe_type.h"
-#include "ixgbe_82599.h"
-#include "ixgbe_api.h"
-#include "ixgbe_common.h"
-#include "ixgbe_phy.h"
-
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
-static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
- u16 offset, u16 *data);
-static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data);
-static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data);
-
-void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
-{
- struct ixgbe_mac_info *mac = &hw->mac;
-
- /* enable the laser control functions for SFP+ fiber */
- if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
- mac->ops.disable_tx_laser =
- &ixgbe_disable_tx_laser_multispeed_fiber;
- mac->ops.enable_tx_laser =
- &ixgbe_enable_tx_laser_multispeed_fiber;
- mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
-
- } else {
- mac->ops.disable_tx_laser = NULL;
- mac->ops.enable_tx_laser = NULL;
- mac->ops.flap_tx_laser = NULL;
- }
-
- if (hw->phy.multispeed_fiber) {
- /* Set up dual speed SFP+ support */
- mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
- } else {
- if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
- (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
- hw->phy.smart_speed == ixgbe_smart_speed_on) &&
- !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
- } else {
- mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
- }
- }
-}
-
-/**
- * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
- * @hw: pointer to hardware structure
- *
- * Initialize any function pointers that were not able to be
- * set during init_shared_code because the PHY/SFP type was
- * not known. Perform the SFP init if necessary.
- *
- **/
-s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
-{
- struct ixgbe_mac_info *mac = &hw->mac;
- struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
- u32 esdp;
-
- if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
- /* Store flag indicating I2C bus access control unit. */
- hw->phy.qsfp_shared_i2c_bus = TRUE;
-
- /* Initialize access to QSFP+ I2C bus */
- esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
- esdp |= IXGBE_ESDP_SDP0_DIR;
- esdp &= ~IXGBE_ESDP_SDP1_DIR;
- esdp &= ~IXGBE_ESDP_SDP0;
- esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
- esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
- IXGBE_WRITE_FLUSH(hw);
-
- phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599;
- phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599;
- }
- /* Identify the PHY or SFP module */
- ret_val = phy->ops.identify(hw);
- if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
- goto init_phy_ops_out;
-
- /* Setup function pointers based on detected SFP module and speeds */
- ixgbe_init_mac_link_ops_82599(hw);
- if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
- hw->phy.ops.reset = NULL;
-
- /* If copper media, overwrite with copper function pointers */
- if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
- mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
- mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_generic;
- }
-
- /* Set necessary function pointers based on phy type */
- switch (hw->phy.type) {
- case ixgbe_phy_tn:
- phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
- phy->ops.check_link = &ixgbe_check_phy_link_tnx;
- phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
- break;
- default:
- break;
- }
-init_phy_ops_out:
- return ret_val;
-}
-
-s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
-{
- s32 ret_val = 0;
- u32 reg_anlp1 = 0;
- u32 i = 0;
- u16 list_offset, data_offset, data_value;
-
- if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
- ixgbe_init_mac_link_ops_82599(hw);
-
- hw->phy.ops.reset = NULL;
-
- ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
- &data_offset);
- if (ret_val != 0)
- goto setup_sfp_out;
-
- /* PHY config will finish before releasing the semaphore */
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val != 0) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
- goto setup_sfp_out;
- }
-
- hw->eeprom.ops.read(hw, ++data_offset, &data_value);
- while (data_value != 0xffff) {
- IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
- IXGBE_WRITE_FLUSH(hw);
- hw->eeprom.ops.read(hw, ++data_offset, &data_value);
- }
-
- /* Release the semaphore */
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
- /* Delay obtaining semaphore again to allow FW access */
- msleep(hw->eeprom.semaphore_delay);
-
- /* Now restart DSP by setting Restart_AN and clearing LMS */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
- IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
- IXGBE_AUTOC_AN_RESTART));
-
- /* Wait for AN to leave state 0 */
- for (i = 0; i < 10; i++) {
- msleep(4);
- reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
- if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
- break;
- }
- if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
- hw_dbg(hw, "sfp module setup not complete\n");
- ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
- goto setup_sfp_out;
- }
-
- /* Restart DSP by setting Restart_AN and return to SFI mode */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
- IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
- IXGBE_AUTOC_AN_RESTART));
- }
-
-setup_sfp_out:
- return ret_val;
-}
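The EEPROM section consumed above is a sentinel-terminated list: starting at data_offset + 1, each 16-bit word is written verbatim to CORECTL until a 0xffff terminator is read, roughly:

    eeprom[data_offset + 1]  ->  CORECTL
    eeprom[data_offset + 2]  ->  CORECTL
    ...
    0xffff                   ->  stop

Only then is the semaphore released and autonegotiation restarted.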
-
-/**
- * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
- * @hw: pointer to hardware structure
- *
- * Initialize the function pointers and assign the MAC type for 82599.
- * Does not touch the hardware.
- **/
-
-s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
-{
- struct ixgbe_mac_info *mac = &hw->mac;
- struct ixgbe_phy_info *phy = &hw->phy;
- struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- s32 ret_val;
-
- ixgbe_init_phy_ops_generic(hw);
- ret_val = ixgbe_init_ops_generic(hw);
-
- /* PHY */
- phy->ops.identify = &ixgbe_identify_phy_82599;
- phy->ops.init = &ixgbe_init_phy_ops_82599;
-
- /* MAC */
- mac->ops.reset_hw = &ixgbe_reset_hw_82599;
- mac->ops.get_media_type = &ixgbe_get_media_type_82599;
- mac->ops.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_82599;
- mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
- mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
- mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
- mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
- mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
- mac->ops.start_hw = &ixgbe_start_hw_82599;
- mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
- mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
- mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
- mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
- mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
-
- /* RAR, Multicast, VLAN */
- mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
- mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
- mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
- mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
- mac->rar_highwater = 1;
- mac->ops.set_vfta = &ixgbe_set_vfta_generic;
- mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
- mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
- mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
- mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
- mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
- mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
-
- /* Link */
- mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
- mac->ops.check_link = &ixgbe_check_mac_link_generic;
- mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
- ixgbe_init_mac_link_ops_82599(hw);
-
- mac->mcft_size = 128;
- mac->vft_size = 128;
- mac->num_rar_entries = 128;
- mac->rx_pb_size = 512;
- mac->max_tx_queues = 128;
- mac->max_rx_queues = 128;
- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
-
- mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
- IXGBE_FWSM_MODE_MASK) ? true : false;
-
- //hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
-
- /* EEPROM */
- eeprom->ops.read = &ixgbe_read_eeprom_82599;
- eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
-
- /* Manageability interface */
- mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
-
- mac->ops.get_thermal_sensor_data =
- &ixgbe_get_thermal_sensor_data_generic;
- mac->ops.init_thermal_sensor_thresh =
- &ixgbe_init_thermal_sensor_thresh_generic;
-
- return ret_val;
-}
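-
-/*
- * A minimal, self-contained sketch of the ops-table pattern that
- * ixgbe_init_ops_82599() relies on: shared code touches the hardware only
- * through function pointers, the generic init fills in baseline handlers,
- * and each MAC generation overrides just the entries that differ. All
- * names below are invented for illustration; they are not driver APIs.
- */
-#include <stdio.h>
-
-struct demo_mac_ops {
-    void (*reset_hw)(void);
-    void (*start_hw)(void);
-};
-
-static void demo_reset_generic(void) { puts("generic reset"); }
-static void demo_start_generic(void) { puts("generic start"); }
-static void demo_reset_82599(void)   { puts("82599-specific reset"); }
-
-static void demo_init_ops_generic(struct demo_mac_ops *ops)
-{
-    ops->reset_hw = demo_reset_generic;   /* baseline handlers */
-    ops->start_hw = demo_start_generic;
-}
-
-static void demo_init_ops_82599(struct demo_mac_ops *ops)
-{
-    demo_init_ops_generic(ops);           /* inherit the generic table */
-    ops->reset_hw = demo_reset_82599;     /* override what 82599 changes */
-}
-
-int main(void)
-{
-    struct demo_mac_ops ops;
-
-    demo_init_ops_82599(&ops);
-    ops.reset_hw();   /* dispatches to the 82599-specific handler */
-    ops.start_hw();   /* falls back to the generic handler */
-    return 0;
-}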
-
-/**
- * ixgbe_get_link_capabilities_82599 - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @negotiation: true when autoneg or autotry is enabled
- *
- * Determines the link capabilities by reading the AUTOC register.
- **/
-s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *negotiation)
-{
- s32 status = 0;
- u32 autoc = 0;
-
- /* Check if 1G SFP module. */
- if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
- goto out;
- }
-
- /*
- * Determine link capabilities based on the stored value of AUTOC,
- * which represents EEPROM defaults. If AUTOC value has not
- * been stored, use the current register values.
- */
- if (hw->mac.orig_link_settings_stored)
- autoc = hw->mac.orig_autoc;
- else
- autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-
- switch (autoc & IXGBE_AUTOC_LMS_MASK) {
- case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = false;
- break;
-
- case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- *negotiation = false;
- break;
-
- case IXGBE_AUTOC_LMS_1G_AN:
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
- break;
-
- case IXGBE_AUTOC_LMS_10G_SERIAL:
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- *negotiation = false;
- break;
-
- case IXGBE_AUTOC_LMS_KX4_KX_KR:
- case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
- *speed = IXGBE_LINK_SPEED_UNKNOWN;
- if (autoc & IXGBE_AUTOC_KR_SUPP)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (autoc & IXGBE_AUTOC_KX4_SUPP)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (autoc & IXGBE_AUTOC_KX_SUPP)
- *speed |= IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
- break;
-
- case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
- *speed = IXGBE_LINK_SPEED_100_FULL;
- if (autoc & IXGBE_AUTOC_KR_SUPP)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (autoc & IXGBE_AUTOC_KX4_SUPP)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (autoc & IXGBE_AUTOC_KX_SUPP)
- *speed |= IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
- break;
-
- case IXGBE_AUTOC_LMS_SGMII_1G_100M:
- *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
- *negotiation = false;
- break;
-
- default:
- status = IXGBE_ERR_LINK_SETUP;
- goto out;
- }
-
- if (hw->phy.multispeed_fiber) {
- *speed |= IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
- }
-
-out:
- return status;
-}
-
-/**
- * ixgbe_get_media_type_82599 - Get media type
- * @hw: pointer to hardware structure
- *
- * Returns the media type (fiber, copper, backplane)
- **/
-enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
-{
- enum ixgbe_media_type media_type;
-
- /* Detect if there is a copper PHY attached. */
- switch (hw->phy.type) {
- case ixgbe_phy_cu_unknown:
- case ixgbe_phy_tn:
- media_type = ixgbe_media_type_copper;
- goto out;
- default:
- break;
- }
-
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82599_KX4:
- case IXGBE_DEV_ID_82599_KX4_MEZZ:
- case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
- case IXGBE_DEV_ID_82599_KR:
- case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
- case IXGBE_DEV_ID_82599_XAUI_LOM:
- /* Default device ID is mezzanine card KX/KX4 */
- media_type = ixgbe_media_type_backplane;
- break;
- case IXGBE_DEV_ID_82599_SFP:
- case IXGBE_DEV_ID_82599_SFP_FCOE:
- case IXGBE_DEV_ID_82599_SFP_EM:
- case IXGBE_DEV_ID_82599_SFP_SF2:
- case IXGBE_DEV_ID_82599EN_SFP:
- media_type = ixgbe_media_type_fiber;
- break;
- case IXGBE_DEV_ID_82599_CX4:
- media_type = ixgbe_media_type_cx4;
- break;
- case IXGBE_DEV_ID_82599_T3_LOM:
- media_type = ixgbe_media_type_copper;
- break;
- case IXGBE_DEV_ID_82599_LS:
- media_type = ixgbe_media_type_fiber_lco;
- break;
- case IXGBE_DEV_ID_82599_QSFP_SF_QP:
- media_type = ixgbe_media_type_fiber_qsfp;
- break;
- default:
- media_type = ixgbe_media_type_unknown;
- break;
- }
-out:
- return media_type;
-}
-
-/**
- * ixgbe_start_mac_link_82599 - Setup MAC link settings
- * @hw: pointer to hardware structure
- * @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- * Configures link settings based on values in the ixgbe_hw struct.
- * Restarts the link. Performs autonegotiation if needed.
- **/
-s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
-{
- u32 autoc_reg;
- u32 links_reg = 0;
- u32 i;
- s32 status = 0;
-
- /* Restart link */
- autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- autoc_reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
-
- /* Only poll for autoneg to complete if specified to do so */
- if (autoneg_wait_to_complete) {
- if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
- IXGBE_AUTOC_LMS_KX4_KX_KR ||
- (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
- IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
- (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
- IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
- for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
- links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
- if (links_reg & IXGBE_LINKS_KX_AN_COMP)
- break;
- msleep(100);
- }
- if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
- status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
- hw_dbg(hw, "Autoneg did not complete.\n");
- }
- }
- }
-
- /* Add delay to filter out noise during initial link setup */
- msleep(50);
-
- return status;
-}
-
-/**
- * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
- * @hw: pointer to hardware structure
- *
- * The base drivers may require better control over SFP+ module
- * PHY states. This includes selectively shutting down the Tx
- * laser on the PHY, effectively halting physical link.
- **/
-void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
-{
- u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
-
- /* Disable tx laser; allow 100us to go dark per spec */
- esdp_reg |= IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
- udelay(100);
-}
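-
-/*
- * A minimal sketch of the read-modify-write / flush / delay idiom used on
- * ESDP above. PCIe writes are posted, so a write can sit in a buffer; the
- * read-back ("flush") forces it to the device before the spec-mandated
- * settling delay starts. The register offset, bit value and helpers are
- * all illustrative stand-ins for IXGBE_READ_REG()/IXGBE_WRITE_REG()/
- * IXGBE_WRITE_FLUSH()/udelay(), not driver APIs.
- */
-#include <stdint.h>
-
-static uint32_t fake_regs[64];                 /* fake register file */
-
-static uint32_t mmio_read32(uint32_t reg)  { return fake_regs[reg / 4]; }
-static void mmio_write32(uint32_t reg, uint32_t val) { fake_regs[reg / 4] = val; }
-static void busy_wait_us(unsigned int us)  { (void)us; /* no-op stub */ }
-
-#define DEMO_ESDP  0x0020u     /* illustrative register offset */
-#define DEMO_SDP3  (1u << 3)   /* illustrative laser-disable bit */
-
-static void demo_set_sdp3(void)
-{
-    uint32_t esdp = mmio_read32(DEMO_ESDP);    /* read            */
-
-    esdp |= DEMO_SDP3;                         /* modify one bit  */
-    mmio_write32(DEMO_ESDP, esdp);             /* write back      */
-    (void)mmio_read32(DEMO_ESDP);              /* flush the posted write */
-    busy_wait_us(100);                         /* allow 100us to go dark */
-}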
-
-/**
- * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
- * @hw: pointer to hardware structure
- *
- * The base drivers may require better control over SFP+ module
- * PHY states. This includes selectively turning on the Tx
- * laser on the PHY, effectively starting physical link.
- **/
-void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
-{
- u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
-
- /* Enable tx laser; allow 100ms to light up */
- esdp_reg &= ~IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
- msleep(100);
-}
-
-/**
- * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
- * @hw: pointer to hardware structure
- *
- * When the driver changes the link speeds that it can support,
- * it sets autotry_restart to true to indicate that we need to
- * initiate a new autotry session with the link partner. To do
- * so, we set the speed then disable and re-enable the tx laser, to
- * alert the link partner that it also needs to restart autotry on its
- * end. This is consistent with true clause 37 autoneg, which also
- * involves a loss of signal.
- **/
-void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
-{
- if (hw->mac.autotry_restart) {
- ixgbe_disable_tx_laser_multispeed_fiber(hw);
- ixgbe_enable_tx_laser_multispeed_fiber(hw);
- hw->mac.autotry_restart = false;
- }
-}
-
-/**
- * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
- * @hw: pointer to hardware structure
- * @speed: new link speed
- * @autoneg: true if autonegotiation enabled
- * @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- * Sets the link speed in the AUTOC register and restarts the link.
- **/
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
-{
- s32 status = 0;
- ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- u32 speedcnt = 0;
- u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
- u32 i = 0;
- bool link_up = false;
- bool negotiation;
-
- /* Mask off requested but non-supported speeds */
- status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
- if (status != 0)
- return status;
-
- speed &= link_speed;
-
- /*
- * Try each speed one by one, highest priority first. We do this in
- * software because 10gb fiber doesn't support speed autonegotiation.
- */
- if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
- speedcnt++;
- highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
-
- /* If we already have link at this speed, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, false);
- if (status != 0)
- return status;
-
- if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
- goto out;
-
- /* Set the module link speed */
- esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
-
- /* Allow module to change analog characteristics (1G->10G) */
- msleep(40);
-
- status = ixgbe_setup_mac_link_82599(hw,
- IXGBE_LINK_SPEED_10GB_FULL,
- autoneg,
- autoneg_wait_to_complete);
- if (status != 0)
- return status;
-
- /* Flap the tx laser if it has not already been done */
- ixgbe_flap_tx_laser(hw);
-
- /*
- * Wait for the controller to acquire link. Per IEEE 802.3ap,
- * Section 73.10.2, we may have to wait up to 500ms if KR is
- * attempted. 82599 uses the same timing for 10g SFI.
- */
- for (i = 0; i < 5; i++) {
- /* Wait for the link partner to also set speed */
- msleep(100);
-
- /* If we have link, just jump out */
- status = ixgbe_check_link(hw, &link_speed,
- &link_up, false);
- if (status != 0)
- return status;
-
- if (link_up)
- goto out;
- }
- }
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
- speedcnt++;
- if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
- highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
-
- /* If we already have link at this speed, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, false);
- if (status != 0)
- return status;
-
- if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
- goto out;
-
- /* Set the module link speed */
- esdp_reg &= ~IXGBE_ESDP_SDP5;
- esdp_reg |= IXGBE_ESDP_SDP5_DIR;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
-
- /* Allow module to change analog characteristics (10G->1G) */
- msleep(40);
-
- status = ixgbe_setup_mac_link_82599(hw,
- IXGBE_LINK_SPEED_1GB_FULL,
- autoneg,
- autoneg_wait_to_complete);
- if (status != 0)
- return status;
-
- /* Flap the tx laser if it has not already been done */
- ixgbe_flap_tx_laser(hw);
-
- /* Wait for the link partner to also set speed */
- msleep(100);
-
- /* If we have link, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, false);
- if (status != 0)
- return status;
-
- if (link_up)
- goto out;
- }
-
- /*
- * We didn't get link. Configure back to the highest speed we tried
- * (if there was more than one), and call ourselves back with just the
- * single highest speed that the user requested.
- */
- if (speedcnt > 1)
- status = ixgbe_setup_mac_link_multispeed_fiber(hw,
- highest_link_speed, autoneg, autoneg_wait_to_complete);
-
-out:
- /* Set autoneg_advertised value based on input link speed */
- hw->phy.autoneg_advertised = 0;
-
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
-
- return status;
-}
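-
-/*
- * A compact sketch of the highest-first fallback policy implemented above:
- * try each supported rate in descending order, stop at the first rate that
- * links, and if nothing links re-apply the highest rate tried, as the tail
- * of ixgbe_setup_mac_link_multispeed_fiber() does. try_link() is a
- * hypothetical callback standing in for the setup/flap/poll sequence.
- */
-#include <stdbool.h>
-#include <stddef.h>
-#include <stdint.h>
-
-static bool demo_multispeed(bool (*try_link)(uint32_t speed_mbps))
-{
-    static const uint32_t speeds[] = { 10000, 1000 };  /* highest first */
-    size_t i;
-
-    for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++)
-        if (try_link(speeds[i]))
-            return true;           /* link came up at this rate */
-
-    /* No rate linked: configure back to the highest rate we tried. */
-    return try_link(speeds[0]);
-}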
-
-/**
- * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
- * @hw: pointer to hardware structure
- * @speed: new link speed
- * @autoneg: true if autonegotiation enabled
- * @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- * Implements the Intel SmartSpeed algorithm.
- **/
-s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
-{
- s32 status = 0;
- ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- s32 i, j;
- bool link_up = false;
- u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-
- /* Set autoneg_advertised value based on input link speed */
- hw->phy.autoneg_advertised = 0;
-
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
-
- if (speed & IXGBE_LINK_SPEED_100_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
-
- /*
- * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
- * autoneg advertisement if link is unable to be established at the
- * highest negotiated rate. This can sometimes happen due to integrity
- * issues with the physical media connection.
- */
-
- /* First, try to get link with full advertisement */
- hw->phy.smart_speed_active = false;
- for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
- status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
- autoneg_wait_to_complete);
- if (status != 0)
- goto out;
-
- /*
- * Wait for the controller to acquire link. Per IEEE 802.3ap,
- * Section 73.10.2, we may have to wait up to 500ms if KR is
- * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
- * Table 9 in the AN MAS.
- */
- for (i = 0; i < 5; i++) {
- msleep(100);
-
- /* If we have link, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up,
- false);
- if (status != 0)
- goto out;
-
- if (link_up)
- goto out;
- }
- }
-
- /*
- * We didn't get link. If we advertised KR plus one of KX4/KX
- * (or BX4/BX), then disable KR and try again.
- */
- if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
- ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
- goto out;
-
- /* Turn SmartSpeed on to disable KR support */
- hw->phy.smart_speed_active = true;
- status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
- autoneg_wait_to_complete);
- if (status != 0)
- goto out;
-
- /*
- * Wait for the controller to acquire link. 600ms will allow for
- * the AN link_fail_inhibit_timer as well for multiple cycles of
- * parallel detect, both 10g and 1g. This allows for the maximum
- * connect attempts as defined in the AN MAS table 73-7.
- */
- for (i = 0; i < 6; i++) {
- msleep(100);
-
- /* If we have link, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, false);
- if (status != 0)
- goto out;
-
- if (link_up)
- goto out;
- }
-
- /* We didn't get link. Turn SmartSpeed back off. */
- hw->phy.smart_speed_active = false;
- status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
- autoneg_wait_to_complete);
-
-out:
- if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
- hw_dbg(hw, "Smartspeed has downgraded the link speed "
- "from the maximum advertised\n");
- return status;
-}
-
-/**
- * ixgbe_setup_mac_link_82599 - Set MAC link speed
- * @hw: pointer to hardware structure
- * @speed: new link speed
- * @autoneg: true if autonegotiation enabled
- * @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- * Sets the link speed in the AUTOC register and restarts the link.
- **/
-s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
-{
- s32 status = 0;
- u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- u32 start_autoc = autoc;
- u32 orig_autoc = 0;
- u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
- u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
- u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
- u32 links_reg = 0;
- u32 i;
- ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
-
- /* Check to see if speed passed in is supported. */
- status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
- if (status != 0)
- goto out;
-
- speed &= link_capabilities;
-
- if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
- status = IXGBE_ERR_LINK_SETUP;
- goto out;
- }
-
- /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
- if (hw->mac.orig_link_settings_stored)
- orig_autoc = hw->mac.orig_autoc;
- else
- orig_autoc = autoc;
-
- if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
- link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
- link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
- /* Set KX4/KX/KR support according to speed requested */
- autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
- autoc |= IXGBE_AUTOC_KX4_SUPP;
- if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
- (hw->phy.smart_speed_active == false))
- autoc |= IXGBE_AUTOC_KR_SUPP;
- if (speed & IXGBE_LINK_SPEED_1GB_FULL)
- autoc |= IXGBE_AUTOC_KX_SUPP;
- } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
- (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
- link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
- /* Switch from 1G SFI to 10G SFI if requested */
- if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
- (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
- autoc &= ~IXGBE_AUTOC_LMS_MASK;
- autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
- }
- } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
- (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
- /* Switch from 10G SFI to 1G SFI if requested */
- if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
- (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
- autoc &= ~IXGBE_AUTOC_LMS_MASK;
- if (autoneg)
- autoc |= IXGBE_AUTOC_LMS_1G_AN;
- else
- autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
- }
- }
-
- if (autoc != start_autoc) {
- /* Restart link */
- autoc |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
-
- /* Only poll for autoneg to complete if specified to do so */
- if (autoneg_wait_to_complete) {
- if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
- link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
- link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
- for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
- links_reg =
- IXGBE_READ_REG(hw, IXGBE_LINKS);
- if (links_reg & IXGBE_LINKS_KX_AN_COMP)
- break;
- msleep(100);
- }
- if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
- status =
- IXGBE_ERR_AUTONEG_NOT_COMPLETE;
- hw_dbg(hw, "Autoneg did not complete.\n");
- }
- }
- }
-
- /* Add delay to filter out noise during initial link setup */
- msleep(50);
- }
-
-out:
- return status;
-}
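-
-/*
- * A small sketch of the field-update pattern used on AUTOC above: a
- * multi-bit field (here a stand-in for the link-mode-select field) is
- * cleared with its mask and then OR-ed with the new encoding, leaving all
- * other bits untouched. The mask and encoding values are illustrative;
- * only the shape of the update matters.
- */
-#include <assert.h>
-#include <stdint.h>
-
-#define DEMO_LMS_MASK        (0x7u << 13)   /* illustrative field mask */
-#define DEMO_LMS_10G_SERIAL  (0x3u << 13)   /* one encoding of that field */
-
-static uint32_t demo_set_lms(uint32_t reg, uint32_t lms)
-{
-    reg &= ~DEMO_LMS_MASK;   /* clear the whole field first */
-    reg |= lms;              /* then select the new link mode */
-    return reg;
-}
-
-int main(void)
-{
-    uint32_t reg = 0xFFFFFFFFu;
-
-    reg = demo_set_lms(reg, DEMO_LMS_10G_SERIAL);
-    /* the field holds the new value; every other bit is preserved */
-    assert((reg & DEMO_LMS_MASK) == DEMO_LMS_10G_SERIAL);
-    assert((reg | DEMO_LMS_MASK) == 0xFFFFFFFFu);
-    return 0;
-}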
-
-/**
- * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
- * @hw: pointer to hardware structure
- * @speed: new link speed
- * @autoneg: true if autonegotiation enabled
- * @autoneg_wait_to_complete: true if waiting is needed to complete
- *
- * Restarts link on PHY and MAC based on settings passed in.
- **/
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete)
-{
- s32 status;
-
- /* Setup the PHY according to input speed */
- status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
- autoneg_wait_to_complete);
- /* Set up MAC */
- ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
-
- return status;
-}
-
-/**
- * ixgbe_reset_hw_82599 - Perform hardware reset
- * @hw: pointer to hardware structure
- *
- * Resets the hardware by resetting the transmit and receive units, masks
- * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
- * reset.
- **/
-s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
-{
-// ixgbe_link_speed link_speed;
- s32 status = 0;
-// u32 ctrl, i, autoc, autoc2;
-// bool link_up = false;
-
-#if 0
- /* Call adapter stop to disable tx/rx and clear interrupts */
- status = hw->mac.ops.stop_adapter(hw);
- if (status != 0)
- goto reset_hw_out;
-
- /* flush pending Tx transactions */
- ixgbe_clear_tx_pending(hw);
-
- /* PHY ops must be identified and initialized prior to reset */
-
- /* Identify PHY and related function pointers */
- status = hw->phy.ops.init(hw);
-
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
- goto reset_hw_out;
-
- /* Setup SFP module if there is one present. */
- if (hw->phy.sfp_setup_needed) {
- status = hw->mac.ops.setup_sfp(hw);
- hw->phy.sfp_setup_needed = false;
- }
-
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
- goto reset_hw_out;
-
- /* Reset PHY */
- if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
- hw->phy.ops.reset(hw);
-
-mac_reset_top:
- /*
- * Issue global reset to the MAC. Needs to be SW reset if link is up.
- * If link reset is used when link is up, it might reset the PHY when
- * mng is using it. If link is down or the flag to force full link
- * reset is set, then perform link reset.
- */
- ctrl = IXGBE_CTRL_LNK_RST;
- if (!hw->force_full_reset) {
- hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
- if (link_up)
- ctrl = IXGBE_CTRL_RST;
- }
-
- ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
- IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
- IXGBE_WRITE_FLUSH(hw);
-
- /* Poll for reset bit to self-clear indicating reset is complete */
- for (i = 0; i < 10; i++) {
- udelay(1);
- ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- if (!(ctrl & IXGBE_CTRL_RST_MASK))
- break;
- }
-
- if (ctrl & IXGBE_CTRL_RST_MASK) {
- status = IXGBE_ERR_RESET_FAILED;
- hw_dbg(hw, "Reset polling failed to complete.\n");
- }
-
- msleep(50);
-
- /*
- * Double resets are required for recovery from certain error
- * conditions. Between resets, it is necessary to stall to allow time
- * for any pending HW events to complete.
- */
- if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
- hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
- goto mac_reset_top;
- }
-
- /*
- * Store the original AUTOC/AUTOC2 values if they have not been
- * stored off yet. Otherwise restore the stored original
- * values since the reset operation sets back to defaults.
- */
- autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- if (hw->mac.orig_link_settings_stored == false) {
- hw->mac.orig_autoc = autoc;
- hw->mac.orig_autoc2 = autoc2;
- hw->mac.orig_link_settings_stored = true;
- } else {
- if (autoc != hw->mac.orig_autoc)
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
- IXGBE_AUTOC_AN_RESTART));
-
- if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
- (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
- autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
- autoc2 |= (hw->mac.orig_autoc2 &
- IXGBE_AUTOC2_UPPER_MASK);
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
- }
- }
-#endif
-
- /* Store the permanent mac address */
- hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
- /*
- * Store MAC address from RAR0, clear receive address registers, and
- * clear the multicast table. Also reset num_rar_entries to 128,
- * since we modify this value when programming the SAN MAC address.
- */
- hw->mac.num_rar_entries = 128;
- hw->mac.ops.init_rx_addrs(hw);
-
- /* Store the permanent SAN mac address */
- hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
-
- /* Add the SAN MAC address to the RAR only if it's a valid address */
- if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
- hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
- hw->mac.san_addr, 0, IXGBE_RAH_AV);
-
- /* Save the SAN MAC RAR index */
- hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
-
- /* Reserve the last RAR for the SAN MAC address */
- hw->mac.num_rar_entries--;
- }
-
- /* Store the alternative WWNN/WWPN prefix */
- hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
- &hw->mac.wwpn_prefix);
-
-//reset_hw_out:
- return status;
-}
-
-/**
- * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
- * @hw: pointer to hardware structure
- **/
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
-{
- int i;
- u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
- fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
-
- /*
- * Before starting reinitialization process,
- * FDIRCMD.CMD must be zero.
- */
- for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
- if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
- IXGBE_FDIRCMD_CMD_MASK))
- break;
- udelay(10);
- }
- if (i >= IXGBE_FDIRCMD_CMD_POLL) {
- hw_dbg(hw, "Flow Director previous command isn't complete, "
- "aborting table re-initialization.\n");
- return IXGBE_ERR_FDIR_REINIT_FAILED;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
- IXGBE_WRITE_FLUSH(hw);
- /*
- * The flow director init flow on 82599 adapters cannot be restarted.
- * Work around the 82599 silicon errata by performing the following steps
- * before re-writing the FDIRCTRL control register with the same value:
- * - write 1 to bit 8 of the FDIRCMD register, then
- * - write 0 to bit 8 of the FDIRCMD register
- */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
- (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
- IXGBE_FDIRCMD_CLEARHT));
- IXGBE_WRITE_FLUSH(hw);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
- (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
- ~IXGBE_FDIRCMD_CLEARHT));
- IXGBE_WRITE_FLUSH(hw);
- /*
- * Clear FDIR Hash register to clear any leftover hashes
- * waiting to be programmed.
- */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
- IXGBE_WRITE_FLUSH(hw);
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
- IXGBE_WRITE_FLUSH(hw);
-
- /* Poll init-done after we write FDIRCTRL register */
- for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
- if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
- break;
- udelay(10);
- }
- if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
- hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
- return IXGBE_ERR_FDIR_REINIT_FAILED;
- }
-
- /* Clear FDIR statistics registers (read to clear) */
- IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
- IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
- IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
- IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
- IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
-
- return 0;
-}
-
-/**
- * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
- * @hw: pointer to hardware structure
- * @fdirctrl: value to write to flow director control register
- **/
-static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
-{
- int i;
-
- /* Prime the keys for hashing */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
-
- /*
- * Poll init-done after we write the register. Estimated times:
- * 10G: PBALLOC = 11b, timing is 60us
- * 1G: PBALLOC = 11b, timing is 600us
- * 100M: PBALLOC = 11b, timing is 6ms
- *
- * Multiply these timings by 4 if under full Rx load
- *
- * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
- * 1 msec per poll time. If we're at line rate and drop to 100M, then
- * this might not finish in our poll time, but we can live with that
- * for now.
- */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
- IXGBE_WRITE_FLUSH(hw);
- for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
- if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
- break;
- msleep(1);
- }
-
- if (i >= IXGBE_FDIR_INIT_DONE_POLL)
- hw_dbg(hw, "Flow Director poll time exceeded!\n");
-}
-
-/**
- * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
- * @hw: pointer to hardware structure
- * @fdirctrl: value to write to flow director control register, initially
- * contains just the value of the Rx packet buffer allocation
- **/
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
-{
- /*
- * Continue setup of fdirctrl register bits:
- * Move the flexible bytes to use the ethertype - shift 6 words
- * Set the maximum length per hash bucket to 0xA filters
- * Send interrupt when 64 filters are left
- */
- fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
- (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
- (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
-
- /* write hashes and fdirctrl register, poll for completion */
- ixgbe_fdir_enable_82599(hw, fdirctrl);
-
- return 0;
-}
-
-/**
- * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
- * @hw: pointer to hardware structure
- * @fdirctrl: value to write to flow director control register, initially
- * contains just the value of the Rx packet buffer allocation
- **/
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
-{
- /*
- * Continue setup of fdirctrl register bits:
- * Turn perfect match filtering on
- * Report hash in RSS field of Rx wb descriptor
- * Initialize the drop queue
- * Move the flexible bytes to use the ethertype - shift 6 words
- * Set the maximum length per hash bucket to 0xA filters
- * Send interrupt when 64 (0x4 * 16) filters are left
- */
- fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
- IXGBE_FDIRCTRL_REPORT_STATUS |
- (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
- (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
- (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
- (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
-
- /* write hashes and fdirctrl register, poll for completion */
- ixgbe_fdir_enable_82599(hw, fdirctrl);
-
- return 0;
-}
-
-/*
- * These defines allow us to quickly generate all of the necessary instructions
- * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
- * for values 0 through 15
- */
-#define IXGBE_ATR_COMMON_HASH_KEY \
- (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
-#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
-do { \
- u32 n = (_n); \
- if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
- common_hash ^= lo_hash_dword >> n; \
- else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
- bucket_hash ^= lo_hash_dword >> n; \
- else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
- sig_hash ^= lo_hash_dword << (16 - n); \
- if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
- common_hash ^= hi_hash_dword >> n; \
- else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
- bucket_hash ^= hi_hash_dword >> n; \
- else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
- sig_hash ^= hi_hash_dword << (16 - n); \
-} while (0);
-
-/**
- * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
- * @stream: input bitstream to compute the hash on
- *
- * This function is almost identical to the function above but contains
- * several optimizations such as unrolling all of the loops, letting the
- * compiler work out all of the conditional ifs since the keys are static
- * defines, and computing two keys at once since the hashed dword stream
- * will be the same for both keys.
- **/
-u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common)
-{
- u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
- u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
-
- /* record the flow_vm_vlan bits as they are a key part to the hash */
- flow_vm_vlan = IXGBE_NTOHL(input.dword);
-
- /* generate common hash dword */
- hi_hash_dword = IXGBE_NTOHL(common.dword);
-
- /* low dword is word swapped version of common */
- lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
-
- /* apply flow ID/VM pool/VLAN ID bits to hash words */
- hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
-
- /* Process bits 0 and 16 */
- IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
-
- /*
- * apply flow ID/VM pool/VLAN ID bits to lo hash dword; we had to
- * delay this because bit 0 of the stream should be processed without
- * the vlan bits, so we do not add the vlan until after bit 0 has been
- * processed
- */
- lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
-
- /* Process the remaining 30 bits of the key */
- IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
- IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
- IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
- IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
- IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
- IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
- IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
- IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
- IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
- IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
- IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
- IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
- IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
- IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
- IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
-
- /* combine common_hash result with signature and bucket hashes */
- bucket_hash ^= common_hash;
- bucket_hash &= IXGBE_ATR_HASH_MASK;
-
- sig_hash ^= common_hash << 16;
- sig_hash &= IXGBE_ATR_HASH_MASK << 16;
-
- /* return completed signature hash */
- return sig_hash ^ bucket_hash;
-}
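-
-/*
- * A loop-form sketch of what the unrolled IXGBE_COMPUTE_SIG_HASH_ITERATION
- * macro computes, given the final lo/hi hash dwords. The key constants
- * mirror IXGBE_ATR_BUCKET_HASH_KEY and IXGBE_ATR_SIGNATURE_HASH_KEY from
- * the ixgbe headers. One subtlety is deliberately omitted: the function
- * above runs iteration 0 before folding the VLAN bits into the low dword,
- * so this sketch shows the structure of the hash rather than being a
- * bit-exact drop-in replacement.
- */
-#include <stdint.h>
-
-#define DEMO_BUCKET_KEY  0x3DAD14E2u   /* IXGBE_ATR_BUCKET_HASH_KEY */
-#define DEMO_SIG_KEY     0x174D3614u   /* IXGBE_ATR_SIGNATURE_HASH_KEY */
-#define DEMO_COMMON_KEY  (DEMO_BUCKET_KEY & DEMO_SIG_KEY)
-
-static uint32_t demo_sig_hash(uint32_t lo_hash_dword, uint32_t hi_hash_dword)
-{
-    uint32_t sig_hash = 0, bucket_hash = 0, common_hash = 0;
-    uint32_t n;
-
-    for (n = 0; n < 16; n++) {
-        /* low dword contributes at key bit n */
-        if (DEMO_COMMON_KEY & (1u << n))
-            common_hash ^= lo_hash_dword >> n;
-        else if (DEMO_BUCKET_KEY & (1u << n))
-            bucket_hash ^= lo_hash_dword >> n;
-        else if (DEMO_SIG_KEY & (1u << n))
-            sig_hash ^= lo_hash_dword << (16 - n);
-
-        /* high dword contributes at key bit n + 16 */
-        if (DEMO_COMMON_KEY & (1u << (n + 16)))
-            common_hash ^= hi_hash_dword >> n;
-        else if (DEMO_BUCKET_KEY & (1u << (n + 16)))
-            bucket_hash ^= hi_hash_dword >> n;
-        else if (DEMO_SIG_KEY & (1u << (n + 16)))
-            sig_hash ^= hi_hash_dword << (16 - n);
-    }
-
-    /* fold the shared term into both hashes, as the function above does */
-    bucket_hash = (bucket_hash ^ common_hash) & 0x7FFFu;
-    sig_hash = (sig_hash ^ (common_hash << 16)) & (0x7FFFu << 16);
-
-    return sig_hash ^ bucket_hash;
-}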
-
-/**
- * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
- * @hw: pointer to hardware structure
- * @input: unique input dword
- * @common: compressed common input dword
- * @queue: queue index to direct traffic to
- **/
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common,
- u8 queue)
-{
- u64 fdirhashcmd;
- u32 fdircmd;
-
- /*
- * Get the flow_type in order to program FDIRCMD properly
- * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
- */
- switch (input.formatted.flow_type) {
- case IXGBE_ATR_FLOW_TYPE_TCPV4:
- case IXGBE_ATR_FLOW_TYPE_UDPV4:
- case IXGBE_ATR_FLOW_TYPE_SCTPV4:
- case IXGBE_ATR_FLOW_TYPE_TCPV6:
- case IXGBE_ATR_FLOW_TYPE_UDPV6:
- case IXGBE_ATR_FLOW_TYPE_SCTPV6:
- break;
- default:
- hw_dbg(hw, " Error on flow type input\n");
- return IXGBE_ERR_CONFIG;
- }
-
- /* configure FDIRCMD register */
- fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
- fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
- fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
-
- /*
- * The lower 32 bits of fdirhashcmd are for FDIRHASH, the upper 32 bits
- * are for FDIRCMD. Then do a 64-bit register write starting at FDIRHASH.
- */
- fdirhashcmd = (u64)fdircmd << 32;
- fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
- IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
-
- hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
-
- return 0;
-}
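-
-/*
- * A short sketch of the 64-bit register-pair write used above: per the
- * comment in the function, FDIRCMD occupies the upper half of the 64-bit
- * store that starts at FDIRHASH, so packing command and hash into one
- * value programs both in a single write. pack_hash_cmd() is illustrative,
- * standing in for the value handed to IXGBE_WRITE_REG64().
- */
-#include <assert.h>
-#include <stdint.h>
-
-static uint64_t pack_hash_cmd(uint32_t fdircmd, uint32_t fdirhash)
-{
-    uint64_t v = (uint64_t)fdircmd << 32;  /* upper 32 bits: FDIRCMD */
-
-    v |= fdirhash;                         /* lower 32 bits: FDIRHASH */
-    return v;
-}
-
-int main(void)
-{
-    uint64_t v = pack_hash_cmd(0x11223344u, 0xAABBCCDDu);
-
-    assert((uint32_t)(v >> 32) == 0x11223344u);  /* command half */
-    assert((uint32_t)v == 0xAABBCCDDu);          /* hash half */
-    return 0;
-}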
-
-#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
-do { \
- u32 n = (_n); \
- if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
- bucket_hash ^= lo_hash_dword >> n; \
- if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
- bucket_hash ^= hi_hash_dword >> n; \
-} while (0);
-
-/**
- * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
- * @atr_input: input bitstream to compute the hash on
- * @input_mask: mask for the input bitstream
- *
- * This function serves two main purposes. First, it applies the input_mask
- * to the atr_input, resulting in a cleaned-up atr_input data stream.
- * Second, it computes the hash and stores it in the bkt_hash field at
- * the end of the input byte stream. This way it will be available for
- * future use without needing to recompute the hash.
- **/
-void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
- union ixgbe_atr_input *input_mask)
-{
-
- u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
- u32 bucket_hash = 0;
-
- /* Apply masks to input data */
- input->dword_stream[0] &= input_mask->dword_stream[0];
- input->dword_stream[1] &= input_mask->dword_stream[1];
- input->dword_stream[2] &= input_mask->dword_stream[2];
- input->dword_stream[3] &= input_mask->dword_stream[3];
- input->dword_stream[4] &= input_mask->dword_stream[4];
- input->dword_stream[5] &= input_mask->dword_stream[5];
- input->dword_stream[6] &= input_mask->dword_stream[6];
- input->dword_stream[7] &= input_mask->dword_stream[7];
- input->dword_stream[8] &= input_mask->dword_stream[8];
- input->dword_stream[9] &= input_mask->dword_stream[9];
- input->dword_stream[10] &= input_mask->dword_stream[10];
-
- /* record the flow_vm_vlan bits as they are a key part to the hash */
- flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
-
- /* generate common hash dword */
- hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
- input->dword_stream[2] ^
- input->dword_stream[3] ^
- input->dword_stream[4] ^
- input->dword_stream[5] ^
- input->dword_stream[6] ^
- input->dword_stream[7] ^
- input->dword_stream[8] ^
- input->dword_stream[9] ^
- input->dword_stream[10]);
-
- /* low dword is word swapped version of common */
- lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
-
- /* apply flow ID/VM pool/VLAN ID bits to hash words */
- hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
-
- /* Process bits 0 and 16 */
- IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
-
- /*
- * apply flow ID/VM pool/VLAN ID bits to lo hash dword; we had to
- * delay this because bit 0 of the stream should be processed without
- * the vlan bits, so we do not add the vlan until after bit 0 has been
- * processed
- */
- lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
-
- /* Process the remaining 30 bits of the key */
- IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
-
- /*
- * Limit hash to 13 bits since max bucket count is 8K.
- * Store result at the end of the input stream.
- */
- input->formatted.bkt_hash = bucket_hash & 0x1FFF;
-}
-
-/**
- * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
- * @input_mask: mask to be bit swapped
- *
- * The source and destination port masks for flow director are bit swapped
- * in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13 affects bit 2,
- * and so on. In order to generate a correctly swapped value we need to bit
- * swap the mask, and that is what this function accomplishes.
- **/
-static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
-{
- u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
- mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
- mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
- mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
- mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
- mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
- return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
-}
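-
-/*
- * A standalone sketch contrasting the mask-and-shift swaps above with a
- * naive per-bit loop. Note that the function reverses the bits of each
- * 16-bit half in place (bit 15 <-> bit 0, bit 14 <-> bit 1, ...): there is
- * no final 16-bit half swap, which is exactly what the port-mask layout
- * described in the comment requires.
- */
-#include <assert.h>
-#include <stdint.h>
-
-/* Reverse bits within each 16-bit half, one bit at a time. */
-static uint32_t reverse_halves_naive(uint32_t v)
-{
-    uint32_t r = 0;
-    int i;
-
-    for (i = 0; i < 16; i++) {
-        r |= ((v >> i) & 1u) << (15 - i);          /* low half */
-        r |= ((v >> (16 + i)) & 1u) << (31 - i);   /* high half */
-    }
-    return r;
-}
-
-/* Same divide-and-conquer swaps as ixgbe_get_fdirtcpm_82599(). */
-static uint32_t reverse_halves_masked(uint32_t m)
-{
-    m = ((m & 0x55555555u) << 1) | ((m & 0xAAAAAAAAu) >> 1);
-    m = ((m & 0x33333333u) << 2) | ((m & 0xCCCCCCCCu) >> 2);
-    m = ((m & 0x0F0F0F0Fu) << 4) | ((m & 0xF0F0F0F0u) >> 4);
-    return ((m & 0x00FF00FFu) << 8) | ((m & 0xFF00FF00u) >> 8);
-}
-
-int main(void)
-{
-    assert(reverse_halves_masked(0x80018001u) ==
-           reverse_halves_naive(0x80018001u));
-    assert(reverse_halves_masked(0x00010000u) == 0x80000000u);
-    return 0;
-}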
-
-/*
- * These macros are meant to address the fact that we have registers
- * that are either all or in part big-endian. As a result on big-endian
- * systems we will end up byte swapping the value to little-endian before
- * it is byte swapped again and written to the hardware in the original
- * big-endian format.
- */
-#define IXGBE_STORE_AS_BE32(_value) \
- (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
- (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
-
-#define IXGBE_WRITE_REG_BE32(a, reg, value) \
- IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
-
-#define IXGBE_STORE_AS_BE16(_value) \
- IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
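-
-/*
- * A short sketch of what IXGBE_STORE_AS_BE32 above does: a plain 32-bit
- * byte swap. Because swapping twice is the identity, pairing it with
- * IXGBE_NTOHL (a swap on little-endian hosts, a no-op on big-endian ones)
- * leaves the register image in the big-endian layout the hardware expects
- * on either kind of host, which is the point of the comment above.
- */
-#include <assert.h>
-#include <stdint.h>
-
-static uint32_t bswap32_demo(uint32_t v)
-{
-    return (v >> 24) | ((v & 0x00FF0000u) >> 8) |
-           ((v & 0x0000FF00u) << 8) | (v << 24);
-}
-
-int main(void)
-{
-    assert(bswap32_demo(0x11223344u) == 0x44332211u);
-    assert(bswap32_demo(bswap32_demo(0xDEADBEEFu)) == 0xDEADBEEFu);
-    return 0;
-}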
-
-s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input_mask)
-{
- /* mask IPv6 since it is currently not supported */
- u32 fdirm = IXGBE_FDIRM_DIPv6;
- u32 fdirtcpm;
-
- /*
- * Program the relevant mask registers. If src/dst_port or src/dst_addr
- * are zero, then assume a full mask for that field. Also assume that
- * a VLAN of 0 is unspecified, so mask that out as well. L4type
- * cannot be masked out in this implementation.
- *
- * This also assumes IPv4 only. IPv6 masking isn't supported at this
- * point in time.
- */
-
- /* verify bucket hash is cleared on hash generation */
- if (input_mask->formatted.bkt_hash)
- hw_dbg(hw, " bucket hash should always be 0 in mask\n");
-
- /* Program FDIRM and verify partial masks */
- switch (input_mask->formatted.vm_pool & 0x7F) {
- case 0x0:
- fdirm |= IXGBE_FDIRM_POOL;
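- /* fall through */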
- case 0x7F:
- break;
- default:
- hw_dbg(hw, " Error on vm pool mask\n");
- return IXGBE_ERR_CONFIG;
- }
-
- switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
- case 0x0:
- fdirm |= IXGBE_FDIRM_L4P;
- if (input_mask->formatted.dst_port ||
- input_mask->formatted.src_port) {
- hw_dbg(hw, " Error on src/dst port mask\n");
- return IXGBE_ERR_CONFIG;
- }
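- /* fall through */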
- case IXGBE_ATR_L4TYPE_MASK:
- break;
- default:
- hw_dbg(hw, " Error on flow type mask\n");
- return IXGBE_ERR_CONFIG;
- }
-
- switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
- case 0x0000:
- /* mask VLAN ID, fall through to mask VLAN priority */
- fdirm |= IXGBE_FDIRM_VLANID;
- case 0x0FFF:
- /* mask VLAN priority */
- fdirm |= IXGBE_FDIRM_VLANP;
- break;
- case 0xE000:
- /* mask VLAN ID only, fall through */
- fdirm |= IXGBE_FDIRM_VLANID;
- case 0xEFFF:
- /* no VLAN fields masked */
- break;
- default:
- hw_dbg(hw, " Error on VLAN mask\n");
- return IXGBE_ERR_CONFIG;
- }
-
- switch (input_mask->formatted.flex_bytes & 0xFFFF) {
- case 0x0000:
- /* Mask Flex Bytes, fall through */
- fdirm |= IXGBE_FDIRM_FLEX;
- case 0xFFFF:
- break;
- default:
- hw_dbg(hw, " Error on flexible byte mask\n");
- return IXGBE_ERR_CONFIG;
- }
-
- /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
-
- /* store the TCP/UDP port masks, bit reversed from port layout */
- fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
-
- /* write both the same so that UDP and TCP use the same mask */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
-
- /* store source and destination IP masks (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
- ~input_mask->formatted.src_ip[0]);
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
- ~input_mask->formatted.dst_ip[0]);
-
- return 0;
-}
-
-s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- u16 soft_id, u8 queue)
-{
- u32 fdirport, fdirvlan, fdirhash, fdircmd;
-
- /* currently IPv6 is not supported, must be programmed with 0 */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
- input->formatted.src_ip[0]);
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
- input->formatted.src_ip[1]);
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
- input->formatted.src_ip[2]);
-
- /* record the source address (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
-
- /* record the first 32 bits of the destination address (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
-
- /* record source and destination port (little-endian) */
- fdirport = IXGBE_NTOHS(input->formatted.dst_port);
- fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
- fdirport |= IXGBE_NTOHS(input->formatted.src_port);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
-
- /* record vlan (little-endian) and flex_bytes (big-endian) */
- fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
- fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
- fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
-
- /* configure FDIRHASH register */
- fdirhash = input->formatted.bkt_hash;
- fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
-
- /*
- * flush all previous writes to make certain registers are
- * programmed prior to issuing the command
- */
- IXGBE_WRITE_FLUSH(hw);
-
- /* configure FDIRCMD register */
- fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
- if (queue == IXGBE_FDIR_DROP_QUEUE)
- fdircmd |= IXGBE_FDIRCMD_DROP;
- fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
- fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
- fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
-
- return 0;
-}
-
-s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- u16 soft_id)
-{
- u32 fdirhash;
- u32 fdircmd = 0;
- u32 retry_count;
- s32 err = 0;
-
- /* configure FDIRHASH register */
- fdirhash = input->formatted.bkt_hash;
- fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
-
- /* flush hash to HW */
- IXGBE_WRITE_FLUSH(hw);
-
- /* Query if filter is present */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
-
- for (retry_count = 10; retry_count; retry_count--) {
- /* allow 10us for query to process */
- udelay(10);
- /* verify query completed successfully */
- fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
- if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
- break;
- }
-
- if (!retry_count)
- err = IXGBE_ERR_FDIR_REINIT_FAILED;
-
- /* if filter exists in hardware then remove it */
- if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
- IXGBE_WRITE_FLUSH(hw);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
- IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
- }
-
- return err;
-}
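-
-/*
- * A generic sketch of the bounded-poll pattern used by the query loop above
- * and by the FDIRCTRL init-done loops earlier: retry a condition a fixed
- * number of times with a short delay between attempts, and report a timeout
- * if it never holds. busy_wait_us() is a hypothetical stand-in for
- * udelay(); nothing here is a driver API.
- */
-static void busy_wait_us(unsigned int us) { (void)us; /* no-op stub */ }
-
-/* Returns 0 once cond(ctx) holds, -1 if all retries are exhausted. */
-static int poll_until(int (*cond)(void *ctx), void *ctx,
-                      unsigned int retries, unsigned int delay_us)
-{
-    while (retries--) {
-        busy_wait_us(delay_us);    /* allow the command to process */
-        if (cond(ctx))
-            return 0;
-    }
-    return -1;                     /* timed out */
-}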
-
-/**
- * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
- * @hw: pointer to hardware structure
- * @input: input bitstream
- * @input_mask: mask for the input bitstream
- * @soft_id: software index for the filters
- * @queue: queue index to direct traffic to
- *
- * Note that the caller of this function must hold a lock before calling,
- * since the hardware writes must be protected from one another.
- **/
-s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- union ixgbe_atr_input *input_mask,
- u16 soft_id, u8 queue)
-{
- s32 err = IXGBE_ERR_CONFIG;
-
- /*
- * Check flow_type formatting, and bail out before we touch the hardware
- * if there's a configuration issue
- */
- switch (input->formatted.flow_type) {
- case IXGBE_ATR_FLOW_TYPE_IPV4:
- input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
- if (input->formatted.dst_port || input->formatted.src_port) {
- hw_dbg(hw, " Error on src/dst port\n");
- return IXGBE_ERR_CONFIG;
- }
- break;
- case IXGBE_ATR_FLOW_TYPE_SCTPV4:
- if (input->formatted.dst_port || input->formatted.src_port) {
- hw_dbg(hw, " Error on src/dst port\n");
- return IXGBE_ERR_CONFIG;
- }
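- /* fall through */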
- case IXGBE_ATR_FLOW_TYPE_TCPV4:
- case IXGBE_ATR_FLOW_TYPE_UDPV4:
- input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
- IXGBE_ATR_L4TYPE_MASK;
- break;
- default:
- hw_dbg(hw, " Error on flow type input\n");
- return err;
- }
-
- /* program input mask into the HW */
- err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
- if (err)
- return err;
-
- /* apply mask and compute/store hash */
- ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
-
- /* program filters to filter memory */
- return ixgbe_fdir_write_perfect_filter_82599(hw, input,
- soft_id, queue);
-}
-
-/**
- * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
- * @hw: pointer to hardware structure
- * @reg: analog register to read
- * @val: read value
- *
- * Performs a read of the specified Omer analog register.
- **/
-s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
-{
- u32 core_ctl;
-
- IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
- (reg << 8));
- IXGBE_WRITE_FLUSH(hw);
- udelay(10);
- core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
- *val = (u8)core_ctl;
-
- return 0;
-}
-
-/**
- * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
- * @hw: pointer to hardware structure
- * @reg: atlas register to write
- * @val: value to write
- *
- * Performs a write to the specified Omer analog register.
- **/
-s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
-{
- u32 core_ctl;
-
- core_ctl = (reg << 8) | val;
- IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
- IXGBE_WRITE_FLUSH(hw);
- udelay(10);
-
- return 0;
-}
-
-/**
- * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
- * @hw: pointer to hardware structure
- *
- * Starts the hardware using the generic start_hw function
- * and the gen2 start_hw function.
- * Then performs revision-specific operations, if any.
- **/
-s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
-{
- s32 ret_val = 0;
-
- ret_val = ixgbe_start_hw_generic(hw);
- if (ret_val != 0)
- goto out;
-
- ret_val = ixgbe_start_hw_gen2(hw);
- if (ret_val != 0)
- goto out;
-
- /* We need to run link autotry after the driver loads */
- hw->mac.autotry_restart = true;
-
- if (ret_val == 0)
- ret_val = ixgbe_verify_fw_version_82599(hw);
-out:
- return ret_val;
-}
-
-/**
- * ixgbe_identify_phy_82599 - Get physical layer module
- * @hw: pointer to hardware structure
- *
- * Determines the physical layer module found on the current adapter.
- * If PHY already detected, maintains current PHY type in hw struct,
- * otherwise executes the PHY detection routine.
- **/
-s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
-{
- s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
-
- /* Detect PHY if not unknown - returns success if already detected. */
- status = ixgbe_identify_phy_generic(hw);
- if (status != 0) {
- /* 82599 10GBASE-T requires an external PHY */
- if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
- goto out;
- else
- status = ixgbe_identify_module_generic(hw);
- }
-
- /* Set PHY type none if no PHY detected */
- if (hw->phy.type == ixgbe_phy_unknown) {
- hw->phy.type = ixgbe_phy_none;
- status = 0;
- }
-
- /* Return error if SFP module has been detected but is not supported */
- if (hw->phy.type == ixgbe_phy_sfp_unsupported)
- status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-
-out:
- return status;
-}
-
-/**
- * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
- * @hw: pointer to hardware structure
- *
- * Determines physical layer capabilities of the current configuration.
- **/
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
-{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
- u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
- u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
- u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
- u16 ext_ability = 0;
- u8 comp_codes_10g = 0;
- u8 comp_codes_1g = 0;
-
- hw->phy.ops.identify(hw);
-
- switch (hw->phy.type) {
- case ixgbe_phy_tn:
- case ixgbe_phy_cu_unknown:
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
- if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
- physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
- if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
- physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
- if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
- physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
- goto out;
- default:
- break;
- }
-
- switch (autoc & IXGBE_AUTOC_LMS_MASK) {
- case IXGBE_AUTOC_LMS_1G_AN:
- case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
- if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
- physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
- IXGBE_PHYSICAL_LAYER_1000BASE_BX;
- goto out;
- } else
- /* SFI mode so read SFP module */
- goto sfp_check;
- break;
- case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
- if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
- else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
- else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
- goto out;
- case IXGBE_AUTOC_LMS_10G_SERIAL:
- if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
- goto out;
- } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
- goto sfp_check;
- break;
- case IXGBE_AUTOC_LMS_KX4_KX_KR:
- case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
- if (autoc & IXGBE_AUTOC_KX_SUPP)
- physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
- if (autoc & IXGBE_AUTOC_KX4_SUPP)
- physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
- if (autoc & IXGBE_AUTOC_KR_SUPP)
- physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
- goto out;
- default:
- goto out;
- }
-
-sfp_check:
- /* SFP check must be done last since DA modules are sometimes used to
- * test KR mode - we need to identify KR mode correctly before checking
- * the SFP module. Call identify_sfp because the pluggable module may
- * have changed */
- hw->phy.ops.identify_sfp(hw);
- if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
- goto out;
-
- switch (hw->phy.type) {
- case ixgbe_phy_sfp_passive_tyco:
- case ixgbe_phy_sfp_passive_unknown:
- physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
- break;
- case ixgbe_phy_sfp_ftl_active:
- case ixgbe_phy_sfp_active_unknown:
- physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
- break;
- case ixgbe_phy_sfp_avago:
- case ixgbe_phy_sfp_ftl:
- case ixgbe_phy_sfp_intel:
- case ixgbe_phy_sfp_unknown:
- hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
- hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
- if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
- else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
- else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
- physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
- else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
- physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
- break;
- default:
- break;
- }
-
-out:
- return physical_layer;
-}
-
-/**
- * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
- * @hw: pointer to hardware structure
- * @regval: register value to write to RXCTRL
- *
- * Enables the Rx DMA unit for 82599
- **/
-s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
-{
-
- /*
- * Workaround for 82599 silicon errata when enabling the Rx datapath.
- * If traffic is incoming before we enable the Rx unit, it could hang
- * the Rx DMA unit. Therefore, make sure the security engine is
- * completely disabled prior to enabling the Rx unit.
- */
-
- hw->mac.ops.disable_sec_rx_path(hw);
-
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
-
- hw->mac.ops.enable_sec_rx_path(hw);
-
- return 0;
-}
-
-/**
- * ixgbe_verify_fw_version_82599 - verify fw version for 82599
- * @hw: pointer to hardware structure
- *
- * Verifies that the installed firmware version is 0.6 or higher
- * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
- *
- * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
- * if the FW version is not supported.
- **/
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
-{
- s32 status = IXGBE_ERR_EEPROM_VERSION;
- u16 fw_offset, fw_ptp_cfg_offset;
- u16 fw_version = 0;
-
- /* firmware check is only necessary for SFI devices */
- if (hw->phy.media_type != ixgbe_media_type_fiber) {
- status = 0;
- goto fw_version_out;
- }
-
- /* get the offset to the Firmware Module block */
- hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
-
- if ((fw_offset == 0) || (fw_offset == 0xFFFF))
- goto fw_version_out;
-
- /* get the offset to the Pass Through Patch Configuration block */
- hw->eeprom.ops.read(hw, (fw_offset +
- IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
- &fw_ptp_cfg_offset);
-
- if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
- goto fw_version_out;
-
- /* get the firmware version */
- hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
- IXGBE_FW_PATCH_VERSION_4), &fw_version);
-
- if (fw_version > 0x5)
- status = 0;
-
-fw_version_out:
- return status;
-}
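
Both this check and the LESM probe below chase the same kind of EEPROM pointer chain, treating 0x0000 and 0xFFFF as "block not present". A minimal helper capturing that convention might look like the following (hypothetical; not part of the driver):

/* Hypothetical helper, not part of the driver: mirrors the sentinel
 * checks used above when chasing EEPROM block pointers. */
static inline bool ixgbe_eeprom_ptr_present(u16 ptr)
{
	return (ptr != 0x0000) && (ptr != 0xFFFF);
}
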
-
-/**
- * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
- * @hw: pointer to hardware structure
- *
- * Returns true if the LESM FW module is present and enabled. Otherwise
- * returns false. Smart Speed must be disabled if LESM FW module is enabled.
- **/
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
-{
- bool lesm_enabled = false;
- u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
- s32 status;
-
- /* get the offset to the Firmware Module block */
- status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
-
- if ((status != 0) ||
- (fw_offset == 0) || (fw_offset == 0xFFFF))
- goto out;
-
- /* get the offset to the LESM Parameters block */
- status = hw->eeprom.ops.read(hw, (fw_offset +
- IXGBE_FW_LESM_PARAMETERS_PTR),
- &fw_lesm_param_offset);
-
- if ((status != 0) ||
- (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
- goto out;
-
- /* get the lesm state word */
- status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
- IXGBE_FW_LESM_STATE_1),
- &fw_lesm_state);
-
- if ((status == 0) &&
- (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
- lesm_enabled = true;
-
-out:
- return lesm_enabled;
-}
-
-/**
- * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
- * fastest available method
- *
- * @hw: pointer to hardware structure
- * @offset: offset of word in EEPROM to read
- * @words: number of words
- * @data: word(s) read from the EEPROM
- *
- * Retrieves 16 bit word(s) read from EEPROM
- **/
-static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data)
-{
- struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- s32 ret_val = IXGBE_ERR_CONFIG;
-
- /*
- * If EEPROM is detected and can be addressed using 14 bits,
- * use EERD; otherwise use bit bang
- */
- if ((eeprom->type == ixgbe_eeprom_spi) &&
- (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
- ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
- data);
- else
- ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
- words,
- data);
-
- return ret_val;
-}
-
-/**
- * ixgbe_read_eeprom_82599 - Read EEPROM word using
- * fastest available method
- *
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
- * @data: word read from the EEPROM
- *
- * Reads a 16 bit word from the EEPROM
- **/
-static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
- u16 offset, u16 *data)
-{
- struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- s32 ret_val = IXGBE_ERR_CONFIG;
-
- /*
- * If EEPROM is detected and can be addressed using 14 bits,
- * use EERD; otherwise use bit bang
- */
- if ((eeprom->type == ixgbe_eeprom_spi) &&
- (offset <= IXGBE_EERD_MAX_ADDR))
- ret_val = ixgbe_read_eerd_generic(hw, offset, data);
- else
- ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
-
- return ret_val;
-}
-
-/**
- * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
- * @hw: pointer to hardware structure
- * @byte_offset: byte offset to read
- * @data: value read
- *
- * Performs a byte read operation from the SFP module's EEPROM over the I2C
- * interface at a specified device address.
- **/
-static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data)
-{
- u32 esdp;
- s32 status;
- s32 timeout = 200;
-
- if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
- /* Acquire I2C bus ownership. */
- esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
- esdp |= IXGBE_ESDP_SDP0;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
- IXGBE_WRITE_FLUSH(hw);
-
- while (timeout) {
- esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
- if (esdp & IXGBE_ESDP_SDP1)
- break;
-
- msleep(5);
- timeout--;
- }
-
- if (!timeout) {
- hw_dbg(hw, "Driver can't access resource,"
- " acquiring I2C bus timeout.\n");
- status = IXGBE_ERR_I2C;
- goto release_i2c_access;
- }
- }
-
- status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
-
-release_i2c_access:
-
- if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
- /* Release I2C bus ownership. */
- esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
- esdp &= ~IXGBE_ESDP_SDP0;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
- IXGBE_WRITE_FLUSH(hw);
- }
-
- return status;
-}
-
-/**
- * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
- * @hw: pointer to hardware structure
- * @byte_offset: byte offset to write
- * @data: value to write
- *
- * Performs byte write operation to SFP module's EEPROM over I2C interface at
- * a specified device address.
- **/
-static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data)
-{
- u32 esdp;
- s32 status;
- s32 timeout = 200;
-
- if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
- /* Acquire I2C bus ownership. */
- esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
- esdp |= IXGBE_ESDP_SDP0;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
- IXGBE_WRITE_FLUSH(hw);
-
- while (timeout) {
- esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
- if (esdp & IXGBE_ESDP_SDP1)
- break;
-
- msleep(5);
- timeout--;
- }
-
- if (!timeout) {
- hw_dbg(hw, "Driver can't access resource,"
- " acquiring I2C bus timeout.\n");
- status = IXGBE_ERR_I2C;
- goto release_i2c_access;
- }
- }
-
- status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
-
-release_i2c_access:
-
- if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
- /* Release I2C bus ownership. */
- esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
- esdp &= ~IXGBE_ESDP_SDP0;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
- IXGBE_WRITE_FLUSH(hw);
- }
-
- return status;
-}
-
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.h
deleted file mode 100755
index 02be92ab..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IXGBE_82599_H_
-#define _IXGBE_82599_H_
-
-s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed, bool *autoneg);
-enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
-void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete);
-s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete);
-s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete);
-s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg, bool autoneg_wait_to_complete);
-s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
-void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
-s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
-s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
-s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
-s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
-#endif /* _IXGBE_82599_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c
deleted file mode 100755
index 9a0a43e6..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c
+++ /dev/null
@@ -1,1158 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "ixgbe_api.h"
-#include "ixgbe_common.h"
-
-/**
- * ixgbe_init_shared_code - Initialize the shared code
- * @hw: pointer to hardware structure
- *
- * This assigns the function pointers and sets the MAC type and PHY code.
- * Does not touch the hardware. This function must be called prior to any
- * other function in the shared code. The ixgbe_hw structure should be
- * memset to 0 prior to calling this function. The following fields in
- * the hw structure should be filled in prior to calling this function:
- * hw_addr, back, device_id, vendor_id, subsystem_device_id,
- * subsystem_vendor_id, and revision_id
- **/
-s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
-{
- s32 status;
-
- /*
- * Set the mac type
- */
- ixgbe_set_mac_type(hw);
-
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- status = ixgbe_init_ops_82598(hw);
- break;
- case ixgbe_mac_82599EB:
- status = ixgbe_init_ops_82599(hw);
- break;
- case ixgbe_mac_X540:
- status = ixgbe_init_ops_X540(hw);
- break;
- default:
- status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
- break;
- }
-
- return status;
-}
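
A minimal caller sketch of the contract described in the comment above -- zero the structure, fill in the identifying fields, then initialize. bar0 and the PCI IDs are assumptions standing in for values the enclosing driver's probe path would supply:

/* Hypothetical caller sketch; bar0 and the PCI IDs are assumed to be
 * supplied by the enclosing driver's probe path. */
struct ixgbe_hw hw;

memset(&hw, 0, sizeof(hw));
hw.hw_addr = bar0;                        /* mapped register space */
hw.vendor_id = IXGBE_INTEL_VENDOR_ID;
hw.device_id = IXGBE_DEV_ID_82599_SFP;

if (ixgbe_init_shared_code(&hw) == 0)
	ixgbe_init_hw(&hw);
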
-
-/**
- * ixgbe_set_mac_type - Sets MAC type
- * @hw: pointer to the HW structure
- *
- * This function sets the mac type of the adapter based on the
- * vendor ID and device ID stored in the hw structure.
- **/
-s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
-{
- s32 ret_val = 0;
-
- if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82598:
- case IXGBE_DEV_ID_82598_BX:
- case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
- case IXGBE_DEV_ID_82598AF_DUAL_PORT:
- case IXGBE_DEV_ID_82598AT:
- case IXGBE_DEV_ID_82598AT2:
- case IXGBE_DEV_ID_82598EB_CX4:
- case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
- case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
- case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
- case IXGBE_DEV_ID_82598EB_XF_LR:
- case IXGBE_DEV_ID_82598EB_SFP_LOM:
- hw->mac.type = ixgbe_mac_82598EB;
- break;
- case IXGBE_DEV_ID_82599_KX4:
- case IXGBE_DEV_ID_82599_KX4_MEZZ:
- case IXGBE_DEV_ID_82599_XAUI_LOM:
- case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
- case IXGBE_DEV_ID_82599_KR:
- case IXGBE_DEV_ID_82599_SFP:
- case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
- case IXGBE_DEV_ID_82599_SFP_FCOE:
- case IXGBE_DEV_ID_82599_SFP_EM:
- case IXGBE_DEV_ID_82599_SFP_SF2:
- case IXGBE_DEV_ID_82599_QSFP_SF_QP:
- case IXGBE_DEV_ID_82599EN_SFP:
- case IXGBE_DEV_ID_82599_CX4:
- case IXGBE_DEV_ID_82599_LS:
- case IXGBE_DEV_ID_82599_T3_LOM:
- hw->mac.type = ixgbe_mac_82599EB;
- break;
- case IXGBE_DEV_ID_X540T:
- hw->mac.type = ixgbe_mac_X540;
- break;
- default:
- ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
- break;
- }
- } else {
- ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
- }
-
- hw_dbg(hw, "ixgbe_set_mac_type found mac: %d, returns: %d\n",
- hw->mac.type, ret_val);
- return ret_val;
-}
-
-/**
- * ixgbe_init_hw - Initialize the hardware
- * @hw: pointer to hardware structure
- *
- * Initialize the hardware by resetting and then starting the hardware
- **/
-s32 ixgbe_init_hw(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
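
Each wrapper in this file routes through ixgbe_call_func; the macro itself is defined elsewhere in the shared code and not shown in this diff, but the dispatch is equivalent to a NULL-checked indirect call, roughly:

/* Sketch of the dispatch pattern the wrappers rely on (the actual
 * macro lives elsewhere in the shared code): */
s32 status;

if (hw->mac.ops.init_hw != NULL)
	status = hw->mac.ops.init_hw(hw);
else
	status = IXGBE_NOT_IMPLEMENTED;
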
-
-/**
- * ixgbe_reset_hw - Performs a hardware reset
- * @hw: pointer to hardware structure
- *
- * Resets the hardware by resetting the transmit and receive units, masks and
- * clears all interrupts, performs a PHY reset, and performs a MAC reset
- **/
-s32 ixgbe_reset_hw(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_start_hw - Prepares hardware for Rx/Tx
- * @hw: pointer to hardware structure
- *
- * Starts the hardware by filling the bus info structure and media type,
- * clears all on chip counters, initializes receive address registers,
- * multicast table, VLAN filter table, calls routine to setup link and
- * flow control settings, and leaves transmit and receive units disabled
- * and uninitialized.
- **/
-s32 ixgbe_start_hw(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_clear_hw_cntrs - Clear hardware counters
- * @hw: pointer to hardware structure
- *
- * Clears all hardware statistics counters by reading them from the hardware.
- * Statistics counters are clear on read.
- **/
-s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_get_media_type - Get media type
- * @hw: pointer to hardware structure
- *
- * Returns the media type (fiber, copper, backplane)
- **/
-enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw),
- ixgbe_media_type_unknown);
-}
-
-/**
- * ixgbe_get_mac_addr - Get MAC address
- * @hw: pointer to hardware structure
- * @mac_addr: Adapter MAC address
- *
- * Reads the adapter's MAC address from the first Receive Address Register
- * (RAR0). A reset of the adapter must have been performed prior to calling
- * this function in order for the MAC address to have been loaded from the
- * EEPROM into RAR0
- **/
-s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
-{
- return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr,
- (hw, mac_addr), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_get_san_mac_addr - Get SAN MAC address
- * @hw: pointer to hardware structure
- * @san_mac_addr: SAN MAC address
- *
- * Reads the SAN MAC address from the EEPROM, if it's available. This is
- * per-port, so set_lan_id() must be called before reading the addresses.
- **/
-s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
- return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr,
- (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_set_san_mac_addr - Write a SAN MAC address
- * @hw: pointer to hardware structure
- * @san_mac_addr: SAN MAC address
- *
- * Writes a SAN MAC address to the EEPROM.
- **/
-s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
- return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr,
- (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_get_device_caps - Get additional device capabilities
- * @hw: pointer to hardware structure
- * @device_caps: the EEPROM word for device capabilities
- *
- * Reads the extra device capabilities from the EEPROM
- **/
-s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps)
-{
- return ixgbe_call_func(hw, hw->mac.ops.get_device_caps,
- (hw, device_caps), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM
- * @hw: pointer to hardware structure
- * @wwnn_prefix: the alternative WWNN prefix
- * @wwpn_prefix: the alternative WWPN prefix
- *
- * This function reads the EEPROM's alternative SAN MAC address block to
- * check for alternative WWNN/WWPN prefix support.
- **/
-s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
-{
- return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix,
- (hw, wwnn_prefix, wwpn_prefix),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_get_fcoe_boot_status - Get FCOE boot status from EEPROM
- * @hw: pointer to hardware structure
- * @bs: the fcoe boot status
- *
- * This function will read the FCOE boot status from the iSCSI FCOE block
- **/
-s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs)
-{
- return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status,
- (hw, bs),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_get_bus_info - Set PCI bus info
- * @hw: pointer to hardware structure
- *
- * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
- **/
-s32 ixgbe_get_bus_info(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_get_num_of_tx_queues - Get Tx queues
- * @hw: pointer to hardware structure
- *
- * Returns the number of transmit queues for the given adapter.
- **/
-u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw)
-{
- return hw->mac.max_tx_queues;
-}
-
-/**
- * ixgbe_get_num_of_rx_queues - Get Rx queues
- * @hw: pointer to hardware structure
- *
- * Returns the number of receive queues for the given adapter.
- **/
-u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw)
-{
- return hw->mac.max_rx_queues;
-}
-
-/**
- * ixgbe_stop_adapter - Disable Rx/Tx units
- * @hw: pointer to hardware structure
- *
- * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
- * disables transmit and receive units. The adapter_stopped flag is used by
- * the shared code and drivers to determine if the adapter is in a stopped
- * state and should not touch the hardware.
- **/
-s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_read_pba_string - Reads part number string from EEPROM
- * @hw: pointer to hardware structure
- * @pba_num: stores the part number string from the EEPROM
- * @pba_num_size: part number string buffer length
- *
- * Reads the part number string from the EEPROM.
- **/
-s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
-{
- return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size);
-}
-
-/**
- * ixgbe_identify_phy - Get PHY type
- * @hw: pointer to hardware structure
- *
- * Determines the physical layer module found on the current adapter.
- **/
-s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
-{
- s32 status = 0;
-
- if (hw->phy.type == ixgbe_phy_unknown) {
- status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw),
- IXGBE_NOT_IMPLEMENTED);
- }
-
- return status;
-}
-
-/**
- * ixgbe_reset_phy - Perform a PHY reset
- * @hw: pointer to hardware structure
- **/
-s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
-{
- s32 status = 0;
-
- if (hw->phy.type == ixgbe_phy_unknown) {
- if (ixgbe_identify_phy(hw) != 0)
- status = IXGBE_ERR_PHY;
- }
-
- if (status == 0) {
- status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw),
- IXGBE_NOT_IMPLEMENTED);
- }
- return status;
-}
-
-/**
- * ixgbe_get_phy_firmware_version - Get PHY firmware version
- * @hw: pointer to hardware structure
- * @firmware_version: pointer to firmware version
- **/
-s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version)
-{
- s32 status = 0;
-
- status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version,
- (hw, firmware_version),
- IXGBE_NOT_IMPLEMENTED);
- return status;
-}
-
-/**
- * ixgbe_read_phy_reg - Read PHY register
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit address of PHY register to read
- * @phy_data: Pointer to read data from PHY register
- *
- * Reads a value from a specified PHY register
- **/
-s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 *phy_data)
-{
- if (hw->phy.id == 0)
- ixgbe_identify_phy(hw);
-
- return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr,
- device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_write_phy_reg - Write PHY register
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit PHY register to write
- * @phy_data: Data to write to the PHY register
- *
- * Writes a value to specified PHY register
- **/
-s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 phy_data)
-{
- if (hw->phy.id == 0)
- ixgbe_identify_phy(hw);
-
- return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr,
- device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_setup_phy_link - Restart PHY autoneg
- * @hw: pointer to hardware structure
- *
- * Restarts autonegotiation on the PHY and waits for completion.
- **/
-s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_check_phy_link - Determine link and speed status
- * @hw: pointer to hardware structure
- *
- * Reads a PHY register to determine if link is up and the current speed for
- * the PHY.
- **/
-s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up)
-{
- return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed,
- link_up), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_setup_phy_link_speed - Set auto advertise
- * @hw: pointer to hardware structure
- * @speed: new link speed
- * @autoneg: true if autonegotiation enabled
- *
- * Sets the auto advertised capabilities
- **/
-s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete)
-{
- return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
- autoneg, autoneg_wait_to_complete),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_check_link - Get link and speed status
- * @hw: pointer to hardware structure
- *
- * Reads the LINKS register to determine if link is up and the current speed
- **/
-s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up, bool link_up_wait_to_complete)
-{
- return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed,
- link_up, link_up_wait_to_complete),
- IXGBE_NOT_IMPLEMENTED);
-}
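
A hedged usage sketch for the wrapper above: poll the MAC link state once, without waiting for completion:

/* Hypothetical polling sketch using the wrapper above. */
ixgbe_link_speed speed;
bool link_up = false;

if (ixgbe_check_link(hw, &speed, &link_up, false) == 0 && link_up)
	hw_dbg(hw, "link is up\n");
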
-
-/**
- * ixgbe_disable_tx_laser - Disable Tx laser
- * @hw: pointer to hardware structure
- *
- * Disables the Tx laser on SFI optics, if the MAC implements the operation.
- **/
-void ixgbe_disable_tx_laser(struct ixgbe_hw *hw)
-{
- if (hw->mac.ops.disable_tx_laser)
- hw->mac.ops.disable_tx_laser(hw);
-}
-
-/**
- * ixgbe_enable_tx_laser - Enable Tx laser
- * @hw: pointer to hardware structure
- *
- * Enables the Tx laser on SFI optics, if the MAC implements the operation.
- **/
-void ixgbe_enable_tx_laser(struct ixgbe_hw *hw)
-{
- if (hw->mac.ops.enable_tx_laser)
- hw->mac.ops.enable_tx_laser(hw);
-}
-
-/**
- * ixgbe_flap_tx_laser - flap Tx laser to start autotry process
- * @hw: pointer to hardware structure
- *
- * When the driver changes the link speeds that it can support, it flaps
- * the Tx laser to alert the link partner to start the autotry process
- * on its end.
- **/
-void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
-{
- if (hw->mac.ops.flap_tx_laser)
- hw->mac.ops.flap_tx_laser(hw);
-}
-
-/**
- * ixgbe_setup_link - Set link speed
- * @hw: pointer to hardware structure
- * @speed: new link speed
- * @autoneg: true if autonegotiation enabled
- *
- * Configures link settings. Restarts the link.
- * Performs autonegotiation if needed.
- **/
-s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete)
-{
- return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed,
- autoneg, autoneg_wait_to_complete),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_get_link_capabilities - Returns link capabilities
- * @hw: pointer to hardware structure
- *
- * Determines the link capabilities of the current configuration.
- **/
-s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *autoneg)
-{
- return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw,
- speed, autoneg), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_led_on - Turn on LEDs
- * @hw: pointer to hardware structure
- * @index: led number to turn on
- *
- * Turns on the software controllable LEDs.
- **/
-s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
-{
- return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_led_off - Turn off LEDs
- * @hw: pointer to hardware structure
- * @index: led number to turn off
- *
- * Turns off the software controllable LEDs.
- **/
-s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
-{
- return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_blink_led_start - Blink LEDs
- * @hw: pointer to hardware structure
- * @index: led number to blink
- *
- * Blink LED based on index.
- **/
-s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index)
-{
- return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_blink_led_stop - Stop blinking LEDs
- * @hw: pointer to hardware structure
- *
- * Stop blinking LED based on index.
- **/
-s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index)
-{
- return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_init_eeprom_params - Initialize EEPROM parameters
- * @hw: pointer to hardware structure
- *
- * Initializes the EEPROM parameters ixgbe_eeprom_info within the
- * ixgbe_hw struct in order to set up EEPROM access.
- **/
-s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-
-/**
- * ixgbe_write_eeprom - Write word to EEPROM
- * @hw: pointer to hardware structure
- * @offset: offset within the EEPROM to be written to
- * @data: 16 bit word to be written to the EEPROM
- *
- * Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not
- * called after this function, the EEPROM will most likely contain an
- * invalid checksum.
- **/
-s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data)
-{
- return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_write_eeprom_buffer - Write word(s) to EEPROM
- * @hw: pointer to hardware structure
- * @offset: offset within the EEPROM to be written to
- * @data: 16 bit word(s) to be written to the EEPROM
- * @words: number of words
- *
- * Writes 16 bit word(s) to EEPROM. If ixgbe_eeprom_update_checksum is not
- * called after this function, the EEPROM will most likely contain an
- * invalid checksum.
- **/
-s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words,
- u16 *data)
-{
- return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer,
- (hw, offset, words, data),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_read_eeprom - Read word from EEPROM
- * @hw: pointer to hardware structure
- * @offset: offset within the EEPROM to be read
- * @data: read 16 bit value from EEPROM
- *
- * Reads 16 bit value from EEPROM
- **/
-s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
-{
- return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_read_eeprom_buffer - Read word(s) from EEPROM
- * @hw: pointer to hardware structure
- * @offset: offset within the EEPROM to be read
- * @data: read 16 bit word(s) from EEPROM
- * @words: number of words
- *
- * Reads 16 bit word(s) from EEPROM
- **/
-s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data)
-{
- return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer,
- (hw, offset, words, data),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
- * @hw: pointer to hardware structure
- * @checksum_val: calculated checksum
- *
- * Performs checksum calculation and validates the EEPROM checksum
- **/
-s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
-{
- return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum,
- (hw, checksum_val), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_update_eeprom_checksum - Updates the EEPROM checksum
- * @hw: pointer to hardware structure
- **/
-s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_insert_mac_addr - Find a RAR for this mac address
- * @hw: pointer to hardware structure
- * @addr: Address to put into receive address register
- * @vmdq: VMDq pool to assign
- *
- * Puts an ethernet address into a receive address register, or
- * finds the RAR that it is already in; adds it to the pool list.
- **/
-s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
-{
- return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr,
- (hw, addr, vmdq),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_set_rar - Set Rx address register
- * @hw: pointer to hardware structure
- * @index: Receive address register to write
- * @addr: Address to put into receive address register
- * @vmdq: VMDq "set"
- * @enable_addr: set flag that address is active
- *
- * Puts an ethernet address into a receive address register.
- **/
-s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr)
-{
- return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq,
- enable_addr), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_clear_rar - Clear Rx address register
- * @hw: pointer to hardware structure
- * @index: Receive address register to write
- *
- * Clears an ethernet address from a receive address register.
- **/
-s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index)
-{
- return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_set_vmdq - Associate a VMDq index with a receive address
- * @hw: pointer to hardware structure
- * @rar: receive address register index to associate with VMDq index
- * @vmdq: VMDq set or pool index
- **/
-s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
- return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq),
- IXGBE_NOT_IMPLEMENTED);
-
-}
-
-/**
- * ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address
- * @hw: pointer to hardware structure
- * @vmdq: VMDq default pool index
- **/
-s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq)
-{
- return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac,
- (hw, vmdq), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
- * @hw: pointer to hardware structure
- * @rar: receive address register index to disassociate with VMDq index
- * @vmdq: VMDq set or pool index
- **/
-s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
- return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_init_rx_addrs - Initializes receive address filters.
- * @hw: pointer to hardware structure
- *
- * Places the MAC address in receive address register 0 and clears the rest
- * of the receive address registers. Clears the multicast table. Assumes
- * the receiver is in reset when the routine is called.
- **/
-s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_get_num_rx_addrs - Returns the number of RAR entries.
- * @hw: pointer to hardware structure
- **/
-u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw)
-{
- return hw->mac.num_rar_entries;
-}
-
-/**
- * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses
- * @hw: pointer to hardware structure
- * @addr_list: the list of new secondary unicast addresses
- * @addr_count: number of addresses
- * @func: iterator function to walk the address list
- *
- * The given list replaces any existing list. Clears the secondary addrs from
- * receive address registers. Uses unused receive address registers for the
- * first secondary addresses, and falls back to promiscuous mode as needed.
- **/
-s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
- u32 addr_count, ixgbe_mc_addr_itr func)
-{
- return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw,
- addr_list, addr_count, func),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses
- * @hw: pointer to hardware structure
- * @mc_addr_list: the list of new multicast addresses
- * @mc_addr_count: number of addresses
- * @func: iterator function to walk the multicast address list
- *
- * The given list replaces any existing list. Clears the MC addrs from receive
- * address registers and the multicast table. Uses unused receive address
- * registers for the first multicast addresses, and hashes the rest into the
- * multicast table.
- **/
-s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, ixgbe_mc_addr_itr func,
- bool clear)
-{
- return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw,
- mc_addr_list, mc_addr_count, func, clear),
- IXGBE_NOT_IMPLEMENTED);
-}
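
The iterator type is declared in ixgbe_type.h (not shown in this diff). Assuming the usual shared-code signature, a trivial callback that walks a flat array of 6-byte addresses could look like this (hypothetical):

/* Hypothetical iterator, assuming the shared-code typedef
 * u8 *(*ixgbe_mc_addr_itr)(struct ixgbe_hw *, u8 **, u32 *). */
static u8 *flat_mc_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *addr = *mc_addr_ptr;

	*vmdq = 0;                 /* default pool */
	*mc_addr_ptr = addr + 6;   /* advance to the next 6-byte address */
	return addr;
}
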
-
-/**
- * ixgbe_enable_mc - Enable multicast address in RAR
- * @hw: pointer to hardware structure
- *
- * Enables multicast address in RAR and the use of the multicast hash table.
- **/
-s32 ixgbe_enable_mc(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_disable_mc - Disable multicast address in RAR
- * @hw: pointer to hardware structure
- *
- * Disables multicast address in RAR and the use of the multicast hash table.
- **/
-s32 ixgbe_disable_mc(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_clear_vfta - Clear VLAN filter table
- * @hw: pointer to hardware structure
- *
- * Clears the VLAN filter table, and the VMDq index associated with the filter
- **/
-s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_set_vfta - Set VLAN filter table
- * @hw: pointer to hardware structure
- * @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFTA
- * @vlan_on: boolean flag to turn on/off VLAN in VFTA
- *
- * Turn on/off specified VLAN in the VLAN filter table.
- **/
-s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
-{
- return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
- vlan_on), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_set_vlvf - Set VLAN Pool Filter
- * @hw: pointer to hardware structure
- * @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VLVFB
- * @vlan_on: boolean flag to turn on/off VLAN in VLVF
- * @vfta_changed: pointer to boolean flag which indicates whether VFTA
- * should be changed
- *
- * Turn on/off specified bit in VLVF table.
- **/
-s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
- bool *vfta_changed)
-{
- return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind,
- vlan_on, vfta_changed), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_fc_enable - Enable flow control
- * @hw: pointer to hardware structure
- *
- * Configures the flow control settings based on SW configuration.
- **/
-s32 ixgbe_fc_enable(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
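
Per the comment above, the wrapper applies whatever software configuration is already in hw->fc. A minimal sketch, assuming the caller wants symmetric flow control:

/* Hypothetical caller sketch: request symmetric (Rx+Tx) flow control,
 * then apply it through the wrapper above. */
hw->fc.requested_mode = ixgbe_fc_full;
if (ixgbe_fc_enable(hw) != 0)
	hw_dbg(hw, "flow control configuration failed\n");
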
-
-/**
- * ixgbe_set_fw_drv_ver - Try to send the driver version number to FW
- * @hw: pointer to hardware structure
- * @maj: driver major number to be sent to firmware
- * @min: driver minor number to be sent to firmware
- * @build: driver build number to be sent to firmware
- * @ver: driver version number to be sent to firmware
- **/
-s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
- u8 ver)
-{
- return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min,
- build, ver), IXGBE_NOT_IMPLEMENTED);
-}
-
-
-/**
- * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
- * @hw: pointer to hardware structure
- *
- * Updates the temperatures in mac.thermal_sensor_data
- **/
-s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.get_thermal_sensor_data, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
- * @hw: pointer to hardware structure
- *
- * Inits the thermal sensor thresholds according to the NVM map
- **/
-s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.init_thermal_sensor_thresh, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
-/**
- * ixgbe_read_analog_reg8 - Reads 8 bit analog register
- * @hw: pointer to hardware structure
- * @reg: analog register to read
- * @val: read value
- *
- * Performs read operation on the analog register specified.
- **/
-s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
-{
- return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg,
- val), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_write_analog_reg8 - Writes 8 bit analog register
- * @hw: pointer to hardware structure
- * @reg: analog register to write
- * @val: value to write
- *
- * Performs write operation to Atlas analog register specified.
- **/
-s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
-{
- return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg,
- val), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
- * @hw: pointer to hardware structure
- *
- * Initializes the Unicast Table Arrays to zero on device load. This
- * is part of the Rx init addr execution path.
- **/
-s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address
- * @hw: pointer to hardware structure
- * @byte_offset: byte offset to read
- * @data: value read
- *
- * Performs byte read operation from SFP module's EEPROM over I2C interface.
- **/
-s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
- u8 *data)
-{
- return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset,
- dev_addr, data), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_write_i2c_byte - Writes 8 bit word over I2C
- * @hw: pointer to hardware structure
- * @byte_offset: byte offset to write
- * @data: value to write
- *
- * Performs byte write operation to SFP module's EEPROM over I2C interface
- * at a specified device address.
- **/
-s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
- u8 data)
-{
- return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset,
- dev_addr, data), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface
- * @hw: pointer to hardware structure
- * @byte_offset: EEPROM byte offset to write
- * @eeprom_data: value to write
- *
- * Performs byte write operation to SFP module's EEPROM over I2C interface.
- **/
-s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw,
- u8 byte_offset, u8 eeprom_data)
-{
- return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom,
- (hw, byte_offset, eeprom_data),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface
- * @hw: pointer to hardware structure
- * @byte_offset: EEPROM byte offset to read
- * @eeprom_data: value read
- *
- * Performs byte read operation from SFP module's EEPROM over I2C interface.
- **/
-s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
-{
- return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom,
- (hw, byte_offset, eeprom_data),
- IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_get_supported_physical_layer - Returns physical layer type
- * @hw: pointer to hardware structure
- *
- * Determines physical layer capabilities of the current configuration.
- **/
-u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
- (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
-}
-
-/**
- * ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics
- * @hw: pointer to hardware structure
- * @regval: bitfield to write to the Rx DMA register
- *
- * Enables the Rx DMA unit of the device.
- **/
-s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval)
-{
- return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma,
- (hw, regval), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_disable_sec_rx_path - Stops the receive data path
- * @hw: pointer to hardware structure
- *
- * Stops the receive data path.
- **/
-s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path,
- (hw), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_enable_sec_rx_path - Enables the receive data path
- * @hw: pointer to hardware structure
- *
- * Enables the receive data path.
- **/
-s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw)
-{
- return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path,
- (hw), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore
- * @hw: pointer to hardware structure
- * @mask: Mask to specify which semaphore to acquire
- *
- * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified
- * function (CSR, PHY0, PHY1, EEPROM, Flash)
- **/
-s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
-{
- return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync,
- (hw, mask), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_release_swfw_semaphore - Release SWFW semaphore
- * @hw: pointer to hardware structure
- * @mask: Mask to specify which semaphore to release
- *
- * Releases the SWFW semaphore through SW_FW_SYNC register for the specified
- * function (CSR, PHY0, PHY1, EEPROM, Flash)
- **/
-void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
-{
- if (hw->mac.ops.release_swfw_sync)
- hw->mac.ops.release_swfw_sync(hw, mask);
-}
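
These two calls are meant to bracket accesses to shared resources. A typical pairing might look like the sketch below; IXGBE_GSSR_EEP_SM is assumed to be the EEPROM semaphore mask from ixgbe_type.h (not shown in this diff):

/* Hypothetical pairing sketch; IXGBE_GSSR_EEP_SM is assumed to be the
 * EEPROM semaphore mask defined in ixgbe_type.h. */
u16 data;

if (ixgbe_acquire_swfw_semaphore(hw, IXGBE_GSSR_EEP_SM) == 0) {
	ixgbe_read_eeprom(hw, 0x0, &data);
	ixgbe_release_swfw_semaphore(hw, IXGBE_GSSR_EEP_SM);
}
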
-
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.h
deleted file mode 100755
index a6ab30d2..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IXGBE_API_H_
-#define _IXGBE_API_H_
-
-#include "ixgbe_type.h"
-
-s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
-
-extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
-
-s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
-s32 ixgbe_init_hw(struct ixgbe_hw *hw);
-s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw(struct ixgbe_hw *hw);
-s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
-enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
-s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
-s32 ixgbe_get_bus_info(struct ixgbe_hw *hw);
-u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw);
-u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
-s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
-s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
-
-s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
-s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
-s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 *phy_data);
-s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 phy_data);
-
-s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
-s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up);
-s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
-void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
-void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
-void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
-s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg, bool autoneg_wait_to_complete);
-s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up, bool link_up_wait_to_complete);
-s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *autoneg);
-s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index);
-
-s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw);
-s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
-s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-
-s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
-s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
-
-s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
-s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr);
-s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq);
-s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
-u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
-s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
- u32 addr_count, ixgbe_mc_addr_itr func);
-s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, ixgbe_mc_addr_itr func,
- bool clear);
-void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq);
-s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
-s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
-s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
-s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
-s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on, bool *vfta_changed);
-s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
-s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
- u8 ver);
-s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw);
-s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw);
-void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
-s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
- u16 *firmware_version);
-s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
-s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
-s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
-u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
-s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common,
- u8 queue);
-s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input_mask);
-s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- u16 soft_id, u8 queue);
-s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- u16 soft_id);
-s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- union ixgbe_atr_input *mask,
- u16 soft_id,
- u8 queue);
-void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
- union ixgbe_atr_input *mask);
-u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common);
-s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
- u8 *data);
-s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
- u8 data);
-s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
-s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
-s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
-void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
-s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix);
-s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
-
-#endif /* _IXGBE_API_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c
deleted file mode 100755
index a0a0046e..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c
+++ /dev/null
@@ -1,4083 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "ixgbe_common.h"
-#include "ixgbe_phy.h"
-#include "ixgbe_api.h"
-
-static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
-static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
-static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
-static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
-static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
-static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
- u16 count);
-static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
-static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
-static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
-static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
-
-static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
- u16 *san_mac_offset);
-static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
- u16 offset);
-
-/**
- * ixgbe_init_ops_generic - Inits function ptrs
- * @hw: pointer to the hardware structure
- *
- * Initialize the function pointers.
- **/
-s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
-{
- struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- struct ixgbe_mac_info *mac = &hw->mac;
- u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-
- /* EEPROM */
- eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
- /* If EEPROM is valid (bit 8 = 1), use EERD; otherwise use bit bang */
- if (eec & IXGBE_EEC_PRES) {
- eeprom->ops.read = &ixgbe_read_eerd_generic;
- eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
- } else {
- eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
- eeprom->ops.read_buffer =
- &ixgbe_read_eeprom_buffer_bit_bang_generic;
- }
- eeprom->ops.write = &ixgbe_write_eeprom_generic;
- eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
- eeprom->ops.validate_checksum =
- &ixgbe_validate_eeprom_checksum_generic;
- eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
- eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
-
- /* MAC */
- mac->ops.init_hw = &ixgbe_init_hw_generic;
- mac->ops.reset_hw = NULL;
- mac->ops.start_hw = &ixgbe_start_hw_generic;
- mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
- mac->ops.get_media_type = NULL;
- mac->ops.get_supported_physical_layer = NULL;
- mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
- mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
- mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
- mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
- mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
- mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
- mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
-
- /* LEDs */
- mac->ops.led_on = &ixgbe_led_on_generic;
- mac->ops.led_off = &ixgbe_led_off_generic;
- mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
- mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
-
- /* RAR, Multicast, VLAN */
- mac->ops.set_rar = &ixgbe_set_rar_generic;
- mac->ops.clear_rar = &ixgbe_clear_rar_generic;
- mac->ops.insert_mac_addr = NULL;
- mac->ops.set_vmdq = NULL;
- mac->ops.clear_vmdq = NULL;
- mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
- mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
- mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
- mac->ops.enable_mc = &ixgbe_enable_mc_generic;
- mac->ops.disable_mc = &ixgbe_disable_mc_generic;
- mac->ops.clear_vfta = NULL;
- mac->ops.set_vfta = NULL;
- mac->ops.set_vlvf = NULL;
- mac->ops.init_uta_tables = NULL;
-
- /* Flow Control */
- mac->ops.fc_enable = &ixgbe_fc_enable_generic;
-
- /* Link */
- mac->ops.get_link_capabilities = NULL;
- mac->ops.setup_link = NULL;
- mac->ops.check_link = NULL;
-
- return 0;
-}
-
-/**
- * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
- * control
- * @hw: pointer to hardware structure
- *
- * There are several phys that do not support autoneg flow control. This
- * function checks the device id to see if the associated phy supports
- * autoneg flow control.
- **/
-static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
-{
- switch (hw->device_id) {
- case IXGBE_DEV_ID_X540T:
- return 0;
- case IXGBE_DEV_ID_82599_T3_LOM:
- return 0;
- default:
- return IXGBE_ERR_FC_NOT_SUPPORTED;
- }
-}
-
-/**
- * ixgbe_setup_fc - Set up flow control
- * @hw: pointer to hardware structure
- *
- * Called at init time to set up flow control.
- **/
-static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
-{
- s32 ret_val = 0;
- u32 reg = 0, reg_bp = 0;
- u16 reg_cu = 0;
-
- /*
- * Validate the requested mode. Strict IEEE mode does not allow
- * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
- */
- if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
- hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
- ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
- goto out;
- }
-
- /*
- * 10gig parts do not have a word in the EEPROM to determine the
- * default flow control setting, so we explicitly set it to full.
- */
- if (hw->fc.requested_mode == ixgbe_fc_default)
- hw->fc.requested_mode = ixgbe_fc_full;
-
- /*
- * Set up the 1G and 10G flow control advertisement registers so the
- * HW will be able to do fc autoneg once the cable is plugged in. If
- * we link at 10G, the 1G advertisement is harmless and vice versa.
- */
- switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber:
- case ixgbe_media_type_backplane:
- reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- break;
- case ixgbe_media_type_copper:
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
- break;
- default:
- break;
- }
-
- /*
- * The possible values of fc.requested_mode are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames,
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but
- * we do not support receiving pause frames).
- * 3: Both Rx and Tx flow control (symmetric) are enabled.
- * other: Invalid.
- */
- switch (hw->fc.requested_mode) {
- case ixgbe_fc_none:
- /* Flow control completely disabled by software override. */
- reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
- if (hw->phy.media_type == ixgbe_media_type_backplane)
- reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
- IXGBE_AUTOC_ASM_PAUSE);
- else if (hw->phy.media_type == ixgbe_media_type_copper)
- reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
- break;
- case ixgbe_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is
- * disabled by software override.
- */
- reg |= IXGBE_PCS1GANA_ASM_PAUSE;
- reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
- if (hw->phy.media_type == ixgbe_media_type_backplane) {
- reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
- reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
- } else if (hw->phy.media_type == ixgbe_media_type_copper) {
- reg_cu |= IXGBE_TAF_ASM_PAUSE;
- reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
- }
- break;
- case ixgbe_fc_rx_pause:
- /*
- * Rx Flow control is enabled and Tx Flow control is
- * disabled by software override. Since there really
- * isn't a way to advertise that we are capable of RX
- * Pause ONLY, we will advertise that we support both
- * symmetric and asymmetric Rx PAUSE, as such we fall
- * through to the fc_full statement. Later, we will
- * disable the adapter's ability to send PAUSE frames.
- */
- case ixgbe_fc_full:
- /* Flow control (both Rx and Tx) is enabled by SW override. */
- reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
- if (hw->phy.media_type == ixgbe_media_type_backplane)
- reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
- IXGBE_AUTOC_ASM_PAUSE;
- else if (hw->phy.media_type == ixgbe_media_type_copper)
- reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
- break;
- default:
- hw_dbg(hw, "Flow control param set incorrectly\n");
- ret_val = IXGBE_ERR_CONFIG;
- goto out;
- }
-
- if (hw->mac.type != ixgbe_mac_X540) {
- /*
- * Enable auto-negotiation between the MAC & PHY;
- * the MAC will advertise clause 37 flow control.
- */
- IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
- reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
-
- /* Disable AN timeout */
- if (hw->fc.strict_ieee)
- reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
-
- IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
- hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
- }
-
- /*
- * AUTOC restart handles negotiation of 1G and 10G on backplane
- * and copper. There is no need to set the PCS1GCTL register.
-	 */
- if (hw->phy.media_type == ixgbe_media_type_backplane) {
- reg_bp |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
- } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
- (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
- }
-
- hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
-out:
- return ret_val;
-}
-
-/**
- * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
- * @hw: pointer to hardware structure
- *
- * Starts the hardware by filling the bus info structure and media type,
- * clearing all on-chip counters, initializing the receive address registers,
- * multicast table and VLAN filter table, calling the routine to set up link
- * and flow control settings, and leaving the transmit and receive units
- * disabled and uninitialized.
- **/
-s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
-{
- s32 ret_val;
- u32 ctrl_ext;
-
- /* Set the media type */
- hw->phy.media_type = hw->mac.ops.get_media_type(hw);
-
- /* PHY ops initialization must be done in reset_hw() */
-
- /* Clear the VLAN filter table */
- hw->mac.ops.clear_vfta(hw);
-
- /* Clear statistics registers */
- hw->mac.ops.clear_hw_cntrs(hw);
-
- /* Set No Snoop Disable */
- ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
- ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
- IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
- IXGBE_WRITE_FLUSH(hw);
-
- /* Setup flow control */
- ret_val = ixgbe_setup_fc(hw);
- if (ret_val != 0)
- goto out;
-
- /* Clear adapter stopped flag */
- hw->adapter_stopped = false;
-
-out:
- return ret_val;
-}
-
-/**
- * ixgbe_start_hw_gen2 - Init sequence for common device family
- * @hw: pointer to hw structure
- *
- * Performs the init sequence common to the second generation
- * of 10 GbE devices.
- * Devices in the second generation:
- * 82599
- * X540
- **/
-s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
-{
- u32 i;
- u32 regval;
-
- /* Clear the rate limiters */
- for (i = 0; i < hw->mac.max_tx_queues; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
- IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
- }
- IXGBE_WRITE_FLUSH(hw);
-
- /* Disable relaxed ordering */
- for (i = 0; i < hw->mac.max_tx_queues; i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
- }
-
- for (i = 0; i < hw->mac.max_rx_queues; i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
- IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
- }
-
- return 0;
-}
-
-/**
- * ixgbe_init_hw_generic - Generic hardware initialization
- * @hw: pointer to hardware structure
- *
- * Initializes the hardware by resetting it, filling the bus info structure
- * and media type, clearing all on-chip counters, initializing the receive
- * address registers, multicast table and VLAN filter table, calling the
- * routine to set up link and flow control settings, and leaving the transmit
- * and receive units disabled and uninitialized.
- **/
-s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
-{
- s32 status;
-
- /* Reset the hardware */
- status = hw->mac.ops.reset_hw(hw);
-
- if (status == 0) {
- /* Start the HW */
- status = hw->mac.ops.start_hw(hw);
- }
-
- return status;
-}
-
-/**
- * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
- * @hw: pointer to hardware structure
- *
- * Clears all hardware statistics counters by reading them from the hardware.
- * Statistics counters are clear on read.
- **/
-s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
-{
- u16 i = 0;
-
- IXGBE_READ_REG(hw, IXGBE_CRCERRS);
- IXGBE_READ_REG(hw, IXGBE_ILLERRC);
- IXGBE_READ_REG(hw, IXGBE_ERRBC);
- IXGBE_READ_REG(hw, IXGBE_MSPDC);
- for (i = 0; i < 8; i++)
- IXGBE_READ_REG(hw, IXGBE_MPC(i));
-
- IXGBE_READ_REG(hw, IXGBE_MLFC);
- IXGBE_READ_REG(hw, IXGBE_MRFC);
- IXGBE_READ_REG(hw, IXGBE_RLEC);
- IXGBE_READ_REG(hw, IXGBE_LXONTXC);
- IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
- if (hw->mac.type >= ixgbe_mac_82599EB) {
- IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
- IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
- } else {
- IXGBE_READ_REG(hw, IXGBE_LXONRXC);
- IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
- }
-
- for (i = 0; i < 8; i++) {
- IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
- IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
- if (hw->mac.type >= ixgbe_mac_82599EB) {
- IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
- IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
- } else {
- IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
- IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
- }
- }
- if (hw->mac.type >= ixgbe_mac_82599EB)
- for (i = 0; i < 8; i++)
- IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
- IXGBE_READ_REG(hw, IXGBE_PRC64);
- IXGBE_READ_REG(hw, IXGBE_PRC127);
- IXGBE_READ_REG(hw, IXGBE_PRC255);
- IXGBE_READ_REG(hw, IXGBE_PRC511);
- IXGBE_READ_REG(hw, IXGBE_PRC1023);
- IXGBE_READ_REG(hw, IXGBE_PRC1522);
- IXGBE_READ_REG(hw, IXGBE_GPRC);
- IXGBE_READ_REG(hw, IXGBE_BPRC);
- IXGBE_READ_REG(hw, IXGBE_MPRC);
- IXGBE_READ_REG(hw, IXGBE_GPTC);
- IXGBE_READ_REG(hw, IXGBE_GORCL);
- IXGBE_READ_REG(hw, IXGBE_GORCH);
- IXGBE_READ_REG(hw, IXGBE_GOTCL);
- IXGBE_READ_REG(hw, IXGBE_GOTCH);
- if (hw->mac.type == ixgbe_mac_82598EB)
- for (i = 0; i < 8; i++)
- IXGBE_READ_REG(hw, IXGBE_RNBC(i));
- IXGBE_READ_REG(hw, IXGBE_RUC);
- IXGBE_READ_REG(hw, IXGBE_RFC);
- IXGBE_READ_REG(hw, IXGBE_ROC);
- IXGBE_READ_REG(hw, IXGBE_RJC);
- IXGBE_READ_REG(hw, IXGBE_MNGPRC);
- IXGBE_READ_REG(hw, IXGBE_MNGPDC);
- IXGBE_READ_REG(hw, IXGBE_MNGPTC);
- IXGBE_READ_REG(hw, IXGBE_TORL);
- IXGBE_READ_REG(hw, IXGBE_TORH);
- IXGBE_READ_REG(hw, IXGBE_TPR);
- IXGBE_READ_REG(hw, IXGBE_TPT);
- IXGBE_READ_REG(hw, IXGBE_PTC64);
- IXGBE_READ_REG(hw, IXGBE_PTC127);
- IXGBE_READ_REG(hw, IXGBE_PTC255);
- IXGBE_READ_REG(hw, IXGBE_PTC511);
- IXGBE_READ_REG(hw, IXGBE_PTC1023);
- IXGBE_READ_REG(hw, IXGBE_PTC1522);
- IXGBE_READ_REG(hw, IXGBE_MPTC);
- IXGBE_READ_REG(hw, IXGBE_BPTC);
- for (i = 0; i < 16; i++) {
- IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- IXGBE_READ_REG(hw, IXGBE_QPTC(i));
- if (hw->mac.type >= ixgbe_mac_82599EB) {
- IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
- IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
- IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
- IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
- IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
- } else {
- IXGBE_READ_REG(hw, IXGBE_QBRC(i));
- IXGBE_READ_REG(hw, IXGBE_QBTC(i));
- }
- }
-
- if (hw->mac.type == ixgbe_mac_X540) {
- if (hw->phy.id == 0)
- ixgbe_identify_phy(hw);
- hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
- IXGBE_MDIO_PCS_DEV_TYPE, &i);
- hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
- IXGBE_MDIO_PCS_DEV_TYPE, &i);
- hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
- IXGBE_MDIO_PCS_DEV_TYPE, &i);
- hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
- IXGBE_MDIO_PCS_DEV_TYPE, &i);
- }
-
- return 0;
-}
-
-/**
- * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
- * @hw: pointer to hardware structure
- * @pba_num: stores the part number string from the EEPROM
- * @pba_num_size: part number string buffer length
- *
- * Reads the part number string from the EEPROM.
- **/
-s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
- u32 pba_num_size)
-{
- s32 ret_val;
- u16 data;
- u16 pba_ptr;
- u16 offset;
- u16 length;
-
- if (pba_num == NULL) {
- hw_dbg(hw, "PBA string buffer was null\n");
- return IXGBE_ERR_INVALID_ARGUMENT;
- }
-
- ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
- if (ret_val) {
- hw_dbg(hw, "NVM Read Error\n");
- return ret_val;
- }
-
- ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
- if (ret_val) {
- hw_dbg(hw, "NVM Read Error\n");
- return ret_val;
- }
-
- /*
-	 * If data is not the pointer guard, the PBA must be in legacy format,
-	 * which means pba_ptr is actually our second data word for the PBA
-	 * number and we can decode it into an ASCII string.
- */
- if (data != IXGBE_PBANUM_PTR_GUARD) {
- hw_dbg(hw, "NVM PBA number is not stored as string\n");
-
- /* we will need 11 characters to store the PBA */
- if (pba_num_size < 11) {
- hw_dbg(hw, "PBA string buffer too small\n");
- return IXGBE_ERR_NO_SPACE;
- }
-
- /* extract hex string from data and pba_ptr */
- pba_num[0] = (data >> 12) & 0xF;
- pba_num[1] = (data >> 8) & 0xF;
- pba_num[2] = (data >> 4) & 0xF;
- pba_num[3] = data & 0xF;
- pba_num[4] = (pba_ptr >> 12) & 0xF;
- pba_num[5] = (pba_ptr >> 8) & 0xF;
- pba_num[6] = '-';
- pba_num[7] = 0;
- pba_num[8] = (pba_ptr >> 4) & 0xF;
- pba_num[9] = pba_ptr & 0xF;
-
- /* put a null character on the end of our string */
- pba_num[10] = '\0';
-
- /* switch all the data but the '-' to hex char */
- for (offset = 0; offset < 10; offset++) {
- if (pba_num[offset] < 0xA)
- pba_num[offset] += '0';
- else if (pba_num[offset] < 0x10)
- pba_num[offset] += 'A' - 0xA;
- }
-
- return 0;
- }
-
- ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
- if (ret_val) {
- hw_dbg(hw, "NVM Read Error\n");
- return ret_val;
- }
-
- if (length == 0xFFFF || length == 0) {
- hw_dbg(hw, "NVM PBA number section invalid length\n");
- return IXGBE_ERR_PBA_SECTION;
- }
-
- /* check if pba_num buffer is big enough */
- if (pba_num_size < (((u32)length * 2) - 1)) {
- hw_dbg(hw, "PBA string buffer too small\n");
- return IXGBE_ERR_NO_SPACE;
- }
-
- /* trim pba length from start of string */
- pba_ptr++;
- length--;
-
- for (offset = 0; offset < length; offset++) {
- ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
- if (ret_val) {
- hw_dbg(hw, "NVM Read Error\n");
- return ret_val;
- }
- pba_num[offset * 2] = (u8)(data >> 8);
- pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
- }
- pba_num[offset * 2] = '\0';
-
- return 0;
-}
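-
-/*
- * Worked example of the legacy decode above, with hypothetical EEPROM
- * contents: data = 0x1234 and pba_ptr = 0x5678 yield the nibble array
- * {1, 2, 3, 4, 5, 6, '-', 0, 7, 8}; the hex-conversion loop leaves the
- * '-' untouched and maps the digits to characters, producing the
- * string "123456-078".
- */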
-
-/**
- * ixgbe_get_mac_addr_generic - Generic get MAC address
- * @hw: pointer to hardware structure
- * @mac_addr: Adapter MAC address
- *
- * Reads the adapter's MAC address from the first Receive Address Register
- * (RAR0). A reset of the adapter must be performed prior to calling this
- * function in order for the MAC address to have been loaded from the EEPROM
- * into RAR0.
- **/
-s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
-{
- u32 rar_high;
- u32 rar_low;
- u16 i;
-
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
- rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
-
- for (i = 0; i < 4; i++)
- mac_addr[i] = (u8)(rar_low >> (i*8));
-
- for (i = 0; i < 2; i++)
- mac_addr[i+4] = (u8)(rar_high >> (i*8));
-
- return 0;
-}
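-
-/*
- * Illustration with hypothetical register values: if RAR0 holds
- * RAL(0) = 0xDDCCBBAA and the low 16 bits of RAH(0) are 0xFFEE, the two
- * loops above unpack mac_addr[] = {0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF},
- * i.e. the address AA:BB:CC:DD:EE:FF stored least-significant byte first.
- */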
-
-/**
- * ixgbe_get_bus_info_generic - Generic set PCI bus info
- * @hw: pointer to hardware structure
- *
- * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
- **/
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
-{
- struct ixgbe_mac_info *mac = &hw->mac;
- u16 link_status;
-
- hw->bus.type = ixgbe_bus_type_pci_express;
-
- /* Get the negotiated link width and speed from PCI config space */
- link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
-
- switch (link_status & IXGBE_PCI_LINK_WIDTH) {
- case IXGBE_PCI_LINK_WIDTH_1:
- hw->bus.width = ixgbe_bus_width_pcie_x1;
- break;
- case IXGBE_PCI_LINK_WIDTH_2:
- hw->bus.width = ixgbe_bus_width_pcie_x2;
- break;
- case IXGBE_PCI_LINK_WIDTH_4:
- hw->bus.width = ixgbe_bus_width_pcie_x4;
- break;
- case IXGBE_PCI_LINK_WIDTH_8:
- hw->bus.width = ixgbe_bus_width_pcie_x8;
- break;
- default:
- hw->bus.width = ixgbe_bus_width_unknown;
- break;
- }
-
- switch (link_status & IXGBE_PCI_LINK_SPEED) {
- case IXGBE_PCI_LINK_SPEED_2500:
- hw->bus.speed = ixgbe_bus_speed_2500;
- break;
- case IXGBE_PCI_LINK_SPEED_5000:
- hw->bus.speed = ixgbe_bus_speed_5000;
- break;
- case IXGBE_PCI_LINK_SPEED_8000:
- hw->bus.speed = ixgbe_bus_speed_8000;
- break;
- default:
- hw->bus.speed = ixgbe_bus_speed_unknown;
- break;
- }
-
- mac->ops.set_lan_id(hw);
-
- return 0;
-}
-
-/**
- * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
- * @hw: pointer to the HW structure
- *
- * Determines the LAN function id by reading memory-mapped registers
- * and swaps the port value if requested.
- **/
-void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
-{
- struct ixgbe_bus_info *bus = &hw->bus;
- u32 reg;
-
- reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
- bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
- bus->lan_id = bus->func;
-
- /* check for a port swap */
- reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
- if (reg & IXGBE_FACTPS_LFS)
- bus->func ^= 0x1;
-}
-
-/**
- * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
- * @hw: pointer to hardware structure
- *
- * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
- * disables transmit and receive units. The adapter_stopped flag is used by
- * the shared code and drivers to determine if the adapter is in a stopped
- * state and should not touch the hardware.
- **/
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
-{
- u32 reg_val;
- u16 i;
-
- /*
- * Set the adapter_stopped flag so other driver functions stop touching
- * the hardware
- */
- hw->adapter_stopped = true;
-
- /* Disable the receive unit */
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
-
- /* Clear interrupt mask to stop interrupts from being generated */
- IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
-
- /* Clear any pending interrupts, flush previous writes */
- IXGBE_READ_REG(hw, IXGBE_EICR);
-
- /* Disable the transmit unit. Each queue must be disabled. */
- for (i = 0; i < hw->mac.max_tx_queues; i++)
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
-
- /* Disable the receive unit by stopping each queue */
- for (i = 0; i < hw->mac.max_rx_queues; i++) {
- reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
- reg_val &= ~IXGBE_RXDCTL_ENABLE;
- reg_val |= IXGBE_RXDCTL_SWFLSH;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
- }
-
-	/* flush all queue disables */
- IXGBE_WRITE_FLUSH(hw);
- msleep(2);
-
- /*
-	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
- * access and verify no pending requests
- */
- return ixgbe_disable_pcie_master(hw);
-}
-
-/**
- * ixgbe_led_on_generic - Turns on the software controllable LEDs.
- * @hw: pointer to hardware structure
- * @index: led number to turn on
- **/
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
-{
- u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-
- /* To turn on the LED, set mode to ON. */
- led_reg &= ~IXGBE_LED_MODE_MASK(index);
- led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
- IXGBE_WRITE_FLUSH(hw);
-
- return 0;
-}
-
-/**
- * ixgbe_led_off_generic - Turns off the software controllable LEDs.
- * @hw: pointer to hardware structure
- * @index: led number to turn off
- **/
-s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
-{
- u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-
- /* To turn off the LED, set mode to OFF. */
- led_reg &= ~IXGBE_LED_MODE_MASK(index);
- led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
- IXGBE_WRITE_FLUSH(hw);
-
- return 0;
-}
-
-/**
- * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
- * @hw: pointer to hardware structure
- *
- * Initializes the EEPROM parameters ixgbe_eeprom_info within the
- * ixgbe_hw struct in order to set up EEPROM access.
- **/
-s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
-{
- struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- u32 eec;
- u16 eeprom_size;
-
- if (eeprom->type == ixgbe_eeprom_uninitialized) {
- eeprom->type = ixgbe_eeprom_none;
- /* Set default semaphore delay to 10ms which is a well
- * tested value */
- eeprom->semaphore_delay = 10;
- /* Clear EEPROM page size, it will be initialized as needed */
- eeprom->word_page_size = 0;
-
- /*
- * Check for EEPROM present first.
- * If not present leave as none
- */
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
- if (eec & IXGBE_EEC_PRES) {
- eeprom->type = ixgbe_eeprom_spi;
-
- /*
- * SPI EEPROM is assumed here. This code would need to
- * change if a future EEPROM is not SPI.
- */
- eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
- eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
- }
-
- if (eec & IXGBE_EEC_ADDR_SIZE)
- eeprom->address_bits = 16;
- else
- eeprom->address_bits = 8;
- hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
- "%d\n", eeprom->type, eeprom->word_size,
- eeprom->address_bits);
- }
-
- return 0;
-}
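-
-/*
- * Sizing sketch (assuming IXGBE_EEPROM_WORD_SIZE_SHIFT is 6, i.e. a
- * 64-word base size): an EEC size field of 2 gives
- * word_size = 1 << (2 + 6) = 256 sixteen-bit words.
- */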
-
-/**
- * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
- * @hw: pointer to hardware structure
- * @offset: offset within the EEPROM to write
- * @words: number of word(s)
- * @data: 16 bit word(s) to write to EEPROM
- *
- * Writes 16 bit word(s) to the EEPROM through the bit-bang method.
- **/
-s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data)
-{
- s32 status = 0;
- u16 i, count;
-
- hw->eeprom.ops.init_params(hw);
-
- if (words == 0) {
- status = IXGBE_ERR_INVALID_ARGUMENT;
- goto out;
- }
-
- if (offset + words > hw->eeprom.word_size) {
- status = IXGBE_ERR_EEPROM;
- goto out;
- }
-
- /*
-	 * The EEPROM page size cannot be queried from the chip. We do lazy
-	 * initialization; it is only worth doing when we write a large buffer.
- */
- if ((hw->eeprom.word_page_size == 0) &&
- (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
- ixgbe_detect_eeprom_page_size_generic(hw, offset);
-
- /*
-	 * We cannot hold the synchronization semaphores for too long, to avoid
-	 * starving other entities. However, it is more efficient to write in
-	 * bursts than to synchronize access for each word.
- */
- for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
- count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
- IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
- status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
- count, &data[i]);
-
- if (status != 0)
- break;
- }
-
-out:
- return status;
-}
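-
-/*
- * Burst accounting sketch: with a hypothetical word count of
- * 2 * IXGBE_EEPROM_RD_BUFFER_MAX_COUNT + 5, the loop above issues two
- * full-size bursts followed by a final 5-word burst, acquiring and
- * releasing the EEPROM around each burst so other agents are not
- * starved.
- */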
-
-/**
- * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
- * @hw: pointer to hardware structure
- * @offset: offset within the EEPROM to be written to
- * @words: number of word(s)
- * @data: 16 bit word(s) to be written to the EEPROM
- *
- * If ixgbe_eeprom_update_checksum is not called after this function, the
- * EEPROM will most likely contain an invalid checksum.
- **/
-static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data)
-{
- s32 status;
- u16 word;
- u16 page_size;
- u16 i;
- u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
-
- /* Prepare the EEPROM for writing */
- status = ixgbe_acquire_eeprom(hw);
-
- if (status == 0) {
- if (ixgbe_ready_eeprom(hw) != 0) {
- ixgbe_release_eeprom(hw);
- status = IXGBE_ERR_EEPROM;
- }
- }
-
- if (status == 0) {
- for (i = 0; i < words; i++) {
- ixgbe_standby_eeprom(hw);
-
- /* Send the WRITE ENABLE command (8 bit opcode ) */
- ixgbe_shift_out_eeprom_bits(hw,
- IXGBE_EEPROM_WREN_OPCODE_SPI,
- IXGBE_EEPROM_OPCODE_BITS);
-
- ixgbe_standby_eeprom(hw);
-
- /*
- * Some SPI eeproms use the 8th address bit embedded
- * in the opcode
- */
- if ((hw->eeprom.address_bits == 8) &&
- ((offset + i) >= 128))
- write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
-
- /* Send the Write command (8-bit opcode + addr) */
- ixgbe_shift_out_eeprom_bits(hw, write_opcode,
- IXGBE_EEPROM_OPCODE_BITS);
- ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
- hw->eeprom.address_bits);
-
- page_size = hw->eeprom.word_page_size;
-
-			/* Send the data in burst via SPI */
- do {
- word = data[i];
- word = (word >> 8) | (word << 8);
- ixgbe_shift_out_eeprom_bits(hw, word, 16);
-
- if (page_size == 0)
- break;
-
- /* do not wrap around page */
- if (((offset + i) & (page_size - 1)) ==
- (page_size - 1))
- break;
- } while (++i < words);
-
- ixgbe_standby_eeprom(hw);
- msleep(10);
- }
- /* Done with writing - release the EEPROM */
- ixgbe_release_eeprom(hw);
- }
-
- return status;
-}
-
-/**
- * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
- * @hw: pointer to hardware structure
- * @offset: offset within the EEPROM to be written to
- * @data: 16 bit word to be written to the EEPROM
- *
- * If ixgbe_eeprom_update_checksum is not called after this function, the
- * EEPROM will most likely contain an invalid checksum.
- **/
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
-{
- s32 status;
-
- hw->eeprom.ops.init_params(hw);
-
- if (offset >= hw->eeprom.word_size) {
- status = IXGBE_ERR_EEPROM;
- goto out;
- }
-
- status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
-
-out:
- return status;
-}
-
-/**
- * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
- * @hw: pointer to hardware structure
- * @offset: offset within the EEPROM to be read
- * @words: number of word(s)
- * @data: 16 bit word(s) read from the EEPROM
- *
- * Reads 16 bit word(s) from EEPROM through bit-bang method
- **/
-s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data)
-{
- s32 status = 0;
- u16 i, count;
-
- hw->eeprom.ops.init_params(hw);
-
- if (words == 0) {
- status = IXGBE_ERR_INVALID_ARGUMENT;
- goto out;
- }
-
- if (offset + words > hw->eeprom.word_size) {
- status = IXGBE_ERR_EEPROM;
- goto out;
- }
-
- /*
-	 * We cannot hold the synchronization semaphores for too long, to avoid
-	 * starving other entities. However, it is more efficient to read in
-	 * bursts than to synchronize access for each word.
- */
- for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
- count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
- IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
-
- status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
- count, &data[i]);
-
- if (status != 0)
- break;
- }
-
-out:
- return status;
-}
-
-/**
- * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
- * @hw: pointer to hardware structure
- * @offset: offset within the EEPROM to be read
- * @words: number of word(s)
- * @data: read 16 bit word(s) from EEPROM
- *
- * Reads 16 bit word(s) from EEPROM through bit-bang method
- **/
-static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data)
-{
- s32 status;
- u16 word_in;
- u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
- u16 i;
-
- /* Prepare the EEPROM for reading */
- status = ixgbe_acquire_eeprom(hw);
-
- if (status == 0) {
- if (ixgbe_ready_eeprom(hw) != 0) {
- ixgbe_release_eeprom(hw);
- status = IXGBE_ERR_EEPROM;
- }
- }
-
- if (status == 0) {
- for (i = 0; i < words; i++) {
- ixgbe_standby_eeprom(hw);
- /*
- * Some SPI eeproms use the 8th address bit embedded
- * in the opcode
- */
- if ((hw->eeprom.address_bits == 8) &&
- ((offset + i) >= 128))
- read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
-
- /* Send the READ command (opcode + addr) */
- ixgbe_shift_out_eeprom_bits(hw, read_opcode,
- IXGBE_EEPROM_OPCODE_BITS);
- ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
- hw->eeprom.address_bits);
-
- /* Read the data. */
- word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
- data[i] = (word_in >> 8) | (word_in << 8);
- }
-
- /* End this read operation */
- ixgbe_release_eeprom(hw);
- }
-
- return status;
-}
-
-/**
- * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
- * @hw: pointer to hardware structure
- * @offset: offset within the EEPROM to be read
- * @data: read 16 bit value from EEPROM
- *
- * Reads 16 bit value from EEPROM through bit-bang method
- **/
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 *data)
-{
- s32 status;
-
- hw->eeprom.ops.init_params(hw);
-
- if (offset >= hw->eeprom.word_size) {
- status = IXGBE_ERR_EEPROM;
- goto out;
- }
-
- status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
-
-out:
- return status;
-}
-
-/**
- * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
- * @words: number of word(s)
- * @data: 16 bit word(s) from the EEPROM
- *
- * Reads 16 bit word(s) from the EEPROM using the EERD register.
- **/
-s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data)
-{
- u32 eerd;
- s32 status = 0;
- u32 i;
-
- hw->eeprom.ops.init_params(hw);
-
- if (words == 0) {
- status = IXGBE_ERR_INVALID_ARGUMENT;
- goto out;
- }
-
- if (offset >= hw->eeprom.word_size) {
- status = IXGBE_ERR_EEPROM;
- goto out;
- }
-
- for (i = 0; i < words; i++) {
- eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
- IXGBE_EEPROM_RW_REG_START;
-
- IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
- status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
-
- if (status == 0) {
- data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
- IXGBE_EEPROM_RW_REG_DATA);
- } else {
- hw_dbg(hw, "Eeprom read timed out\n");
- goto out;
- }
- }
-out:
- return status;
-}
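-
-/*
- * Sketch of a single EERD transaction as driven by the loop above (bit
- * positions follow the shared-code constants, assumed here to be
- * IXGBE_EEPROM_RW_ADDR_SHIFT = 2 and IXGBE_EEPROM_RW_REG_DATA = 16):
- * to read EEPROM word 0x3 the driver writes
- * (0x3 << 2) + IXGBE_EEPROM_RW_REG_START = 0xD to EERD, polls until
- * the done bit is set, and then finds the 16-bit word in EERD[31:16].
- */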
-
-/**
- * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
- * @hw: pointer to hardware structure
- * @offset: offset within the EEPROM to be used as a scratch pad
- *
- * Discovers the EEPROM page size by writing marching data at the given
- * offset. This function is called only when we are writing a new large
- * buffer at the given offset, so the data would be overwritten anyway.
- **/
-static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
- u16 offset)
-{
- u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
- s32 status = 0;
- u16 i;
-
- for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
- data[i] = i;
-
- hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
- status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
- IXGBE_EEPROM_PAGE_SIZE_MAX, data);
- hw->eeprom.word_page_size = 0;
- if (status != 0)
- goto out;
-
- status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
- if (status != 0)
- goto out;
-
- /*
-	 * When writing a burst larger than the actual page size, the
-	 * EEPROM address wraps around within the current page.
- */
- hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
-
- hw_dbg(hw, "Detected EEPROM page size = %d words.",
- hw->eeprom.word_page_size);
-out:
- return status;
-}
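-
-/*
- * Worked example of the detection above, assuming the real page size
- * is 16 words and IXGBE_EEPROM_PAGE_SIZE_MAX is 128: the 128-word
- * burst wraps within a single 16-word physical page, so offset 0 is
- * last overwritten with the value 112, and 128 - data[0] = 16
- * recovers the page size.
- */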
-
-/**
- * ixgbe_read_eerd_generic - Read EEPROM word using EERD
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
- * @data: word read from the EEPROM
- *
- * Reads a 16 bit word from the EEPROM using the EERD register.
- **/
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
-{
- return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
-}
-
-/**
- * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to write
- * @words: number of word(s)
- * @data: word(s) to write to the EEPROM
- *
- * Writes 16 bit word(s) to the EEPROM using the EEWR register.
- **/
-s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data)
-{
- u32 eewr;
- s32 status = 0;
- u16 i;
-
- hw->eeprom.ops.init_params(hw);
-
- if (words == 0) {
- status = IXGBE_ERR_INVALID_ARGUMENT;
- goto out;
- }
-
- if (offset >= hw->eeprom.word_size) {
- status = IXGBE_ERR_EEPROM;
- goto out;
- }
-
- for (i = 0; i < words; i++) {
- eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
- (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
- IXGBE_EEPROM_RW_REG_START;
-
- status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
- if (status != 0) {
- hw_dbg(hw, "Eeprom write EEWR timed out\n");
- goto out;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
-
- status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
- if (status != 0) {
- hw_dbg(hw, "Eeprom write EEWR timed out\n");
- goto out;
- }
- }
-
-out:
- return status;
-}
-
-/**
- * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to write
- * @data: word to write to the EEPROM
- *
- * Writes a 16 bit word to the EEPROM using the EEWR register.
- **/
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
-{
- return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
-}
-
-/**
- * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
- * @hw: pointer to hardware structure
- * @ee_reg: EEPROM flag for polling
- *
- * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
- * read or write is done respectively.
- **/
-s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
-{
- u32 i;
- u32 reg;
- s32 status = IXGBE_ERR_EEPROM;
-
- for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
- if (ee_reg == IXGBE_NVM_POLL_READ)
- reg = IXGBE_READ_REG(hw, IXGBE_EERD);
- else
- reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
-
- if (reg & IXGBE_EEPROM_RW_REG_DONE) {
- status = 0;
- break;
- }
- udelay(5);
- }
- return status;
-}
-
-/**
- * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
- * @hw: pointer to hardware structure
- *
- * Prepares EEPROM for access using bit-bang method. This function should
- * be called before issuing a command to the EEPROM.
- **/
-static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
-{
- s32 status = 0;
- u32 eec;
- u32 i;
-
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
- != 0)
- status = IXGBE_ERR_SWFW_SYNC;
-
- if (status == 0) {
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-
- /* Request EEPROM Access */
- eec |= IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-
- for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
- if (eec & IXGBE_EEC_GNT)
- break;
- udelay(5);
- }
-
- /* Release if grant not acquired */
- if (!(eec & IXGBE_EEC_GNT)) {
- eec &= ~IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
- hw_dbg(hw, "Could not acquire EEPROM grant\n");
-
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- status = IXGBE_ERR_EEPROM;
- }
-
- /* Setup EEPROM for Read/Write */
- if (status == 0) {
- /* Clear CS and SK */
- eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
- IXGBE_WRITE_FLUSH(hw);
- udelay(1);
- }
- }
- return status;
-}
-
-/**
- * ixgbe_get_eeprom_semaphore - Get hardware semaphore
- * @hw: pointer to hardware structure
- *
- * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
- **/
-static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
-{
- s32 status = IXGBE_ERR_EEPROM;
- u32 timeout = 2000;
- u32 i;
- u32 swsm;
-
- /* Get SMBI software semaphore between device drivers first */
- for (i = 0; i < timeout; i++) {
- /*
- * If the SMBI bit is 0 when we read it, then the bit will be
- * set and we have the semaphore
- */
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
- if (!(swsm & IXGBE_SWSM_SMBI)) {
- status = 0;
- break;
- }
- udelay(50);
- }
-
- if (i == timeout) {
- hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
- "not granted.\n");
- /*
- * this release is particularly important because our attempts
- * above to get the semaphore may have succeeded, and if there
- * was a timeout, we should unconditionally clear the semaphore
- * bits to free the driver to make progress
- */
- ixgbe_release_eeprom_semaphore(hw);
-
- udelay(50);
- /*
- * one last try
- * If the SMBI bit is 0 when we read it, then the bit will be
- * set and we have the semaphore
- */
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
- if (!(swsm & IXGBE_SWSM_SMBI))
- status = 0;
- }
-
- /* Now get the semaphore between SW/FW through the SWESMBI bit */
- if (status == 0) {
- for (i = 0; i < timeout; i++) {
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-
- /* Set the SW EEPROM semaphore bit to request access */
- swsm |= IXGBE_SWSM_SWESMBI;
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
-
- /*
- * If we set the bit successfully then we got the
- * semaphore.
- */
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
- if (swsm & IXGBE_SWSM_SWESMBI)
- break;
-
- udelay(50);
- }
-
- /*
- * Release semaphores and return error if SW EEPROM semaphore
- * was not granted because we don't have access to the EEPROM
- */
- if (i >= timeout) {
- hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
- "not granted.\n");
- ixgbe_release_eeprom_semaphore(hw);
- status = IXGBE_ERR_EEPROM;
- }
- } else {
- hw_dbg(hw, "Software semaphore SMBI between device drivers "
- "not granted.\n");
- }
-
- return status;
-}
-
-/**
- * ixgbe_release_eeprom_semaphore - Release hardware semaphore
- * @hw: pointer to hardware structure
- *
- * This function clears hardware semaphore bits.
- **/
-static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
-{
- u32 swsm;
-
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-
- /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
- swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
- IXGBE_WRITE_FLUSH(hw);
-}
-
-/**
- * ixgbe_ready_eeprom - Polls for EEPROM ready
- * @hw: pointer to hardware structure
- **/
-static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
-{
- s32 status = 0;
- u16 i;
- u8 spi_stat_reg;
-
- /*
- * Read "Status Register" repeatedly until the LSB is cleared. The
- * EEPROM will signal that the command has been completed by clearing
- * bit 0 of the internal status register. If it's not cleared within
- * 5 milliseconds, then error out.
- */
- for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
- ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
- IXGBE_EEPROM_OPCODE_BITS);
- spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
- if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
- break;
-
- udelay(5);
- ixgbe_standby_eeprom(hw);
-	}
-
- /*
- * On some parts, SPI write time could vary from 0-20mSec on 3.3V
- * devices (and only 0-5mSec on 5V devices)
- */
- if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
- hw_dbg(hw, "SPI EEPROM Status error\n");
- status = IXGBE_ERR_EEPROM;
- }
-
- return status;
-}
-
-/**
- * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
- * @hw: pointer to hardware structure
- **/
-static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
-{
- u32 eec;
-
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-
- /* Toggle CS to flush commands */
- eec |= IXGBE_EEC_CS;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
- IXGBE_WRITE_FLUSH(hw);
- udelay(1);
- eec &= ~IXGBE_EEC_CS;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
- IXGBE_WRITE_FLUSH(hw);
- udelay(1);
-}
-
-/**
- * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
- * @hw: pointer to hardware structure
- * @data: data to send to the EEPROM
- * @count: number of bits to shift out
- **/
-static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
- u16 count)
-{
- u32 eec;
- u32 mask;
- u32 i;
-
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-
- /*
- * Mask is used to shift "count" bits of "data" out to the EEPROM
- * one bit at a time. Determine the starting bit based on count
- */
- mask = 0x01 << (count - 1);
-
- for (i = 0; i < count; i++) {
- /*
- * A "1" is shifted out to the EEPROM by setting bit "DI" to a
- * "1", and then raising and then lowering the clock (the SK
- * bit controls the clock input to the EEPROM). A "0" is
- * shifted out to the EEPROM by setting "DI" to "0" and then
- * raising and then lowering the clock.
- */
- if (data & mask)
- eec |= IXGBE_EEC_DI;
- else
- eec &= ~IXGBE_EEC_DI;
-
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
- IXGBE_WRITE_FLUSH(hw);
-
- udelay(1);
-
- ixgbe_raise_eeprom_clk(hw, &eec);
- ixgbe_lower_eeprom_clk(hw, &eec);
-
- /*
- * Shift mask to signify next bit of data to shift in to the
- * EEPROM
- */
- mask = mask >> 1;
-	}
-
- /* We leave the "DI" bit set to "0" when we leave this routine. */
- eec &= ~IXGBE_EEC_DI;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
- IXGBE_WRITE_FLUSH(hw);
-}
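-
-/*
- * Shift-out example: for count = 8 and data = 0xA5 (binary 10100101),
- * mask starts at 0x80 and marches right each iteration, so the bits
- * are clocked out MSB first: 1, 0, 1, 0, 0, 1, 0, 1.
- */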
-
-/**
- * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
- * @hw: pointer to hardware structure
- * @count: number of bits to shift in
- **/
-static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
-{
- u32 eec;
- u32 i;
- u16 data = 0;
-
- /*
- * In order to read a register from the EEPROM, we need to shift
- * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
- * the clock input to the EEPROM (setting the SK bit), and then reading
- * the value of the "DO" bit. During this "shifting in" process the
- * "DI" bit should always be clear.
- */
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-
- eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
-
- for (i = 0; i < count; i++) {
- data = data << 1;
- ixgbe_raise_eeprom_clk(hw, &eec);
-
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-
- eec &= ~(IXGBE_EEC_DI);
- if (eec & IXGBE_EEC_DO)
- data |= 1;
-
- ixgbe_lower_eeprom_clk(hw, &eec);
- }
-
- return data;
-}
-
-/**
- * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
- * @hw: pointer to hardware structure
- * @eec: EEC register's current value
- **/
-static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
-{
- /*
- * Raise the clock input to the EEPROM
- * (setting the SK bit), then delay
- */
- *eec = *eec | IXGBE_EEC_SK;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
- IXGBE_WRITE_FLUSH(hw);
- udelay(1);
-}
-
-/**
- * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
- * @hw: pointer to hardware structure
- * @eec: EEC register's current value
- **/
-static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
-{
- /*
- * Lower the clock input to the EEPROM (clearing the SK bit), then
- * delay
- */
- *eec = *eec & ~IXGBE_EEC_SK;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
- IXGBE_WRITE_FLUSH(hw);
- udelay(1);
-}
-
-/**
- * ixgbe_release_eeprom - Release EEPROM, release semaphores
- * @hw: pointer to hardware structure
- **/
-static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
-{
- u32 eec;
-
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-
- eec |= IXGBE_EEC_CS; /* Pull CS high */
- eec &= ~IXGBE_EEC_SK; /* Lower SCK */
-
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
- IXGBE_WRITE_FLUSH(hw);
-
- udelay(1);
-
- /* Stop requesting EEPROM access */
- eec &= ~IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-
- /* Delay before attempt to obtain semaphore again to allow FW access */
- msleep(hw->eeprom.semaphore_delay);
-}
-
-/**
- * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
- * @hw: pointer to hardware structure
- **/
-u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
-{
- u16 i;
- u16 j;
- u16 checksum = 0;
- u16 length = 0;
- u16 pointer = 0;
- u16 word = 0;
-
-	/* Include words 0x0-0x3E, everything before the checksum word */
- for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
- if (hw->eeprom.ops.read(hw, i, &word) != 0) {
- hw_dbg(hw, "EEPROM read failed\n");
- break;
- }
- checksum += word;
- }
-
- /* Include all data from pointers except for the fw pointer */
- for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
- hw->eeprom.ops.read(hw, i, &pointer);
-
- /* Make sure the pointer seems valid */
- if (pointer != 0xFFFF && pointer != 0) {
- hw->eeprom.ops.read(hw, pointer, &length);
-
- if (length != 0xFFFF && length != 0) {
- for (j = pointer+1; j <= pointer+length; j++) {
- hw->eeprom.ops.read(hw, j, &word);
- checksum += word;
- }
- }
- }
- }
-
- checksum = (u16)IXGBE_EEPROM_SUM - checksum;
-
- return checksum;
-}
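-
-/*
- * Checksum arithmetic, with made-up numbers: if the checksummed words
- * sum to 0x1234, the value returned (and later stored in the checksum
- * word) is IXGBE_EEPROM_SUM - 0x1234, so that the words plus checksum
- * sum to IXGBE_EEPROM_SUM (0xBABA in the shared code) modulo 2^16.
- */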
-
-/**
- * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
- * @hw: pointer to hardware structure
- * @checksum_val: calculated checksum
- *
- * Performs checksum calculation and validates the EEPROM checksum. If the
- * caller does not need checksum_val, the value can be NULL.
- **/
-s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
- u16 *checksum_val)
-{
- s32 status;
- u16 checksum;
- u16 read_checksum = 0;
-
- /*
- * Read the first word from the EEPROM. If this times out or fails, do
- * not continue or we could be in for a very long wait while every
- * EEPROM read fails
- */
- status = hw->eeprom.ops.read(hw, 0, &checksum);
-
- if (status == 0) {
- checksum = hw->eeprom.ops.calc_checksum(hw);
-
- hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
-
- /*
- * Verify read checksum from EEPROM is the same as
- * calculated checksum
- */
- if (read_checksum != checksum)
- status = IXGBE_ERR_EEPROM_CHECKSUM;
-
- /* If the user cares, return the calculated checksum */
- if (checksum_val)
- *checksum_val = checksum;
- } else {
- hw_dbg(hw, "EEPROM read failed\n");
- }
-
- return status;
-}
-
-/**
- * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
- * @hw: pointer to hardware structure
- **/
-s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
-{
- s32 status;
- u16 checksum;
-
- /*
- * Read the first word from the EEPROM. If this times out or fails, do
- * not continue or we could be in for a very long wait while every
- * EEPROM read fails
- */
- status = hw->eeprom.ops.read(hw, 0, &checksum);
-
- if (status == 0) {
- checksum = hw->eeprom.ops.calc_checksum(hw);
- status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
- checksum);
- } else {
- hw_dbg(hw, "EEPROM read failed\n");
- }
-
- return status;
-}
-
-/**
- * ixgbe_validate_mac_addr - Validate MAC address
- * @mac_addr: pointer to MAC address.
- *
- * Tests a MAC address to ensure it is a valid Individual Address
- **/
-s32 ixgbe_validate_mac_addr(u8 *mac_addr)
-{
- s32 status = 0;
-
- /* Make sure it is not a multicast address */
-	if (IXGBE_IS_MULTICAST(mac_addr)) {
-		status = IXGBE_ERR_INVALID_MAC_ADDR;
-	/* Not a broadcast address */
-	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
-		status = IXGBE_ERR_INVALID_MAC_ADDR;
-	/* Reject the zero address */
-	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
-		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
-		status = IXGBE_ERR_INVALID_MAC_ADDR;
- }
- return status;
-}
-
-/**
- * ixgbe_set_rar_generic - Set Rx address register
- * @hw: pointer to hardware structure
- * @index: Receive address register to write
- * @addr: Address to put into receive address register
- * @vmdq: VMDq "set" or "pool" index
- * @enable_addr: set flag that address is active
- *
- * Puts an ethernet address into a receive address register.
- **/
-s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr)
-{
- u32 rar_low, rar_high;
- u32 rar_entries = hw->mac.num_rar_entries;
-
- /* Make sure we are using a valid rar index range */
- if (index >= rar_entries) {
- hw_dbg(hw, "RAR index %d is out of range.\n", index);
- return IXGBE_ERR_INVALID_ARGUMENT;
- }
-
- /* setup VMDq pool selection before this RAR gets enabled */
- hw->mac.ops.set_vmdq(hw, index, vmdq);
-
- /*
- * HW expects these in little endian so we reverse the byte
- * order from network order (big endian) to little endian
- */
- rar_low = ((u32)addr[0] |
- ((u32)addr[1] << 8) |
- ((u32)addr[2] << 16) |
- ((u32)addr[3] << 24));
- /*
- * Some parts put the VMDq setting in the extra RAH bits,
- * so save everything except the lower 16 bits that hold part
- * of the address and the address valid bit.
- */
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
- rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
- rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
-
- if (enable_addr != 0)
- rar_high |= IXGBE_RAH_AV;
-
- IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-
- return 0;
-}
-
-/**
- * ixgbe_clear_rar_generic - Remove Rx address register
- * @hw: pointer to hardware structure
- * @index: Receive address register to write
- *
- * Clears an ethernet address from a receive address register.
- **/
-s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
-{
- u32 rar_high;
- u32 rar_entries = hw->mac.num_rar_entries;
-
- /* Make sure we are using a valid rar index range */
- if (index >= rar_entries) {
- hw_dbg(hw, "RAR index %d is out of range.\n", index);
- return IXGBE_ERR_INVALID_ARGUMENT;
- }
-
- /*
- * Some parts put the VMDq setting in the extra RAH bits,
- * so save everything except the lower 16 bits that hold part
- * of the address and the address valid bit.
- */
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
- rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
-
- IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-
- /* clear VMDq pool/queue selection for this RAR */
- hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
-
- return 0;
-}
-
-/**
- * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
- * @hw: pointer to hardware structure
- *
- * Places the MAC address in receive address register 0 and clears the rest
- * of the receive address registers. Clears the multicast table. Assumes
- * the receiver is in reset when the routine is called.
- **/
-s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
-{
- u32 i;
- u32 rar_entries = hw->mac.num_rar_entries;
-
- /*
- * If the current mac address is valid, assume it is a software override
- * to the permanent address.
- * Otherwise, use the permanent address from the eeprom.
- */
- if (ixgbe_validate_mac_addr(hw->mac.addr) ==
- IXGBE_ERR_INVALID_MAC_ADDR) {
- /* Get the MAC address from the RAR0 for later reference */
- hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
-
- hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
- hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
- } else {
- /* Setup the receive address. */
- hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
- hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
- hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
-
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
- /* clear VMDq pool/queue selection for RAR 0 */
- hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
- }
- hw->addr_ctrl.overflow_promisc = 0;
-
- hw->addr_ctrl.rar_used_count = 1;
-
- /* Zero out the other receive addresses. */
- hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
- for (i = 1; i < rar_entries; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
- }
-
- /* Clear the MTA */
- hw->addr_ctrl.mta_in_use = 0;
- IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
-
- hw_dbg(hw, " Clearing MTA\n");
- for (i = 0; i < hw->mac.mcft_size; i++)
- IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
-
- ixgbe_init_uta_tables(hw);
-
- return 0;
-}
-
-/**
- * ixgbe_add_uc_addr - Adds a secondary unicast address.
- * @hw: pointer to hardware structure
- * @addr: new address
- * @vmdq: VMDq pool to assign to the address
- *
- * Adds the address to an unused receive address register, or puts the
- * controller into promiscuous mode when no register is free.
- **/
-void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
-{
- u32 rar_entries = hw->mac.num_rar_entries;
- u32 rar;
-
- hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
-
- /*
- * Place this address in the RAR if there is room,
- * else put the controller into promiscuous mode
- */
- if (hw->addr_ctrl.rar_used_count < rar_entries) {
- rar = hw->addr_ctrl.rar_used_count;
- hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
- hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
- hw->addr_ctrl.rar_used_count++;
- } else {
- hw->addr_ctrl.overflow_promisc++;
- }
-
- hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
-}
-
-/**
- * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
- * @hw: pointer to hardware structure
- * @addr_list: the list of new addresses
- * @addr_count: number of addresses
- * @next: iterator function to walk the address list
- *
- * The given list replaces any existing list. Clears the secondary addrs from
- * receive address registers. Uses unused receive address registers for the
- * first secondary addresses, and falls back to promiscuous mode as needed.
- *
- * Drivers using secondary unicast addresses must set user_set_promisc when
- * manually putting the device into promiscuous mode.
- **/
-s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
- u32 addr_count, ixgbe_mc_addr_itr next)
-{
- u8 *addr;
- u32 i;
- u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
- u32 uc_addr_in_use;
- u32 fctrl;
- u32 vmdq;
-
- /*
- * Clear accounting of old secondary address list,
- * don't count RAR[0]
- */
- uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
- hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
- hw->addr_ctrl.overflow_promisc = 0;
-
- /* Zero out the other receive addresses */
-	hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use);
- for (i = 0; i < uc_addr_in_use; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
- }
-
- /* Add the new addresses */
- for (i = 0; i < addr_count; i++) {
- hw_dbg(hw, " Adding the secondary addresses:\n");
- addr = next(hw, &addr_list, &vmdq);
- ixgbe_add_uc_addr(hw, addr, vmdq);
- }
-
- if (hw->addr_ctrl.overflow_promisc) {
- /* enable promisc if not already in overflow or set by user */
- if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
- hw_dbg(hw, " Entering address overflow promisc mode\n");
- fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- fctrl |= IXGBE_FCTRL_UPE;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- }
- } else {
- /* only disable if set by overflow, not by user */
- if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
- hw_dbg(hw, " Leaving address overflow promisc mode\n");
- fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- fctrl &= ~IXGBE_FCTRL_UPE;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- }
- }
-
- hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
- return 0;
-}
-
-/**
- * ixgbe_mta_vector - Determines bit-vector in multicast table to set
- * @hw: pointer to hardware structure
- * @mc_addr: the multicast address
- *
- * Extracts the 12 bits from a multicast address that determine which
- * bit-vector to set in the multicast table. The hardware uses 12 bits of
- * the incoming rx multicast address to determine the bit-vector to check
- * in the MTA. Which of the 4 combinations of 12 bits the hardware uses is
- * set by the MO field of the MCSTCTRL. The MO field is set during
- * initialization to mc_filter_type.
- **/
-static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
-{
- u32 vector = 0;
-
- switch (hw->mac.mc_filter_type) {
- case 0: /* use bits [47:36] of the address */
- vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
- break;
- case 1: /* use bits [46:35] of the address */
- vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
- break;
- case 2: /* use bits [45:34] of the address */
- vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
- break;
- case 3: /* use bits [43:32] of the address */
- vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
- break;
- default: /* Invalid mc_filter_type */
- hw_dbg(hw, "MC filter type param set incorrectly\n");
- break;
- }
-
- /* vector can only be 12-bits or boundary will be exceeded */
- vector &= 0xFFF;
- return vector;
-}
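-
-/*
- * Worked example for the default filter type (case 0 above): for the
- * multicast address 01:00:5E:00:00:01, mc_addr[4] = 0x00 and
- * mc_addr[5] = 0x01, so vector = (0x00 >> 4) | (0x01 << 4) = 0x010;
- * ixgbe_set_mta() below then sets bit 16 of MTA shadow register 0.
- */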
-
-/**
- * ixgbe_set_mta - Set bit-vector in multicast table
- * @hw: pointer to hardware structure
- * @mc_addr: Multicast address to hash into the table
- *
- * Sets the bit-vector in the multicast table.
- **/
-void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
-{
- u32 vector;
- u32 vector_bit;
- u32 vector_reg;
-
- hw->addr_ctrl.mta_in_use++;
-
- vector = ixgbe_mta_vector(hw, mc_addr);
- hw_dbg(hw, " bit-vector = 0x%03X\n", vector);
-
- /*
- * The MTA is a register array of 128 32-bit registers. It is treated
- * like an array of 4096 bits. We want to set bit
- * BitArray[vector_value]. So we figure out what register the bit is
- * in, read it, OR in the new bit, then write back the new value. The
- * register is determined by the upper 7 bits of the vector value and
- * the bit within that register is determined by the lower 5 bits of
- * the value.
- */
- vector_reg = (vector >> 5) & 0x7F;
- vector_bit = vector & 0x1F;
- hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
-}
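-
-/*
- * Editor's note (illustrative): continuing the example from
- * ixgbe_mta_vector(), vector 0x010 selects MTA register 0x010 >> 5 = 0
- * and bit 0x010 & 0x1F = 16, i.e. mta_shadow[0] |= (1 << 16).
- */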
-
-/**
- * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
- * @hw: pointer to hardware structure
- * @mc_addr_list: the list of new multicast addresses
- * @mc_addr_count: number of addresses
- * @next: iterator function to walk the multicast address list
- * @clear: flag, when set clears the table beforehand
- *
- * When the clear flag is set, the given list replaces any existing list.
- * Hashes the given addresses into the multicast table.
- **/
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, ixgbe_mc_addr_itr next,
- bool clear)
-{
- u32 i;
- u32 vmdq;
-
- /*
- * Set the new number of MC addresses that we are being requested to
- * use.
- */
- hw->addr_ctrl.num_mc_addrs = mc_addr_count;
- hw->addr_ctrl.mta_in_use = 0;
-
- /* Clear mta_shadow */
- if (clear) {
- hw_dbg(hw, " Clearing MTA\n");
- memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
- }
-
- /* Update mta_shadow */
- for (i = 0; i < mc_addr_count; i++) {
- hw_dbg(hw, " Adding the multicast addresses:\n");
- ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
- }
-
- /* Enable mta */
- for (i = 0; i < hw->mac.mcft_size; i++)
- IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
- hw->mac.mta_shadow[i]);
-
- if (hw->addr_ctrl.mta_in_use > 0)
- IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
- IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
-
- hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
- return 0;
-}
-
-/**
- * ixgbe_enable_mc_generic - Enable multicast address in RAR
- * @hw: pointer to hardware structure
- *
- * Enables use of the multicast hash table when multicast addresses are in use.
- **/
-s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
-{
- struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
-
- if (a->mta_in_use > 0)
- IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
- hw->mac.mc_filter_type);
-
- return 0;
-}
-
-/**
- * ixgbe_disable_mc_generic - Disable multicast address in RAR
- * @hw: pointer to hardware structure
- *
- * Disables use of the multicast hash table.
- **/
-s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
-{
- struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
-
- if (a->mta_in_use > 0)
- IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
-
- return 0;
-}
-
-/**
- * ixgbe_fc_enable_generic - Enable flow control
- * @hw: pointer to hardware structure
- *
- * Enable flow control according to the current settings.
- **/
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
-{
- s32 ret_val = 0;
- u32 mflcn_reg, fccfg_reg;
- u32 reg;
- u32 fcrtl, fcrth;
- int i;
-
- /* Validate the water mark configuration */
- if (!hw->fc.pause_time) {
- ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
- goto out;
- }
-
- /* Low water mark of zero causes XOFF floods */
- for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
- if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
- hw->fc.high_water[i]) {
- if (!hw->fc.low_water[i] ||
- hw->fc.low_water[i] >= hw->fc.high_water[i]) {
- hw_dbg(hw, "Invalid water mark configuration\n");
- ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
- goto out;
- }
- }
- }
-
- /* Negotiate the fc mode to use */
- ixgbe_fc_autoneg(hw);
-
- /* Disable any previous flow control settings */
- mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
- mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
-
- fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
- fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
-
- /*
- * The possible values of fc.current_mode are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames,
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but
- * we do not support receiving pause frames).
- * 3: Both Rx and Tx flow control (symmetric) are enabled.
- * other: Invalid.
- */
- switch (hw->fc.current_mode) {
- case ixgbe_fc_none:
- /*
- * Flow control is disabled by software override or autoneg.
- * The code below will actually disable it in the HW.
- */
- break;
- case ixgbe_fc_rx_pause:
- /*
- * Rx Flow control is enabled and Tx Flow control is
- * disabled by software override. Since there really
- * isn't a way to advertise that we are capable of RX
- * Pause ONLY, we will advertise that we support both
- * symmetric and asymmetric Rx PAUSE. Later, we will
- * disable the adapter's ability to send PAUSE frames.
- */
- mflcn_reg |= IXGBE_MFLCN_RFCE;
- break;
- case ixgbe_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is
- * disabled by software override.
- */
- fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
- break;
- case ixgbe_fc_full:
- /* Flow control (both Rx and Tx) is enabled by SW override. */
- mflcn_reg |= IXGBE_MFLCN_RFCE;
- fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
- break;
- default:
- hw_dbg(hw, "Flow control param set incorrectly\n");
- ret_val = IXGBE_ERR_CONFIG;
- goto out;
- break;
- }
-
- /* Set 802.3x based flow control settings. */
- mflcn_reg |= IXGBE_MFLCN_DPF;
- IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
- IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
-
-
- /* Set up and enable Rx high/low water mark thresholds, enable XON. */
- for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
- if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
- hw->fc.high_water[i]) {
- fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
- fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
- } else {
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
- /*
- * In order to prevent Tx hangs when the internal Tx
- * switch is enabled we must set the high water mark
- * to the maximum FCRTH value. This allows the Tx
- * switch to function even under heavy Rx workloads.
- */
- fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
- }
-
- /* Configure pause time (2 TCs per register) */
- reg = hw->fc.pause_time * 0x00010001;
- for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
- IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
-
- /* Configure flow control refresh threshold value */
- IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
-
-out:
- return ret_val;
-}
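-
-/*
- * Editor's note (illustrative, assuming the water marks are kept in KB as
- * elsewhere in this driver): the << 10 above converts a KB water mark to
- * the byte-based FCRTL/FCRTH units, e.g. low_water[i] = 2 encodes as
- * (2 << 10) = 0x800 plus the IXGBE_FCRTL_XONE enable bit.
- */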
-
-/**
- * ixgbe_negotiate_fc - Negotiate flow control
- * @hw: pointer to hardware structure
- * @adv_reg: flow control advertised settings
- * @lp_reg: link partner's flow control settings
- * @adv_sym: symmetric pause bit in advertisement
- * @adv_asm: asymmetric pause bit in advertisement
- * @lp_sym: symmetric pause bit in link partner advertisement
- * @lp_asm: asymmetric pause bit in link partner advertisement
- *
- * Find the intersection between advertised settings and link partner's
- * advertised settings
- **/
-static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
- u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
-{
- if ((!(adv_reg)) || (!(lp_reg)))
- return IXGBE_ERR_FC_NOT_NEGOTIATED;
-
- if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
- /*
- * Now we need to check if the user selected Rx ONLY
- * pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
- */
- if (hw->fc.requested_mode == ixgbe_fc_full) {
- hw->fc.current_mode = ixgbe_fc_full;
- hw_dbg(hw, "Flow Control = FULL.\n");
- } else {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
- }
- } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
- (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
- hw->fc.current_mode = ixgbe_fc_tx_pause;
- hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
- } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
- !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
- } else {
- hw->fc.current_mode = ixgbe_fc_none;
- hw_dbg(hw, "Flow Control = NONE.\n");
- }
- return 0;
-}
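-
-/*
- * Editor's note: a compact restatement of the resolution above
- * (local sym/asm vs. link partner lp_sym/lp_asm):
- *   sym && lp_sym                      -> FULL, or RX PAUSE if the
- *                                         user did not request FULL
- *   !sym && asm && lp_sym && lp_asm    -> TX PAUSE only
- *   sym && asm && !lp_sym && lp_asm    -> RX PAUSE only
- *   anything else                      -> NONE
- */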
-
-/**
- * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
- * @hw: pointer to hardware structure
- *
- * Enables flow control on 1 gig fiber according to the autonegotiation result.
- **/
-static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
-{
- u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
- s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
-
- /*
- * On multispeed fiber at 1g, bail out if
- * - link is up but AN did not complete, or if
- * - link is up and AN completed but timed out
- */
-
- linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
- if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
- (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
- goto out;
-
- pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
-
- ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
- pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
- IXGBE_PCS1GANA_ASM_PAUSE,
- IXGBE_PCS1GANA_SYM_PAUSE,
- IXGBE_PCS1GANA_ASM_PAUSE);
-
-out:
- return ret_val;
-}
-
-/**
- * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
- * @hw: pointer to hardware structure
- *
- * Enable flow control according to IEEE clause 37.
- **/
-static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
-{
- u32 links2, anlp1_reg, autoc_reg, links;
- s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
-
- /*
- * On backplane, bail out if
- * - backplane autoneg was not completed, or if
- * - we are 82599 and link partner is not AN enabled
- */
- links = IXGBE_READ_REG(hw, IXGBE_LINKS);
- if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
- goto out;
-
- if (hw->mac.type == ixgbe_mac_82599EB) {
- links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
- if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
- goto out;
- }
- /*
- * Read the 10g AN autoc and LP ability registers and resolve
- * local flow control settings accordingly
- */
- autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
-
- ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
- anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
- IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
-
-out:
- return ret_val;
-}
-
-/**
- * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
- * @hw: pointer to hardware structure
- *
- * Enable flow control according to IEEE clause 37.
- **/
-static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
-{
- u16 technology_ability_reg = 0;
- u16 lp_technology_ability_reg = 0;
-
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &technology_ability_reg);
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &lp_technology_ability_reg);
-
- return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
- (u32)lp_technology_ability_reg,
- IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
- IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
-}
-
-/**
- * ixgbe_fc_autoneg - Configure flow control
- * @hw: pointer to hardware structure
- *
- * Compares our advertised flow control capabilities to those advertised by
- * our link partner, and determines the proper flow control mode to use.
- **/
-void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
-{
- s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
- ixgbe_link_speed speed;
- bool link_up;
-
- /*
- * AN should have completed when the cable was plugged in.
- * Look for reasons to bail out. Bail out if:
- * - FC autoneg is disabled, or if
- * - link is not up.
- */
- if (hw->fc.disable_fc_autoneg)
- goto out;
-
- hw->mac.ops.check_link(hw, &speed, &link_up, false);
- if (!link_up)
- goto out;
-
- switch (hw->phy.media_type) {
- /* Autoneg flow control on fiber adapters */
- case ixgbe_media_type_fiber:
- if (speed == IXGBE_LINK_SPEED_1GB_FULL)
- ret_val = ixgbe_fc_autoneg_fiber(hw);
- break;
-
- /* Autoneg flow control on backplane adapters */
- case ixgbe_media_type_backplane:
- ret_val = ixgbe_fc_autoneg_backplane(hw);
- break;
-
- /* Autoneg flow control on copper adapters */
- case ixgbe_media_type_copper:
- if (ixgbe_device_supports_autoneg_fc(hw) == 0)
- ret_val = ixgbe_fc_autoneg_copper(hw);
- break;
-
- default:
- break;
- }
-
-out:
- if (ret_val == 0) {
- hw->fc.fc_was_autonegged = true;
- } else {
- hw->fc.fc_was_autonegged = false;
- hw->fc.current_mode = hw->fc.requested_mode;
- }
-}
-
-/**
- * ixgbe_disable_pcie_master - Disable PCI-express master access
- * @hw: pointer to hardware structure
- *
- * Disables PCI-Express master access and verifies there are no pending
- * requests. Returns IXGBE_ERR_MASTER_REQUESTS_PENDING if the master
- * disable bit fails to stop outstanding master requests, or 0 once
- * master requests are disabled.
- **/
-s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
-{
- s32 status = 0;
- u32 i;
-
- /* Always set this bit to ensure any future transactions are blocked */
- IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
-
- /* Exit if master requests are blocked */
- if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
- goto out;
-
- /* Poll for master request bit to clear */
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
- udelay(100);
- if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
- goto out;
- }
-
- /*
- * Two consecutive resets are required via CTRL.RST per datasheet
- * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
- * of this need. The first reset prevents new master requests from
- * being issued by our device. We then must wait 1usec or more for any
- * remaining completions from the PCIe bus to trickle in, and then reset
- * again to clear out any effects they may have had on our device.
- */
- hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
- hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
-
- /*
- * Before proceeding, make sure that the PCIe block does not have
- * transactions pending.
- */
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
- udelay(100);
- if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
- IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
- goto out;
- }
-
- hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
- status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
-
-out:
- return status;
-}
-
-/**
- * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
- * @hw: pointer to hardware structure
- * @mask: Mask to specify which semaphore to acquire
- *
- * Acquires the SWFW semaphore through the GSSR register for the specified
- * function (CSR, PHY0, PHY1, EEPROM, Flash)
- **/
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
-{
- u32 gssr;
- u32 swmask = mask;
- u32 fwmask = mask << 5;
- s32 timeout = 200;
-
- while (timeout) {
- /*
- * SW EEPROM semaphore bit is used for access to all
- * SW_FW_SYNC/GSSR bits (not just EEPROM)
- */
- if (ixgbe_get_eeprom_semaphore(hw))
- return IXGBE_ERR_SWFW_SYNC;
-
- gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
- if (!(gssr & (fwmask | swmask)))
- break;
-
- /*
- * Firmware currently using resource (fwmask) or other software
- * thread currently using resource (swmask)
- */
- ixgbe_release_eeprom_semaphore(hw);
- msleep(5);
- timeout--;
- }
-
- if (!timeout) {
- hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
- return IXGBE_ERR_SWFW_SYNC;
- }
-
- gssr |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
-
- ixgbe_release_eeprom_semaphore(hw);
- return 0;
-}
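-
-/*
- * Editor's note: a minimal usage sketch; callers must pair every
- * successful acquire with a release of the same mask, e.g.:
- *
- *   if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_PHY0_SM) == 0) {
- *           ... access the PHY0 resource ...
- *           ixgbe_release_swfw_sync(hw, IXGBE_GSSR_PHY0_SM);
- *   }
- */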
-
-/**
- * ixgbe_release_swfw_sync - Release SWFW semaphore
- * @hw: pointer to hardware structure
- * @mask: Mask to specify which semaphore to release
- *
- * Releases the SWFW semaphore through the GSSR register for the specified
- * function (CSR, PHY0, PHY1, EEPROM, Flash)
- **/
-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
-{
- u32 gssr;
- u32 swmask = mask;
-
- ixgbe_get_eeprom_semaphore(hw);
-
- gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
- gssr &= ~swmask;
- IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
-
- ixgbe_release_eeprom_semaphore(hw);
-}
-
-/**
- * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
- * @hw: pointer to hardware structure
- *
- * Stops the receive data path and waits for the HW to internally empty
- * the Rx security block
- **/
-s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
-{
-#define IXGBE_MAX_SECRX_POLL 40
-
- int i;
- int secrxreg;
-
- secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
- secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
- IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
- for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
- secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
- if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
- break;
- else
- /* Use interrupt-safe sleep just in case */
- udelay(1000);
- }
-
- /* For informational purposes only */
- if (i >= IXGBE_MAX_SECRX_POLL)
- hw_dbg(hw, "Rx unit being enabled before security "
- "path fully disabled. Continuing with init.\n");
-
- return 0;
-}
-
-/**
- * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
- * @hw: pointer to hardware structure
- *
- * Enables the receive data path.
- **/
-s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
-{
- int secrxreg;
-
- secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
- secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
- IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
- IXGBE_WRITE_FLUSH(hw);
-
- return 0;
-}
-
-/**
- * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
- * @hw: pointer to hardware structure
- * @regval: register value to write to RXCTRL
- *
- * Enables the Rx DMA unit
- **/
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
-{
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
-
- return 0;
-}
-
-/**
- * ixgbe_blink_led_start_generic - Blink LED based on index.
- * @hw: pointer to hardware structure
- * @index: led number to blink
- **/
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
-{
- ixgbe_link_speed speed = 0;
- bool link_up = 0;
- u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-
- /*
- * Link must be up to auto-blink the LEDs;
- * Force it if link is down.
- */
- hw->mac.ops.check_link(hw, &speed, &link_up, false);
-
- if (!link_up) {
- autoc_reg |= IXGBE_AUTOC_AN_RESTART;
- autoc_reg |= IXGBE_AUTOC_FLU;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
- IXGBE_WRITE_FLUSH(hw);
- msleep(10);
- }
-
- led_reg &= ~IXGBE_LED_MODE_MASK(index);
- led_reg |= IXGBE_LED_BLINK(index);
- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
- IXGBE_WRITE_FLUSH(hw);
-
- return 0;
-}
-
-/**
- * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
- * @hw: pointer to hardware structure
- * @index: led number to stop blinking
- **/
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
-{
- u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-
- autoc_reg &= ~IXGBE_AUTOC_FLU;
- autoc_reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
-
- led_reg &= ~IXGBE_LED_MODE_MASK(index);
- led_reg &= ~IXGBE_LED_BLINK(index);
- led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
- IXGBE_WRITE_FLUSH(hw);
-
- return 0;
-}
-
-/**
- * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
- * @hw: pointer to hardware structure
- * @san_mac_offset: SAN MAC address offset
- *
- * This function reads the EEPROM location for the SAN MAC address
- * pointer and returns the value at that location. It is used by both
- * the get and set mac_addr routines.
- **/
-static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
- u16 *san_mac_offset)
-{
- /*
- * First read the EEPROM pointer to see if the MAC addresses are
- * available.
- */
- hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
-
- return 0;
-}
-
-/**
- * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
- * @hw: pointer to hardware structure
- * @san_mac_addr: SAN MAC address
- *
- * Reads the SAN MAC address from the EEPROM, if it's available. This is
- * per-port, so set_lan_id() must be called before reading the addresses.
- * set_lan_id() is called by identify_sfp(), but this cannot be relied
- * upon for non-SFP connections, so we must call it here.
- **/
-s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
- u16 san_mac_data, san_mac_offset;
- u8 i;
-
- /*
- * First read the EEPROM pointer to see if the MAC addresses are
- * available. If they're not, no point in calling set_lan_id() here.
- */
- ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
-
- if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
- /*
- * No addresses available in this EEPROM. It's not an
- * error though, so just wipe the local address and return.
- */
- for (i = 0; i < 6; i++)
- san_mac_addr[i] = 0xFF;
-
- goto san_mac_addr_out;
- }
-
- /* make sure we know which port we need to program */
- hw->mac.ops.set_lan_id(hw);
- /* apply the port offset to the address offset */
- (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
- (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
- for (i = 0; i < 3; i++) {
- hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
- san_mac_addr[i * 2] = (u8)(san_mac_data);
- san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
- san_mac_offset++;
- }
-
-san_mac_addr_out:
- return 0;
-}
-
-/**
- * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
- * @hw: pointer to hardware structure
- * @san_mac_addr: SAN MAC address
- *
- * Write a SAN MAC address to the EEPROM.
- **/
-s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
- s32 status = 0;
- u16 san_mac_data, san_mac_offset;
- u8 i;
-
- /* Look for SAN mac address pointer. If not defined, return */
- ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
-
- if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
- status = IXGBE_ERR_NO_SAN_ADDR_PTR;
- goto san_mac_addr_out;
- }
-
- /* Make sure we know which port we need to write */
- hw->mac.ops.set_lan_id(hw);
- /* Apply the port offset to the address offset */
- (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
- (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
-
- for (i = 0; i < 3; i++) {
- san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
- san_mac_data |= (u16)(san_mac_addr[i * 2]);
- hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
- san_mac_offset++;
- }
-
-san_mac_addr_out:
- return status;
-}
-
-/**
- * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
- * @hw: pointer to hardware structure
- *
- * Read PCIe configuration space, and get the MSI-X vector count from
- * the capabilities table.
- **/
-u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
-{
- u16 msix_count = 1;
- u16 max_msix_count;
- u16 pcie_offset;
-
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
- max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
- max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
- break;
- default:
- return msix_count;
- }
-
- msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
- msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
-
- /* MSI-X count is zero-based in HW */
- msix_count++;
-
- if (msix_count > max_msix_count)
- msix_count = max_msix_count;
-
- return msix_count;
-}
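-
-/*
- * Editor's note (illustrative): a raw MSI-X table-size field of 0x003F
- * decodes as 0x3F + 1 = 64 vectors, which is then limited by the
- * per-MAC maximum.
- */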
-
-/**
- * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
- * @hw: pointer to hardware structure
- * @addr: Address to put into receive address register
- * @vmdq: VMDq pool to assign
- *
- * Puts an Ethernet address into a receive address register, or
- * finds the RAR that it is already in; adds to the pool list
- **/
-s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
-{
- static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
- u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
- u32 rar;
- u32 rar_low, rar_high;
- u32 addr_low, addr_high;
-
- /* swap bytes for HW little endian */
- addr_low = addr[0] | (addr[1] << 8)
- | (addr[2] << 16)
- | (addr[3] << 24);
- addr_high = addr[4] | (addr[5] << 8);
-
- /*
- * Either find the mac_id in rar or find the first empty space.
- * rar_highwater points to just after the highest currently used
- * rar in order to shorten the search. It grows when we add a new
- * rar to the top.
- */
- for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
-
- if (((IXGBE_RAH_AV & rar_high) == 0)
- && first_empty_rar == NO_EMPTY_RAR_FOUND) {
- first_empty_rar = rar;
- } else if ((rar_high & 0xFFFF) == addr_high) {
- rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
- if (rar_low == addr_low)
- break; /* found it already in the rars */
- }
- }
-
- if (rar < hw->mac.rar_highwater) {
- /* already there so just add to the pool bits */
- ixgbe_set_vmdq(hw, rar, vmdq);
- } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
- /* stick it into first empty RAR slot we found */
- rar = first_empty_rar;
- ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
- } else if (rar == hw->mac.rar_highwater) {
- /* add it to the top of the list and inc the highwater mark */
- ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
- hw->mac.rar_highwater++;
- } else if (rar >= hw->mac.num_rar_entries) {
- return IXGBE_ERR_INVALID_MAC_ADDR;
- }
-
- /*
- * If we found rar[0], make sure the default pool bit (we use pool 0)
- * remains cleared to be sure default pool packets will get delivered
- */
- if (rar == 0)
- ixgbe_clear_vmdq(hw, rar, 0);
-
- return rar;
-}
-
-/**
- * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
- * @hw: pointer to hardware struct
- * @rar: receive address register index to disassociate
- * @vmdq: VMDq pool index to remove from the rar
- **/
-s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
- u32 mpsar_lo, mpsar_hi;
- u32 rar_entries = hw->mac.num_rar_entries;
-
- /* Make sure we are using a valid rar index range */
- if (rar >= rar_entries) {
- hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
- }
-
- mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-
- if (!mpsar_lo && !mpsar_hi)
- goto done;
-
- if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
- if (mpsar_lo) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
- mpsar_lo = 0;
- }
- if (mpsar_hi) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
- mpsar_hi = 0;
- }
- } else if (vmdq < 32) {
- mpsar_lo &= ~(1 << vmdq);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
- } else {
- mpsar_hi &= ~(1 << (vmdq - 32));
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
- }
-
- /* was that the last pool using this rar? */
- if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
- hw->mac.ops.clear_rar(hw, rar);
-done:
- return 0;
-}
-
-/**
- * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
- * @hw: pointer to hardware struct
- * @rar: receive address register index to associate with a VMDq index
- * @vmdq: VMDq pool index
- **/
-s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
- u32 mpsar;
- u32 rar_entries = hw->mac.num_rar_entries;
-
- /* Make sure we are using a valid rar index range */
- if (rar >= rar_entries) {
- hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
- }
-
- if (vmdq < 32) {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar |= 1 << vmdq;
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
- } else {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
- mpsar |= 1 << (vmdq - 32);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
- }
- return 0;
-}
-
-/**
- * ixgbe_set_vmdq_san_mac_generic - Associate default VMDq pool index with a rx address
- * @hw: pointer to hardware struct
- * @vmdq: VMDq pool index
- *
- * This function should only be invoked in IOV mode. In IOV mode, the
- * default pool is the next pool after the number of VFs advertised,
- * not pool 0. The MPSAR table needs to be updated for the SAN_MAC RAR
- * [hw->mac.san_mac_rar_index].
- **/
-s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
-{
- u32 mpsar;
- u32 rar = hw->mac.san_mac_rar_index;
-
- if (vmdq < 32) {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar |= 1 << vmdq;
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
- } else {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
- mpsar |= 1 << (vmdq - 32);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
- }
-
- return 0;
-}
-
-/**
- * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
- * @hw: pointer to hardware structure
- **/
-s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
-{
- int i;
-
- hw_dbg(hw, " Clearing UTA\n");
-
- for (i = 0; i < 128; i++)
- IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
-
- return 0;
-}
-
-/**
- * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
- * @hw: pointer to hardware structure
- * @vlan: VLAN id to write to VLAN filter
- *
- * Returns the VLVF index where this VLAN id should be placed.
- **/
-s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
-{
- u32 bits = 0;
- u32 first_empty_slot = 0;
- s32 regindex;
-
- /* short cut the special case */
- if (vlan == 0)
- return 0;
-
- /*
- * Search for the vlan id in the VLVF entries. Save off the first empty
- * slot found along the way
- */
- for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
- bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
- if (!bits && !(first_empty_slot))
- first_empty_slot = regindex;
- else if ((bits & 0x0FFF) == vlan)
- break;
- }
-
- /*
- * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
- * in the VLVF. Else use the first empty VLVF register for this
- * vlan id.
- */
- if (regindex >= IXGBE_VLVF_ENTRIES) {
- if (first_empty_slot)
- regindex = first_empty_slot;
- else {
- hw_dbg(hw, "No space in VLVF.\n");
- regindex = IXGBE_ERR_NO_SPACE;
- }
- }
-
- return regindex;
-}
-
-/**
- * ixgbe_set_vfta_generic - Set VLAN filter table
- * @hw: pointer to hardware structure
- * @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VLVFB
- * @vlan_on: boolean flag to turn on/off VLAN in VLVF
- *
- * Turn on/off specified VLAN in the VLAN filter table.
- **/
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
-{
- s32 regindex;
- u32 bitindex;
- u32 vfta;
- u32 targetbit;
- s32 ret_val = 0;
- bool vfta_changed = false;
-
- if (vlan > 4095)
- return IXGBE_ERR_PARAM;
-
- /*
- * this is a 2 part operation - first the VFTA, then the
- * VLVF and VLVFB if VT Mode is set
- * We don't write the VFTA until we know the VLVF part succeeded.
- */
-
- /* Part 1
- * The VFTA is a bitstring made up of 128 32-bit registers
- * that enable the particular VLAN id, much like the MTA:
- * bits[11-5]: which register
- * bits[4-0]: which bit in the register
- */
- regindex = (vlan >> 5) & 0x7F;
- bitindex = vlan & 0x1F;
- targetbit = (1 << bitindex);
- vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
-
- if (vlan_on) {
- if (!(vfta & targetbit)) {
- vfta |= targetbit;
- vfta_changed = true;
- }
- } else {
- if ((vfta & targetbit)) {
- vfta &= ~targetbit;
- vfta_changed = true;
- }
- }
-
- /* Part 2
- * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
- */
- ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
- &vfta_changed);
- if (ret_val != 0)
- return ret_val;
-
- if (vfta_changed)
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
-
- return 0;
-}
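-
-/*
- * Editor's note (illustrative): for vlan = 100, regindex = (100 >> 5) &
- * 0x7F = 3 and bitindex = 100 & 0x1F = 4, so VFTA[3] bit 4 controls
- * VLAN 100.
- */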
-
-/**
- * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
- * @hw: pointer to hardware structure
- * @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VLVFB
- * @vlan_on: boolean flag to turn on/off VLAN in VLVF
- * @vfta_changed: pointer to boolean flag which indicates whether VFTA
- * should be changed
- *
- * Turn on/off specified bit in VLVF table.
- **/
-s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on, bool *vfta_changed)
-{
- u32 vt;
-
- if (vlan > 4095)
- return IXGBE_ERR_PARAM;
-
- /* If VT Mode is set
- * Either vlan_on
- * make sure the vlan is in VLVF
- * set the vind bit in the matching VLVFB
- * Or !vlan_on
- * clear the pool bit and possibly the vind
- */
- vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- if (vt & IXGBE_VT_CTL_VT_ENABLE) {
- s32 vlvf_index;
- u32 bits;
-
- vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
- if (vlvf_index < 0)
- return vlvf_index;
-
- if (vlan_on) {
- /* set the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2),
- bits);
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- bits |= (1 << (vind - 32));
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1),
- bits);
- }
- } else {
- /* clear the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2),
- bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- bits &= ~(1 << (vind - 32));
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1),
- bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- }
- }
-
- /*
- * If there are still bits set in the VLVFB registers
- * for the VLAN ID indicated we need to see if the
- * caller is requesting that we clear the VFTA entry bit.
- * If the caller has requested that we clear the VFTA
- * entry bit but there are still pools/VFs using this VLAN
- * ID entry then ignore the request. We're not worried
- * about the case where we're turning the VFTA VLAN ID
- * entry bit on, only when requested to turn it off as
- * there may be multiple pools and/or VFs using the
- * VLAN ID entry. In that case we cannot clear the
- * VFTA bit until all pools/VFs using that VLAN ID have also
- * been cleared. This will be indicated by "bits" being
- * zero.
- */
- if (bits) {
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
- (IXGBE_VLVF_VIEN | vlan));
- if ((!vlan_on) && (vfta_changed != NULL)) {
- /* someone wants to clear the vfta entry
- * but some pools/VFs are still using it.
- * Ignore it. */
- *vfta_changed = false;
- }
- } else
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
- }
-
- return 0;
-}
-
-/**
- * ixgbe_clear_vfta_generic - Clear VLAN filter table
- * @hw: pointer to hardware structure
- *
- * Clears the VLAN filter table, and the VMDq index associated with the filter
- **/
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
-{
- u32 offset;
-
- for (offset = 0; offset < hw->mac.vft_size; offset++)
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
-
- for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
- }
-
- return 0;
-}
-
-/**
- * ixgbe_check_mac_link_generic - Determine link and speed status
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @link_up: true when link is up
- * @link_up_wait_to_complete: bool used to wait for link up or not
- *
- * Reads the links register to determine if link is up and the current speed
- **/
-s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up, bool link_up_wait_to_complete)
-{
- u32 links_reg, links_orig;
- u32 i;
-
- /* clear the old state */
- links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
-
- links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
-
- if (links_orig != links_reg) {
- hw_dbg(hw, "LINKS changed from %08X to %08X\n",
- links_orig, links_reg);
- }
-
- if (link_up_wait_to_complete) {
- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
- if (links_reg & IXGBE_LINKS_UP) {
- *link_up = true;
- break;
- } else {
- *link_up = false;
- }
- msleep(100);
- links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
- }
- } else {
- if (links_reg & IXGBE_LINKS_UP)
- *link_up = true;
- else
- *link_up = false;
- }
-
- if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_10G_82599)
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_1G_82599)
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_100_82599)
- *speed = IXGBE_LINK_SPEED_100_FULL;
- else
- *speed = IXGBE_LINK_SPEED_UNKNOWN;
-
- return 0;
-}
-
-/**
- * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
- * the EEPROM
- * @hw: pointer to hardware structure
- * @wwnn_prefix: the alternative WWNN prefix
- * @wwpn_prefix: the alternative WWPN prefix
- *
- * This function reads the alternative SAN MAC address block in the EEPROM
- * to check for support of the alternative WWNN/WWPN prefixes.
- **/
-s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
-{
- u16 offset, caps;
- u16 alt_san_mac_blk_offset;
-
- /* clear output first */
- *wwnn_prefix = 0xFFFF;
- *wwpn_prefix = 0xFFFF;
-
- /* check if alternative SAN MAC is supported */
- hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
- &alt_san_mac_blk_offset);
-
- if ((alt_san_mac_blk_offset == 0) ||
- (alt_san_mac_blk_offset == 0xFFFF))
- goto wwn_prefix_out;
-
- /* check capability in alternative san mac address block */
- offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
- hw->eeprom.ops.read(hw, offset, &caps);
- if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
- goto wwn_prefix_out;
-
- /* get the corresponding prefix for WWNN/WWPN */
- offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwnn_prefix);
-
- offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwpn_prefix);
-
-wwn_prefix_out:
- return 0;
-}
-
-/**
- * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
- * @hw: pointer to hardware structure
- * @bs: the fcoe boot status
- *
- * This function will read the FCOE boot status from the iSCSI FCOE block
- **/
-s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
-{
- u16 offset, caps, flags;
- s32 status;
-
- /* clear output first */
- *bs = ixgbe_fcoe_bootstatus_unavailable;
-
- /* check if FCOE IBA block is present */
- offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
- status = hw->eeprom.ops.read(hw, offset, &caps);
- if (status != 0)
- goto out;
-
- if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
- goto out;
-
- /* check if iSCSI FCOE block is populated */
- status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
- if (status != 0)
- goto out;
-
- if ((offset == 0) || (offset == 0xFFFF))
- goto out;
-
- /* read fcoe flags in iSCSI FCOE block */
- offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
- status = hw->eeprom.ops.read(hw, offset, &flags);
- if (status != 0)
- goto out;
-
- if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
- *bs = ixgbe_fcoe_bootstatus_enabled;
- else
- *bs = ixgbe_fcoe_bootstatus_disabled;
-
-out:
- return status;
-}
-
-/**
- * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
- * @hw: pointer to hardware structure
- * @enable: enable or disable switch for anti-spoofing
- * @pf: Physical Function pool - do not enable anti-spoofing for the PF
- *
- **/
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
-{
- int j;
- int pf_target_reg = pf >> 3;
- int pf_target_shift = pf % 8;
- u32 pfvfspoof = 0;
-
- if (hw->mac.type == ixgbe_mac_82598EB)
- return;
-
- if (enable)
- pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
-
- /*
- * PFVFSPOOF register array is size 8 with 8 bits assigned to
- * MAC anti-spoof enables in each register array element.
- */
- for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
- /* If not enabling anti-spoofing then done */
- if (!enable)
- return;
-
- /*
- * The PF should be allowed to spoof so that it can support
- * emulation mode NICs. Reset the bit assigned to the PF
- */
- pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
- pfvfspoof ^= (1 << pf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
-}
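-
-/*
- * Editor's note (illustrative): for pf = 10, pf_target_reg = 1 and
- * pf_target_shift = 2, so the XOR above flips bit 2 of PFVFSPOOF(1),
- * clearing the MACAS enable that the bulk write just set for the PF pool.
- */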
-
-/**
- * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
- * @hw: pointer to hardware structure
- * @enable: enable or disable switch for VLAN anti-spoofing
- * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
- *
- **/
-void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
-{
- int vf_target_reg = vf >> 3;
- int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
- u32 pfvfspoof;
-
- if (hw->mac.type == ixgbe_mac_82598EB)
- return;
-
- pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
- if (enable)
- pfvfspoof |= (1 << vf_target_shift);
- else
- pfvfspoof &= ~(1 << vf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
-}
-
-/**
- * ixgbe_get_device_caps_generic - Get additional device capabilities
- * @hw: pointer to hardware structure
- * @device_caps: the EEPROM word with the extra device capabilities
- *
- * This function will read the EEPROM location for the device capabilities,
- * and return the word through device_caps.
- **/
-s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
-{
- hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
-
- return 0;
-}
-
-/**
- * ixgbe_calculate_checksum - Calculate checksum for buffer
- * @buffer: pointer to the buffer to checksum
- * @length: number of bytes to include in the checksum
- * Calculates and returns the 8-bit checksum of the buffer over the
- * given length.
- **/
-static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
-{
- u32 i;
- u8 sum = 0;
-
- if (!buffer)
- return 0;
- for (i = 0; i < length; i++)
- sum += buffer[i];
-
- return (u8) (0 - sum);
-}
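-
-/*
- * Editor's note (illustrative): for bytes {0x10, 0x20, 0x30} the sum is
- * 0x60 and the checksum is (u8)(0 - 0x60) = 0xA0; adding the checksum
- * back makes the byte sum wrap to zero, which is what the manageability
- * block verifies.
- */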
-
-/**
- * ixgbe_host_interface_command - Issue command to manageability block
- * @hw: pointer to the HW structure
- * @buffer: contains the command to write and where the return status will
- * be placed
- * @length: length of buffer, must be multiple of 4 bytes
- *
- * Communicates with the manageability block. On success return 0
- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
- **/
-static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
- u32 length)
-{
- u32 hicr, i, bi;
- u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
- u8 buf_len, dword_len;
-
- s32 ret_val = 0;
-
- if (length == 0 || length & 0x3 ||
- length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
- hw_dbg(hw, "Buffer length failure.\n");
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto out;
- }
-
- /* Check that the host interface is enabled. */
- hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
- if ((hicr & IXGBE_HICR_EN) == 0) {
- hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto out;
- }
-
- /* Calculate length in DWORDs */
- dword_len = length >> 2;
-
- /*
- * The device driver writes the relevant command block
- * into the ram area.
- */
- for (i = 0; i < dword_len; i++)
- IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- i, IXGBE_CPU_TO_LE32(buffer[i]));
-
- /* Setting this bit tells the ARC that a new command is pending. */
- IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
-
- for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
- hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
- if (!(hicr & IXGBE_HICR_C))
- break;
- msleep(1);
- }
-
- /* Check command successful completion. */
- if (i == IXGBE_HI_COMMAND_TIMEOUT ||
- (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
- hw_dbg(hw, "Command has failed with no status valid.\n");
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto out;
- }
-
- /* Calculate length in DWORDs */
- dword_len = hdr_size >> 2;
-
- /* first pull in the header so we know the buffer length */
- for (bi = 0; bi < dword_len; bi++) {
- buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- IXGBE_LE32_TO_CPUS(&buffer[bi]);
- }
-
- /* If there is anything in the data position, pull it in */
- buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
- if (buf_len == 0)
- goto out;
-
- if (length < (buf_len + hdr_size)) {
- hw_dbg(hw, "Buffer not large enough for reply message.\n");
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto out;
- }
-
- /* Calculate length in DWORDs, add 3 for odd lengths */
- dword_len = (buf_len + 3) >> 2;
-
- /* Pull in the rest of the buffer (bi is where we left off)*/
- for (; bi <= dword_len; bi++) {
- buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- IXGBE_LE32_TO_CPUS(&buffer[bi]);
- }
-
-out:
- return ret_val;
-}
-
-/**
- * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
- * @hw: pointer to the HW structure
- * @maj: driver version major number
- * @min: driver version minor number
- * @build: driver version build number
- * @sub: driver version sub build number
- *
- * Sends driver version number to firmware through the manageability
- * block. On success return 0
- * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
- * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
- **/
-s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 sub)
-{
- struct ixgbe_hic_drv_info fw_cmd;
- int i;
- s32 ret_val = 0;
-
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
- != 0) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
- goto out;
- }
-
- fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
- fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
- fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
- fw_cmd.port_num = (u8)hw->bus.func;
- fw_cmd.ver_maj = maj;
- fw_cmd.ver_min = min;
- fw_cmd.ver_build = build;
- fw_cmd.ver_sub = sub;
- fw_cmd.hdr.checksum = 0;
- fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
- (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
- fw_cmd.pad = 0;
- fw_cmd.pad2 = 0;
-
- for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
- ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
- sizeof(fw_cmd));
- if (ret_val != 0)
- continue;
-
- if (fw_cmd.hdr.cmd_or_resp.ret_status ==
- FW_CEM_RESP_STATUS_SUCCESS)
- ret_val = 0;
- else
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
-
- break;
- }
-
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
-out:
- return ret_val;
-}
-
-/**
- * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
- * @hw: pointer to hardware structure
- * @num_pb: number of packet buffers to allocate
- * @headroom: reserve n KB of headroom
- * @strategy: packet buffer allocation strategy
- **/
-void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
- int strategy)
-{
- u32 pbsize = hw->mac.rx_pb_size;
- int i = 0;
- u32 rxpktsize, txpktsize, txpbthresh;
-
- /* Reserve headroom */
- pbsize -= headroom;
-
- if (!num_pb)
- num_pb = 1;
-
- /* Divide remaining packet buffer space amongst the number of packet
- * buffers requested using supplied strategy.
- */
- switch (strategy) {
- case PBA_STRATEGY_WEIGHTED:
- /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
- * buffer with 5/8 of the packet buffer space.
- */
- rxpktsize = (pbsize * 5) / (num_pb * 4);
- pbsize -= rxpktsize * (num_pb / 2);
- rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
- for (; i < (num_pb / 2); i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
- /* Fall through to configure remaining packet buffers */
- case PBA_STRATEGY_EQUAL:
- rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
- for (; i < num_pb; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
- break;
- default:
- break;
- }
-
- /* Only support an equally distributed Tx packet buffer strategy. */
- txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
- txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
- for (i = 0; i < num_pb; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
- IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
- }
-
- /* Clear unused TCs, if any, to zero buffer size */
- for (; i < IXGBE_MAX_PB; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
- }
-}
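-
-/*
- * Editor's note (illustrative): with pbsize = 512 (KB), num_pb = 8 and
- * PBA_STRATEGY_WEIGHTED, rxpktsize = (512 * 5) / (8 * 4) = 80 KB for
- * TCs 0-3 (4 * 80 = 320 KB, i.e. 5/8 of the buffer); the remaining
- * 192 KB is split equally as 48 KB across TCs 4-7.
- */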
-
-/**
- * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
- * @hw: pointer to the hardware structure
- *
- * The 82599 and x540 MACs can experience issues if TX work is still pending
- * when a reset occurs. This function prevents this by flushing the PCIe
- * buffers on the system.
- **/
-void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
-{
- u32 gcr_ext, hlreg0;
-
- /*
- * If double reset is not requested then all transactions should
- * already be clear and as such there is no work to do
- */
- if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
- return;
-
- /*
- * Set loopback enable to prevent any transmits from being sent
- * should the link come up. This assumes that the RXCTRL.RXEN bit
- * has already been cleared.
- */
- hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
-
- /* initiate cleaning flow for buffers in the PCIe transaction layer */
- gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
- IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
- gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
-
- /* Flush all writes and allow 20usec for all transactions to clear */
- IXGBE_WRITE_FLUSH(hw);
- udelay(20);
-
- /* restore previous register values */
- IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
- IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
-}
-
-static const u8 ixgbe_emc_temp_data[4] = {
- IXGBE_EMC_INTERNAL_DATA,
- IXGBE_EMC_DIODE1_DATA,
- IXGBE_EMC_DIODE2_DATA,
- IXGBE_EMC_DIODE3_DATA
-};
-static const u8 ixgbe_emc_therm_limit[4] = {
- IXGBE_EMC_INTERNAL_THERM_LIMIT,
- IXGBE_EMC_DIODE1_THERM_LIMIT,
- IXGBE_EMC_DIODE2_THERM_LIMIT,
- IXGBE_EMC_DIODE3_THERM_LIMIT
-};
-
-/**
- * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
- * @hw: pointer to hardware structure
- *
- * Reads the thermal sensors over I2C and stores the results in
- * hw->mac.thermal_sensor_data.
- **/
-s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
-{
- s32 status = 0;
- u16 ets_offset;
- u16 ets_cfg;
- u16 ets_sensor;
- u8 num_sensors;
- u8 sensor_index;
- u8 sensor_location;
- u8 i;
- struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
-
- /* Only support thermal sensors attached to 82599 physical port 0 */
- if ((hw->mac.type != ixgbe_mac_82599EB) ||
- (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
- status = IXGBE_NOT_IMPLEMENTED;
- goto out;
- }
-
- status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
- if (status)
- goto out;
-
- if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
- status = IXGBE_NOT_IMPLEMENTED;
- goto out;
- }
-
- status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
- if (status)
- goto out;
-
- if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
- != IXGBE_ETS_TYPE_EMC) {
- status = IXGBE_NOT_IMPLEMENTED;
- goto out;
- }
-
- num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
- if (num_sensors > IXGBE_MAX_SENSORS)
- num_sensors = IXGBE_MAX_SENSORS;
-
- for (i = 0; i < num_sensors; i++) {
- status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
- &ets_sensor);
- if (status)
- goto out;
-
- sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
- IXGBE_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
- IXGBE_ETS_DATA_LOC_SHIFT);
-
- if (sensor_location != 0) {
- status = hw->phy.ops.read_i2c_byte(hw,
- ixgbe_emc_temp_data[sensor_index],
- IXGBE_I2C_THERMAL_SENSOR_ADDR,
- &data->sensor[i].temp);
- if (status)
- goto out;
- }
- }
-out:
- return status;
-}
-
-/**
- * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
- * @hw: pointer to hardware structure
- *
- * Inits the thermal sensor thresholds according to the NVM map
- * and saves the threshold and location values into mac.thermal_sensor_data.
- **/
-s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
-{
- s32 status = 0;
- u16 ets_offset;
- u16 ets_cfg;
- u16 ets_sensor;
- u8 low_thresh_delta;
- u8 num_sensors;
- u8 sensor_index;
- u8 sensor_location;
- u8 therm_limit;
- u8 i;
- struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
-
- memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
-
- /* Only support thermal sensors attached to 82599 physical port 0 */
- if ((hw->mac.type != ixgbe_mac_82599EB) ||
- (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
- return IXGBE_NOT_IMPLEMENTED;
-
- hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
- if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
- return IXGBE_NOT_IMPLEMENTED;
-
- hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
- if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
- != IXGBE_ETS_TYPE_EMC)
- return IXGBE_NOT_IMPLEMENTED;
-
- low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
- IXGBE_ETS_LTHRES_DELTA_SHIFT);
- num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
-
- for (i = 0; i < num_sensors; i++) {
- hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor);
- sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
- IXGBE_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
- IXGBE_ETS_DATA_LOC_SHIFT);
- therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
-
- hw->phy.ops.write_i2c_byte(hw,
- ixgbe_emc_therm_limit[sensor_index],
- IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
-
- if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
- data->sensor[i].location = sensor_location;
- data->sensor[i].caution_thresh = therm_limit;
- data->sensor[i].max_op_thresh = therm_limit -
- low_thresh_delta;
- }
- }
- return status;
-}
-
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.h
deleted file mode 100755
index 9bd6f534..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IXGBE_COMMON_H_
-#define _IXGBE_COMMON_H_
-
-#include "ixgbe_type.h"
-
-u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
-
-s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
-s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
- u32 pba_num_size);
-s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
-void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
-
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
-
-s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
-s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 *data);
-s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
- u16 *checksum_val);
-s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
-
-s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr);
-s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count,
- ixgbe_mc_addr_itr func, bool clear);
-s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
- u32 addr_count, ixgbe_mc_addr_itr func);
-s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
-
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
-void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
-
-s32 ixgbe_validate_mac_addr(u8 *mac_addr);
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
-s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
-
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
-
-s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
-
-s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
-s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
-s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
-s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on, bool *vfta_changed);
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
-s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
-
-s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up, bool link_up_wait_to_complete);
-
-s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix);
-
-s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs);
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
-void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
-s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
-void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
- int strategy);
-s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 ver);
-void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
-
-#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
-#define IXGBE_EMC_INTERNAL_DATA 0x00
-#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
-#define IXGBE_EMC_DIODE1_DATA 0x01
-#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19
-#define IXGBE_EMC_DIODE2_DATA 0x23
-#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A
-#define IXGBE_EMC_DIODE3_DATA 0x2A
-#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30
-
-s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
-#endif /* _IXGBE_COMMON_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_dcb.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_dcb.h
deleted file mode 100755
index a6690451..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_dcb.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IXGBE_DCB_H_
-#define _IXGBE_DCB_H_
-
-
-#include "ixgbe_type.h"
-
-/* DCB defines */
-/* DCB credit calculation defines */
-#define IXGBE_DCB_CREDIT_QUANTUM 64
-#define IXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */
-#define IXGBE_DCB_MAX_TSO_SIZE	(32 * 1024) /* Max TSO pkt size in DCB */
-#define IXGBE_DCB_MAX_CREDIT (2 * IXGBE_DCB_MAX_CREDIT_REFILL)
-
-/* 513 for 32KB TSO packet */
-#define IXGBE_DCB_MIN_TSO_CREDIT \
- ((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1)
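-/*
- * Worked arithmetic: IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM =
- * (32 * 1024) / 64 = 512, and the "+ 1" rounds up for a partial quantum,
- * giving the 513 noted in the comment above.
- */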
-
-/* DCB configuration defines */
-#define IXGBE_DCB_MAX_USER_PRIORITY 8
-#define IXGBE_DCB_MAX_BW_GROUP 8
-#define IXGBE_DCB_BW_PERCENT 100
-
-#define IXGBE_DCB_TX_CONFIG 0
-#define IXGBE_DCB_RX_CONFIG 1
-
-/* DCB capability defines */
-#define IXGBE_DCB_PG_SUPPORT 0x00000001
-#define IXGBE_DCB_PFC_SUPPORT 0x00000002
-#define IXGBE_DCB_BCN_SUPPORT 0x00000004
-#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008
-#define IXGBE_DCB_GSP_SUPPORT 0x00000010
-
-struct ixgbe_dcb_support {
- u32 capabilities; /* DCB capabilities */
-
- /* Each bit represents a number of TCs configurable in the hw.
- * If 8 traffic classes can be configured, the value is 0x80. */
- u8 traffic_classes;
- u8 pfc_traffic_classes;
-};
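-
-/*
- * Reading the bitmap above (an assumption from its comment): bit N set
- * means the hardware can run with N+1 traffic classes, so e.g. 0x88
- * would advertise both the 4-TC and the 8-TC modes.
- */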
-
-enum ixgbe_dcb_tsa {
- ixgbe_dcb_tsa_ets = 0,
- ixgbe_dcb_tsa_group_strict_cee,
- ixgbe_dcb_tsa_strict
-};
-
-/* Traffic class bandwidth allocation per direction */
-struct ixgbe_dcb_tc_path {
- u8 bwg_id; /* Bandwidth Group (BWG) ID */
- u8 bwg_percent; /* % of BWG's bandwidth */
- u8 link_percent; /* % of link bandwidth */
- u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
- u16 data_credits_refill; /* Credit refill amount in 64B granularity */
- u16 data_credits_max; /* Max credits for a configured packet buffer
- * in 64B granularity.*/
- enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */
-};
-
-enum ixgbe_dcb_pfc {
- ixgbe_dcb_pfc_disabled = 0,
- ixgbe_dcb_pfc_enabled,
- ixgbe_dcb_pfc_enabled_txonly,
- ixgbe_dcb_pfc_enabled_rxonly
-};
-
-/* Traffic class configuration */
-struct ixgbe_dcb_tc_config {
- struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */
- enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */
-
- u16 desc_credits_max; /* For Tx Descriptor arbitration */
- u8 tc; /* Traffic class (TC) */
-};
-
-enum ixgbe_dcb_pba {
- /* PBA[0-7] each use 64KB FIFO */
- ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL,
- /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
- ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED
-};
-
-struct ixgbe_dcb_num_tcs {
- u8 pg_tcs;
- u8 pfc_tcs;
-};
-
-struct ixgbe_dcb_config {
- struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS];
- struct ixgbe_dcb_support support;
- struct ixgbe_dcb_num_tcs num_tcs;
- u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */
- bool pfc_mode_enable;
- bool round_robin_enable;
-
- enum ixgbe_dcb_pba rx_pba_cfg;
-
- u32 dcb_cfg_version; /* Not used...OS-specific? */
- u32 link_speed; /* For bandwidth allocation validation purpose */
- bool vt_mode;
-};
-
-/* DCB driver APIs */
-
-/* DCB rule checking */
-s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *);
-
-/* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int);
-s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *,
- struct ixgbe_dcb_config *, u32, u8);
-
-/* DCB PFC */
-s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *);
-s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
-
-/* DCB stats */
-s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
-s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
-s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
-
-/* DCB config arbiters */
-s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *,
- struct ixgbe_dcb_config *);
-s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *,
- struct ixgbe_dcb_config *);
-s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *,
- struct ixgbe_dcb_config *);
-
-/* DCB unpack routines */
-void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *);
-void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *);
-void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *);
-void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *);
-void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *);
-void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *);
-
-/* DCB initialization */
-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *);
-s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
-#endif /* _IXGBE_DCB_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c
deleted file mode 100755
index 11472bd3..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c
+++ /dev/null
@@ -1,2901 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-/* ethtool support for ixgbe */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/vmalloc.h>
-#include <linux/highmem.h>
-#ifdef SIOCETHTOOL
-#include <asm/uaccess.h>
-
-#include "ixgbe.h"
-
-#ifndef ETH_GSTRING_LEN
-#define ETH_GSTRING_LEN 32
-#endif
-
-#define IXGBE_ALL_RAR_ENTRIES 16
-
-#ifdef ETHTOOL_OPS_COMPAT
-#include "kcompat_ethtool.c"
-#endif
-#ifdef ETHTOOL_GSTATS
-struct ixgbe_stats {
- char stat_string[ETH_GSTRING_LEN];
- int sizeof_stat;
- int stat_offset;
-};
-
-#define IXGBE_NETDEV_STAT(_net_stat) { \
- .stat_string = #_net_stat, \
- .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
- .stat_offset = offsetof(struct net_device_stats, _net_stat) \
-}
-static const struct ixgbe_stats ixgbe_gstrings_net_stats[] = {
- IXGBE_NETDEV_STAT(rx_packets),
- IXGBE_NETDEV_STAT(tx_packets),
- IXGBE_NETDEV_STAT(rx_bytes),
- IXGBE_NETDEV_STAT(tx_bytes),
- IXGBE_NETDEV_STAT(rx_errors),
- IXGBE_NETDEV_STAT(tx_errors),
- IXGBE_NETDEV_STAT(rx_dropped),
- IXGBE_NETDEV_STAT(tx_dropped),
- IXGBE_NETDEV_STAT(multicast),
- IXGBE_NETDEV_STAT(collisions),
- IXGBE_NETDEV_STAT(rx_over_errors),
- IXGBE_NETDEV_STAT(rx_crc_errors),
- IXGBE_NETDEV_STAT(rx_frame_errors),
- IXGBE_NETDEV_STAT(rx_fifo_errors),
- IXGBE_NETDEV_STAT(rx_missed_errors),
- IXGBE_NETDEV_STAT(tx_aborted_errors),
- IXGBE_NETDEV_STAT(tx_carrier_errors),
- IXGBE_NETDEV_STAT(tx_fifo_errors),
- IXGBE_NETDEV_STAT(tx_heartbeat_errors),
-};
-
-#define IXGBE_STAT(_name, _stat) { \
- .stat_string = _name, \
- .sizeof_stat = FIELD_SIZEOF(struct ixgbe_adapter, _stat), \
- .stat_offset = offsetof(struct ixgbe_adapter, _stat) \
-}
-static struct ixgbe_stats ixgbe_gstrings_stats[] = {
- IXGBE_STAT("rx_pkts_nic", stats.gprc),
- IXGBE_STAT("tx_pkts_nic", stats.gptc),
- IXGBE_STAT("rx_bytes_nic", stats.gorc),
- IXGBE_STAT("tx_bytes_nic", stats.gotc),
- IXGBE_STAT("lsc_int", lsc_int),
- IXGBE_STAT("tx_busy", tx_busy),
- IXGBE_STAT("non_eop_descs", non_eop_descs),
-#ifndef CONFIG_IXGBE_NAPI
- IXGBE_STAT("rx_dropped_backlog", rx_dropped_backlog),
-#endif
- IXGBE_STAT("broadcast", stats.bprc),
-	IXGBE_STAT("rx_no_buffer_count", stats.rnbc[0]),
- IXGBE_STAT("tx_timeout_count", tx_timeout_count),
- IXGBE_STAT("tx_restart_queue", restart_queue),
- IXGBE_STAT("rx_long_length_errors", stats.roc),
- IXGBE_STAT("rx_short_length_errors", stats.ruc),
- IXGBE_STAT("tx_flow_control_xon", stats.lxontxc),
- IXGBE_STAT("rx_flow_control_xon", stats.lxonrxc),
- IXGBE_STAT("tx_flow_control_xoff", stats.lxofftxc),
- IXGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc),
- IXGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error),
- IXGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
- IXGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
-#ifndef IXGBE_NO_LRO
- IXGBE_STAT("lro_aggregated", lro_stats.coal),
- IXGBE_STAT("lro_flushed", lro_stats.flushed),
-#endif /* IXGBE_NO_LRO */
- IXGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources),
- IXGBE_STAT("hw_rsc_aggregated", rsc_total_count),
- IXGBE_STAT("hw_rsc_flushed", rsc_total_flush),
-#ifdef HAVE_TX_MQ
- IXGBE_STAT("fdir_match", stats.fdirmatch),
- IXGBE_STAT("fdir_miss", stats.fdirmiss),
- IXGBE_STAT("fdir_overflow", fdir_overflow),
-#endif /* HAVE_TX_MQ */
-#ifdef IXGBE_FCOE
- IXGBE_STAT("fcoe_bad_fccrc", stats.fccrc),
- IXGBE_STAT("fcoe_last_errors", stats.fclast),
- IXGBE_STAT("rx_fcoe_dropped", stats.fcoerpdc),
- IXGBE_STAT("rx_fcoe_packets", stats.fcoeprc),
- IXGBE_STAT("rx_fcoe_dwords", stats.fcoedwrc),
- IXGBE_STAT("fcoe_noddp", stats.fcoe_noddp),
- IXGBE_STAT("fcoe_noddp_ext_buff", stats.fcoe_noddp_ext_buff),
- IXGBE_STAT("tx_fcoe_packets", stats.fcoeptc),
- IXGBE_STAT("tx_fcoe_dwords", stats.fcoedwtc),
-#endif /* IXGBE_FCOE */
- IXGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
- IXGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
- IXGBE_STAT("os2bmc_tx_by_host", stats.o2bspc),
- IXGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc),
-};
-
-#define IXGBE_QUEUE_STATS_LEN \
- ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
- ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
- (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
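-/*
- * i.e. (tx queues + rx queues) * the number of u64 counters in
- * struct ixgbe_queue_stats -- one flat u64 slot per per-queue counter.
- */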
-#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
-#define IXGBE_NETDEV_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_net_stats)
-#define IXGBE_PB_STATS_LEN ( \
- (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
- IXGBE_FLAG_DCB_ENABLED) ? \
- (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
- / sizeof(u64) : 0)
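-/*
- * The ((struct ixgbe_adapter *)0)->stats.xxx expressions above use the
- * classic compile-time sizeof-of-a-member idiom (as in offsetof()): the
- * null pointer is never dereferenced, sizeof only inspects the type.
- */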
-#define IXGBE_VF_STATS_LEN \
- ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_vfs) * \
- (sizeof(struct vf_stats) / sizeof(u64)))
-#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
- IXGBE_NETDEV_STATS_LEN + \
- IXGBE_PB_STATS_LEN + \
- IXGBE_QUEUE_STATS_LEN + \
- IXGBE_VF_STATS_LEN)
-
-#endif /* ETHTOOL_GSTATS */
-#ifdef ETHTOOL_TEST
-static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
- "Register test (offline)", "Eeprom test (offline)",
- "Interrupt test (offline)", "Loopback test (offline)",
- "Link test (on/offline)"
-};
-#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
-#endif /* ETHTOOL_TEST */
-
-int ixgbe_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- u32 link_speed = 0;
- bool link_up;
-
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->transceiver = XCVR_EXTERNAL;
- if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->phy.multispeed_fiber)) {
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg);
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- ecmd->supported |= SUPPORTED_100baseT_Full;
- break;
- default:
- break;
- }
-
- ecmd->advertising = ADVERTISED_Autoneg;
- if (hw->phy.autoneg_advertised) {
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_100_FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_1GB_FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
- } else {
- /*
- * Default advertised modes in case
- * phy.autoneg_advertised isn't set.
- */
- ecmd->advertising |= (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full);
- if (hw->mac.type == ixgbe_mac_X540)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- }
-
- if (hw->phy.media_type == ixgbe_media_type_copper) {
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
- } else {
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
- }
- } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
- /* Set as FIBRE until SERDES defined in kernel */
- if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->autoneg = AUTONEG_DISABLE;
- } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE)
- || (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_Autoneg |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- } else {
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- }
- } else {
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->autoneg = AUTONEG_DISABLE;
- }
-
-#ifdef HAVE_ETHTOOL_SFP_DISPLAY_PORT
- /* Get PHY type */
- switch (adapter->hw.phy.type) {
- case ixgbe_phy_tn:
- case ixgbe_phy_aq:
- case ixgbe_phy_cu_unknown:
-		/* Copper 10GBASE-T */
- ecmd->port = PORT_TP;
- break;
- case ixgbe_phy_qt:
- ecmd->port = PORT_FIBRE;
- break;
- case ixgbe_phy_nl:
- case ixgbe_phy_sfp_passive_tyco:
- case ixgbe_phy_sfp_passive_unknown:
- case ixgbe_phy_sfp_ftl:
- case ixgbe_phy_sfp_avago:
- case ixgbe_phy_sfp_intel:
- case ixgbe_phy_sfp_unknown:
- switch (adapter->hw.phy.sfp_type) {
- /* SFP+ devices, further checking needed */
- case ixgbe_sfp_type_da_cu:
- case ixgbe_sfp_type_da_cu_core0:
- case ixgbe_sfp_type_da_cu_core1:
- ecmd->port = PORT_DA;
- break;
- case ixgbe_sfp_type_sr:
- case ixgbe_sfp_type_lr:
- case ixgbe_sfp_type_srlr_core0:
- case ixgbe_sfp_type_srlr_core1:
- ecmd->port = PORT_FIBRE;
- break;
- case ixgbe_sfp_type_not_present:
- ecmd->port = PORT_NONE;
- break;
- case ixgbe_sfp_type_1g_cu_core0:
- case ixgbe_sfp_type_1g_cu_core1:
- ecmd->port = PORT_TP;
- ecmd->supported = SUPPORTED_TP;
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_TP);
- break;
- case ixgbe_sfp_type_1g_sx_core0:
- case ixgbe_sfp_type_1g_sx_core1:
- ecmd->port = PORT_FIBRE;
- ecmd->supported = SUPPORTED_FIBRE;
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE);
- break;
- case ixgbe_sfp_type_unknown:
- default:
- ecmd->port = PORT_OTHER;
- break;
- }
- break;
- case ixgbe_phy_xaui:
- ecmd->port = PORT_NONE;
- break;
- case ixgbe_phy_unknown:
- case ixgbe_phy_generic:
- case ixgbe_phy_sfp_unsupported:
- default:
- ecmd->port = PORT_OTHER;
- break;
- }
-#endif
-
- if (!in_interrupt()) {
- hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
- } else {
- /*
- * this case is a special workaround for RHEL5 bonding
- * that calls this routine from interrupt context
- */
- link_speed = adapter->link_speed;
- link_up = adapter->link_up;
- }
-
- if (link_up) {
- switch (link_speed) {
- case IXGBE_LINK_SPEED_10GB_FULL:
- ecmd->speed = SPEED_10000;
- break;
- case IXGBE_LINK_SPEED_1GB_FULL:
- ecmd->speed = SPEED_1000;
- break;
- case IXGBE_LINK_SPEED_100_FULL:
- ecmd->speed = SPEED_100;
- break;
- default:
- break;
- }
- ecmd->duplex = DUPLEX_FULL;
- } else {
- ecmd->speed = -1;
- ecmd->duplex = -1;
- }
-
- return 0;
-}
-
-static int ixgbe_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- u32 advertised, old;
- s32 err = 0;
-
- if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->phy.multispeed_fiber)) {
- /*
- * this function does not support duplex forcing, but can
- * limit the advertising of the adapter to the specified speed
- */
- if (ecmd->autoneg == AUTONEG_DISABLE)
- return -EINVAL;
-
- if (ecmd->advertising & ~ecmd->supported)
- return -EINVAL;
-
- old = hw->phy.autoneg_advertised;
- advertised = 0;
- if (ecmd->advertising & ADVERTISED_10000baseT_Full)
- advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
- advertised |= IXGBE_LINK_SPEED_1GB_FULL;
-
- if (ecmd->advertising & ADVERTISED_100baseT_Full)
- advertised |= IXGBE_LINK_SPEED_100_FULL;
-
- if (old == advertised)
- return err;
- /* this sets the link speed and restarts auto-neg */
- hw->mac.autotry_restart = true;
- err = hw->mac.ops.setup_link(hw, advertised, true, true);
- if (err) {
- e_info(probe, "setup link failed with code %d\n", err);
- hw->mac.ops.setup_link(hw, old, true, true);
- }
- }
- return err;
-}
-
-static void ixgbe_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
-
- if (hw->fc.disable_fc_autoneg)
- pause->autoneg = 0;
- else
- pause->autoneg = 1;
-
- if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
- pause->rx_pause = 1;
- } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
- pause->tx_pause = 1;
- } else if (hw->fc.current_mode == ixgbe_fc_full) {
- pause->rx_pause = 1;
- pause->tx_pause = 1;
- }
-}
-
-static int ixgbe_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbe_fc_info fc = hw->fc;
-
-	/* 82598 does not support link flow control with DCB enabled */
- if ((hw->mac.type == ixgbe_mac_82598EB) &&
- (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- return -EINVAL;
-
- fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
-
- if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
- fc.requested_mode = ixgbe_fc_full;
- else if (pause->rx_pause)
- fc.requested_mode = ixgbe_fc_rx_pause;
- else if (pause->tx_pause)
- fc.requested_mode = ixgbe_fc_tx_pause;
- else
- fc.requested_mode = ixgbe_fc_none;
-
-	/* if anything changed, adopt the new settings and renegotiate */
- if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
- hw->fc = fc;
- if (netif_running(netdev))
- ixgbe_reinit_locked(adapter);
- else
- ixgbe_reset(adapter);
- }
-
- return 0;
-}
-
-static u32 ixgbe_get_msglevel(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- return adapter->msg_enable;
-}
-
-static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- adapter->msg_enable = data;
-}
-
-static int ixgbe_get_regs_len(struct net_device *netdev)
-{
-#define IXGBE_REGS_LEN 1129
- return IXGBE_REGS_LEN * sizeof(u32);
-}
-
-#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
-
-static void ixgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
- void *p)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- u32 *regs_buff = p;
- u8 i;
-
- printk(KERN_DEBUG "ixgbe_get_regs_1\n");
- memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
- printk(KERN_DEBUG "ixgbe_get_regs_2 0x%p\n", hw->hw_addr);
-
- regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
-
- /* General Registers */
- regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
- printk(KERN_DEBUG "ixgbe_get_regs_3\n");
- regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
- regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
- regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
- regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
- regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
- regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
- regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
-
- printk(KERN_DEBUG "ixgbe_get_regs_4\n");
-
- /* NVM Register */
- regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
- regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
- regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
- regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
- regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
- regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
- regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
- regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
- regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
- regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
-
- /* Interrupt */
-	/* don't read EICR because reading it can clear interrupt causes;
-	 * instead read EICS, which shadows EICR without clearing it */
- regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
- regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
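-	/* the first EICS read fills the EICR slot, per the note above */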
- regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
- regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
- regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
- regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
- regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
- regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
- regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
- regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
- regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
- regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
-
- /* Flow Control */
- regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
- regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
- regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
- regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
- regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
- for (i = 0; i < 8; i++) {
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
- regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- regs_buff[35 + i] = IXGBE_READ_REG(hw,
- IXGBE_FCRTL_82599(i));
- regs_buff[43 + i] = IXGBE_READ_REG(hw,
- IXGBE_FCRTH_82599(i));
- break;
- default:
- break;
- }
- }
- regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
- regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
-
- /* Receive DMA */
- for (i = 0; i < 64; i++)
- regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
- for (i = 0; i < 64; i++)
- regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
- for (i = 0; i < 64; i++)
- regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
- for (i = 0; i < 64; i++)
- regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
- for (i = 0; i < 64; i++)
- regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
- for (i = 0; i < 64; i++)
- regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
- for (i = 0; i < 16; i++)
- regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
- for (i = 0; i < 16; i++)
- regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- for (i = 0; i < 8; i++)
- regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
- regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
-
- /* Receive */
- regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
- regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
- for (i = 0; i < 16; i++)
- regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
- for (i = 0; i < 16; i++)
- regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
- regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
- regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
- regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
- regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
- for (i = 0; i < 8; i++)
- regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
- for (i = 0; i < 8; i++)
- regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
- regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
-
- /* Transmit */
- for (i = 0; i < 32; i++)
- regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
- for (i = 0; i < 32; i++)
- regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
- for (i = 0; i < 32; i++)
- regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
- for (i = 0; i < 32; i++)
- regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
- for (i = 0; i < 32; i++)
- regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
- for (i = 0; i < 32; i++)
- regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
- for (i = 0; i < 32; i++)
- regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
- for (i = 0; i < 32; i++)
- regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
- regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
- for (i = 0; i < 16; i++)
- regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
- regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
- for (i = 0; i < 8; i++)
- regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
- regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
-
- /* Wake Up */
- regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
- regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
- regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
- regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
- regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
- regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
- regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
- regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
- regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
-
- /* DCB */
- regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
- regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
- regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
- regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
- for (i = 0; i < 8; i++)
- regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
- for (i = 0; i < 8; i++)
- regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
- for (i = 0; i < 8; i++)
- regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
- for (i = 0; i < 8; i++)
- regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
- for (i = 0; i < 8; i++)
- regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
- for (i = 0; i < 8; i++)
- regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
-
- /* Statistics */
- regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
- regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
- regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
- regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
- for (i = 0; i < 8; i++)
- regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
- regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
- regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
- regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
- regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
- regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
- regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
- regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
- for (i = 0; i < 8; i++)
- regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
- for (i = 0; i < 8; i++)
- regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
- for (i = 0; i < 8; i++)
- regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
- for (i = 0; i < 8; i++)
- regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
- regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
- regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
- regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
- regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
- regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
- regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
- regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
- regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
- regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
- regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
- regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
- regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
- for (i = 0; i < 8; i++)
- regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
- regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
- regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
- regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
- regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
- regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
- regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
- regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
- regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
- regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
- regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
- regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
- regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
- regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
- regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
- regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
- regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
- regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
- regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
- regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
- for (i = 0; i < 16; i++)
- regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
- for (i = 0; i < 16; i++)
- regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
- for (i = 0; i < 16; i++)
- regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
- for (i = 0; i < 16; i++)
- regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
-
- /* MAC */
- regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
- regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
- regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
- regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
- regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
- regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
- regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
- regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
- regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
- regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
- regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
- regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
- regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
- regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
- regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
- regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
- regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
- regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
- regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
- regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
- regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
- regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
- regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
- regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
- regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
- regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
- regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
- regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
- regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
-
- /* Diagnostic */
- regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
- for (i = 0; i < 8; i++)
- regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
- regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
- for (i = 0; i < 4; i++)
- regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
- regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
- regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
- for (i = 0; i < 8; i++)
- regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
- regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
- for (i = 0; i < 4; i++)
- regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
- regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
- regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
- regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
- regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
- regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
- regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
- regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
- regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
- regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
- regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
- regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
- for (i = 0; i < 8; i++)
- regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
- regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
- regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
- regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
- regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
- regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
- regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
- regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
- regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
- regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
-
-	/* 82599 and X540 specific registers */
- regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-}
-
-static int ixgbe_get_eeprom_len(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- return adapter->hw.eeprom.word_size * 2;
-}
-
-static int ixgbe_get_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- u16 *eeprom_buff;
- int first_word, last_word, eeprom_len;
- int ret_val = 0;
- u16 i;
-
- if (eeprom->len == 0)
- return -EINVAL;
-
- eeprom->magic = hw->vendor_id | (hw->device_id << 16);
-
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_len = last_word - first_word + 1;
-
- eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
-
- ret_val = ixgbe_read_eeprom_buffer(hw, first_word, eeprom_len,
- eeprom_buff);
-
- /* Device's eeprom is always little-endian, word addressable */
- for (i = 0; i < eeprom_len; i++)
- le16_to_cpus(&eeprom_buff[i]);
-
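-	/*
-	 * An odd offset lands mid-word, so skip the leading byte of the
-	 * first converted word when copying out to the caller.
-	 */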
- memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
- kfree(eeprom_buff);
-
- return ret_val;
-}
-
-static int ixgbe_set_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- u16 *eeprom_buff;
- void *ptr;
- int max_len, first_word, last_word, ret_val = 0;
- u16 i;
-
- if (eeprom->len == 0)
- return -EINVAL;
-
- if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
- return -EINVAL;
-
- max_len = hw->eeprom.word_size * 2;
-
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(max_len, GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
-
- ptr = eeprom_buff;
-
- if (eeprom->offset & 1) {
- /*
-		 * need read/modify/write of the first changed EEPROM word;
-		 * only the second byte of that word is being modified
- */
- ret_val = ixgbe_read_eeprom(hw, first_word, &eeprom_buff[0]);
- if (ret_val)
- goto err;
-
- ptr++;
- }
- if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
- /*
-		 * need read/modify/write of the last changed EEPROM word;
-		 * only the first byte of that word is being modified
- */
- ret_val = ixgbe_read_eeprom(hw, last_word,
- &eeprom_buff[last_word - first_word]);
- if (ret_val)
- goto err;
- }
-
- /* Device's eeprom is always little-endian, word addressable */
- for (i = 0; i < last_word - first_word + 1; i++)
- le16_to_cpus(&eeprom_buff[i]);
-
- memcpy(ptr, bytes, eeprom->len);
-
- for (i = 0; i < last_word - first_word + 1; i++)
- cpu_to_le16s(&eeprom_buff[i]);
-
- ret_val = ixgbe_write_eeprom_buffer(hw, first_word,
- last_word - first_word + 1,
- eeprom_buff);
-
- /* Update the checksum */
- if (ret_val == 0)
- ixgbe_update_eeprom_checksum(hw);
-
-err:
- kfree(eeprom_buff);
- return ret_val;
-}
-
-static void ixgbe_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
-
- strlcpy(drvinfo->version, ixgbe_driver_version,
- sizeof(drvinfo->version));
-
- strlcpy(drvinfo->fw_version, adapter->eeprom_id,
- sizeof(drvinfo->fw_version));
-
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info));
-
- drvinfo->n_stats = IXGBE_STATS_LEN;
- drvinfo->testinfo_len = IXGBE_TEST_LEN;
- drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
-}
-
-static void ixgbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- ring->rx_max_pending = IXGBE_MAX_RXD;
- ring->tx_max_pending = IXGBE_MAX_TXD;
- ring->rx_mini_max_pending = 0;
- ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = adapter->rx_ring_count;
- ring->tx_pending = adapter->tx_ring_count;
- ring->rx_mini_pending = 0;
- ring->rx_jumbo_pending = 0;
-}
-
-static int ixgbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_ring *tx_ring = NULL, *rx_ring = NULL;
- u32 new_rx_count, new_tx_count;
- int i, err = 0;
-
- if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
- return -EINVAL;
-
- new_tx_count = clamp_t(u32, ring->tx_pending,
- IXGBE_MIN_TXD, IXGBE_MAX_TXD);
- new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
-
- new_rx_count = clamp_t(u32, ring->rx_pending,
- IXGBE_MIN_RXD, IXGBE_MAX_RXD);
- new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
-
-	/* nothing to do; return success */
- if ((new_tx_count == adapter->tx_ring_count) &&
- (new_rx_count == adapter->rx_ring_count))
- return 0;
-
- while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
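-	/* the loop above sleeps 1-2 ms per poll until we own __IXGBE_RESETTING */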
-
- if (!netif_running(adapter->netdev)) {
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->count = new_tx_count;
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->count = new_rx_count;
- adapter->tx_ring_count = new_tx_count;
- adapter->rx_ring_count = new_rx_count;
- goto clear_reset;
- }
-
- /* alloc updated Tx resources */
- if (new_tx_count != adapter->tx_ring_count) {
- tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
- if (!tx_ring) {
- err = -ENOMEM;
- goto clear_reset;
- }
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- /* clone ring and setup updated count */
- tx_ring[i] = *adapter->tx_ring[i];
- tx_ring[i].count = new_tx_count;
- err = ixgbe_setup_tx_resources(&tx_ring[i]);
- if (err) {
- while (i) {
- i--;
- ixgbe_free_tx_resources(&tx_ring[i]);
- }
-
- vfree(tx_ring);
- tx_ring = NULL;
-
- goto clear_reset;
- }
- }
- }
-
- /* alloc updated Rx resources */
- if (new_rx_count != adapter->rx_ring_count) {
- rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
- if (!rx_ring) {
- err = -ENOMEM;
- goto clear_reset;
- }
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- /* clone ring and setup updated count */
- rx_ring[i] = *adapter->rx_ring[i];
- rx_ring[i].count = new_rx_count;
- err = ixgbe_setup_rx_resources(&rx_ring[i]);
- if (err) {
- while (i) {
- i--;
- ixgbe_free_rx_resources(&rx_ring[i]);
- }
-
- vfree(rx_ring);
- rx_ring = NULL;
-
- goto clear_reset;
- }
- }
- }
-
- /* bring interface down to prepare for update */
- ixgbe_down(adapter);
-
- /* Tx */
- if (tx_ring) {
- for (i = 0; i < adapter->num_tx_queues; i++) {
- ixgbe_free_tx_resources(adapter->tx_ring[i]);
- *adapter->tx_ring[i] = tx_ring[i];
- }
- adapter->tx_ring_count = new_tx_count;
-
- vfree(tx_ring);
- tx_ring = NULL;
- }
-
- /* Rx */
- if (rx_ring) {
- for (i = 0; i < adapter->num_rx_queues; i++) {
- ixgbe_free_rx_resources(adapter->rx_ring[i]);
- *adapter->rx_ring[i] = rx_ring[i];
- }
- adapter->rx_ring_count = new_rx_count;
-
- vfree(rx_ring);
- rx_ring = NULL;
- }
-
- /* restore interface using new values */
- ixgbe_up(adapter);
-
-clear_reset:
- /* free Tx resources if Rx error is encountered */
- if (tx_ring) {
- for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_free_tx_resources(&tx_ring[i]);
- vfree(tx_ring);
- }
-
- clear_bit(__IXGBE_RESETTING, &adapter->state);
- return err;
-}
-
-#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
-static int ixgbe_get_stats_count(struct net_device *netdev)
-{
- return IXGBE_STATS_LEN;
-}
-
-#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
-static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_TEST:
- return IXGBE_TEST_LEN;
- case ETH_SS_STATS:
- return IXGBE_STATS_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
-static void ixgbe_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-#ifdef HAVE_NETDEV_STATS_IN_NETDEV
- struct net_device_stats *net_stats = &netdev->stats;
-#else
- struct net_device_stats *net_stats = &adapter->net_stats;
-#endif
- u64 *queue_stat;
- int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
- int i, j, k;
- char *p;
-
- printk(KERN_DEBUG "ixgbe_stats 0\n");
- ixgbe_update_stats(adapter);
- printk(KERN_DEBUG "ixgbe_stats 1\n");
-
- for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) {
- p = (char *)net_stats + ixgbe_gstrings_net_stats[i].stat_offset;
- data[i] = (ixgbe_gstrings_net_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- for (j = 0; j < IXGBE_GLOBAL_STATS_LEN; j++, i++) {
- p = (char *)adapter + ixgbe_gstrings_stats[j].stat_offset;
- data[i] = (ixgbe_gstrings_stats[j].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- printk(KERN_DEBUG "ixgbe_stats 2\n");
-#ifdef NO_VNIC
- for (j = 0; j < adapter->num_tx_queues; j++) {
- queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
- for (k = 0; k < stat_count; k++)
- data[i + k] = queue_stat[k];
- i += k;
- }
- for (j = 0; j < adapter->num_rx_queues; j++) {
- queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
- for (k = 0; k < stat_count; k++)
- data[i + k] = queue_stat[k];
- i += k;
- }
- printk(KERN_DEBUG "ixgbe_stats 3\n");
-#endif
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
- data[i++] = adapter->stats.pxontxc[j];
- data[i++] = adapter->stats.pxofftxc[j];
- }
- for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
- data[i++] = adapter->stats.pxonrxc[j];
- data[i++] = adapter->stats.pxoffrxc[j];
- }
- }
- printk(KERN_DEBUG "ixgbe_stats 4\n");
- stat_count = sizeof(struct vf_stats) / sizeof(u64);
- for (j = 0; j < adapter->num_vfs; j++) {
- queue_stat = (u64 *)&adapter->vfinfo[j].vfstats;
- for (k = 0; k < stat_count; k++)
- data[i + k] = queue_stat[k];
- queue_stat = (u64 *)&adapter->vfinfo[j].saved_rst_vfstats;
- for (k = 0; k < stat_count; k++)
- data[i + k] += queue_stat[k];
- i += k;
- }
-}
-
-static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- char *p = (char *)data;
- int i;
-
- switch (stringset) {
- case ETH_SS_TEST:
- memcpy(data, *ixgbe_gstrings_test,
- IXGBE_TEST_LEN * ETH_GSTRING_LEN);
- break;
- case ETH_SS_STATS:
- for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) {
- memcpy(p, ixgbe_gstrings_net_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- memcpy(p, ixgbe_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < adapter->num_tx_queues; i++) {
- sprintf(p, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < adapter->num_rx_queues; i++) {
- sprintf(p, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- }
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
- sprintf(p, "tx_pb_%u_pxon", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_pb_%u_pxoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
- sprintf(p, "rx_pb_%u_pxon", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_pb_%u_pxoff", i);
- p += ETH_GSTRING_LEN;
- }
- }
- for (i = 0; i < adapter->num_vfs; i++) {
- sprintf(p, "VF %d Rx Packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "VF %d Rx Bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "VF %d Tx Packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "VF %d Tx Bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "VF %d MC Packets", i);
- p += ETH_GSTRING_LEN;
- }
- /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
- break;
- }
-}
-
-static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- bool link_up;
- u32 link_speed = 0;
- *data = 0;
-
- hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
- if (link_up)
- return *data;
- else
- *data = 1;
- return *data;
-}
-
-/* ethtool register test data */
-struct ixgbe_reg_test {
- u16 reg;
- u8 array_len;
- u8 test_type;
- u32 mask;
- u32 write;
-};
-
-/* In the hardware, registers are laid out either singly, in arrays
- * spaced 0x40 bytes apart, or in contiguous tables. We assume
- * most tests take place on arrays or single registers (handled
- * as a single-element array) and special-case the tables.
- * Table tests are always pattern tests.
- *
- * We also make provision for some required setup steps by specifying
- * registers to be written without any read-back testing.
- */
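-/*
- * Concretely (see the test loop below): single and array registers step
- * 0x40 bytes per element, TABLE32 entries step 4 bytes, TABLE64 entries
- * step 8 bytes, with the _HI half a further 4 bytes into each entry.
- */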
-
-#define PATTERN_TEST 1
-#define SET_READ_TEST 2
-#define WRITE_NO_TEST 3
-#define TABLE32_TEST 4
-#define TABLE64_TEST_LO 5
-#define TABLE64_TEST_HI 6
-
-/* default 82599 register test */
-static struct ixgbe_reg_test reg_test_82599[] = {
- { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
- { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
- { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
- { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
- { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
- { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
- { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
- { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
- { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
- { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
- { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
- { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
-};
-
-/* default 82598 register test */
-static struct ixgbe_reg_test reg_test_82598[] = {
- { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
- { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
- { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
- { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- /* Enable all four RX queues before testing. */
- { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
- /* RDH is read-only for 82598, only test RDT. */
- { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
- { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
- { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
- { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
- { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
- { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
- { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
- { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
-};
-
-#define REG_PATTERN_TEST(R, M, W) \
-{ \
- u32 pat, val, before; \
- const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
- for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
- before = readl(adapter->hw.hw_addr + R); \
- writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
- val = readl(adapter->hw.hw_addr + R); \
- if (val != (_test[pat] & W & M)) { \
- e_err(drv, "pattern test reg %04X failed: got " \
- "0x%08X expected 0x%08X\n", \
- R, val, (_test[pat] & W & M)); \
- *data = R; \
- writel(before, adapter->hw.hw_addr + R); \
- return 1; \
- } \
- writel(before, adapter->hw.hw_addr + R); \
- } \
-}
-
-#define REG_SET_AND_CHECK(R, M, W) \
-{ \
- u32 val, before; \
- before = readl(adapter->hw.hw_addr + R); \
- writel((W & M), (adapter->hw.hw_addr + R)); \
- val = readl(adapter->hw.hw_addr + R); \
- if ((W & M) != (val & M)) { \
- e_err(drv, "set/check reg %04X test failed: got 0x%08X " \
- "expected 0x%08X\n", R, (val & M), (W & M)); \
- *data = R; \
- writel(before, (adapter->hw.hw_addr + R)); \
- return 1; \
- } \
- writel(before, (adapter->hw.hw_addr + R)); \
-}
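-/*
- * In the two macros above, W masks the bits that may be written and M the
- * bits expected to read back, so a pattern test passes only when the
- * readback equals (pattern & W & M); the pre-test value is restored
- * either way.
- */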
-
-static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
-{
- struct ixgbe_reg_test *test;
- u32 value, status_before, status_after;
- u32 i, toggle;
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82598EB:
- toggle = 0x7FFFF3FF;
- test = reg_test_82598;
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- toggle = 0x7FFFF30F;
- test = reg_test_82599;
- break;
-	default:
-		*data = 1;
-		return 1;
- }
-
- /*
- * Because the status register is such a special case,
- * we handle it separately from the rest of the register
- * tests. Some bits are read-only, some toggle, and some
- * are writeable on newer MACs.
- */
- status_before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
- value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
- status_after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
- if (value != status_after) {
-		e_err(drv, "STATUS register test failed: got 0x%08X "
-			"expected 0x%08X\n", status_after, value);
- *data = 1;
- return 1;
- }
- /* restore previous status */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, status_before);
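-
-	/*
-	 * In other words: bits under 'toggle' should survive the write
-	 * unchanged (read-only or hardware-owned); any difference in the
-	 * masked value fails the test.
-	 */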
-
- /*
- * Perform the remainder of the register test, looping through
- * the test table until we either fail or reach the null entry.
- */
- while (test->reg) {
- for (i = 0; i < test->array_len; i++) {
- switch (test->test_type) {
- case PATTERN_TEST:
- REG_PATTERN_TEST(test->reg + (i * 0x40),
- test->mask,
- test->write);
- break;
- case SET_READ_TEST:
- REG_SET_AND_CHECK(test->reg + (i * 0x40),
- test->mask,
- test->write);
- break;
- case WRITE_NO_TEST:
- writel(test->write,
- (adapter->hw.hw_addr + test->reg)
- + (i * 0x40));
- break;
- case TABLE32_TEST:
- REG_PATTERN_TEST(test->reg + (i * 4),
- test->mask,
- test->write);
- break;
- case TABLE64_TEST_LO:
- REG_PATTERN_TEST(test->reg + (i * 8),
- test->mask,
- test->write);
- break;
- case TABLE64_TEST_HI:
- REG_PATTERN_TEST((test->reg + 4) + (i * 8),
- test->mask,
- test->write);
- break;
- }
- }
- test++;
- }
-
- *data = 0;
- return 0;
-}
-
-static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
-{
- if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL))
- *data = 1;
- else
- *data = 0;
- return *data;
-}
-
-static irqreturn_t ixgbe_test_intr(int irq, void *data)
-{
- struct net_device *netdev = (struct net_device *) data;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
-
- return IRQ_HANDLED;
-}
-
-static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
-{
- struct net_device *netdev = adapter->netdev;
- u32 mask, i = 0, shared_int = true;
- u32 irq = adapter->pdev->irq;
-
- *data = 0;
-
- /* Hook up test interrupt handler just for this test */
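-	/*
-	 * MSI-X is not tested. With MSI we request an exclusive handler;
-	 * for legacy interrupts we first probe for an exclusive line and
-	 * fall back to a shared handler if the probe fails.
-	 */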
- if (adapter->msix_entries) {
- /* NOTE: we don't test MSI-X interrupts here, yet */
- return 0;
- } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
- shared_int = false;
- if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
- netdev)) {
- *data = 1;
- return -1;
- }
- } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
- netdev->name, netdev)) {
- shared_int = false;
- } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
- *data = 1;
- return -1;
- }
- e_info(hw, "testing %s interrupt\n",
- (shared_int ? "shared" : "unshared"));
-
- /* Disable all the interrupts */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
- IXGBE_WRITE_FLUSH(&adapter->hw);
- usleep_range(10000, 20000);
-
- /* Test each interrupt */
- for (; i < 10; i++) {
- /* Interrupt to test */
- mask = 1 << i;
-
- if (!shared_int) {
- /*
- * Disable the interrupts to be reported in
- * the cause register and then force the same
- * interrupt and see if one gets posted. If
- * an interrupt was posted to the bus, the
- * test failed.
- */
- adapter->test_icr = 0;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
- ~mask & 0x00007FFF);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
- ~mask & 0x00007FFF);
- IXGBE_WRITE_FLUSH(&adapter->hw);
- usleep_range(10000, 20000);
-
- if (adapter->test_icr & mask) {
- *data = 3;
- break;
- }
- }
-
- /*
- * Enable the interrupt to be reported in the cause
- * register and then force the same interrupt and see
- * if one gets posted. If an interrupt was not posted
- * to the bus, the test failed.
- */
- adapter->test_icr = 0;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
- IXGBE_WRITE_FLUSH(&adapter->hw);
- usleep_range(10000, 20000);
-
- if (!(adapter->test_icr & mask)) {
- *data = 4;
- break;
- }
-
- if (!shared_int) {
- /*
- * Disable the other interrupts to be reported in
- * the cause register and then force the other
- * interrupts and see if any get posted. If
- * an interrupt was posted to the bus, the
- * test failed.
- */
- adapter->test_icr = 0;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
- ~mask & 0x00007FFF);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
- ~mask & 0x00007FFF);
- IXGBE_WRITE_FLUSH(&adapter->hw);
- usleep_range(10000, 20000);
-
- if (adapter->test_icr) {
- *data = 5;
- break;
- }
- }
- }
-
- /* Disable all the interrupts */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
- IXGBE_WRITE_FLUSH(&adapter->hw);
- usleep_range(10000, 20000);
-
- /* Unhook test interrupt handler */
- free_irq(irq, netdev);
-
- return *data;
-}
-
-
-static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 reg_data;
-
- /* X540 needs to set the MACC.FLU bit to force link up */
- if (adapter->hw.mac.type == ixgbe_mac_X540) {
- reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
- reg_data |= IXGBE_MACC_FLU;
- IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
- }
-
- /* right now we only support MAC loopback in the driver */
- reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- /* Setup MAC loopback */
- reg_data |= IXGBE_HLREG0_LPBK;
- IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
-
- reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
-
- reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- reg_data &= ~IXGBE_AUTOC_LMS_MASK;
- reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
- IXGBE_WRITE_FLUSH(hw);
- usleep_range(10000, 20000);
-
- /* Disable Atlas Tx lanes; re-enabled in reset path */
- if (hw->mac.type == ixgbe_mac_82598EB) {
- u8 atlas;
-
- ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
- atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
- ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
-
- ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
- atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
- ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
-
- ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
- atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
- ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
-
- ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
- atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
- ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
- }
-
- return 0;
-}
-
-static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
-{
- u32 reg_data;
-
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
- reg_data &= ~IXGBE_HLREG0_LPBK;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
-}
-
-
-static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
-{
-
- //*data = ixgbe_setup_desc_rings(adapter);
- //if (*data)
- // goto out;
- *data = ixgbe_setup_loopback_test(adapter);
- if (*data)
- goto err_loopback;
- //*data = ixgbe_run_loopback_test(adapter);
- ixgbe_loopback_cleanup(adapter);
-
-err_loopback:
- //ixgbe_free_desc_rings(adapter);
-//out:
- return *data;
-
-}
-
-#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
-static int ixgbe_diag_test_count(struct net_device *netdev)
-{
- return IXGBE_TEST_LEN;
-}
-
-#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
-static void ixgbe_diag_test(struct net_device *netdev,
- struct ethtool_test *eth_test, u64 *data)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- bool if_running = netif_running(netdev);
-
- set_bit(__IXGBE_TESTING, &adapter->state);
- if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
- /* Offline tests */
-
- e_info(hw, "offline testing starting\n");
-
- /* Link test performed before hardware reset so autoneg doesn't
- * interfere with test result */
- if (ixgbe_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- int i;
- for (i = 0; i < adapter->num_vfs; i++) {
- if (adapter->vfinfo[i].clear_to_send) {
-					e_warn(drv, "Please take active VFs "
-					       "offline and restart the "
-					       "adapter before running NIC "
-					       "diagnostics\n");
- data[0] = 1;
- data[1] = 1;
- data[2] = 1;
- data[3] = 1;
- eth_test->flags |= ETH_TEST_FL_FAILED;
- clear_bit(__IXGBE_TESTING,
- &adapter->state);
- goto skip_ol_tests;
- }
- }
- }
-
- if (if_running)
- /* indicate we're in test mode */
- dev_close(netdev);
- else
- ixgbe_reset(adapter);
-
- e_info(hw, "register testing starting\n");
- if (ixgbe_reg_test(adapter, &data[0]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- ixgbe_reset(adapter);
- e_info(hw, "eeprom testing starting\n");
- if (ixgbe_eeprom_test(adapter, &data[1]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- ixgbe_reset(adapter);
- e_info(hw, "interrupt testing starting\n");
- if (ixgbe_intr_test(adapter, &data[2]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- /* If SRIOV or VMDq is enabled then skip MAC
- * loopback diagnostic. */
- if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
- IXGBE_FLAG_VMDQ_ENABLED)) {
- e_info(hw, "skip MAC loopback diagnostic in VT mode\n");
- data[3] = 0;
- goto skip_loopback;
- }
-
- ixgbe_reset(adapter);
- e_info(hw, "loopback testing starting\n");
- if (ixgbe_loopback_test(adapter, &data[3]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
-skip_loopback:
- ixgbe_reset(adapter);
-
- clear_bit(__IXGBE_TESTING, &adapter->state);
- if (if_running)
- dev_open(netdev);
- } else {
- e_info(hw, "online testing starting\n");
- /* Online tests */
- if (ixgbe_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- /* Online tests aren't run; pass by default */
- data[0] = 0;
- data[1] = 0;
- data[2] = 0;
- data[3] = 0;
-
- clear_bit(__IXGBE_TESTING, &adapter->state);
- }
-skip_ol_tests:
- msleep_interruptible(4 * 1000);
-}
-
-static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
- struct ethtool_wolinfo *wol)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int retval = 1;
- u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
-
- /* WOL not supported except for the following */
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82599_SFP:
-		/* Only these subdevices support WOL */
- switch (hw->subsystem_device_id) {
- case IXGBE_SUBDEV_ID_82599_560FLR:
- /* only support first port */
- if (hw->bus.func != 0) {
- wol->supported = 0;
- break;
- }
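-			/* fall through: the first port supports WOL like the SFP subdevice */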
- case IXGBE_SUBDEV_ID_82599_SFP:
- retval = 0;
- break;
- default:
- wol->supported = 0;
- break;
- }
- break;
- case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
- /* All except this subdevice support WOL */
- if (hw->subsystem_device_id ==
- IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
- wol->supported = 0;
- break;
- }
- retval = 0;
- break;
- case IXGBE_DEV_ID_82599_KX4:
- retval = 0;
- break;
- case IXGBE_DEV_ID_X540T:
-		/* check eeprom to see if WOL is enabled */
- if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
- ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
- (hw->bus.func == 0))) {
- retval = 0;
- break;
- }
-
- /* All others not supported */
- wol->supported = 0;
- break;
- default:
- wol->supported = 0;
- }
- return retval;
-}
-
-static void ixgbe_get_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wol)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC;
- wol->wolopts = 0;
-
- if (ixgbe_wol_exclusion(adapter, wol) ||
- !device_can_wakeup(&adapter->pdev->dev))
- return;
-
- if (adapter->wol & IXGBE_WUFC_EX)
- wol->wolopts |= WAKE_UCAST;
- if (adapter->wol & IXGBE_WUFC_MC)
- wol->wolopts |= WAKE_MCAST;
- if (adapter->wol & IXGBE_WUFC_BC)
- wol->wolopts |= WAKE_BCAST;
- if (adapter->wol & IXGBE_WUFC_MAG)
- wol->wolopts |= WAKE_MAGIC;
-}
-
-static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
- return -EOPNOTSUPP;
-
- if (ixgbe_wol_exclusion(adapter, wol))
- return wol->wolopts ? -EOPNOTSUPP : 0;
-
- adapter->wol = 0;
-
- if (wol->wolopts & WAKE_UCAST)
- adapter->wol |= IXGBE_WUFC_EX;
- if (wol->wolopts & WAKE_MCAST)
- adapter->wol |= IXGBE_WUFC_MC;
- if (wol->wolopts & WAKE_BCAST)
- adapter->wol |= IXGBE_WUFC_BC;
- if (wol->wolopts & WAKE_MAGIC)
- adapter->wol |= IXGBE_WUFC_MAG;
-
- device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
-
- return 0;
-}
-
-static int ixgbe_nway_reset(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- if (netif_running(netdev))
- ixgbe_reinit_locked(adapter);
-
- return 0;
-}
-
-#ifdef HAVE_ETHTOOL_SET_PHYS_ID
-static int ixgbe_set_phys_id(struct net_device *netdev,
- enum ethtool_phys_id_state state)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
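-		/* have the ethtool core blink the LED 2 times per second */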
- return 2;
-
- case ETHTOOL_ID_ON:
- hw->mac.ops.led_on(hw, IXGBE_LED_ON);
- break;
-
- case ETHTOOL_ID_OFF:
- hw->mac.ops.led_off(hw, IXGBE_LED_ON);
- break;
-
- case ETHTOOL_ID_INACTIVE:
- /* Restore LED settings */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
- break;
- }
-
- return 0;
-}
-#else
-static int ixgbe_phys_id(struct net_device *netdev, u32 data)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
- u32 i;
-
- if (!data || data > 300)
- data = 300;
-
- for (i = 0; i < (data * 1000); i += 400) {
- ixgbe_led_on(hw, IXGBE_LED_ON);
- msleep_interruptible(200);
- ixgbe_led_off(hw, IXGBE_LED_ON);
- msleep_interruptible(200);
- }
-
- /* Restore LED settings */
- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
-
- return 0;
-}
-#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
-
-static int ixgbe_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
-#ifndef CONFIG_IXGBE_NAPI
- ec->rx_max_coalesced_frames_irq = adapter->rx_work_limit;
-#endif /* CONFIG_IXGBE_NAPI */
- /* only valid if in constant ITR mode */
- if (adapter->rx_itr_setting <= 1)
- ec->rx_coalesce_usecs = adapter->rx_itr_setting;
- else
- ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
-
- /* if in mixed tx/rx queues per vector mode, report only rx settings */
- if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
- return 0;
-
- /* only valid if in constant ITR mode */
- if (adapter->tx_itr_setting <= 1)
- ec->tx_coalesce_usecs = adapter->tx_itr_setting;
- else
- ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
-
- return 0;
-}
-
-/*
- * this function must be called before setting the new value of
- * rx_itr_setting
- */
-#ifdef NO_VNIC
-static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
-
- /* nothing to do if LRO or RSC are not enabled */
- if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
- !(netdev->features & NETIF_F_LRO))
- return false;
-
- /* check the feature flag value and enable RSC if necessary */
- if (adapter->rx_itr_setting == 1 ||
- adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
- if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
- adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- e_info(probe, "rx-usecs value high enough "
- "to re-enable RSC\n");
- return true;
- }
- /* if interrupt rate is too high then disable RSC */
- } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
-#ifdef IXGBE_NO_LRO
- e_info(probe, "rx-usecs set too low, disabling RSC\n");
-#else
- e_info(probe, "rx-usecs set too low, "
- "falling back to software LRO\n");
-#endif
- return true;
- }
- return false;
-}
-#endif
-
-static int ixgbe_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
-{
-#ifdef NO_VNIC
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_q_vector *q_vector;
- int i;
- int num_vectors;
- u16 tx_itr_param, rx_itr_param;
- bool need_reset = false;
-
- /* don't accept tx specific changes if we've got mixed RxTx vectors */
- if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
- && ec->tx_coalesce_usecs)
- return -EINVAL;
-
- if (ec->tx_max_coalesced_frames_irq)
- adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
-
-#ifndef CONFIG_IXGBE_NAPI
- if (ec->rx_max_coalesced_frames_irq)
- adapter->rx_work_limit = ec->rx_max_coalesced_frames_irq;
-
-#endif
- if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
- (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
- return -EINVAL;
-
- if (ec->rx_coalesce_usecs > 1)
- adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
- else
- adapter->rx_itr_setting = ec->rx_coalesce_usecs;
-
- if (adapter->rx_itr_setting == 1)
- rx_itr_param = IXGBE_20K_ITR;
- else
- rx_itr_param = adapter->rx_itr_setting;
-
- if (ec->tx_coalesce_usecs > 1)
- adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
- else
- adapter->tx_itr_setting = ec->tx_coalesce_usecs;
-
- if (adapter->tx_itr_setting == 1)
- tx_itr_param = IXGBE_10K_ITR;
- else
- tx_itr_param = adapter->tx_itr_setting;
-
- /* check the old value and enable RSC if necessary */
- need_reset = ixgbe_update_rsc(adapter);
-
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- num_vectors = 1;
-
- for (i = 0; i < num_vectors; i++) {
- q_vector = adapter->q_vector[i];
- q_vector->tx.work_limit = adapter->tx_work_limit;
- q_vector->rx.work_limit = adapter->rx_work_limit;
- if (q_vector->tx.count && !q_vector->rx.count)
- /* tx only */
- q_vector->itr = tx_itr_param;
- else
- /* rx only or mixed */
- q_vector->itr = rx_itr_param;
- ixgbe_write_eitr(q_vector);
- }
-
-	/*
-	 * Do the reset here at the end to make sure the EITR==0 case is
-	 * handled correctly w.r.t. stopping tx and changing TXDCTL.WTHRESH
-	 * settings; it also locks in the RSC enable/disable, which requires
-	 * a reset.
-	 */
- if (need_reset)
- ixgbe_do_reset(netdev);
-#endif
- return 0;
-}
-
-#ifndef HAVE_NDO_SET_FEATURES
-static u32 ixgbe_get_rx_csum(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_ring *ring = adapter->rx_ring[0];
- return test_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
-}
-
-static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int i;
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
- if (data)
- set_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
- else
- clear_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
- }
-
- /* LRO and RSC both depend on RX checksum to function */
- if (!data && (netdev->features & NETIF_F_LRO)) {
- netdev->features &= ~NETIF_F_LRO;
-
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
- ixgbe_do_reset(netdev);
- }
- }
-
- return 0;
-}
-
-static u32 ixgbe_get_tx_csum(struct net_device *netdev)
-{
- return (netdev->features & NETIF_F_IP_CSUM) != 0;
-}
-
-static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u32 feature_list;
-
-#ifdef NETIF_F_IPV6_CSUM
- feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-#else
- feature_list = NETIF_F_IP_CSUM;
-#endif
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- feature_list |= NETIF_F_SCTP_CSUM;
- break;
- default:
- break;
- }
- if (data)
- netdev->features |= feature_list;
- else
- netdev->features &= ~feature_list;
-
- return 0;
-}
-
-#ifdef NETIF_F_TSO
-static int ixgbe_set_tso(struct net_device *netdev, u32 data)
-{
- if (data) {
- netdev->features |= NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
- netdev->features |= NETIF_F_TSO6;
-#endif
- } else {
-#ifndef HAVE_NETDEV_VLAN_FEATURES
-#ifdef NETIF_F_HW_VLAN_TX
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- /* disable TSO on all VLANs if they're present */
- if (adapter->vlgrp) {
- int i;
- struct net_device *v_netdev;
- for (i = 0; i < VLAN_N_VID; i++) {
- v_netdev =
- vlan_group_get_device(adapter->vlgrp, i);
- if (v_netdev) {
- v_netdev->features &= ~NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
- v_netdev->features &= ~NETIF_F_TSO6;
-#endif
- vlan_group_set_device(adapter->vlgrp, i,
- v_netdev);
- }
- }
- }
-#endif
-#endif /* HAVE_NETDEV_VLAN_FEATURES */
- netdev->features &= ~NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
- netdev->features &= ~NETIF_F_TSO6;
-#endif
- }
- return 0;
-}
-
-#endif /* NETIF_F_TSO */
-#ifdef ETHTOOL_GFLAGS
-static int ixgbe_set_flags(struct net_device *netdev, u32 data)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
- u32 changed = netdev->features ^ data;
- bool need_reset = false;
- int rc;
-
-#ifndef HAVE_VLAN_RX_REGISTER
- if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- !(data & ETH_FLAG_RXVLAN))
- return -EINVAL;
-
-#endif
-#ifdef NETIF_F_RXHASH
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
- supported_flags |= ETH_FLAG_RXHASH;
-#endif
-#ifdef IXGBE_NO_LRO
- if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
-#endif
- supported_flags |= ETH_FLAG_LRO;
-
-#ifdef ETHTOOL_GRXRINGS
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_X540:
- case ixgbe_mac_82599EB:
- supported_flags |= ETH_FLAG_NTUPLE;
- default:
- break;
- }
-
-#endif
- rc = ethtool_op_set_flags(netdev, data, supported_flags);
- if (rc)
- return rc;
-
-#ifndef HAVE_VLAN_RX_REGISTER
- if (changed & ETH_FLAG_RXVLAN)
- ixgbe_vlan_mode(netdev, netdev->features);
-
-#endif
- /* if state changes we need to update adapter->flags and reset */
- if (!(netdev->features & NETIF_F_LRO)) {
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
- need_reset = true;
- adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
- } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
- !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
- if (adapter->rx_itr_setting == 1 ||
- adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
- adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- need_reset = true;
- } else if (changed & ETH_FLAG_LRO) {
-#ifdef IXGBE_NO_LRO
- e_info(probe, "rx-usecs set too low, "
- "disabling RSC\n");
-#else
- e_info(probe, "rx-usecs set too low, "
- "falling back to software LRO\n");
-#endif
- }
- }
-
-#ifdef ETHTOOL_GRXRINGS
- /*
- * Check if Flow Director n-tuple support was enabled or disabled. If
- * the state changed, we need to reset.
- */
- if (!(netdev->features & NETIF_F_NTUPLE)) {
- if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
- /* turn off Flow Director, set ATR and reset */
- if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
- !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
- need_reset = true;
- }
- adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- } else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
- /* turn off ATR, enable perfect filters and reset */
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- need_reset = true;
- }
-
-#endif /* ETHTOOL_GRXRINGS */
- if (need_reset)
- ixgbe_do_reset(netdev);
-
- return 0;
-}
-
-#endif /* ETHTOOL_GFLAGS */
-#endif /* HAVE_NDO_SET_FEATURES */
-#ifdef ETHTOOL_GRXRINGS
-static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
- struct ethtool_rxnfc *cmd)
-{
- union ixgbe_atr_input *mask = &adapter->fdir_mask;
- struct ethtool_rx_flow_spec *fsp =
- (struct ethtool_rx_flow_spec *)&cmd->fs;
- struct hlist_node *node, *node2;
- struct ixgbe_fdir_filter *rule = NULL;
-
- /* report total rule count */
- cmd->data = (1024 << adapter->fdir_pballoc) - 2;
-
- hlist_for_each_entry_safe(rule, node, node2,
- &adapter->fdir_filter_list, fdir_node) {
- if (fsp->location <= rule->sw_idx)
- break;
- }
-
- if (!rule || fsp->location != rule->sw_idx)
- return -EINVAL;
-
- /* fill out the flow spec entry */
-
- /* set flow type field */
- switch (rule->filter.formatted.flow_type) {
- case IXGBE_ATR_FLOW_TYPE_TCPV4:
- fsp->flow_type = TCP_V4_FLOW;
- break;
- case IXGBE_ATR_FLOW_TYPE_UDPV4:
- fsp->flow_type = UDP_V4_FLOW;
- break;
- case IXGBE_ATR_FLOW_TYPE_SCTPV4:
- fsp->flow_type = SCTP_V4_FLOW;
- break;
- case IXGBE_ATR_FLOW_TYPE_IPV4:
- fsp->flow_type = IP_USER_FLOW;
- fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
- fsp->h_u.usr_ip4_spec.proto = 0;
- fsp->m_u.usr_ip4_spec.proto = 0;
- break;
- default:
- return -EINVAL;
- }
-
- fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
- fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
- fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
- fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
- fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
- fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
- fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
- fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
- fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
- fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
- fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
- fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
- fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
- fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
- fsp->flow_type |= FLOW_EXT;
-
- /* record action */
- if (rule->action == IXGBE_FDIR_DROP_QUEUE)
- fsp->ring_cookie = RX_CLS_FLOW_DISC;
- else
- fsp->ring_cookie = rule->action;
-
- return 0;
-}
-
-static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
- struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
-{
- struct hlist_node *node, *node2;
- struct ixgbe_fdir_filter *rule;
- int cnt = 0;
-
- /* report total rule count */
- cmd->data = (1024 << adapter->fdir_pballoc) - 2;
-
- hlist_for_each_entry_safe(rule, node, node2,
- &adapter->fdir_filter_list, fdir_node) {
- if (cnt == cmd->rule_cnt)
- return -EMSGSIZE;
- rule_locs[cnt] = rule->sw_idx;
- cnt++;
- }
-
- cmd->rule_cnt = cnt;
-
- return 0;
-}
-
-static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
- struct ethtool_rxnfc *cmd)
-{
- cmd->data = 0;
-
- /* if RSS is disabled then report no hashing */
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
- return 0;
-
- /* Report default options for RSS on ixgbe */
- switch (cmd->flow_type) {
- case TCP_V4_FLOW:
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
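-		/* fall through: all IPv4 flow types also hash on src/dst IP */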
- case UDP_V4_FLOW:
- if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case AH_V4_FLOW:
- case ESP_V4_FLOW:
- case IPV4_FLOW:
- cmd->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V6_FLOW:
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
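-		/* fall through: all IPv6 flow types also hash on src/dst IP */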
- case UDP_V6_FLOW:
- if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case AH_V6_FLOW:
- case ESP_V6_FLOW:
- case IPV6_FLOW:
- cmd->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
-#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
- void *rule_locs)
-#else
- u32 *rule_locs)
-#endif
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
- ret = 0;
- break;
- case ETHTOOL_GRXCLSRLCNT:
- cmd->rule_cnt = adapter->fdir_filter_count;
- ret = 0;
- break;
- case ETHTOOL_GRXCLSRULE:
- ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
- break;
- case ETHTOOL_GRXCLSRLALL:
- ret = ixgbe_get_ethtool_fdir_all(adapter, cmd,
- (u32 *)rule_locs);
- break;
- case ETHTOOL_GRXFH:
- ret = ixgbe_get_rss_hash_opts(adapter, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
- struct ixgbe_fdir_filter *input,
- u16 sw_idx)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct hlist_node *node, *node2, *parent;
- struct ixgbe_fdir_filter *rule;
- int err = -EINVAL;
-
- parent = NULL;
- rule = NULL;
-
- hlist_for_each_entry_safe(rule, node, node2,
- &adapter->fdir_filter_list, fdir_node) {
-		/* stop at a matching index, or at the insertion point if none matches */
- if (rule->sw_idx >= sw_idx)
- break;
- parent = node;
- }
-
- /* if there is an old rule occupying our place remove it */
- if (rule && (rule->sw_idx == sw_idx)) {
- if (!input || (rule->filter.formatted.bkt_hash !=
- input->filter.formatted.bkt_hash)) {
- err = ixgbe_fdir_erase_perfect_filter_82599(hw,
- &rule->filter,
- sw_idx);
- }
-
- hlist_del(&rule->fdir_node);
- kfree(rule);
- adapter->fdir_filter_count--;
- }
-
-	/*
-	 * If there is no input this was a delete: err is 0 if a rule was
-	 * found and removed from the list, -EINVAL otherwise.
-	 */
- if (!input)
- return err;
-
- /* initialize node and set software index */
- INIT_HLIST_NODE(&input->fdir_node);
-
- /* add filter to the list */
- if (parent)
- hlist_add_after(parent, &input->fdir_node);
- else
- hlist_add_head(&input->fdir_node,
- &adapter->fdir_filter_list);
-
- /* update counts */
- adapter->fdir_filter_count++;
-
- return 0;
-}
-
-static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
- u8 *flow_type)
-{
- switch (fsp->flow_type & ~FLOW_EXT) {
- case TCP_V4_FLOW:
- *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
- break;
- case UDP_V4_FLOW:
- *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
- break;
- case SCTP_V4_FLOW:
- *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
- break;
- case IP_USER_FLOW:
- switch (fsp->h_u.usr_ip4_spec.proto) {
- case IPPROTO_TCP:
- *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
- break;
- case IPPROTO_UDP:
- *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
- break;
- case IPPROTO_SCTP:
- *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
- break;
- case 0:
- if (!fsp->m_u.usr_ip4_spec.proto) {
- *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
- break;
- }
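-			/* fall through: proto 0 with a non-zero proto mask is unsupported */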
- default:
- return 0;
- }
- break;
- default:
- return 0;
- }
-
- return 1;
-}
-
-static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
- struct ethtool_rxnfc *cmd)
-{
- struct ethtool_rx_flow_spec *fsp =
- (struct ethtool_rx_flow_spec *)&cmd->fs;
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbe_fdir_filter *input;
- union ixgbe_atr_input mask;
- int err;
-
- if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
- return -EOPNOTSUPP;
-
- /*
- * Don't allow programming if the action is a queue greater than
- * the number of online Rx queues.
- */
- if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
- (fsp->ring_cookie >= adapter->num_rx_queues))
- return -EINVAL;
-
- /* Don't allow indexes to exist outside of available space */
- if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
- e_err(drv, "Location out of range\n");
- return -EINVAL;
- }
-
- input = kzalloc(sizeof(*input), GFP_ATOMIC);
- if (!input)
- return -ENOMEM;
-
- memset(&mask, 0, sizeof(union ixgbe_atr_input));
-
- /* set SW index */
- input->sw_idx = fsp->location;
-
- /* record flow type */
- if (!ixgbe_flowspec_to_flow_type(fsp,
- &input->filter.formatted.flow_type)) {
- e_err(drv, "Unrecognized flow type\n");
- goto err_out;
- }
-
- mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
- IXGBE_ATR_L4TYPE_MASK;
-
- if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
- mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
-
- /* Copy input into formatted structures */
- input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
- mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
- input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
- mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
- input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
- mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
- input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
- mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
-
- if (fsp->flow_type & FLOW_EXT) {
- input->filter.formatted.vm_pool =
- (unsigned char)ntohl(fsp->h_ext.data[1]);
- mask.formatted.vm_pool =
- (unsigned char)ntohl(fsp->m_ext.data[1]);
- input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
- mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
- input->filter.formatted.flex_bytes =
- fsp->h_ext.vlan_etype;
- mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
- }
-
- /* determine if we need to drop or route the packet */
- if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
- input->action = IXGBE_FDIR_DROP_QUEUE;
- else
- input->action = fsp->ring_cookie;
-
- spin_lock(&adapter->fdir_perfect_lock);
-
- if (hlist_empty(&adapter->fdir_filter_list)) {
- /* save mask and program input mask into HW */
- memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
- err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
- if (err) {
- e_err(drv, "Error writing mask\n");
- goto err_out_w_lock;
- }
- } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
- e_err(drv, "Only one mask supported per port\n");
- goto err_out_w_lock;
- }
-
- /* apply mask and compute/store hash */
- ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
-
- /* program filters to filter memory */
- err = ixgbe_fdir_write_perfect_filter_82599(hw,
- &input->filter, input->sw_idx,
- (input->action == IXGBE_FDIR_DROP_QUEUE) ?
- IXGBE_FDIR_DROP_QUEUE :
- adapter->rx_ring[input->action]->reg_idx);
- if (err)
- goto err_out_w_lock;
-
- ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
-
- spin_unlock(&adapter->fdir_perfect_lock);
-
- kfree(input);
- return err;
-err_out_w_lock:
- spin_unlock(&adapter->fdir_perfect_lock);
-err_out:
- kfree(input);
- return -EINVAL;
-}
-
-static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
- struct ethtool_rxnfc *cmd)
-{
- struct ethtool_rx_flow_spec *fsp =
- (struct ethtool_rx_flow_spec *)&cmd->fs;
- int err;
-
- spin_lock(&adapter->fdir_perfect_lock);
- err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, (u16)(fsp->location));
- spin_unlock(&adapter->fdir_perfect_lock);
-
- return err;
-}
-
-#ifdef ETHTOOL_SRXNTUPLE
-/*
- * We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid
- * a null pointer dereference, since those kernels assumed that this function
- * was present whenever the NETIF_F_NTUPLE flag was defined.
- */
-static int ixgbe_set_rx_ntuple(struct net_device *dev,
- struct ethtool_rx_ntuple *cmd)
-{
- return -EOPNOTSUPP;
-}
-
-#endif
-#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
- IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
-static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
- struct ethtool_rxnfc *nfc)
-{
- u32 flags2 = adapter->flags2;
-
- /*
- * RSS does not support anything other than hashing
- * to queues on src and dst IPs and ports
- */
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- switch (nfc->flow_type) {
- case TCP_V4_FLOW:
- case TCP_V6_FLOW:
- if (!(nfc->data & RXH_IP_SRC) ||
- !(nfc->data & RXH_IP_DST) ||
- !(nfc->data & RXH_L4_B_0_1) ||
- !(nfc->data & RXH_L4_B_2_3))
- return -EINVAL;
- break;
- case UDP_V4_FLOW:
- if (!(nfc->data & RXH_IP_SRC) ||
- !(nfc->data & RXH_IP_DST))
- return -EINVAL;
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
- break;
- default:
- return -EINVAL;
- }
- break;
- case UDP_V6_FLOW:
- if (!(nfc->data & RXH_IP_SRC) ||
- !(nfc->data & RXH_IP_DST))
- return -EINVAL;
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
- break;
- default:
- return -EINVAL;
- }
- break;
- case AH_ESP_V4_FLOW:
- case AH_V4_FLOW:
- case ESP_V4_FLOW:
- case SCTP_V4_FLOW:
- case AH_ESP_V6_FLOW:
- case AH_V6_FLOW:
- case ESP_V6_FLOW:
- case SCTP_V6_FLOW:
- if (!(nfc->data & RXH_IP_SRC) ||
- !(nfc->data & RXH_IP_DST) ||
- (nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- /* if we changed something we need to update flags */
- if (flags2 != adapter->flags2) {
- struct ixgbe_hw *hw = &adapter->hw;
- u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
-
- if ((flags2 & UDP_RSS_FLAGS) &&
- !(adapter->flags2 & UDP_RSS_FLAGS))
- e_warn(drv, "enabling UDP RSS: fragmented packets"
- " may arrive out of order to the stack above\n");
-
- adapter->flags2 = flags2;
-
- /* Perform hash on these packet types */
- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
- | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
- | IXGBE_MRQC_RSS_FIELD_IPV6
- | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
-
- mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
- IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
-
- if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
-
- if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
-
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
- }
-
- return 0;
-}
-
-static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXCLSRLINS:
- ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
- break;
- case ETHTOOL_SRXCLSRLDEL:
- ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
- break;
- case ETHTOOL_SRXFH:
- ret = ixgbe_set_rss_hash_opt(adapter, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-#endif /* ETHTOOL_GRXRINGS */
-//static
-struct ethtool_ops ixgbe_ethtool_ops = {
- .get_settings = ixgbe_get_settings,
- .set_settings = ixgbe_set_settings,
- .get_drvinfo = ixgbe_get_drvinfo,
- .get_regs_len = ixgbe_get_regs_len,
- .get_regs = ixgbe_get_regs,
- .get_wol = ixgbe_get_wol,
- .set_wol = ixgbe_set_wol,
- .nway_reset = ixgbe_nway_reset,
- .get_link = ethtool_op_get_link,
- .get_eeprom_len = ixgbe_get_eeprom_len,
- .get_eeprom = ixgbe_get_eeprom,
- .set_eeprom = ixgbe_set_eeprom,
- .get_ringparam = ixgbe_get_ringparam,
- .set_ringparam = ixgbe_set_ringparam,
- .get_pauseparam = ixgbe_get_pauseparam,
- .set_pauseparam = ixgbe_set_pauseparam,
- .get_msglevel = ixgbe_get_msglevel,
- .set_msglevel = ixgbe_set_msglevel,
-#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
- .self_test_count = ixgbe_diag_test_count,
-#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
- .self_test = ixgbe_diag_test,
- .get_strings = ixgbe_get_strings,
-#ifdef HAVE_ETHTOOL_SET_PHYS_ID
- .set_phys_id = ixgbe_set_phys_id,
-#else
- .phys_id = ixgbe_phys_id,
-#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
-#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
- .get_stats_count = ixgbe_get_stats_count,
-#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
- .get_sset_count = ixgbe_get_sset_count,
-#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
- .get_ethtool_stats = ixgbe_get_ethtool_stats,
-#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
- .get_perm_addr = ethtool_op_get_perm_addr,
-#endif
- .get_coalesce = ixgbe_get_coalesce,
- .set_coalesce = ixgbe_set_coalesce,
-#ifndef HAVE_NDO_SET_FEATURES
- .get_rx_csum = ixgbe_get_rx_csum,
- .set_rx_csum = ixgbe_set_rx_csum,
- .get_tx_csum = ixgbe_get_tx_csum,
- .set_tx_csum = ixgbe_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
-#ifdef NETIF_F_TSO
- .get_tso = ethtool_op_get_tso,
- .set_tso = ixgbe_set_tso,
-#endif
-#ifdef ETHTOOL_GFLAGS
- .get_flags = ethtool_op_get_flags,
- .set_flags = ixgbe_set_flags,
-#endif
-#endif /* HAVE_NDO_SET_FEATURES */
-#ifdef ETHTOOL_GRXRINGS
- .get_rxnfc = ixgbe_get_rxnfc,
- .set_rxnfc = ixgbe_set_rxnfc,
-#ifdef ETHTOOL_SRXNTUPLE
- .set_rx_ntuple = ixgbe_set_rx_ntuple,
-#endif
-#endif
-};
-
-void ixgbe_set_ethtool_ops(struct net_device *netdev)
-{
- SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
-}
-#endif /* SIOCETHTOOL */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_fcoe.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_fcoe.h
deleted file mode 100755
index cad28622..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_fcoe.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IXGBE_FCOE_H
-#define _IXGBE_FCOE_H
-
-#ifdef IXGBE_FCOE
-
-#include <scsi/fc/fc_fs.h>
-#include <scsi/fc/fc_fcoe.h>
-
-/* shift bits within STAT for FCSTAT */
-#define IXGBE_RXDADV_FCSTAT_SHIFT 4
-
-/* ddp user buffer */
-#define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */
-#define IXGBE_FCPTR_ALIGN 16
-#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t))
-#define IXGBE_FCBUFF_4KB 0x0
-#define IXGBE_FCBUFF_8KB 0x1
-#define IXGBE_FCBUFF_16KB 0x2
-#define IXGBE_FCBUFF_64KB 0x3
-#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */
-#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */
-#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */
-
-/* Default traffic class to use for FCoE */
-#define IXGBE_FCOE_DEFTC 3
-
-/* fcerr */
-#define IXGBE_FCERR_BADCRC 0x00100000
-#define IXGBE_FCERR_EOFSOF 0x00200000
-#define IXGBE_FCERR_NOFIRST 0x00300000
-#define IXGBE_FCERR_OOOSEQ 0x00400000
-#define IXGBE_FCERR_NODMA 0x00500000
-#define IXGBE_FCERR_PKTLOST 0x00600000
-
-/* FCoE DDP for target mode */
-#define __IXGBE_FCOE_TARGET 1
-
-struct ixgbe_fcoe_ddp {
- int len;
- u32 err;
- unsigned int sgc;
- struct scatterlist *sgl;
- dma_addr_t udp;
- u64 *udl;
- struct pci_pool *pool;
-};
-
-struct ixgbe_fcoe {
- struct pci_pool **pool;
- atomic_t refcnt;
- spinlock_t lock;
- struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
- unsigned char *extra_ddp_buffer;
- dma_addr_t extra_ddp_buffer_dma;
- u64 __percpu *pcpu_noddp;
- u64 __percpu *pcpu_noddp_ext_buff;
- unsigned long mode;
- u8 tc;
- u8 up;
- u8 up_set;
-};
-#endif /* IXGBE_FCOE */
-
-#endif /* _IXGBE_FCOE_H */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c
deleted file mode 100755
index cb569068..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c
+++ /dev/null
@@ -1,2975 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-/******************************************************************************
- Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
-******************************************************************************/
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/highmem.h>
-#include <linux/string.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#ifdef HAVE_SCTP
-#include <linux/sctp.h>
-#endif
-#include <linux/pkt_sched.h>
-#include <linux/ipv6.h>
-#ifdef NETIF_F_TSO
-#include <net/checksum.h>
-#ifdef NETIF_F_TSO6
-#include <net/ip6_checksum.h>
-#endif
-#endif
-#ifdef SIOCETHTOOL
-#include <linux/ethtool.h>
-#endif
-
-#include "ixgbe.h"
-
-#undef CONFIG_DCA
-#undef CONFIG_DCA_MODULE
-
-char ixgbe_driver_name[] = "ixgbe";
-static const char ixgbe_driver_string[] =
- "Intel(R) 10 Gigabit PCI Express Network Driver";
-#define DRV_HW_PERF
-
-#ifndef CONFIG_IXGBE_NAPI
-#define DRIVERNAPI
-#else
-#define DRIVERNAPI "-NAPI"
-#endif
-
-#define FPGA
-
-#define VMDQ_TAG
-
-#define MAJ 3
-#define MIN 9
-#define BUILD 17
-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
- __stringify(BUILD) DRIVERNAPI DRV_HW_PERF FPGA VMDQ_TAG
-const char ixgbe_driver_version[] = DRV_VERSION;
-static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2012 Intel Corporation.";
-
-/* ixgbe_pci_tbl - PCI Device ID Table
- *
- * Wildcard entries (PCI_ANY_ID) should come last
- * Last entry must be all 0s
- *
- * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
- * Class, Class Mask, private data (not used) }
- */
-DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP)},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP)},
- /* required last entry */
- {0, }
-};
-
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
-static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
- void *p);
-static struct notifier_block dca_notifier = {
- .notifier_call = ixgbe_notify_dca,
- .next = NULL,
- .priority = 0
-};
-
-#endif
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-#define DEFAULT_DEBUG_LEVEL_SHIFT 3
-
-
-static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
-{
- u32 ctrl_ext;
-
- /* Let firmware take over control of h/w */
- ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
- ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
-}
-
-#ifdef NO_VNIC
-static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
-{
- u32 ctrl_ext;
-
- /* Let firmware know the driver has taken over */
- ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
- ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
-}
-#endif
-
-
-static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbe_hw_stats *hwstats = &adapter->stats;
- int i;
- u32 data;
-
- if ((hw->fc.current_mode != ixgbe_fc_full) &&
- (hw->fc.current_mode != ixgbe_fc_rx_pause))
- return;
-
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
- break;
- default:
- data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
- }
- hwstats->lxoffrxc += data;
-
- /* refill credits (no tx hang) if we received xoff */
- if (!data)
- return;
-
- for (i = 0; i < adapter->num_tx_queues; i++)
- clear_bit(__IXGBE_HANG_CHECK_ARMED,
- &adapter->tx_ring[i]->state);
-}
-
-static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbe_hw_stats *hwstats = &adapter->stats;
- u32 xoff[8] = {0};
- int i;
- bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
-
-#ifdef HAVE_DCBNL_IEEE
- if (adapter->ixgbe_ieee_pfc)
- pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
-
-#endif
- if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
- ixgbe_update_xoff_rx_lfc(adapter);
- return;
- }
-
- /* update stats for each tc, only valid with PFC enabled */
- for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
- break;
- default:
- xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
- }
- hwstats->pxoffrxc[i] += xoff[i];
- }
-
- /* disarm tx queues that have received xoff frames */
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
- u8 tc = tx_ring->dcb_tc;
-
- if ((tc <= 7) && (xoff[tc]))
- clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
- }
-}
-
-
-#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
-
-#ifdef HAVE_8021P_SUPPORT
-/**
- * ixgbe_vlan_stripping_disable - helper to disable vlan tag stripping
- * @adapter: driver data
- */
-void ixgbe_vlan_stripping_disable(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vlnctrl;
- int i;
-
- /* leave vlan tag stripping enabled for DCB */
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- return;
-
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl &= ~IXGBE_VLNCTRL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- for (i = 0; i < adapter->num_rx_queues; i++) {
- u8 reg_idx = adapter->rx_ring[i]->reg_idx;
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- vlnctrl &= ~IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl);
- }
- break;
- default:
- break;
- }
-}
-
-#endif
-/**
- * ixgbe_vlan_stripping_enable - helper to enable vlan tag stripping
- * @adapter: driver data
- */
-void ixgbe_vlan_stripping_enable(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vlnctrl;
- int i;
-
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl |= IXGBE_VLNCTRL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- for (i = 0; i < adapter->num_rx_queues; i++) {
- u8 reg_idx = adapter->rx_ring[i]->reg_idx;
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- vlnctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl);
- }
- break;
- default:
- break;
- }
-}
-
-#ifdef HAVE_VLAN_RX_REGISTER
-void ixgbe_vlan_mode(struct net_device *netdev, struct vlan_group *grp)
-#else
-void ixgbe_vlan_mode(struct net_device *netdev, u32 features)
-#endif
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-#ifdef HAVE_8021P_SUPPORT
- bool enable;
-#endif
-#ifdef HAVE_VLAN_RX_REGISTER
-
- //if (!test_bit(__IXGBE_DOWN, &adapter->state))
- // ixgbe_irq_disable(adapter);
-
- adapter->vlgrp = grp;
-
- //if (!test_bit(__IXGBE_DOWN, &adapter->state))
- // ixgbe_irq_enable(adapter, true, true);
-#endif
-#ifdef HAVE_8021P_SUPPORT
-#ifdef HAVE_VLAN_RX_REGISTER
- enable = (grp || (adapter->flags & IXGBE_FLAG_DCB_ENABLED));
-#else
- enable = !!(features & NETIF_F_HW_VLAN_RX);
-#endif
- if (enable)
- /* enable VLAN tag insert/strip */
- ixgbe_vlan_stripping_enable(adapter);
- else
- /* disable VLAN tag insert/strip */
- ixgbe_vlan_stripping_disable(adapter);
-
-#endif
-}
-
-static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
-{
-#ifdef NETDEV_HW_ADDR_T_MULTICAST
- struct netdev_hw_addr *mc_ptr;
-#else
- struct dev_mc_list *mc_ptr;
-#endif
- struct ixgbe_adapter *adapter = hw->back;
- u8 *addr = *mc_addr_ptr;
-
- *vmdq = adapter->num_vfs;
-
-#ifdef NETDEV_HW_ADDR_T_MULTICAST
- mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]);
- if (mc_ptr->list.next) {
- struct netdev_hw_addr *ha;
-
- ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list);
- *mc_addr_ptr = ha->addr;
- }
-#else
- mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
- if (mc_ptr->next)
- *mc_addr_ptr = mc_ptr->next->dmi_addr;
-#endif
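-	/* the else below pairs with the if retained by the compiled branch above */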
- else
- *mc_addr_ptr = NULL;
-
- return addr;
-}
-
-/**
- * ixgbe_write_mc_addr_list - write multicast addresses to MTA
- * @netdev: network interface device structure
- *
- * Writes multicast address list to the MTA hash table.
- * Returns: -ENOMEM on failure
- * 0 on no addresses written
- * X on writing X addresses to MTA
- **/
-int ixgbe_write_mc_addr_list(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
-#ifdef NETDEV_HW_ADDR_T_MULTICAST
- struct netdev_hw_addr *ha;
-#endif
- u8 *addr_list = NULL;
- int addr_count = 0;
-
- if (!hw->mac.ops.update_mc_addr_list)
- return -ENOMEM;
-
- if (!netif_running(netdev))
- return 0;
-
-
- hw->mac.ops.update_mc_addr_list(hw, NULL, 0,
- ixgbe_addr_list_itr, true);
-
- if (!netdev_mc_empty(netdev)) {
-#ifdef NETDEV_HW_ADDR_T_MULTICAST
- ha = list_first_entry(&netdev->mc.list,
- struct netdev_hw_addr, list);
- addr_list = ha->addr;
-#else
- addr_list = netdev->mc_list->dmi_addr;
-#endif
- addr_count = netdev_mc_count(netdev);
-
- hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
- ixgbe_addr_list_itr, false);
- }
-
-#ifdef CONFIG_PCI_IOV
- //ixgbe_restore_vf_multicasts(adapter);
-#endif
- return addr_count;
-}
-
-
-void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int i;
- for (i = 0; i < hw->mac.num_rar_entries; i++) {
- if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) {
- hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
- adapter->mac_table[i].queue,
- IXGBE_RAH_AV);
- } else {
- hw->mac.ops.clear_rar(hw, i);
- }
- }
-}
-
-void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int i;
- for (i = 0; i < hw->mac.num_rar_entries; i++) {
- if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
- if (adapter->mac_table[i].state &
- IXGBE_MAC_STATE_IN_USE) {
- hw->mac.ops.set_rar(hw, i,
- adapter->mac_table[i].addr,
- adapter->mac_table[i].queue,
- IXGBE_RAH_AV);
- } else {
- hw->mac.ops.clear_rar(hw, i);
- }
- adapter->mac_table[i].state &=
- ~(IXGBE_MAC_STATE_MODIFIED);
- }
- }
-}
-
-int ixgbe_available_rars(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int i, count = 0;
-
- for (i = 0; i < hw->mac.num_rar_entries; i++) {
- if (adapter->mac_table[i].state == 0)
- count++;
- }
- return count;
-}
-
-int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int i;
-
- if (is_zero_ether_addr(addr))
- return 0;
-
- for (i = 0; i < hw->mac.num_rar_entries; i++) {
- if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
- continue;
- adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
- IXGBE_MAC_STATE_IN_USE);
- memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN);
- adapter->mac_table[i].queue = queue;
- ixgbe_sync_mac_table(adapter);
- return i;
- }
- return -ENOMEM;
-}
-
-void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
-{
- int i;
- struct ixgbe_hw *hw = &adapter->hw;
-
- for (i = 0; i < hw->mac.num_rar_entries; i++) {
- adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
- adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- adapter->mac_table[i].queue = 0;
- }
- ixgbe_sync_mac_table(adapter);
-}
-
-void ixgbe_del_mac_filter_by_index(struct ixgbe_adapter *adapter, int index)
-{
- adapter->mac_table[index].state |= IXGBE_MAC_STATE_MODIFIED;
- adapter->mac_table[index].state &= ~IXGBE_MAC_STATE_IN_USE;
- memset(adapter->mac_table[index].addr, 0, ETH_ALEN);
- adapter->mac_table[index].queue = 0;
- ixgbe_sync_mac_table(adapter);
-}
-
-int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8* addr, u16 queue)
-{
- /* search table for addr, if found, set to 0 and sync */
- int i;
- struct ixgbe_hw *hw = &adapter->hw;
-
- if (is_zero_ether_addr(addr))
- return 0;
- for (i = 0; i < hw->mac.num_rar_entries; i++) {
- if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
- adapter->mac_table[i].queue == queue) {
- adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
- adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- adapter->mac_table[i].queue = 0;
- ixgbe_sync_mac_table(adapter);
- return 0;
- }
- }
- return -ENOMEM;
-}
-#ifdef HAVE_SET_RX_MODE
-/**
- * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
- * @netdev: network interface device structure
- *
- * Writes unicast address list to the RAR table.
- * Returns: -ENOMEM on failure/insufficient address space
- * 0 on no addresses written
- * X on writing X addresses to the RAR table
- **/
-int ixgbe_write_uc_addr_list(struct ixgbe_adapter *adapter,
- struct net_device *netdev, unsigned int vfn)
-{
- int count = 0;
-
- /* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
- return -ENOMEM;
-
- if (!netdev_uc_empty(netdev)) {
-#ifdef NETDEV_HW_ADDR_T_UNICAST
- struct netdev_hw_addr *ha;
-#else
- struct dev_mc_list *ha;
-#endif
- netdev_for_each_uc_addr(ha, netdev) {
-#ifdef NETDEV_HW_ADDR_T_UNICAST
- ixgbe_del_mac_filter(adapter, ha->addr, (u16)vfn);
- ixgbe_add_mac_filter(adapter, ha->addr, (u16)vfn);
-#else
- ixgbe_del_mac_filter(adapter, ha->da_addr, (u16)vfn);
- ixgbe_add_mac_filter(adapter, ha->da_addr, (u16)vfn);
-#endif
- count++;
- }
- }
- return count;
-}
-
-#endif
-/**
- * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
- * @netdev: network interface device structure
- *
- * The set_rx_method entry point is called whenever the unicast/multicast
- * address list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper unicast, multicast and
- * promiscuous mode.
- **/
-void ixgbe_set_rx_mode(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
- u32 vlnctrl;
- int count;
-
- /* Check for Promiscuous and All Multicast modes */
- fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-
- /* set all bits that we expect to always be set */
- fctrl |= IXGBE_FCTRL_BAM;
- fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
- fctrl |= IXGBE_FCTRL_PMCF;
-
- /* clear the bits we are changing the status of */
- fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
-
- if (netdev->flags & IFF_PROMISC) {
- hw->addr_ctrl.user_set_promisc = true;
- fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- vmolr |= IXGBE_VMOLR_MPE;
- } else {
- if (netdev->flags & IFF_ALLMULTI) {
- fctrl |= IXGBE_FCTRL_MPE;
- vmolr |= IXGBE_VMOLR_MPE;
- } else {
- /*
- * Write addresses to the MTA; if the attempt fails
- * then we should just turn on promiscuous mode so
- * that we can at least receive multicast traffic
- */
- count = ixgbe_write_mc_addr_list(netdev);
- if (count < 0) {
- fctrl |= IXGBE_FCTRL_MPE;
- vmolr |= IXGBE_VMOLR_MPE;
- } else if (count) {
- vmolr |= IXGBE_VMOLR_ROMPE;
- }
- }
-#ifdef NETIF_F_HW_VLAN_TX
- /* enable hardware vlan filtering */
- vlnctrl |= IXGBE_VLNCTRL_VFE;
-#endif
- hw->addr_ctrl.user_set_promisc = false;
-#ifdef HAVE_SET_RX_MODE
- /*
- * Write addresses to available RAR registers, if there is not
- * sufficient space to store all the addresses then enable
- * unicast promiscuous mode
- */
- count = ixgbe_write_uc_addr_list(adapter, netdev,
- adapter->num_vfs);
- if (count < 0) {
- fctrl |= IXGBE_FCTRL_UPE;
- vmolr |= IXGBE_VMOLR_ROPE;
- }
-#endif
- }
-
- if (hw->mac.type != ixgbe_mac_82598EB) {
- vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
- ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_ROPE);
- IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-}
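-
-/*
- * The policy above degrades gracefully: exact multicast filters are
- * preferred, and only if the MTA write fails does the code fall back
- * to multicast-promiscuous (MPE), so multicast traffic is never
- * silently dropped; the unicast RAR path is guarded the same way with
- * UPE. A compact sketch of that decision (demo-only encoding, bit 0
- * for unicast promisc and bit 1 for multicast promisc):
- */
-static unsigned int demo_rx_mode_bits(int promisc, int allmulti, int mc_written)
-{
-	unsigned int bits = 0;
-
-	if (promisc)
-		return 0x3; /* both promiscuous bits */
-	if (allmulti || mc_written < 0)
-		bits |= 0x2; /* MTA write failed, fall back to MPE */
-	return bits;
-}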
-
-
-
-
-
-
-
-
-/* Additional bittime to account for IXGBE framing */
-#define IXGBE_ETH_FRAMING 20
-
-/*
- * ixgbe_hpbthresh - calculate high water mark for flow control
- *
- * @adapter: board private structure to calculate for
- * @pb: packet buffer to calculate
- */
-static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct net_device *dev = adapter->netdev;
- int link, tc, kb, marker;
- u32 dv_id, rx_pba;
-
- /* Calculate max LAN frame size */
- tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
-
-#ifdef IXGBE_FCOE
- /* FCoE traffic class uses FCOE jumbo frames */
- if (dev->features & NETIF_F_FCOE_MTU) {
- int fcoe_pb = 0;
-
- fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
-
- if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE)
- tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
- }
-#endif
-
- /* Calculate delay value for device */
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- dv_id = IXGBE_DV_X540(link, tc);
- break;
- default:
- dv_id = IXGBE_DV(link, tc);
- break;
- }
-
- /* Loopback switch introduces additional latency */
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- dv_id += IXGBE_B2BT(tc);
-
- /* Delay value is calculated in bit times; convert to KB */
- kb = IXGBE_BT2KB(dv_id);
- rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
-
- marker = rx_pba - kb;
-
- /* It is possible that the packet buffer is not large enough
- * to provide the required headroom. In this case warn the
- * user and do the best we can.
- */
- if (marker < 0) {
- e_warn(drv, "Packet Buffer(%i) can not provide enough "
- "headroom to support flow control. "
- "Decrease MTU or number of traffic classes\n", pb);
- marker = tc + 1;
- }
-
- return marker;
-}
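-
-/*
- * The headroom math above works in bit times: dv_id is a worst-case
- * delay in bits, IXGBE_BT2KB() converts it to KB, and the high water
- * mark is whatever remains of the packet buffer after that headroom.
- * A standalone sketch, assuming the usual rounding-up definition of
- * the bit-times-to-KB conversion (8 * 1024 bits per KB):
- */
-static unsigned int demo_bt2kb(unsigned int bit_times)
-{
-	return (bit_times + (8 * 1024 - 1)) / (8 * 1024);
-}
-
-static int demo_high_water(unsigned int rx_pba_kb, unsigned int delay_bits)
-{
-	/* e.g. a 512KB buffer and 819200 delay bits: 512 - 100 = 412KB */
-	return (int)rx_pba_kb - (int)demo_bt2kb(delay_bits);
-}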
-
-/*
- * ixgbe_lpbthresh - calculate low water mark for flow control
- *
- * @adapter: board private structure to calculate for
- * @pb: packet buffer to calculate
- */
-static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct net_device *dev = adapter->netdev;
- int tc;
- u32 dv_id;
-
- /* Calculate max LAN frame size */
- tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
-
-#ifdef IXGBE_FCOE
- /* FCoE traffic class uses FCOE jumbo frames */
- if (dev->features & NETIF_F_FCOE_MTU) {
- int fcoe_pb = 0;
-
- fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
-
- if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE)
- tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
- }
-#endif
-
- /* Calculate delay value for device */
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- dv_id = IXGBE_LOW_DV_X540(tc);
- break;
- default:
- dv_id = IXGBE_LOW_DV(tc);
- break;
- }
-
- /* Delay value is calculated in bit times; convert to KB */
- return IXGBE_BT2KB(dv_id);
-}
-
-/*
- * ixgbe_pbthresh_setup - calculate and set up high and low water marks
- */
-static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int num_tc = netdev_get_num_tc(adapter->netdev);
- int i;
-
- if (!num_tc)
- num_tc = 1;
- if (num_tc > IXGBE_DCB_MAX_TRAFFIC_CLASS)
- num_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
-
- for (i = 0; i < num_tc; i++) {
- hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
- hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
-
- /* Low water marks must not be larger than high water marks */
- if (hw->fc.low_water[i] > hw->fc.high_water[i])
- hw->fc.low_water[i] = 0;
- }
-
- for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++)
- hw->fc.high_water[i] = 0;
-}
-
-
-
-#ifdef NO_VNIC
-static void ixgbe_configure(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
-
- ixgbe_configure_pb(adapter);
- ixgbe_configure_dcb(adapter);
-
- ixgbe_set_rx_mode(adapter->netdev);
-#ifdef NETIF_F_HW_VLAN_TX
- ixgbe_restore_vlan(adapter);
-#endif
-
-#ifdef IXGBE_FCOE
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- ixgbe_configure_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
- if (adapter->hw.mac.type != ixgbe_mac_82598EB)
- hw->mac.ops.disable_sec_rx_path(hw);
-
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- ixgbe_init_fdir_signature_82599(&adapter->hw,
- adapter->fdir_pballoc);
- } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
- ixgbe_init_fdir_perfect_82599(&adapter->hw,
- adapter->fdir_pballoc);
- ixgbe_fdir_filter_restore(adapter);
- }
-
- if (adapter->hw.mac.type != ixgbe_mac_82598EB)
- hw->mac.ops.enable_sec_rx_path(hw);
-
- ixgbe_configure_virtualization(adapter);
-
- ixgbe_configure_tx(adapter);
- ixgbe_configure_rx(adapter);
-}
-#endif
-
-static bool ixgbe_is_sfp(struct ixgbe_hw *hw)
-{
- switch (hw->phy.type) {
- case ixgbe_phy_sfp_avago:
- case ixgbe_phy_sfp_ftl:
- case ixgbe_phy_sfp_intel:
- case ixgbe_phy_sfp_unknown:
- case ixgbe_phy_sfp_passive_tyco:
- case ixgbe_phy_sfp_passive_unknown:
- case ixgbe_phy_sfp_active_unknown:
- case ixgbe_phy_sfp_ftl_active:
- return true;
- case ixgbe_phy_nl:
- if (hw->mac.type == ixgbe_mac_82598EB)
- return true;
- /* fall through */
- default:
- return false;
- }
-}
-
-
-/**
- * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
- * @adapter: board private structure
- *
- * On a reset we need to clear out the VF stats or accounting gets
- * messed up because they're not clear on read.
- **/
-void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int i;
-
- for (i = 0; i < adapter->num_vfs; i++) {
- adapter->vfinfo[i].last_vfstats.gprc =
- IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i));
- adapter->vfinfo[i].saved_rst_vfstats.gprc +=
- adapter->vfinfo[i].vfstats.gprc;
- adapter->vfinfo[i].vfstats.gprc = 0;
- adapter->vfinfo[i].last_vfstats.gptc =
- IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i));
- adapter->vfinfo[i].saved_rst_vfstats.gptc +=
- adapter->vfinfo[i].vfstats.gptc;
- adapter->vfinfo[i].vfstats.gptc = 0;
- adapter->vfinfo[i].last_vfstats.gorc =
- IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i));
- adapter->vfinfo[i].saved_rst_vfstats.gorc +=
- adapter->vfinfo[i].vfstats.gorc;
- adapter->vfinfo[i].vfstats.gorc = 0;
- adapter->vfinfo[i].last_vfstats.gotc =
- IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i));
- adapter->vfinfo[i].saved_rst_vfstats.gotc +=
- adapter->vfinfo[i].vfstats.gotc;
- adapter->vfinfo[i].vfstats.gotc = 0;
- adapter->vfinfo[i].last_vfstats.mprc =
- IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i));
- adapter->vfinfo[i].saved_rst_vfstats.mprc +=
- adapter->vfinfo[i].vfstats.mprc;
- adapter->vfinfo[i].vfstats.mprc = 0;
- }
-}
-
-
-
-void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
-{
-#ifdef NO_VNIC
- WARN_ON(in_interrupt());
- /* put off any impending NetWatchDogTimeout */
- adapter->netdev->trans_start = jiffies;
-
- while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
- ixgbe_down(adapter);
- /*
- * If SR-IOV is enabled, wait a bit before bringing the adapter
- * back up to give the VFs time to respond to the reset. The
- * two second wait is based upon the watchdog timer cycle in
- * the VF driver.
- */
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- msleep(2000);
- ixgbe_up(adapter);
- clear_bit(__IXGBE_RESETTING, &adapter->state);
-#endif
-}
-
-void ixgbe_up(struct ixgbe_adapter *adapter)
-{
- /* hardware has been reset, we need to reload some things */
- //ixgbe_configure(adapter);
-
- //ixgbe_up_complete(adapter);
-}
-
-void ixgbe_reset(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- int err;
-
- /* lock SFP init bit to prevent race conditions with the watchdog */
- while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
- usleep_range(1000, 2000);
-
- /* clear all SFP and link config related flags while holding SFP_INIT */
- adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
- IXGBE_FLAG2_SFP_NEEDS_RESET);
- adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
-
- err = hw->mac.ops.init_hw(hw);
- switch (err) {
- case 0:
- case IXGBE_ERR_SFP_NOT_PRESENT:
- case IXGBE_ERR_SFP_NOT_SUPPORTED:
- break;
- case IXGBE_ERR_MASTER_REQUESTS_PENDING:
- e_dev_err("master disable timed out\n");
- break;
- case IXGBE_ERR_EEPROM_VERSION:
- /* We are running on a pre-production device, log a warning */
- e_dev_warn("This device is a pre-production adapter/LOM. "
- "Please be aware there may be issues associated "
- "with your hardware. If you are experiencing "
- "problems please contact your Intel or hardware "
- "representative who provided you with this "
- "hardware.\n");
- break;
- default:
- e_dev_err("Hardware Error: %d\n", err);
- }
-
- clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
-
- ixgbe_flush_sw_mac_table(adapter);
- memcpy(&adapter->mac_table[0].addr, hw->mac.perm_addr,
- netdev->addr_len);
- adapter->mac_table[0].queue = adapter->num_vfs;
- adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
- IXGBE_MAC_STATE_IN_USE);
- hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
- adapter->mac_table[0].queue,
- IXGBE_RAH_AV);
-}
-
-
-
-
-
-
-void ixgbe_down(struct ixgbe_adapter *adapter)
-{
-#ifdef NO_VNIC
- struct net_device *netdev = adapter->netdev;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 rxctrl;
- int i;
-
- /* signal that we are down to the interrupt handler */
- set_bit(__IXGBE_DOWN, &adapter->state);
-
- /* disable receives */
- rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
-
- /* disable all enabled rx queues */
- for (i = 0; i < adapter->num_rx_queues; i++)
- /* this call also flushes the previous write */
- ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
-
- usleep_range(10000, 20000);
-
- netif_tx_stop_all_queues(netdev);
-
- /* call carrier off first to avoid false dev_watchdog timeouts */
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
-
- ixgbe_irq_disable(adapter);
-
- ixgbe_napi_disable_all(adapter);
-
- adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
- IXGBE_FLAG2_RESET_REQUESTED);
- adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
-
- del_timer_sync(&adapter->service_timer);
-
- if (adapter->num_vfs) {
- /* Clear EITR Select mapping */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
-
- /* Mark all the VFs as inactive */
- for (i = 0 ; i < adapter->num_vfs; i++)
- adapter->vfinfo[i].clear_to_send = 0;
-
- /* ping all the active vfs to let them know we are going down */
- ixgbe_ping_all_vfs(adapter);
-
- /* Disable all VFTE/VFRE TX/RX */
- ixgbe_disable_tx_rx(adapter);
- }
-
- /* disable transmits in the hardware now that interrupts are off */
- for (i = 0; i < adapter->num_tx_queues; i++) {
- u8 reg_idx = adapter->tx_ring[i]->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
- }
-
- /* Disable the Tx DMA engine on 82599 and X540 */
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
- (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
- ~IXGBE_DMATXCTL_TE));
- break;
- default:
- break;
- }
-
-#ifdef HAVE_PCI_ERS
- if (!pci_channel_offline(adapter->pdev))
-#endif
- ixgbe_reset(adapter);
- /* power down the optics */
- if ((hw->phy.multispeed_fiber) ||
- ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- (hw->mac.type == ixgbe_mac_82599EB)))
- ixgbe_disable_tx_laser(hw);
-
- ixgbe_clean_all_tx_rings(adapter);
- ixgbe_clean_all_rx_rings(adapter);
-
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
- /* since we reset the hardware DCA settings were cleared */
- ixgbe_setup_dca(adapter);
-#endif
-
-#endif /* NO_VNIC */
-}
-
-#ifndef NO_VNIC
-
-#undef IXGBE_FCOE
-
-/* Artificial max queue cap per traffic class in DCB mode */
-#define DCB_QUEUE_CAP 8
-
-/**
- * ixgbe_set_dcb_queues: Allocate queues for a DCB-enabled device
- * @adapter: board private structure to initialize
- *
- * When DCB (Data Center Bridging) is enabled, allocate queues for
- * each traffic class. If multiqueue isn't available, then abort DCB
- * initialization.
- *
- * This function handles all combinations of DCB, RSS, and FCoE.
- *
- **/
-static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
-{
- int tcs;
-#ifdef HAVE_MQPRIO
- int rss_i, i, offset = 0;
- struct net_device *dev = adapter->netdev;
-
- /* Map queue offset and counts onto allocated tx queues */
- tcs = netdev_get_num_tc(dev);
-
- if (!tcs)
- return false;
-
- rss_i = min_t(int, dev->num_tx_queues / tcs, num_online_cpus());
-
- if (rss_i > DCB_QUEUE_CAP)
- rss_i = DCB_QUEUE_CAP;
-
- for (i = 0; i < tcs; i++) {
- netdev_set_tc_queue(dev, i, rss_i, offset);
- offset += rss_i;
- }
-
- adapter->num_tx_queues = rss_i * tcs;
- adapter->num_rx_queues = rss_i * tcs;
-
-#ifdef IXGBE_FCOE
- /* FCoE-enabled queues require special configuration, indexed
- * by feature-specific indices and a mask. Here we map FCoE
- * indices onto the DCB queue pairs, allowing FCoE to own its
- * configuration later.
- */
-
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
- struct ixgbe_ring_feature *f;
- int tc;
- u8 prio_tc[IXGBE_DCB_MAX_USER_PRIORITY] = {0};
-
- ixgbe_dcb_unpack_map_cee(&adapter->dcb_cfg,
- IXGBE_DCB_TX_CONFIG,
- prio_tc);
- tc = prio_tc[adapter->fcoe.up];
-
- f = &adapter->ring_feature[RING_F_FCOE];
- f->indices = min_t(int, rss_i, f->indices);
- f->mask = rss_i * tc;
- }
-#endif /* IXGBE_FCOE */
-#else
- if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- return false;
-
- /* Enable one Queue per traffic class */
- tcs = adapter->tc;
- if (!tcs)
- return false;
-
-#ifdef IXGBE_FCOE
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
- struct ixgbe_ring_feature *f;
- int tc = netdev_get_prio_tc_map(adapter->netdev,
- adapter->fcoe.up);
-
- f = &adapter->ring_feature[RING_F_FCOE];
-
- /*
- * We have at most 8 queues for FCoE, where 8 is the
- * FCoE redirection table size. We must also share
- * ring resources with network traffic, so if the FCoE TC is
- * 4 or greater and we are in 8 TC mode we can only use
- * 7 queues.
- */
- if ((tcs > 4) && (tc >= 4) && (f->indices > 7))
- f->indices = 7;
-
- f->indices = min_t(int, num_online_cpus(), f->indices);
- f->mask = tcs;
-
- adapter->num_rx_queues = f->indices + tcs;
- adapter->num_tx_queues = f->indices + tcs;
-
- return true;
- }
-
-#endif /* IXGBE_FCOE */
- adapter->num_rx_queues = tcs;
- adapter->num_tx_queues = tcs;
-#endif /* HAVE_MQPRIO */
-
- return true;
-}
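-
-/*
- * The HAVE_MQPRIO path above partitions the netdev's tx queues into
- * one contiguous block of rss_i queues per traffic class, so TC i
- * owns queues [i * rss_i, (i + 1) * rss_i). A standalone sketch of
- * that mapping (demo_* names are illustrative only):
- */
-static int demo_tc_first_queue(int tc, int num_queues, int tcs, int cpus)
-{
-	int rss_i = num_queues / tcs;
-
-	if (rss_i > cpus)
-		rss_i = cpus;
-	if (rss_i > 8) /* same idea as DCB_QUEUE_CAP */
-		rss_i = 8;
-
-	/* e.g. 64 queues, 8 TCs, 16 CPUs: rss_i = 8, so TC 3 starts at 24 */
-	return tc * rss_i;
-}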
-
-/**
- * ixgbe_set_vmdq_queues: Allocate queues for VMDq devices
- * @adapter: board private structure to initialize
- *
- * When VMDq (Virtual Machine Device Queues) is enabled, allocate queues
- * and VM pools where appropriate. If RSS is available, then also try and
- * enable RSS and map accordingly.
- *
- **/
-static bool ixgbe_set_vmdq_queues(struct ixgbe_adapter *adapter)
-{
- int vmdq_i = adapter->ring_feature[RING_F_VMDQ].indices;
- int vmdq_m = 0;
- int rss_i = adapter->ring_feature[RING_F_RSS].indices;
- unsigned long i;
- int rss_shift;
- bool ret = false;
-
-
- switch (adapter->flags & (IXGBE_FLAG_RSS_ENABLED
- | IXGBE_FLAG_DCB_ENABLED
- | IXGBE_FLAG_VMDQ_ENABLED)) {
-
- case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED):
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- vmdq_i = min((int)IXGBE_MAX_VMDQ_INDICES, vmdq_i);
- if (vmdq_i > 32)
- rss_i = 2;
- else
- rss_i = 4;
- i = rss_i;
- rss_shift = find_first_bit(&i, sizeof(i) * 8);
- vmdq_m = ((IXGBE_MAX_VMDQ_INDICES - 1) <<
- rss_shift) & (MAX_RX_QUEUES - 1);
- break;
- default:
- break;
- }
- adapter->num_rx_queues = vmdq_i * rss_i;
- adapter->num_tx_queues = min((int)MAX_TX_QUEUES, vmdq_i * rss_i);
- ret = true;
- break;
-
- case (IXGBE_FLAG_VMDQ_ENABLED):
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82598EB:
- vmdq_m = (IXGBE_MAX_VMDQ_INDICES - 1);
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- vmdq_m = (IXGBE_MAX_VMDQ_INDICES - 1) << 1;
- break;
- default:
- break;
- }
- adapter->num_rx_queues = vmdq_i;
- adapter->num_tx_queues = vmdq_i;
- ret = true;
- break;
-
- default:
- ret = false;
- goto vmdq_queues_out;
- }
-
- if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
- adapter->num_rx_pools = vmdq_i;
- adapter->num_rx_queues_per_pool = adapter->num_rx_queues /
- vmdq_i;
- } else {
- adapter->num_rx_pools = adapter->num_rx_queues;
- adapter->num_rx_queues_per_pool = 1;
- }
- /* save the mask for later use */
- adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
-vmdq_queues_out:
- return ret;
-}
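-
-/*
- * In the VMDq + RSS case above a queue number is a concatenation of
- * {pool index, rss index}: rss_i is a power of two, find_first_bit()
- * on it yields log2(rss_i), and shifting the pool mask left by that
- * amount selects the pool bits of a queue number. A standalone sketch
- * (__builtin_ctz stands in for find_first_bit on a power of two):
- */
-static unsigned int demo_pool_of_queue(unsigned int queue, unsigned int rss_i)
-{
-	unsigned int rss_shift = (unsigned int)__builtin_ctz(rss_i);
-
-	/* e.g. rss_i = 4: shift = 2, so queue 13 belongs to pool 13 >> 2 = 3 */
-	return queue >> rss_shift;
-}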
-
-/**
- * ixgbe_set_rss_queues: Allocate queues for RSS
- * @adapter: board private structure to initialize
- *
- * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
- * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
- *
- **/
-static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_ring_feature *f;
-
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- return false;
- }
-
- /* set mask for 16 queue limit of RSS */
- f = &adapter->ring_feature[RING_F_RSS];
- f->mask = 0xF;
-
- /*
- * Use Flow Director in addition to RSS to ensure the best
- * distribution of flows across cores, even when an FDIR flow
- * isn't matched.
- */
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- f = &adapter->ring_feature[RING_F_FDIR];
-
- f->indices = min_t(int, num_online_cpus(), f->indices);
- f->mask = 0;
- }
-
- adapter->num_rx_queues = f->indices;
-#ifdef HAVE_TX_MQ
- adapter->num_tx_queues = f->indices;
-#endif
-
- return true;
-}
-
-#ifdef IXGBE_FCOE
-/**
- * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
- * @adapter: board private structure to initialize
- *
- * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
- * The ring feature mask is not used as a mask for FCoE, as it can take any 8
- * rx queues out of the max number of rx queues; instead, it is used as the
- * index of the first rx queue used by FCoE.
- *
- **/
-static bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_ring_feature *f;
-
- if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
- return false;
-
- ixgbe_set_rss_queues(adapter);
-
- f = &adapter->ring_feature[RING_F_FCOE];
- f->indices = min_t(int, num_online_cpus(), f->indices);
-
- /* adding FCoE queues */
- f->mask = adapter->num_rx_queues;
- adapter->num_rx_queues += f->indices;
- adapter->num_tx_queues += f->indices;
-
- return true;
-}
-
-#endif /* IXGBE_FCOE */
-/*
- * ixgbe_set_num_queues: Allocate queues for device, feature dependent
- * @adapter: board private structure to initialize
- *
- * This is the top level queue allocation routine. The order here is very
- * important, starting with the "most" number of features turned on at once,
- * and ending with the smallest set of features. This way large combinations
- * can be allocated if they're turned on, and smaller combinations are the
- * fallthrough conditions.
- *
- **/
-static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
-{
- /* Start with base case */
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
- adapter->num_rx_pools = adapter->num_rx_queues;
- adapter->num_rx_queues_per_pool = 1;
-
- if (ixgbe_set_vmdq_queues(adapter))
- return;
-
- if (ixgbe_set_dcb_queues(adapter))
- return;
-
-#ifdef IXGBE_FCOE
- if (ixgbe_set_fcoe_queues(adapter))
- return;
-
-#endif /* IXGBE_FCOE */
- ixgbe_set_rss_queues(adapter);
-}
-
-#endif
-
-
-/**
- * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
- * @adapter: board private structure to initialize
- *
- * ixgbe_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
- **/
-static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct pci_dev *pdev = adapter->pdev;
- int err;
-
- /* PCI config space info */
-
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_device_id = pdev->subsystem_device;
-
- err = ixgbe_init_shared_code(hw);
- if (err) {
- e_err(probe, "init_shared_code failed: %d\n", err);
- goto out;
- }
- adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
- hw->mac.num_rar_entries,
- GFP_ATOMIC);
- /* Set capability flags */
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- adapter->flags |= IXGBE_FLAG_MSI_CAPABLE |
- IXGBE_FLAG_MSIX_CAPABLE |
- IXGBE_FLAG_MQ_CAPABLE |
- IXGBE_FLAG_RSS_CAPABLE;
- adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
- adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
-#endif
- adapter->flags &= ~IXGBE_FLAG_SRIOV_CAPABLE;
- adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
-
- if (hw->device_id == IXGBE_DEV_ID_82598AT)
- adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
-
- adapter->max_msix_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82598;
- break;
- case ixgbe_mac_X540:
- adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
- /* fall through */
- case ixgbe_mac_82599EB:
- adapter->flags |= IXGBE_FLAG_MSI_CAPABLE |
- IXGBE_FLAG_MSIX_CAPABLE |
- IXGBE_FLAG_MQ_CAPABLE |
- IXGBE_FLAG_RSS_CAPABLE;
- adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
- adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
-#endif
- adapter->flags |= IXGBE_FLAG_SRIOV_CAPABLE;
- adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
-#ifdef IXGBE_FCOE
- adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
- adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
- adapter->ring_feature[RING_F_FCOE].indices = 0;
-#ifdef CONFIG_DCB
- /* Default traffic class to use for FCoE */
- adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
- adapter->fcoe.up = IXGBE_FCOE_DEFTC;
- adapter->fcoe.up_set = IXGBE_FCOE_DEFTC;
-#endif
-#endif
- if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
- adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
-#ifndef IXGBE_NO_SMART_SPEED
- hw->phy.smart_speed = ixgbe_smart_speed_on;
-#else
- hw->phy.smart_speed = ixgbe_smart_speed_off;
-#endif
- adapter->max_msix_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82599;
- break;
- default:
- break;
- }
-
- /* n-tuple support exists, always init our spinlock */
- //spin_lock_init(&adapter->fdir_perfect_lock);
-
- if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) {
- int j;
- struct ixgbe_dcb_tc_config *tc;
- int dcb_i = IXGBE_DCB_MAX_TRAFFIC_CLASS;
-
-
- adapter->dcb_cfg.num_tcs.pg_tcs = dcb_i;
- adapter->dcb_cfg.num_tcs.pfc_tcs = dcb_i;
- for (j = 0; j < dcb_i; j++) {
- tc = &adapter->dcb_cfg.tc_config[j];
- tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = 0;
- tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 100 / dcb_i;
- tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = 0;
- tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 100 / dcb_i;
- tc->pfc = ixgbe_dcb_pfc_disabled;
- if (j == 0) {
- /* total of all TCs bandwidth needs to be 100 */
- tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent +=
- 100 % dcb_i;
- tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent +=
- 100 % dcb_i;
- }
- }
-
- /* Initialize default user to priority mapping, UPx->TC0 */
- tc = &adapter->dcb_cfg.tc_config[0];
- tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
- tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
-
- adapter->dcb_cfg.bw_percentage[IXGBE_DCB_TX_CONFIG][0] = 100;
- adapter->dcb_cfg.bw_percentage[IXGBE_DCB_RX_CONFIG][0] = 100;
- adapter->dcb_cfg.rx_pba_cfg = ixgbe_dcb_pba_equal;
- adapter->dcb_cfg.pfc_mode_enable = false;
- adapter->dcb_cfg.round_robin_enable = false;
- adapter->dcb_set_bitmap = 0x00;
-#ifdef CONFIG_DCB
- adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
-#endif /* CONFIG_DCB */
-
- if (hw->mac.type == ixgbe_mac_X540) {
- adapter->dcb_cfg.num_tcs.pg_tcs = 4;
- adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
- }
- }
-#ifdef CONFIG_DCB
- /* XXX does this need to be initialized even w/o DCB? */
- //memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
- // sizeof(adapter->temp_dcb_cfg));
-
-#endif
- //if (hw->mac.type == ixgbe_mac_82599EB ||
- // hw->mac.type == ixgbe_mac_X540)
- // hw->mbx.ops.init_params(hw);
-
- /* default flow control settings */
- hw->fc.requested_mode = ixgbe_fc_full;
- hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
-
- adapter->last_lfc_mode = hw->fc.current_mode;
- ixgbe_pbthresh_setup(adapter);
- hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
- hw->fc.send_xon = true;
- hw->fc.disable_fc_autoneg = false;
-
- /* set default ring sizes */
- adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
- adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
-
- /* set default work limits */
- adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
- adapter->rx_work_limit = IXGBE_DEFAULT_RX_WORK;
-
- set_bit(__IXGBE_DOWN, &adapter->state);
-out:
- return err;
-}
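-
-/*
- * Note how the DCB defaults above keep each bandwidth column summing
- * to exactly 100: every TC gets the integer share 100 / dcb_i and TC0
- * absorbs the remainder 100 % dcb_i. With 8 TCs that is 12% each plus
- * 4% extra for TC0 (16 + 7 * 12 = 100). As a standalone sketch:
- */
-static int demo_tc_bw_percent(int tc, int num_tcs)
-{
-	int share = 100 / num_tcs;
-
-	return tc == 0 ? share + 100 % num_tcs : share;
-}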
-
-/**
- * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
- * @tx_ring: tx descriptor ring (for a specific queue) to setup
- *
- * Return 0 on success, negative on failure
- **/
-int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
-{
- struct device *dev = tx_ring->dev;
- //int orig_node = dev_to_node(dev);
- int numa_node = -1;
- int size;
-
- size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
-
- if (tx_ring->q_vector)
- numa_node = tx_ring->q_vector->numa_node;
-
- tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
- if (!tx_ring->tx_buffer_info)
- tx_ring->tx_buffer_info = vzalloc(size);
- if (!tx_ring->tx_buffer_info)
- goto err;
-
- /* round up to nearest 4K */
- tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
- tx_ring->size = ALIGN(tx_ring->size, 4096);
-
- //set_dev_node(dev, numa_node);
- //tx_ring->desc = dma_alloc_coherent(dev,
- // tx_ring->size,
- // &tx_ring->dma,
- // GFP_KERNEL);
- //set_dev_node(dev, orig_node);
- //if (!tx_ring->desc)
- // tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
- // &tx_ring->dma, GFP_KERNEL);
- //if (!tx_ring->desc)
- // goto err;
-
- return 0;
-
-err:
- vfree(tx_ring->tx_buffer_info);
- tx_ring->tx_buffer_info = NULL;
- dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
- return -ENOMEM;
-}
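-
-/*
- * Two details above are worth noting: the buffer-info array is
- * allocated NUMA-local first (vzalloc_node) with a plain vzalloc
- * fallback, and the descriptor ring size is rounded up to a 4K
- * multiple. The usual power-of-two round-up behind ALIGN(), as a
- * standalone sketch:
- */
-static unsigned long demo_align_4k(unsigned long size)
-{
-	/* e.g. 5000 -> (5000 + 4095) & ~4095UL = 8192 */
-	return (size + 4095UL) & ~4095UL;
-}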
-
-/**
- * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not). It is the
- * caller's duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
-{
- int i, err = 0;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
- if (!err)
- continue;
- e_err(probe, "Allocation for Tx Queue %u failed\n", i);
- break;
- }
-
- return err;
-}
-
-/**
- * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
- * @rx_ring: rx descriptor ring (for a specific queue) to setup
- *
- * Returns 0 on success, negative on failure
- **/
-int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
-{
- struct device *dev = rx_ring->dev;
- //int orig_node = dev_to_node(dev);
- int numa_node = -1;
- int size;
-
- size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
-
- if (rx_ring->q_vector)
- numa_node = rx_ring->q_vector->numa_node;
-
- rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
- if (!rx_ring->rx_buffer_info)
- rx_ring->rx_buffer_info = vzalloc(size);
- if (!rx_ring->rx_buffer_info)
- goto err;
-
- /* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
- rx_ring->size = ALIGN(rx_ring->size, 4096);
-
-#ifdef NO_VNIC
- set_dev_node(dev, numa_node);
- rx_ring->desc = dma_alloc_coherent(dev,
- rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL);
- set_dev_node(dev, orig_node);
- if (!rx_ring->desc)
- rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
- &rx_ring->dma, GFP_KERNEL);
- if (!rx_ring->desc)
- goto err;
-
-#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
- ixgbe_init_rx_page_offset(rx_ring);
-
-#endif
-
-#endif /* NO_VNIC */
- return 0;
-err:
- vfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
- dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
- return -ENOMEM;
-}
-
-/**
- * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not). It is the
- * caller's duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
-{
- int i, err = 0;
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
- if (!err)
- continue;
- e_err(probe, "Allocation for Rx Queue %u failed\n", i);
- break;
- }
-
- return err;
-}
-
-/**
- * ixgbe_free_tx_resources - Free Tx Resources per Queue
- * @tx_ring: Tx descriptor ring for a specific queue
- *
- * Free all transmit software resources
- **/
-void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
-{
- //ixgbe_clean_tx_ring(tx_ring);
-
- vfree(tx_ring->tx_buffer_info);
- tx_ring->tx_buffer_info = NULL;
-
- /* if not set, then don't free */
- if (!tx_ring->desc)
- return;
-
- //dma_free_coherent(tx_ring->dev, tx_ring->size,
- // tx_ring->desc, tx_ring->dma);
-
- tx_ring->desc = NULL;
-}
-
-/**
- * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
- * @adapter: board private structure
- *
- * Free all transmit software resources
- **/
-static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++)
- if (adapter->tx_ring[i]->desc)
- ixgbe_free_tx_resources(adapter->tx_ring[i]);
-}
-
-/**
- * ixgbe_free_rx_resources - Free Rx Resources
- * @rx_ring: ring to clean the resources from
- *
- * Free all receive software resources
- **/
-void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
-{
- //ixgbe_clean_rx_ring(rx_ring);
-
- vfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
-
- /* if not set, then don't free */
- if (!rx_ring->desc)
- return;
-
- //dma_free_coherent(rx_ring->dev, rx_ring->size,
- // rx_ring->desc, rx_ring->dma);
-
- rx_ring->desc = NULL;
-}
-
-/**
- * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
- * @adapter: board private structure
- *
- * Free all receive software resources
- **/
-static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->rx_ring[i]->desc)
- ixgbe_free_rx_resources(adapter->rx_ring[i]);
-}
-
-
-/**
- * ixgbe_open - Called when a network interface is made active
- * @netdev: network interface device structure
- *
- * Returns 0 on success, negative value on failure
- *
- * The open entry point is called when a network interface is made
- * active by the system (IFF_UP). At this point all resources needed
- * for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the watchdog timer is started,
- * and the stack is notified that the interface is ready.
- **/
-//static
-int ixgbe_open(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int err;
-
- /* disallow open during test */
- if (test_bit(__IXGBE_TESTING, &adapter->state))
- return -EBUSY;
-
- netif_carrier_off(netdev);
-
- /* allocate transmit descriptors */
- err = ixgbe_setup_all_tx_resources(adapter);
- if (err)
- goto err_setup_tx;
-
- /* allocate receive descriptors */
- err = ixgbe_setup_all_rx_resources(adapter);
- if (err)
- goto err_setup_rx;
-
-#ifdef NO_VNIC
- ixgbe_configure(adapter);
-
- err = ixgbe_request_irq(adapter);
- if (err)
- goto err_req_irq;
-
- ixgbe_up_complete(adapter);
-
-err_req_irq:
-#else
- return 0;
-#endif
-err_setup_rx:
- ixgbe_free_all_rx_resources(adapter);
-err_setup_tx:
- ixgbe_free_all_tx_resources(adapter);
- ixgbe_reset(adapter);
-
- return err;
-}
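-
-/*
- * ixgbe_open() above uses the classic goto-unwind idiom: the error
- * labels are ordered so a failure at step N jumps past its own
- * cleanup and falls through the teardown of steps N-1..1. A minimal
- * standalone sketch of the shape (fail_step picks the failure point):
- */
-static int demo_open(int fail_step)
-{
-	int err = 0;
-
-	if (fail_step == 1) {
-		err = -1;
-		goto out;
-	}
-	/* step 1 (think: tx resources) done */
-	if (fail_step == 2) {
-		err = -1;
-		goto undo_step1;
-	}
-	/* step 2 (think: rx resources) done */
-	return 0;
-
-undo_step1:
-	/* tear down step 1's work, then fall through */
-out:
-	return err;
-}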
-
-/**
- * ixgbe_close - Disables a network interface
- * @netdev: network interface device structure
- *
- * Returns 0, this is not allowed to fail
- *
- * The close entry point is called when an interface is de-activated
- * by the OS. The hardware is still under the drivers control, but
- * needs to be disabled. A global MAC reset is issued to stop the
- * hardware, and all transmit and receive resources are freed.
- **/
-//static
-int ixgbe_close(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- //ixgbe_down(adapter);
- //ixgbe_free_irq(adapter);
-
- //ixgbe_fdir_filter_exit(adapter);
-
- //ixgbe_free_all_tx_resources(adapter);
- //ixgbe_free_all_rx_resources(adapter);
-
- ixgbe_release_hw_control(adapter);
-
- return 0;
-}
-
-
-
-
-
-/**
- * ixgbe_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-//static
-struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- /* update the stats data */
- ixgbe_update_stats(adapter);
-
-#ifdef HAVE_NETDEV_STATS_IN_NETDEV
- /* only return the current stats */
- return &netdev->stats;
-#else
- /* only return the current stats */
- return &adapter->net_stats;
-#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
-}
-
-/**
- * ixgbe_update_stats - Update the board statistics counters.
- * @adapter: board private structure
- **/
-void ixgbe_update_stats(struct ixgbe_adapter *adapter)
-{
-#ifdef HAVE_NETDEV_STATS_IN_NETDEV
- struct net_device_stats *net_stats = &adapter->netdev->stats;
-#else
- struct net_device_stats *net_stats = &adapter->net_stats;
-#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbe_hw_stats *hwstats = &adapter->stats;
- u64 total_mpc = 0;
- u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
- u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
- u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
- u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
-#ifndef IXGBE_NO_LRO
- u32 flushed = 0, coal = 0;
- int num_q_vectors = 1;
-#endif
-#ifdef IXGBE_FCOE
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- unsigned int cpu;
- u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0;
-#endif /* IXGBE_FCOE */
-
- printk(KERN_DEBUG "ixgbe_update_stats, tx_queues=%d, rx_queues=%d\n",
- adapter->num_tx_queues, adapter->num_rx_queues);
-
- if (test_bit(__IXGBE_DOWN, &adapter->state) ||
- test_bit(__IXGBE_RESETTING, &adapter->state))
- return;
-
-#ifndef IXGBE_NO_LRO
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-#endif
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- u64 rsc_count = 0;
- u64 rsc_flush = 0;
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
- rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
- }
- adapter->rsc_total_count = rsc_count;
- adapter->rsc_total_flush = rsc_flush;
- }
-
-#ifndef IXGBE_NO_LRO
- for (i = 0; i < num_q_vectors; i++) {
- struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
- if (!q_vector)
- continue;
- flushed += q_vector->lrolist.stats.flushed;
- coal += q_vector->lrolist.stats.coal;
- }
- adapter->lro_stats.flushed = flushed;
- adapter->lro_stats.coal = coal;
-
-#endif
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
- non_eop_descs += rx_ring->rx_stats.non_eop_descs;
- alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
- alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
- hw_csum_rx_error += rx_ring->rx_stats.csum_err;
- bytes += rx_ring->stats.bytes;
- packets += rx_ring->stats.packets;
-
- }
- adapter->non_eop_descs = non_eop_descs;
- adapter->alloc_rx_page_failed = alloc_rx_page_failed;
- adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
- adapter->hw_csum_rx_error = hw_csum_rx_error;
- net_stats->rx_bytes = bytes;
- net_stats->rx_packets = packets;
-
- bytes = 0;
- packets = 0;
- /* gather some stats to the adapter struct that are per queue */
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
- restart_queue += tx_ring->tx_stats.restart_queue;
- tx_busy += tx_ring->tx_stats.tx_busy;
- bytes += tx_ring->stats.bytes;
- packets += tx_ring->stats.packets;
- }
- adapter->restart_queue = restart_queue;
- adapter->tx_busy = tx_busy;
- net_stats->tx_bytes = bytes;
- net_stats->tx_packets = packets;
-
- hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
-
- /* 8 register reads */
- for (i = 0; i < 8; i++) {
- /* for packet buffers not used, the register should read 0 */
- mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
- missed_rx += mpc;
- hwstats->mpc[i] += mpc;
- total_mpc += hwstats->mpc[i];
- hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
- hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
- hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
- hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
- hwstats->pxonrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- hwstats->pxonrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
- break;
- default:
- break;
- }
- }
-
- /* 16 register reads */
- for (i = 0; i < 16; i++) {
- hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
- hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- if ((hw->mac.type == ixgbe_mac_82599EB) ||
- (hw->mac.type == ixgbe_mac_X540)) {
- hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
- IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
- hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
- IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
- }
- }
-
- hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
- /* work around hardware counting issue */
- hwstats->gprc -= missed_rx;
-
- ixgbe_update_xoff_received(adapter);
-
- /* 82598 hardware only has a 32 bit counter in the high register */
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
- hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
- hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
- hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
- break;
- case ixgbe_mac_X540:
- /* OS2BMC stats are X540 only */
- hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
- hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
- hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
- hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
- /* fall through */
- case ixgbe_mac_82599EB:
- for (i = 0; i < 16; i++)
- adapter->hw_rx_no_dma_resources +=
- IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
- hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
- IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
- hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
- IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
- hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
- IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
- hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
-#ifdef HAVE_TX_MQ
- hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
- hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
-#endif /* HAVE_TX_MQ */
-#ifdef IXGBE_FCOE
- hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
- hwstats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
- hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
- hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
- hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
- hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
- hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
- /* Add up per-CPU counters for total DDP alloc failures */
- if (fcoe && fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) {
- for_each_possible_cpu(cpu) {
- fcoe_noddp_counts_sum +=
- *per_cpu_ptr(fcoe->pcpu_noddp, cpu);
- fcoe_noddp_ext_buff_counts_sum +=
- *per_cpu_ptr(fcoe->
- pcpu_noddp_ext_buff, cpu);
- }
- }
- hwstats->fcoe_noddp = fcoe_noddp_counts_sum;
- hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum;
-
-#endif /* IXGBE_FCOE */
- break;
- default:
- break;
- }
- bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
- hwstats->bprc += bprc;
- hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
- if (hw->mac.type == ixgbe_mac_82598EB)
- hwstats->mprc -= bprc;
- hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
- hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
- hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
- hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
- hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
- hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
- hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
- hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
- lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
- hwstats->lxontxc += lxon;
- lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
- hwstats->lxofftxc += lxoff;
- hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
- hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
- /*
- * 82598 errata - tx of flow control packets is included in tx counters
- */
- xon_off_tot = lxon + lxoff;
- hwstats->gptc -= xon_off_tot;
- hwstats->mptc -= xon_off_tot;
- hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
- hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
- hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
- hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
- hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
- hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
- hwstats->ptc64 -= xon_off_tot;
- hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
- hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
- hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
- hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
- hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
- hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
- /* Fill out the OS statistics structure */
- net_stats->multicast = hwstats->mprc;
-
- /* Rx Errors */
- net_stats->rx_errors = hwstats->crcerrs +
- hwstats->rlec;
- net_stats->rx_dropped = 0;
- net_stats->rx_length_errors = hwstats->rlec;
- net_stats->rx_crc_errors = hwstats->crcerrs;
- net_stats->rx_missed_errors = total_mpc;
-
- /*
- * VF Stats Collection - skip while resetting because these
- * are not clear on read and otherwise you'll sometimes get
- * crazy values.
- */
- if (!test_bit(__IXGBE_RESETTING, &adapter->state)) {
- for (i = 0; i < adapter->num_vfs; i++) {
- UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i), \
- adapter->vfinfo[i].last_vfstats.gprc, \
- adapter->vfinfo[i].vfstats.gprc);
- UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i), \
- adapter->vfinfo[i].last_vfstats.gptc, \
- adapter->vfinfo[i].vfstats.gptc);
- UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i), \
- IXGBE_PVFGORC_MSB(i), \
- adapter->vfinfo[i].last_vfstats.gorc, \
- adapter->vfinfo[i].vfstats.gorc);
- UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i), \
- IXGBE_PVFGOTC_MSB(i), \
- adapter->vfinfo[i].last_vfstats.gotc, \
- adapter->vfinfo[i].vfstats.gotc);
- UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i), \
- adapter->vfinfo[i].last_vfstats.mprc, \
- adapter->vfinfo[i].vfstats.mprc);
- }
- }
-}
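-
-/*
- * The UPDATE_VF_COUNTER_36bit usage above handles hardware counters
- * that are split across a 32-bit LSB register and a 4-bit MSB register
- * and that wrap rather than clear on read: the running total advances
- * by (current - last) modulo 2^36. A standalone sketch of that wrap
- * handling (demo_* name is illustrative):
- */
-static unsigned long long demo_counter_delta36(unsigned long long cur,
-					       unsigned long long last)
-{
-	/* e.g. last = 0xFFFFFFFFFULL (counter max), cur = 5: delta = 6 */
-	return (cur - last) & 0xFFFFFFFFFULL;
-}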
-
-
-#ifdef NO_VNIC
-
-/**
- * ixgbe_watchdog_update_link - update the link status
- * @adapter - pointer to the device adapter structure
- * @link_speed - pointer to a u32 to store the link_speed
- **/
-static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 link_speed = adapter->link_speed;
- bool link_up = adapter->link_up;
- bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
-
- if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
- return;
-
- if (hw->mac.ops.check_link) {
- hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
- } else {
- /* always assume link is up if there is no check_link function */
- link_speed = IXGBE_LINK_SPEED_10GB_FULL;
- link_up = true;
- }
-
-#ifdef HAVE_DCBNL_IEEE
- if (adapter->ixgbe_ieee_pfc)
- pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
-
-#endif
- if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
- hw->mac.ops.fc_enable(hw);
- //ixgbe_set_rx_drop_en(adapter);
- }
-
- if (link_up ||
- time_after(jiffies, (adapter->link_check_timeout +
- IXGBE_TRY_LINK_TIMEOUT))) {
- adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
- IXGBE_WRITE_FLUSH(hw);
- }
-
- adapter->link_up = link_up;
- adapter->link_speed = link_speed;
-}
-#endif
-
-
-
-#ifdef NO_VNIC
-
-/**
- * ixgbe_service_task - manages and runs subtasks
- * @work: pointer to work_struct containing our data
- **/
-static void ixgbe_service_task(struct work_struct *work)
-{
- //struct ixgbe_adapter *adapter = container_of(work,
- // struct ixgbe_adapter,
- // service_task);
-
- //ixgbe_reset_subtask(adapter);
- //ixgbe_sfp_detection_subtask(adapter);
- //ixgbe_sfp_link_config_subtask(adapter);
- //ixgbe_check_overtemp_subtask(adapter);
- //ixgbe_watchdog_subtask(adapter);
-#ifdef HAVE_TX_MQ
- //ixgbe_fdir_reinit_subtask(adapter);
-#endif
- //ixgbe_check_hang_subtask(adapter);
-
- //ixgbe_service_event_complete(adapter);
-}
-
-
-
-
-#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
- IXGBE_TXD_CMD_RS)
-
-
-/**
- * ixgbe_set_mac - Change the Ethernet Address of the NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- **/
-static int ixgbe_set_mac(struct net_device *netdev, void *p)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- struct sockaddr *addr = p;
- int ret;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- ixgbe_del_mac_filter(adapter, hw->mac.addr,
- adapter->num_vfs);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
-
-
- /* set the correct pool for the new PF MAC address in entry 0 */
- ret = ixgbe_add_mac_filter(adapter, hw->mac.addr,
- adapter->num_vfs);
- return (ret > 0 ? 0 : ret);
-}
-
-
-/**
- * ixgbe_ioctl - handle ioctl requests
- * @netdev: network interface device structure
- * @ifr: interface request structure
- * @cmd: ioctl command number
- **/
-static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
-#ifdef ETHTOOL_OPS_COMPAT
- case SIOCETHTOOL:
- return ethtool_ioctl(ifr);
-#endif
- default:
- return -EOPNOTSUPP;
- }
-}
-#endif /* NO_VNIC */
-
-
-void ixgbe_do_reset(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- if (netif_running(netdev))
- ixgbe_reinit_locked(adapter);
- else
- ixgbe_reset(adapter);
-}
-
-
-
-
-
-
-/**
- * ixgbe_probe - Device Initialization Routine
- * @pdev: PCI device information struct
- * @ent: entry in ixgbe_pci_tbl
- *
- * Returns 0 on success, negative on failure
- *
- * ixgbe_probe initializes an adapter identified by a pci_dev structure.
- * The OS initialization, configuring of the adapter private structure,
- * and a hardware reset occur.
- **/
-//static
-int ixgbe_kni_probe(struct pci_dev *pdev,
- struct net_device **lad_dev)
-{
- size_t count;
- struct net_device *netdev;
- struct ixgbe_adapter *adapter = NULL;
- struct ixgbe_hw *hw = NULL;
- static int cards_found;
- int i, err;
- u16 offset;
- u16 eeprom_verh, eeprom_verl, eeprom_cfg_blkh, eeprom_cfg_blkl;
- u32 etrack_id;
- u16 build, major, patch;
- char *info_string, *i_s_var;
- u8 part_str[IXGBE_PBANUM_LENGTH];
- enum ixgbe_mac_type mac_type = ixgbe_mac_unknown;
-#ifdef HAVE_TX_MQ
- unsigned int indices = num_possible_cpus();
-#endif /* HAVE_TX_MQ */
-#ifdef IXGBE_FCOE
- u16 device_caps;
-#endif
- u16 wol_cap;
-
- err = pci_enable_device_mem(pdev);
- if (err)
- return err;
-
-
-#ifdef NO_VNIC
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM), ixgbe_driver_name);
- if (err) {
- dev_err(pci_dev_to_dev(pdev),
- "pci_request_selected_regions failed 0x%x\n", err);
- goto err_pci_reg;
- }
-#endif
-
- /*
- * The mac_type is needed before the adapter is set up, so
- * rather than maintain two devID -> MAC tables we dummy up
- * an ixgbe_hw struct and use ixgbe_set_mac_type.
- */
- hw = vmalloc(sizeof(struct ixgbe_hw));
- if (!hw) {
- pr_info("Unable to allocate memory for early mac "
- "check\n");
- } else {
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- ixgbe_set_mac_type(hw);
- mac_type = hw->mac.type;
- vfree(hw);
- }
-
-#ifdef NO_VNIC
- /*
- * Workaround for silicon errata on 82598. Disable L0s in the PCI
- * switch port to which the 82598 is connected, to prevent duplicate
- * completions caused by L0s. We need the mac type so that we only
- * do this on 82598 devices; ixgbe_set_mac_type does this for us if
- * we set its device ID.
- */
- if (mac_type == ixgbe_mac_82598EB)
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
-
- pci_enable_pcie_error_reporting(pdev);
-
- pci_set_master(pdev);
-#endif
-
-#ifdef HAVE_TX_MQ
-#ifdef CONFIG_DCB
-#ifdef HAVE_MQPRIO
- indices *= IXGBE_DCB_MAX_TRAFFIC_CLASS;
-#else
- indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
-#endif /* HAVE_MQPRIO */
-#endif /* CONFIG_DCB */
-
- if (mac_type == ixgbe_mac_82598EB)
- indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
- else
- indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
-
-#ifdef IXGBE_FCOE
- indices += min_t(unsigned int, num_possible_cpus(),
- IXGBE_MAX_FCOE_INDICES);
-#endif
- netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
-#else /* HAVE_TX_MQ */
- netdev = alloc_etherdev(sizeof(struct ixgbe_adapter));
-#endif /* HAVE_TX_MQ */
- if (!netdev) {
- err = -ENOMEM;
- goto err_alloc_etherdev;
- }
-
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- adapter = netdev_priv(netdev);
- //pci_set_drvdata(pdev, adapter);
-
- adapter->netdev = netdev;
- adapter->pdev = pdev;
- hw = &adapter->hw;
- hw->back = adapter;
- adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
-
-#ifdef HAVE_PCI_ERS
- /*
- * call save state here in the standalone driver because it relies
- * on the adapter struct to exist, and needs to call netdev_priv
- */
- pci_save_state(pdev);
-
-#endif
- hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (!hw->hw_addr) {
- err = -EIO;
- goto err_ioremap;
- }
- //ixgbe_assign_netdev_ops(netdev);
- ixgbe_set_ethtool_ops(netdev);
-
- strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
-
- adapter->bd_number = cards_found;
-
- /* setup the private structure */
- err = ixgbe_sw_init(adapter);
- if (err)
- goto err_sw_init;
-
- /* Make it possible for the adapter to be woken up via WOL */
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
- break;
- default:
- break;
- }
-
- /*
- * check_options must be called before setup_link to set up
- * hw->fc completely
- */
- //ixgbe_check_options(adapter);
-
-#ifndef NO_VNIC
- /* reset_hw fills in the perm_addr as well */
- hw->phy.reset_if_overtemp = true;
- err = hw->mac.ops.reset_hw(hw);
- hw->phy.reset_if_overtemp = false;
- if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
- hw->mac.type == ixgbe_mac_82598EB) {
- err = 0;
- } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- e_dev_err("failed to load because an unsupported SFP+ "
- "module type was detected.\n");
- e_dev_err("Reload the driver after installing a supported "
- "module.\n");
- goto err_sw_init;
- } else if (err) {
- e_dev_err("HW Init failed: %d\n", err);
- goto err_sw_init;
- }
-#endif
-
- //if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- // ixgbe_probe_vf(adapter);
-
-
-#ifdef MAX_SKB_FRAGS
- netdev->features |= NETIF_F_SG |
- NETIF_F_IP_CSUM;
-
-#ifdef NETIF_F_IPV6_CSUM
- netdev->features |= NETIF_F_IPV6_CSUM;
-#endif
-
-#ifdef NETIF_F_HW_VLAN_TX
- netdev->features |= NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX;
-#endif
-#ifdef NETIF_F_TSO
- netdev->features |= NETIF_F_TSO;
-#endif /* NETIF_F_TSO */
-#ifdef NETIF_F_TSO6
- netdev->features |= NETIF_F_TSO6;
-#endif /* NETIF_F_TSO6 */
-#ifdef NETIF_F_RXHASH
- netdev->features |= NETIF_F_RXHASH;
-#endif /* NETIF_F_RXHASH */
-
-#ifdef HAVE_NDO_SET_FEATURES
- netdev->features |= NETIF_F_RXCSUM;
-
- /* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
-
- /* give us the option of enabling RSC/LRO later */
-#ifdef IXGBE_NO_LRO
- if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
-#endif
- netdev->hw_features |= NETIF_F_LRO;
-
-#else
-#ifdef NETIF_F_GRO
-
- /* this is only needed on kernels prior to 2.6.39 */
- netdev->features |= NETIF_F_GRO;
-#endif /* NETIF_F_GRO */
-#endif
-
-#ifdef NETIF_F_HW_VLAN_TX
- /* set this bit last since it cannot be part of hw_features */
- netdev->features |= NETIF_F_HW_VLAN_FILTER;
-#endif
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- netdev->features |= NETIF_F_SCTP_CSUM;
-#ifdef HAVE_NDO_SET_FEATURES
- netdev->hw_features |= NETIF_F_SCTP_CSUM |
- NETIF_F_NTUPLE;
-#endif
- break;
- default:
- break;
- }
-
-#ifdef HAVE_NETDEV_VLAN_FEATURES
- netdev->vlan_features |= NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6;
-
-#endif /* HAVE_NETDEV_VLAN_FEATURES */
- /*
- * If perfect filters were enabled in check_options(), enable them
- * on the netdevice too.
- */
- if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
- netdev->features |= NETIF_F_NTUPLE;
- if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- /* clear n-tuple support in the netdev unconditionally */
- netdev->features &= ~NETIF_F_NTUPLE;
- }
-
-#ifdef NETIF_F_RXHASH
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
- netdev->features &= ~NETIF_F_RXHASH;
-
-#endif /* NETIF_F_RXHASH */
- if (netdev->features & NETIF_F_LRO) {
- if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
- ((adapter->rx_itr_setting == 1) ||
- (adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR))) {
- adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- } else if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
-#ifdef IXGBE_NO_LRO
- e_info(probe, "InterruptThrottleRate set too high, "
- "disabling RSC\n");
-#else
- e_info(probe, "InterruptThrottleRate set too high, "
- "falling back to software LRO\n");
-#endif
- }
- }
-#ifdef CONFIG_DCB
- //netdev->dcbnl_ops = &dcbnl_ops;
-#endif
-
-#ifdef IXGBE_FCOE
-#ifdef NETIF_F_FSO
- if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
- ixgbe_get_device_caps(hw, &device_caps);
- if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) {
- adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
- e_info(probe, "FCoE offload feature is not available. "
- "Disabling FCoE offload feature\n");
- }
-#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
- else {
- adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
- adapter->ring_feature[RING_F_FCOE].indices =
- IXGBE_FCRETA_SIZE;
- netdev->features |= NETIF_F_FSO |
- NETIF_F_FCOE_CRC |
- NETIF_F_FCOE_MTU;
- netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
- }
-#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */
-#ifdef HAVE_NETDEV_VLAN_FEATURES
- netdev->vlan_features |= NETIF_F_FSO |
- NETIF_F_FCOE_CRC |
- NETIF_F_FCOE_MTU;
-#endif /* HAVE_NETDEV_VLAN_FEATURES */
- }
-#endif /* NETIF_F_FSO */
-#endif /* IXGBE_FCOE */
-
-#endif /* MAX_SKB_FRAGS */
- /* make sure the EEPROM is good */
- if (hw->eeprom.ops.validate_checksum &&
- (hw->eeprom.ops.validate_checksum(hw, NULL) < 0)) {
- e_dev_err("The EEPROM Checksum Is Not Valid\n");
- err = -EIO;
- goto err_sw_init;
- }
-
- memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
-#ifdef ETHTOOL_GPERMADDR
- memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
-
- if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
- e_dev_err("invalid MAC address\n");
- err = -EIO;
- goto err_sw_init;
- }
-#else
- if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
- e_dev_err("invalid MAC address\n");
- err = -EIO;
- goto err_sw_init;
- }
-#endif
- memcpy(&adapter->mac_table[0].addr, hw->mac.perm_addr,
- netdev->addr_len);
- adapter->mac_table[0].queue = adapter->num_vfs;
- adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
- IXGBE_MAC_STATE_IN_USE);
- hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
- adapter->mac_table[0].queue,
- IXGBE_RAH_AV);
-
- //setup_timer(&adapter->service_timer, &ixgbe_service_timer,
- // (unsigned long) adapter);
-
- //INIT_WORK(&adapter->service_task, ixgbe_service_task);
- //clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
-
- //err = ixgbe_init_interrupt_scheme(adapter);
- //if (err)
- // goto err_sw_init;
-
- //adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- ixgbe_set_num_queues(adapter);
-
- adapter->wol = 0;
-	/* WOL is supported only on the following devices */
- switch (pdev->device) {
- case IXGBE_DEV_ID_82599_SFP:
-		/* Only these subdevices support WOL */
- switch (pdev->subsystem_device) {
- case IXGBE_SUBDEV_ID_82599_560FLR:
- /* only support first port */
- if (hw->bus.func != 0)
- break;
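-			/* fall through: a 560FLR on port 0 uses the same
-			 * magic-packet WUFC value as the plain SFP subdevice
-			 */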
- case IXGBE_SUBDEV_ID_82599_SFP:
- adapter->wol = IXGBE_WUFC_MAG;
- break;
- }
- break;
- case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
- /* All except this subdevice support WOL */
- if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
- adapter->wol = IXGBE_WUFC_MAG;
- break;
- case IXGBE_DEV_ID_82599_KX4:
- adapter->wol = IXGBE_WUFC_MAG;
- break;
- case IXGBE_DEV_ID_X540T:
- /* Check eeprom to see if it is enabled */
- ixgbe_read_eeprom(hw, 0x2c, &adapter->eeprom_cap);
- wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
-
- if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
- ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
- (hw->bus.func == 0)))
- adapter->wol = IXGBE_WUFC_MAG;
- break;
- }
- //device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
-
-
-	/*
-	 * Save off the EEPROM version number and Option ROM version, which
-	 * together make a unique identifier for the EEPROM
-	 */
- ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
- ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
-
- etrack_id = (eeprom_verh << 16) | eeprom_verl;
-
- ixgbe_read_eeprom(hw, 0x17, &offset);
-
- /* Make sure offset to SCSI block is valid */
- if (!(offset == 0x0) && !(offset == 0xffff)) {
- ixgbe_read_eeprom(hw, offset + 0x84, &eeprom_cfg_blkh);
- ixgbe_read_eeprom(hw, offset + 0x83, &eeprom_cfg_blkl);
-
-		/* Only display the Option ROM version if one exists */
- if (eeprom_cfg_blkl && eeprom_cfg_blkh) {
- major = eeprom_cfg_blkl >> 8;
- build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8);
- patch = eeprom_cfg_blkh & 0x00ff;
-
- snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
- "0x%08x, %d.%d.%d", etrack_id, major, build,
- patch);
- } else {
- snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
- "0x%08x", etrack_id);
- }
- } else {
- snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
- "0x%08x", etrack_id);
- }
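-	/*
-	 * Illustrative example of the packing above (made-up EEPROM words,
-	 * assuming the u16 types used by the upstream ixgbe driver):
-	 * eeprom_verh = 0x0001, eeprom_verl = 0x8086
-	 *   -> etrack_id = 0x00018086
-	 * eeprom_cfg_blkl = 0x0100, eeprom_cfg_blkh = 0x2203
-	 *   -> major = 0x01, build = 0x0022 (34), patch = 0x03
-	 *   -> eeprom_id = "0x00018086, 1.34.3"
-	 */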
-
- /* reset the hardware with the new settings */
- err = hw->mac.ops.start_hw(hw);
- if (err == IXGBE_ERR_EEPROM_VERSION) {
- /* We are running on a pre-production device, log a warning */
- e_dev_warn("This device is a pre-production adapter/LOM. "
- "Please be aware there may be issues associated "
- "with your hardware. If you are experiencing "
- "problems please contact your Intel or hardware "
- "representative who provided you with this "
- "hardware.\n");
- }
- /* pick up the PCI bus settings for reporting later */
- if (hw->mac.ops.get_bus_info)
- hw->mac.ops.get_bus_info(hw);
-
- strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
- *lad_dev = netdev;
-
- adapter->netdev_registered = true;
-#ifdef NO_VNIC
- /* power down the optics */
- if ((hw->phy.multispeed_fiber) ||
- ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- (hw->mac.type == ixgbe_mac_82599EB)))
- ixgbe_disable_tx_laser(hw);
-
- /* carrier off reporting is important to ethtool even BEFORE open */
- netif_carrier_off(netdev);
- /* keep stopping all the transmit queues for older kernels */
- netif_tx_stop_all_queues(netdev);
-#endif
-
- /* print all messages at the end so that we use our eth%d name */
- /* print bus type/speed/width info */
- e_dev_info("(PCI Express:%s:%s) ",
- (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
- hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
- "Unknown"),
- (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
- hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
- hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
- "Unknown"));
-
- /* print the MAC address */
- for (i = 0; i < 6; i++)
- pr_cont("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
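-	/* e.g. prints "00:1b:21:aa:bb:cc\n" (example address only;
-	 * 00:1b:21 is one of Intel's OUIs)
-	 */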
-
- /* First try to read PBA as a string */
- err = ixgbe_read_pba_string(hw, part_str, IXGBE_PBANUM_LENGTH);
- if (err)
- strlcpy(part_str, "Unknown", sizeof(part_str));
- if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
- e_info(probe, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
- hw->mac.type, hw->phy.type, hw->phy.sfp_type, part_str);
- else
- e_info(probe, "MAC: %d, PHY: %d, PBA No: %s\n",
- hw->mac.type, hw->phy.type, part_str);
-
- if (((hw->bus.speed == ixgbe_bus_speed_2500) &&
- (hw->bus.width <= ixgbe_bus_width_pcie_x4)) ||
- (hw->bus.width <= ixgbe_bus_width_pcie_x2)) {
- e_dev_warn("PCI-Express bandwidth available for this "
- "card is not sufficient for optimal "
- "performance.\n");
- e_dev_warn("For optimal performance a x8 PCI-Express "
- "slot is required.\n");
- }
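-	/*
-	 * Rough budget behind the warning above: PCIe gen1 runs at 2.5 GT/s
-	 * with 8b/10b encoding, i.e. ~2 Gb/s of payload per lane, so a gen1
-	 * x4 link tops out near 8 Gb/s -- less than one 10 GbE port at line
-	 * rate. Gen1 x8 (~16 Gb/s) or a wider/faster slot is needed.
-	 */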
-
-#define INFO_STRING_LEN 255
- info_string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
- if (!info_string) {
- e_err(probe, "allocation for info string failed\n");
- goto no_info_string;
- }
- count = 0;
- i_s_var = info_string;
- count += snprintf(i_s_var, INFO_STRING_LEN, "Enabled Features: ");
-
- i_s_var = info_string + count;
- count += snprintf(i_s_var, (INFO_STRING_LEN - count),
- "RxQ: %d TxQ: %d ", adapter->num_rx_queues,
- adapter->num_tx_queues);
- i_s_var = info_string + count;
-#ifdef IXGBE_FCOE
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
- count += snprintf(i_s_var, INFO_STRING_LEN - count, "FCoE ");
- i_s_var = info_string + count;
- }
-#endif
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- count += snprintf(i_s_var, INFO_STRING_LEN - count,
- "FdirHash ");
- i_s_var = info_string + count;
- }
- if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
- count += snprintf(i_s_var, INFO_STRING_LEN - count,
- "FdirPerfect ");
- i_s_var = info_string + count;
- }
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- count += snprintf(i_s_var, INFO_STRING_LEN - count, "DCB ");
- i_s_var = info_string + count;
- }
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- count += snprintf(i_s_var, INFO_STRING_LEN - count, "RSS ");
- i_s_var = info_string + count;
- }
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
- count += snprintf(i_s_var, INFO_STRING_LEN - count, "DCA ");
- i_s_var = info_string + count;
- }
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- count += snprintf(i_s_var, INFO_STRING_LEN - count, "RSC ");
- i_s_var = info_string + count;
- }
-#ifndef IXGBE_NO_LRO
- else if (netdev->features & NETIF_F_LRO) {
- count += snprintf(i_s_var, INFO_STRING_LEN - count, "LRO ");
- i_s_var = info_string + count;
- }
-#endif
-
- BUG_ON(i_s_var > (info_string + INFO_STRING_LEN));
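-	/*
-	 * Note: snprintf() returns the length that would have been written,
-	 * so on truncation count (and i_s_var) can step past the end of the
-	 * buffer; the BUG_ON above is what catches that case.
-	 */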
- /* end features printing */
- e_info(probe, "%s\n", info_string);
- kfree(info_string);
-no_info_string:
-
- /* firmware requires blank driver version */
- ixgbe_set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF);
-
-#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN)
- /* add san mac addr to netdev */
- //ixgbe_add_sanmac_netdev(netdev);
-
-#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */
- e_info(probe, "Intel(R) 10 Gigabit Network Connection\n");
- cards_found++;
-
-#ifdef IXGBE_SYSFS
- //if (ixgbe_sysfs_init(adapter))
- // e_err(probe, "failed to allocate sysfs resources\n");
-#else
-#ifdef IXGBE_PROCFS
- //if (ixgbe_procfs_init(adapter))
- // e_err(probe, "failed to allocate procfs resources\n");
-#endif /* IXGBE_PROCFS */
-#endif /* IXGBE_SYSFS */
-
- return 0;
-
-//err_register:
- //ixgbe_clear_interrupt_scheme(adapter);
- //ixgbe_release_hw_control(adapter);
-err_sw_init:
- adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
- if (adapter->mac_table)
- kfree(adapter->mac_table);
- iounmap(hw->hw_addr);
-err_ioremap:
- free_netdev(netdev);
-err_alloc_etherdev:
- //pci_release_selected_regions(pdev,
- // pci_select_bars(pdev, IORESOURCE_MEM));
-//err_pci_reg:
-//err_dma:
- pci_disable_device(pdev);
- return err;
-}
-
-/**
- * ixgbe_kni_remove - Device Removal Routine
- * @pdev: PCI device information struct
- *
- * ixgbe_kni_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device. This could be caused by a
- * Hot-Plug event, or because the driver is going to be removed from
- * memory.
- **/
-void ixgbe_kni_remove(struct pci_dev *pdev)
-{
- pci_disable_device(pdev);
-}
-
-
-u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
-{
- u16 value;
- struct ixgbe_adapter *adapter = hw->back;
-
- pci_read_config_word(adapter->pdev, reg, &value);
- return value;
-}
-
-void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
-{
- struct ixgbe_adapter *adapter = hw->back;
-
- pci_write_config_word(adapter->pdev, reg, value);
-}
-
-void ewarn(struct ixgbe_hw *hw, const char *st, u32 status)
-{
- struct ixgbe_adapter *adapter = hw->back;
-
- netif_warn(adapter, drv, adapter->netdev, "%s", st);
-}
-
-
-
-
-
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h
deleted file mode 100755
index 124f00de..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IXGBE_MBX_H_
-#define _IXGBE_MBX_H_
-
-#include "ixgbe_type.h"
-
-#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX -100
-
-#define IXGBE_VFMAILBOX 0x002FC
-#define IXGBE_VFMBMEM 0x00200
-
-/* Define mailbox register bits */
-#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
-#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
-#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
-#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
-#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
-#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
-#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
-#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
-#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
-
-#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
-#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
-#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
-#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
-#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
-
-#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
-#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
-#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
-#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
-
-
-/* If it's an IXGBE_VF_* message, it originates in the VF and is sent to the
- * PF. The reverse is true if it is IXGBE_PF_*.
- * Message ACKs are the message value OR'd with 0xF0000000
- */
-#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
- * this are the ACK */
-#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
- * this are the NACK */
-#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
- * clear to send requests */
-#define IXGBE_VT_MSGINFO_SHIFT 16
-/* bits 23:16 are used for extra info for certain messages */
-#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
-
-#define IXGBE_VF_RESET 0x01 /* VF requests reset */
-#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
-#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
-#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
-#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
-#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
-
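-/*
- * Example encodings under the definitions above: a VF request to set a MAC
- * address is sent as IXGBE_VF_SET_MAC_ADDR (0x02); the PF's reply reuses the
- * message number with the type bit OR'd in, e.g.
- * IXGBE_VT_MSGTYPE_ACK  | 0x02 = 0x80000002 on success and
- * IXGBE_VT_MSGTYPE_NACK | 0x02 = 0x40000002 on failure.
- */
-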
-/* length of permanent address message returned from PF */
-#define IXGBE_VF_PERMADDR_MSG_LEN 4
-/* word in permanent address message with the current multicast type */
-#define IXGBE_VF_MC_TYPE_WORD 3
-
-#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
-
-
-#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
-#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
-
-s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
-void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
-void ixgbe_init_mbx_params_vf(struct ixgbe_hw *);
-void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
-
-#endif /* _IXGBE_MBX_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h
deleted file mode 100755
index d161600b..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-
-/* glue for the OS independent part of ixgbe
- * includes register access macros
- */
-
-#ifndef _IXGBE_OSDEP_H_
-#define _IXGBE_OSDEP_H_
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/sched.h>
-#include "kcompat.h"
-
-
-#ifndef msleep
-#define msleep(x) do { if (in_interrupt()) { \
- /* Don't mdelay in interrupt context! */ \
- BUG(); \
- } else { \
- msleep(x); \
- } } while (0)
-
-#endif
-
-#undef ASSERT
-
-#ifdef DBG
-#define hw_dbg(hw, S, A...) printk(KERN_DEBUG S, ## A)
-#else
-#define hw_dbg(hw, S, A...) do {} while (0)
-#endif
-
-#define e_dev_info(format, arg...) \
- dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg)
-#define e_dev_warn(format, arg...) \
- dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg)
-#define e_dev_err(format, arg...) \
- dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg)
-#define e_dev_notice(format, arg...) \
- dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg)
-#define e_info(msglvl, format, arg...) \
- netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
-#define e_err(msglvl, format, arg...) \
- netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
-#define e_warn(msglvl, format, arg...) \
- netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
-#define e_crit(msglvl, format, arg...) \
- netif_crit(adapter, msglvl, adapter->netdev, format, ## arg)
-
-
-#ifdef DBG
-#define IXGBE_WRITE_REG(a, reg, value) do {\
- switch (reg) { \
- case IXGBE_EIMS: \
- case IXGBE_EIMC: \
- case IXGBE_EIAM: \
- case IXGBE_EIAC: \
- case IXGBE_EICR: \
- case IXGBE_EICS: \
- printk("%s: Reg - 0x%05X, value - 0x%08X\n", __func__, \
- reg, (u32)(value)); \
- default: \
- break; \
- } \
- writel((value), ((a)->hw_addr + (reg))); \
-} while (0)
-#else
-#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
-#endif
-
-#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
-
-#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
- writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
-
-#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
- readl((a)->hw_addr + (reg) + ((offset) << 2)))
-
-#ifndef writeq
-#define writeq(val, addr) do { writel((u32) (val), addr); \
-			writel((u32) ((val) >> 32), (addr + 4)); \
-		} while (0)
-#endif
-
-#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
-
-#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
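-
-/*
- * Illustrative usage of the accessors above (IXGBE_CTRL/IXGBE_CTRL_RST are
- * the usual reset register/bit names from ixgbe_type.h, shown here only to
- * make the read/modify/write plus posted-write idiom concrete):
- *
- *	u32 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- *	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl | IXGBE_CTRL_RST);
- *	IXGBE_WRITE_FLUSH(hw);	read back so the write posts before delaying
- */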
-struct ixgbe_hw;
-extern u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
-extern void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value);
-extern void ewarn(struct ixgbe_hw *hw, const char *str, u32 status);
-
-#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg_word
-#define IXGBE_WRITE_PCIE_WORD ixgbe_write_pci_cfg_word
-#define IXGBE_EEPROM_GRANT_ATTEMPS 100
-#define IXGBE_HTONL(_i) htonl(_i)
-#define IXGBE_NTOHL(_i) ntohl(_i)
-#define IXGBE_NTOHS(_i) ntohs(_i)
-#define IXGBE_CPU_TO_LE32(_i) cpu_to_le32(_i)
-#define IXGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i)
-#define EWARN(H, W, S) ewarn(H, W, S)
-
-#endif /* _IXGBE_OSDEP_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c
deleted file mode 100755
index e3f5275e..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c
+++ /dev/null
@@ -1,1847 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "ixgbe_api.h"
-#include "ixgbe_common.h"
-#include "ixgbe_phy.h"
-
-static void ixgbe_i2c_start(struct ixgbe_hw *hw);
-static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
-static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
-static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
-static bool ixgbe_get_i2c_data(u32 *i2cctl);
-
-/**
- * ixgbe_init_phy_ops_generic - Inits PHY function ptrs
- * @hw: pointer to the hardware structure
- *
- * Initialize the function pointers.
- **/
-s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
-{
- struct ixgbe_phy_info *phy = &hw->phy;
-
- /* PHY */
- phy->ops.identify = &ixgbe_identify_phy_generic;
- phy->ops.reset = &ixgbe_reset_phy_generic;
- phy->ops.read_reg = &ixgbe_read_phy_reg_generic;
- phy->ops.write_reg = &ixgbe_write_phy_reg_generic;
- phy->ops.setup_link = &ixgbe_setup_phy_link_generic;
- phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic;
- phy->ops.check_link = NULL;
- phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic;
- phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_generic;
- phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_generic;
- phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic;
- phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic;
- phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
- phy->ops.identify_sfp = &ixgbe_identify_module_generic;
- phy->sfp_type = ixgbe_sfp_type_unknown;
- phy->ops.check_overtemp = &ixgbe_tn_check_overtemp;
- return 0;
-}
-
-/**
- * ixgbe_identify_phy_generic - Get physical layer module
- * @hw: pointer to hardware structure
- *
- * Determines the physical layer module found on the current adapter.
- **/
-s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
-{
- s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
- u32 phy_addr;
- u16 ext_ability = 0;
-
- if (hw->phy.type == ixgbe_phy_unknown) {
- for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
- if (ixgbe_validate_phy_addr(hw, phy_addr)) {
- hw->phy.addr = phy_addr;
- ixgbe_get_phy_id(hw);
- hw->phy.type =
- ixgbe_get_phy_type_from_id(hw->phy.id);
-
- if (hw->phy.type == ixgbe_phy_unknown) {
- hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_PHY_EXT_ABILITY,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &ext_ability);
- if (ext_ability &
- (IXGBE_MDIO_PHY_10GBASET_ABILITY |
- IXGBE_MDIO_PHY_1000BASET_ABILITY))
- hw->phy.type =
- ixgbe_phy_cu_unknown;
- else
- hw->phy.type =
- ixgbe_phy_generic;
- }
-
- status = 0;
- break;
- }
- }
- /* clear value if nothing found */
- if (status != 0)
- hw->phy.addr = 0;
- } else {
- status = 0;
- }
-
- return status;
-}
-
-/**
- * ixgbe_validate_phy_addr - Determines whether a phy address is valid
- * @hw: pointer to hardware structure
- * @phy_addr: PHY address to probe
- *
- **/
-bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
-{
- u16 phy_id = 0;
- bool valid = false;
-
- hw->phy.addr = phy_addr;
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
-
- if (phy_id != 0xFFFF && phy_id != 0x0)
- valid = true;
-
- return valid;
-}
-
-/**
- * ixgbe_get_phy_id - Get the phy type
- * @hw: pointer to hardware structure
- *
- **/
-s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
-{
- u32 status;
- u16 phy_id_high = 0;
- u16 phy_id_low = 0;
-
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &phy_id_high);
-
- if (status == 0) {
- hw->phy.id = (u32)(phy_id_high << 16);
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &phy_id_low);
- hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
- hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
- }
- return status;
-}
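-
-/*
- * Packing example for the above (assuming IXGBE_PHY_REVISION_MASK ==
- * 0xFFFFFFF0 as in ixgbe_type.h): phy_id_high = 0x0154 and
- * phy_id_low = 0x43A2 give hw->phy.id = 0x015443A0 and
- * hw->phy.revision = 0x2.
- */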
-
-/**
- * ixgbe_get_phy_type_from_id - Get the phy type
- * @phy_id: PHY ID as read from the PHY ID registers
- *
- **/
-enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
-{
- enum ixgbe_phy_type phy_type;
-
- switch (phy_id) {
- case TN1010_PHY_ID:
- phy_type = ixgbe_phy_tn;
- break;
- case X540_PHY_ID:
- phy_type = ixgbe_phy_aq;
- break;
- case QT2022_PHY_ID:
- phy_type = ixgbe_phy_qt;
- break;
- case ATH_PHY_ID:
- phy_type = ixgbe_phy_nl;
- break;
- default:
- phy_type = ixgbe_phy_unknown;
- break;
- }
-
- hw_dbg(hw, "phy type found is %d\n", phy_type);
- return phy_type;
-}
-
-/**
- * ixgbe_reset_phy_generic - Performs a PHY reset
- * @hw: pointer to hardware structure
- **/
-s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
-{
- u32 i;
- u16 ctrl = 0;
- s32 status = 0;
-
- if (hw->phy.type == ixgbe_phy_unknown)
- status = ixgbe_identify_phy_generic(hw);
-
- if (status != 0 || hw->phy.type == ixgbe_phy_none)
- goto out;
-
- /* Don't reset PHY if it's shut down due to overtemp. */
- if (!hw->phy.reset_if_overtemp &&
- (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
- goto out;
-
- /*
- * Perform soft PHY reset to the PHY_XS.
- * This will cause a soft reset to the PHY
- */
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE,
- IXGBE_MDIO_PHY_XS_RESET);
-
- /*
- * Poll for reset bit to self-clear indicating reset is complete.
-	 * Some PHYs can take up to 3 seconds to complete the reset and need
-	 * about 1.7 usec of delay after the reset completes.
- */
- for (i = 0; i < 30; i++) {
- msleep(100);
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
- if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
- udelay(2);
- break;
- }
- }
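-	/*
-	 * Poll budget: 30 iterations x 100 ms = the 3 s worst case noted
-	 * above; the udelay(2) covers the ~1.7 us settle time after the
-	 * reset bit clears.
-	 */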
-
- if (ctrl & IXGBE_MDIO_PHY_XS_RESET) {
- status = IXGBE_ERR_RESET_FAILED;
- hw_dbg(hw, "PHY reset polling failed to complete.\n");
- }
-
-out:
- return status;
-}
-
-/**
- * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit address of PHY register to read
- * @device_type: 5 bit device type
- * @phy_data: Pointer to read data from PHY register
- **/
-s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 *phy_data)
-{
- u32 command;
- u32 i;
- u32 data;
- s32 status = 0;
- u16 gssr;
-
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- gssr = IXGBE_GSSR_PHY1_SM;
- else
- gssr = IXGBE_GSSR_PHY0_SM;
-
- if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
- status = IXGBE_ERR_SWFW_SYNC;
-
- if (status == 0) {
- /* Setup and write the address cycle command */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
-
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
- /*
- * Check every 10 usec to see if the address cycle completed.
- * The MDI Command bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
-
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY address command did not complete.\n");
- status = IXGBE_ERR_PHY;
- }
-
- if (status == 0) {
- /*
- * Address cycle complete, setup and write the read
- * command
- */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
-
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
- /*
- * Check every 10 usec to see if the address cycle
- * completed. The MDI Command bit will clear when the
- * operation is complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
-
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY read command didn't complete\n");
- status = IXGBE_ERR_PHY;
- } else {
- /*
- * Read operation is complete. Get the data
- * from MSRWD
- */
- data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
- data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
- *phy_data = (u16)(data);
- }
- }
-
- hw->mac.ops.release_swfw_sync(hw, gssr);
- }
-
- return status;
-}
-
-/**
- * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit PHY register to write
- * @device_type: 5 bit device type
- * @phy_data: Data to write to the PHY register
- **/
-s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data)
-{
- u32 command;
- u32 i;
- s32 status = 0;
- u16 gssr;
-
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- gssr = IXGBE_GSSR_PHY1_SM;
- else
- gssr = IXGBE_GSSR_PHY0_SM;
-
- if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
- status = IXGBE_ERR_SWFW_SYNC;
-
- if (status == 0) {
- /* Put the data in the MDI single read and write data register*/
- IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
-
- /* Setup and write the address cycle command */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
-
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
- /*
- * Check every 10 usec to see if the address cycle completed.
- * The MDI Command bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
-
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY address cmd didn't complete\n");
- status = IXGBE_ERR_PHY;
- }
-
- if (status == 0) {
- /*
- * Address cycle complete, setup and write the write
- * command
- */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
-
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
- /*
- * Check every 10 usec to see if the address cycle
- * completed. The MDI Command bit will clear when the
- * operation is complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
-
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY address cmd didn't complete\n");
- status = IXGBE_ERR_PHY;
- }
- }
-
- hw->mac.ops.release_swfw_sync(hw, gssr);
- }
-
- return status;
-}
-
-/**
- * ixgbe_setup_phy_link_generic - Set and restart autoneg
- * @hw: pointer to hardware structure
- *
- * Restarts autonegotiation on the PHY and waits for completion.
- **/
-s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
-{
- s32 status = 0;
- u32 time_out;
- u32 max_time_out = 10;
- u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
- bool autoneg = false;
- ixgbe_link_speed speed;
-
- ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
-
- if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
- /* Set or unset auto-negotiation 10G advertisement */
- hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
- /* Set or unset auto-negotiation 1G advertisement */
- hw->phy.ops.read_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
-
- if (speed & IXGBE_LINK_SPEED_100_FULL) {
- /* Set or unset auto-negotiation 100M advertisement */
- hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
- IXGBE_MII_100BASE_T_ADVERTISE_HALF);
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
- autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
-
- /* Restart PHY autonegotiation and wait for completion */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
-
- autoneg_reg |= IXGBE_MII_RESTART;
-
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
-
- /* Wait for autonegotiation to finish */
- for (time_out = 0; time_out < max_time_out; time_out++) {
- udelay(10);
- /* Restart PHY autonegotiation and wait for completion */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
- if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
- break;
- }
-
- if (time_out == max_time_out) {
- status = IXGBE_ERR_LINK_SETUP;
- hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
- }
-
- return status;
-}
-
-/**
- * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
- * @hw: pointer to hardware structure
- * @speed: new link speed
- * @autoneg: true if autonegotiation is enabled
- * @autoneg_wait_to_complete: unused in this implementation
- **/
-s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete)
-{
-
- /*
- * Clear autoneg_advertised and set new values based on input link
- * speed.
- */
- hw->phy.autoneg_advertised = 0;
-
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
-
- if (speed & IXGBE_LINK_SPEED_100_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
-
- /* Setup link based on the new speed settings */
- hw->phy.ops.setup_link(hw);
-
- return 0;
-}
-
-/**
- * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: boolean auto-negotiation value
- *
- * Determines the link capabilities by reading the PHY speed ability register.
- **/
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
-{
- s32 status = IXGBE_ERR_LINK_SETUP;
- u16 speed_ability;
-
- *speed = 0;
- *autoneg = true;
-
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &speed_ability);
-
- if (status == 0) {
- if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
- *speed |= IXGBE_LINK_SPEED_1GB_FULL;
- if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M)
- *speed |= IXGBE_LINK_SPEED_100_FULL;
- }
-
- return status;
-}
-
-/**
- * ixgbe_check_phy_link_tnx - Determine link and speed status
- * @hw: pointer to hardware structure
- *
- * Reads the VS1 register to determine if link is up and the current speed for
- * the PHY.
- **/
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up)
-{
- s32 status = 0;
- u32 time_out;
- u32 max_time_out = 10;
- u16 phy_link = 0;
- u16 phy_speed = 0;
- u16 phy_data = 0;
-
- /* Initialize speed and link to default case */
- *link_up = false;
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
-
- /*
- * Check current speed and link status of the PHY register.
- * This is a vendor specific register and may have to
- * be changed for other copper PHYs.
- */
- for (time_out = 0; time_out < max_time_out; time_out++) {
- udelay(10);
- status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- &phy_data);
- phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
- phy_speed = phy_data &
- IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
- if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
- *link_up = true;
- if (phy_speed ==
- IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- break;
- }
- }
-
- return status;
-}
-
-/**
- * ixgbe_setup_phy_link_tnx - Set and restart autoneg
- * @hw: pointer to hardware structure
- *
- * Restarts autonegotiation on the PHY and waits for completion.
- **/
-s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
-{
- s32 status = 0;
- u32 time_out;
- u32 max_time_out = 10;
- u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
- bool autoneg = false;
- ixgbe_link_speed speed;
-
- ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
-
- if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
- /* Set or unset auto-negotiation 10G advertisement */
- hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
- /* Set or unset auto-negotiation 1G advertisement */
- hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
-
- hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
-
- if (speed & IXGBE_LINK_SPEED_100_FULL) {
- /* Set or unset auto-negotiation 100M advertisement */
- hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
- autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
-
- /* Restart PHY autonegotiation and wait for completion */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
-
- autoneg_reg |= IXGBE_MII_RESTART;
-
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
-
- /* Wait for autonegotiation to finish */
- for (time_out = 0; time_out < max_time_out; time_out++) {
- udelay(10);
- /* Restart PHY autonegotiation and wait for completion */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
- if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
- break;
- }
-
- if (time_out == max_time_out) {
- status = IXGBE_ERR_LINK_SETUP;
- hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
- }
-
- return status;
-}
-
-/**
- * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
- * @hw: pointer to hardware structure
- * @firmware_version: pointer to the PHY Firmware Version
- **/
-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
- u16 *firmware_version)
-{
- s32 status = 0;
-
- status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- firmware_version);
-
- return status;
-}
-
-/**
- * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
- * @hw: pointer to hardware structure
- * @firmware_version: pointer to the PHY Firmware Version
- **/
-s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
- u16 *firmware_version)
-{
- s32 status = 0;
-
- status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- firmware_version);
-
- return status;
-}
-
-/**
- * ixgbe_reset_phy_nl - Performs a PHY reset
- * @hw: pointer to hardware structure
- **/
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
-{
- u16 phy_offset, control, eword, edata, block_crc;
- bool end_data = false;
- u16 list_offset, data_offset;
- u16 phy_data = 0;
- s32 ret_val = 0;
- u32 i;
-
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
-
- /* reset the PHY and poll for completion */
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE,
- (phy_data | IXGBE_MDIO_PHY_XS_RESET));
-
- for (i = 0; i < 100; i++) {
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
- if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
- break;
- msleep(10);
- }
-
- if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) {
- hw_dbg(hw, "PHY reset did not complete.\n");
- ret_val = IXGBE_ERR_PHY;
- goto out;
- }
-
- /* Get init offsets */
- ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
- &data_offset);
- if (ret_val != 0)
- goto out;
-
- ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
- data_offset++;
- while (!end_data) {
- /*
- * Read control word from PHY init contents offset
- */
- ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
- control = (eword & IXGBE_CONTROL_MASK_NL) >>
- IXGBE_CONTROL_SHIFT_NL;
- edata = eword & IXGBE_DATA_MASK_NL;
- switch (control) {
- case IXGBE_DELAY_NL:
- data_offset++;
- hw_dbg(hw, "DELAY: %d MS\n", edata);
- msleep(edata);
- break;
- case IXGBE_DATA_NL:
- hw_dbg(hw, "DATA:\n");
- data_offset++;
- hw->eeprom.ops.read(hw, data_offset++,
- &phy_offset);
- for (i = 0; i < edata; i++) {
- hw->eeprom.ops.read(hw, data_offset, &eword);
- hw->phy.ops.write_reg(hw, phy_offset,
- IXGBE_TWINAX_DEV, eword);
- hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
- phy_offset);
- data_offset++;
- phy_offset++;
- }
- break;
- case IXGBE_CONTROL_NL:
- data_offset++;
- hw_dbg(hw, "CONTROL:\n");
- if (edata == IXGBE_CONTROL_EOL_NL) {
- hw_dbg(hw, "EOL\n");
- end_data = true;
- } else if (edata == IXGBE_CONTROL_SOL_NL) {
- hw_dbg(hw, "SOL\n");
- } else {
- hw_dbg(hw, "Bad control value\n");
- ret_val = IXGBE_ERR_PHY;
- goto out;
- }
- break;
- default:
- hw_dbg(hw, "Bad control type\n");
- ret_val = IXGBE_ERR_PHY;
- goto out;
- }
- }
-
-out:
- return ret_val;
-}
-
-/**
- * ixgbe_identify_module_generic - Identifies module type
- * @hw: pointer to hardware structure
- *
- * Determines HW type and calls appropriate function.
- **/
-s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
-{
- s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
-
- switch (hw->mac.ops.get_media_type(hw)) {
- case ixgbe_media_type_fiber:
- status = ixgbe_identify_sfp_module_generic(hw);
- break;
-
- case ixgbe_media_type_fiber_qsfp:
- status = ixgbe_identify_qsfp_module_generic(hw);
- break;
-
- default:
- hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- status = IXGBE_ERR_SFP_NOT_PRESENT;
- break;
- }
-
- return status;
-}
-
-/**
- * ixgbe_identify_sfp_module_generic - Identifies SFP modules
- * @hw: pointer to hardware structure
- *
- * Searches for and identifies the SFP module and assigns appropriate PHY type.
- **/
-s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
-{
- s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
- u32 vendor_oui = 0;
- enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
- u8 identifier = 0;
- u8 comp_codes_1g = 0;
- u8 comp_codes_10g = 0;
- u8 oui_bytes[3] = {0, 0, 0};
- u8 cable_tech = 0;
- u8 cable_spec = 0;
- u16 enforce_sfp = 0;
-
- if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
- hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- status = IXGBE_ERR_SFP_NOT_PRESENT;
- goto out;
- }
-
- status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_IDENTIFIER,
- &identifier);
-
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
- goto err_read_i2c_eeprom;
-
- /* LAN ID is needed for sfp_type determination */
- hw->mac.ops.set_lan_id(hw);
-
- if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
- hw->phy.type = ixgbe_phy_sfp_unsupported;
- status = IXGBE_ERR_SFP_NOT_SUPPORTED;
- } else {
- status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_1GBE_COMP_CODES,
- &comp_codes_1g);
-
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
- goto err_read_i2c_eeprom;
-
- status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_10GBE_COMP_CODES,
- &comp_codes_10g);
-
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
- goto err_read_i2c_eeprom;
- status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_CABLE_TECHNOLOGY,
- &cable_tech);
-
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
- goto err_read_i2c_eeprom;
-
- /* ID Module
- * =========
- * 0 SFP_DA_CU
- * 1 SFP_SR
- * 2 SFP_LR
- * 3 SFP_DA_CORE0 - 82599-specific
- * 4 SFP_DA_CORE1 - 82599-specific
- * 5 SFP_SR/LR_CORE0 - 82599-specific
- * 6 SFP_SR/LR_CORE1 - 82599-specific
- * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
- * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
- * 9 SFP_1g_cu_CORE0 - 82599-specific
- * 10 SFP_1g_cu_CORE1 - 82599-specific
- * 11 SFP_1g_sx_CORE0 - 82599-specific
- * 12 SFP_1g_sx_CORE1 - 82599-specific
- */
- if (hw->mac.type == ixgbe_mac_82598EB) {
- if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
- hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
- else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
- hw->phy.sfp_type = ixgbe_sfp_type_sr;
- else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
- hw->phy.sfp_type = ixgbe_sfp_type_lr;
- else
- hw->phy.sfp_type = ixgbe_sfp_type_unknown;
- } else if (hw->mac.type == ixgbe_mac_82599EB) {
- if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
- if (hw->bus.lan_id == 0)
- hw->phy.sfp_type =
- ixgbe_sfp_type_da_cu_core0;
- else
- hw->phy.sfp_type =
- ixgbe_sfp_type_da_cu_core1;
- } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
- hw->phy.ops.read_i2c_eeprom(
- hw, IXGBE_SFF_CABLE_SPEC_COMP,
- &cable_spec);
- if (cable_spec &
- IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
- if (hw->bus.lan_id == 0)
- hw->phy.sfp_type =
- ixgbe_sfp_type_da_act_lmt_core0;
- else
- hw->phy.sfp_type =
- ixgbe_sfp_type_da_act_lmt_core1;
- } else {
- hw->phy.sfp_type =
- ixgbe_sfp_type_unknown;
- }
- } else if (comp_codes_10g &
- (IXGBE_SFF_10GBASESR_CAPABLE |
- IXGBE_SFF_10GBASELR_CAPABLE)) {
- if (hw->bus.lan_id == 0)
- hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core0;
- else
- hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core1;
- } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
- if (hw->bus.lan_id == 0)
- hw->phy.sfp_type =
- ixgbe_sfp_type_1g_cu_core0;
- else
- hw->phy.sfp_type =
- ixgbe_sfp_type_1g_cu_core1;
- } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
- if (hw->bus.lan_id == 0)
- hw->phy.sfp_type =
- ixgbe_sfp_type_1g_sx_core0;
- else
- hw->phy.sfp_type =
- ixgbe_sfp_type_1g_sx_core1;
- } else {
- hw->phy.sfp_type = ixgbe_sfp_type_unknown;
- }
- }
-
- if (hw->phy.sfp_type != stored_sfp_type)
- hw->phy.sfp_setup_needed = true;
-
- /* Determine if the SFP+ PHY is dual speed or not. */
- hw->phy.multispeed_fiber = false;
- if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
- (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
- ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
- (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
- hw->phy.multispeed_fiber = true;
-
- /* Determine PHY vendor */
- if (hw->phy.type != ixgbe_phy_nl) {
- hw->phy.id = identifier;
- status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE0,
- &oui_bytes[0]);
-
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
- goto err_read_i2c_eeprom;
-
- status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE1,
- &oui_bytes[1]);
-
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
- goto err_read_i2c_eeprom;
-
- status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE2,
- &oui_bytes[2]);
-
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
- goto err_read_i2c_eeprom;
-
- vendor_oui =
- ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
- (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
- (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
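-			/*
-			 * Example (assuming the BYTE0/1/2 shifts are 24/16/8
-			 * as in ixgbe_phy.h): oui_bytes {0x00, 0x1b, 0x21}
-			 * packs to vendor_oui 0x001b2100, which is then
-			 * matched against the IXGBE_SFF_VENDOR_OUI_* values
-			 * below.
-			 */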
-
- switch (vendor_oui) {
- case IXGBE_SFF_VENDOR_OUI_TYCO:
- if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
- hw->phy.type =
- ixgbe_phy_sfp_passive_tyco;
- break;
- case IXGBE_SFF_VENDOR_OUI_FTL:
- if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
- hw->phy.type = ixgbe_phy_sfp_ftl_active;
- else
- hw->phy.type = ixgbe_phy_sfp_ftl;
- break;
- case IXGBE_SFF_VENDOR_OUI_AVAGO:
- hw->phy.type = ixgbe_phy_sfp_avago;
- break;
- case IXGBE_SFF_VENDOR_OUI_INTEL:
- hw->phy.type = ixgbe_phy_sfp_intel;
- break;
- default:
- if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
- hw->phy.type =
- ixgbe_phy_sfp_passive_unknown;
- else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
- hw->phy.type =
- ixgbe_phy_sfp_active_unknown;
- else
- hw->phy.type = ixgbe_phy_sfp_unknown;
- break;
- }
- }
-
- /* Allow any DA cable vendor */
- if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
- IXGBE_SFF_DA_ACTIVE_CABLE)) {
- status = 0;
- goto out;
- }
-
- /* Verify supported 1G SFP modules */
- if (comp_codes_10g == 0 &&
- !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
- hw->phy.type = ixgbe_phy_sfp_unsupported;
- status = IXGBE_ERR_SFP_NOT_SUPPORTED;
- goto out;
- }
-
- /* Anything else 82598-based is supported */
- if (hw->mac.type == ixgbe_mac_82598EB) {
- status = 0;
- goto out;
- }
-
- ixgbe_get_device_caps(hw, &enforce_sfp);
- if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
- !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
- (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) ||
- (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) ||
- (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) {
- /* Make sure we're a supported PHY type */
- if (hw->phy.type == ixgbe_phy_sfp_intel) {
- status = 0;
- } else {
-				if (hw->allow_unsupported_sfp) {
- EWARN(hw, "WARNING: Intel (R) Network "
- "Connections are quality tested "
- "using Intel (R) Ethernet Optics."
- " Using untested modules is not "
- "supported and may cause unstable"
- " operation or damage to the "
- "module or the adapter. Intel "
- "Corporation is not responsible "
- "for any harm caused by using "
- "untested modules.\n", status);
- status = 0;
- } else {
- hw_dbg(hw, "SFP+ module not supported\n");
- hw->phy.type =
- ixgbe_phy_sfp_unsupported;
- status = IXGBE_ERR_SFP_NOT_SUPPORTED;
- }
- }
- } else {
- status = 0;
- }
- }
-
-out:
- return status;
-
-err_read_i2c_eeprom:
- hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- if (hw->phy.type != ixgbe_phy_nl) {
- hw->phy.id = 0;
- hw->phy.type = ixgbe_phy_unknown;
- }
- return IXGBE_ERR_SFP_NOT_PRESENT;
-}
-
-/**
- * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
- * @hw: pointer to hardware structure
- *
- * Searches for and identifies the QSFP module and assigns appropriate PHY type
- **/
-s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
-{
- s32 status = 0;
-
- if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
- hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- status = IXGBE_ERR_SFP_NOT_PRESENT;
- }
-
- return status;
-}
-
-
-/**
- * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
- * @hw: pointer to hardware structure
- * @list_offset: offset to the SFP ID list
- * @data_offset: offset to the SFP data block
- *
- * Checks the MAC's EEPROM to see if it supports a given SFP+ module type;
- * if so, it returns the offsets of the PHY init sequence block.
- **/
-s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
- u16 *list_offset,
- u16 *data_offset)
-{
- u16 sfp_id;
- u16 sfp_type = hw->phy.sfp_type;
-
- if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
-
- if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
- return IXGBE_ERR_SFP_NOT_PRESENT;
-
- if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
- (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
-
- /*
-	 * Active limiting cables and 1G PHYs must be initialized as
-	 * SR modules
- */
- if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
- sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core0)
- sfp_type = ixgbe_sfp_type_srlr_core0;
- else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
- sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core1)
- sfp_type = ixgbe_sfp_type_srlr_core1;
-
- /* Read offset to PHY init contents */
- hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
-
- if ((!*list_offset) || (*list_offset == 0xFFFF))
- return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
-
- /* Shift offset to first ID word */
- (*list_offset)++;
-
- /*
- * Find the matching SFP ID in the EEPROM
- * and program the init sequence
- */
- hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
-
- while (sfp_id != IXGBE_PHY_INIT_END_NL) {
- if (sfp_id == sfp_type) {
- (*list_offset)++;
- hw->eeprom.ops.read(hw, *list_offset, data_offset);
- if ((!*data_offset) || (*data_offset == 0xFFFF)) {
- hw_dbg(hw, "SFP+ module not supported\n");
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
- } else {
- break;
- }
- } else {
- (*list_offset) += 2;
- if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
- return IXGBE_ERR_PHY;
- }
- }
-
- if (sfp_id == IXGBE_PHY_INIT_END_NL) {
- hw_dbg(hw, "No matching SFP+ module found\n");
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
- }
-
- return 0;
-}
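
The list walked above has a simple shape: a header word at IXGBE_PHY_INIT_OFFSET_NL points at a sequence of {sfp_id, data_offset} pairs terminated by IXGBE_PHY_INIT_END_NL. A minimal host-side sketch of that walk over a mocked EEPROM image (END_MARKER and find_data_offset are hypothetical stand-ins, not driver symbols):

/* Hypothetical host-side model of the NL init-list walk (not driver code). */
#include <stdint.h>
#include <stdio.h>

#define END_MARKER 0xFFFFu /* assumed stand-in for IXGBE_PHY_INIT_END_NL */

/* Mock EEPROM image: word 0 holds the list offset; the list itself is
 * {sfp_id, data_offset} pairs terminated by END_MARKER. */
static const uint16_t eeprom[] = {
	0,                /* list offset, as read from IXGBE_PHY_INIT_OFFSET_NL */
	0x0004, 0x0120,   /* sfp_id 4 -> init data at 0x0120 */
	0x0007, 0x0150,   /* sfp_id 7 -> init data at 0x0150 */
	END_MARKER,
};

static int find_data_offset(uint16_t sfp_type, uint16_t *data_offset)
{
	uint16_t list_offset = eeprom[0] + 1;   /* shift to first ID word */
	uint16_t sfp_id = eeprom[list_offset];

	while (sfp_id != END_MARKER) {
		if (sfp_id == sfp_type) {
			*data_offset = eeprom[list_offset + 1];
			return 0;
		}
		list_offset += 2;               /* skip this ID/offset pair */
		sfp_id = eeprom[list_offset];
	}
	return -1;                              /* no matching module */
}

int main(void)
{
	uint16_t off;
	if (find_data_offset(0x0007, &off) == 0)
		printf("init sequence at EEPROM offset 0x%04X\n", (unsigned)off);
	return 0;
}
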
-
-/**
- * ixgbe_read_i2c_eeprom_generic - Reads an 8 bit EEPROM byte over I2C
- * @hw: pointer to hardware structure
- * @byte_offset: EEPROM byte offset to read
- * @eeprom_data: value read
- *
- * Performs a byte read from the SFP module's EEPROM over the I2C interface.
- **/
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data)
-{
- return hw->phy.ops.read_i2c_byte(hw, byte_offset,
- IXGBE_I2C_EEPROM_DEV_ADDR,
- eeprom_data);
-}
-
-/**
- * ixgbe_write_i2c_eeprom_generic - Writes an 8 bit EEPROM byte over I2C
- * @hw: pointer to hardware structure
- * @byte_offset: EEPROM byte offset to write
- * @eeprom_data: value to write
- *
- * Performs a byte write to the SFP module's EEPROM over the I2C interface.
- **/
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 eeprom_data)
-{
- return hw->phy.ops.write_i2c_byte(hw, byte_offset,
- IXGBE_I2C_EEPROM_DEV_ADDR,
- eeprom_data);
-}
-
-/**
- * ixgbe_read_i2c_byte_generic - Reads one data byte over I2C
- * @hw: pointer to hardware structure
- * @byte_offset: byte offset to read
- * @dev_addr: I2C device address
- * @data: value read
- *
- * Performs a byte read from the SFP module's EEPROM over the I2C interface
- * at the specified device address.
- **/
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data)
-{
- s32 status = 0;
- u32 max_retry = 10;
- u32 retry = 0;
- u16 swfw_mask = 0;
- bool nack = 1;
- *data = 0;
-
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- swfw_mask = IXGBE_GSSR_PHY1_SM;
- else
- swfw_mask = IXGBE_GSSR_PHY0_SM;
-
- do {
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
- != 0) {
- status = IXGBE_ERR_SWFW_SYNC;
- goto read_byte_out;
- }
-
- ixgbe_i2c_start(hw);
-
- /* Device Address and write indication */
- status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
- if (status != 0)
- goto fail;
-
- status = ixgbe_get_i2c_ack(hw);
- if (status != 0)
- goto fail;
-
- status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
- if (status != 0)
- goto fail;
-
- status = ixgbe_get_i2c_ack(hw);
- if (status != 0)
- goto fail;
-
- ixgbe_i2c_start(hw);
-
- /* Device Address and read indication */
- status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
- if (status != 0)
- goto fail;
-
- status = ixgbe_get_i2c_ack(hw);
- if (status != 0)
- goto fail;
-
- status = ixgbe_clock_in_i2c_byte(hw, data);
- if (status != 0)
- goto fail;
-
- status = ixgbe_clock_out_i2c_bit(hw, nack);
- if (status != 0)
- goto fail;
-
- ixgbe_i2c_stop(hw);
- break;
-
-fail:
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
- msleep(100);
- ixgbe_i2c_bus_clear(hw);
- retry++;
- if (retry < max_retry)
- hw_dbg(hw, "I2C byte read error - Retrying.\n");
- else
- hw_dbg(hw, "I2C byte read error.\n");
-
- } while (retry < max_retry);
-
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-
-read_byte_out:
- return status;
-}
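
The read above is the standard I2C combined format: start, device address with the R/W bit clear, the byte offset, a repeated start, the device address with the R/W bit set (the dev_addr | 0x1 above), one clocked-in data byte, a NACK, then stop. A small illustration of how the two address bytes are formed, assuming the SFP EEPROM address 0xA0 (IXGBE_I2C_EEPROM_DEV_ADDR):

/* Illustrative only: address bytes used by the combined-format read above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t dev_addr = 0xA0;             /* 7-bit address 0x50, shifted left */
	uint8_t addr_write = dev_addr;       /* R/W bit clear: write the offset */
	uint8_t addr_read = dev_addr | 0x1;  /* R/W bit set: read the data byte */

	printf("write phase address byte: 0x%02X\n", (unsigned)addr_write);
	printf("read  phase address byte: 0x%02X\n", (unsigned)addr_read);
	return 0;
}
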
-
-/**
- * ixgbe_write_i2c_byte_generic - Writes one data byte over I2C
- * @hw: pointer to hardware structure
- * @byte_offset: byte offset to write
- * @dev_addr: I2C device address
- * @data: value to write
- *
- * Performs a byte write to the SFP module's EEPROM over the I2C interface
- * at the specified device address.
- **/
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data)
-{
- s32 status = 0;
- u32 max_retry = 1;
- u32 retry = 0;
- u16 swfw_mask = 0;
-
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- swfw_mask = IXGBE_GSSR_PHY1_SM;
- else
- swfw_mask = IXGBE_GSSR_PHY0_SM;
-
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
- status = IXGBE_ERR_SWFW_SYNC;
- goto write_byte_out;
- }
-
- do {
- ixgbe_i2c_start(hw);
-
- status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
- if (status != 0)
- goto fail;
-
- status = ixgbe_get_i2c_ack(hw);
- if (status != 0)
- goto fail;
-
- status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
- if (status != 0)
- goto fail;
-
- status = ixgbe_get_i2c_ack(hw);
- if (status != 0)
- goto fail;
-
- status = ixgbe_clock_out_i2c_byte(hw, data);
- if (status != 0)
- goto fail;
-
- status = ixgbe_get_i2c_ack(hw);
- if (status != 0)
- goto fail;
-
- ixgbe_i2c_stop(hw);
- break;
-
-fail:
- ixgbe_i2c_bus_clear(hw);
- retry++;
- if (retry < max_retry)
- hw_dbg(hw, "I2C byte write error - Retrying.\n");
- else
- hw_dbg(hw, "I2C byte write error.\n");
- } while (retry < max_retry);
-
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-
-write_byte_out:
- return status;
-}
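
Both byte ops share the same goto-fail retry shape: attempt the bit-banged transfer, and on any failed ACK clear the bus and try again, up to max_retry times (10 for reads, 1 for writes, so a failed write is not actually retried). A schematic of that pattern as a plain loop; try_transfer is a hypothetical stand-in for the start/clock-out/ack sequence:

/* Schematic of the retry pattern above (not driver code). */
#include <stdio.h>

static int try_transfer(int attempt)
{
	return attempt < 1 ? -1 : 0;   /* fail once, then succeed */
}

int main(void)
{
	const int max_retry = 10;      /* read path uses 10; write path uses 1 */
	int status = -1;

	for (int retry = 0; retry < max_retry; retry++) {
		status = try_transfer(retry);
		if (status == 0)
			break;         /* matches the break after ixgbe_i2c_stop */
		/* on failure the driver clears the bus and logs, then retries */
		printf("I2C byte error - %s\n",
		       retry + 1 < max_retry ? "Retrying." : "giving up.");
	}
	printf("final status: %d\n", status);
	return 0;
}
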
-
-/**
- * ixgbe_i2c_start - Sets I2C start condition
- * @hw: pointer to hardware structure
- *
- * Sets I2C start condition (High -> Low on SDA while SCL is High)
- **/
-static void ixgbe_i2c_start(struct ixgbe_hw *hw)
-{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-
- /* Start condition must begin with data and clock high */
- ixgbe_set_i2c_data(hw, &i2cctl, 1);
- ixgbe_raise_i2c_clk(hw, &i2cctl);
-
- /* Setup time for start condition (4.7us) */
- udelay(IXGBE_I2C_T_SU_STA);
-
- ixgbe_set_i2c_data(hw, &i2cctl, 0);
-
- /* Hold time for start condition (4us) */
- udelay(IXGBE_I2C_T_HD_STA);
-
- ixgbe_lower_i2c_clk(hw, &i2cctl);
-
- /* Minimum low period of clock is 4.7 us */
- udelay(IXGBE_I2C_T_LOW);
-}
-
-/**
- * ixgbe_i2c_stop - Sets I2C stop condition
- * @hw: pointer to hardware structure
- *
- * Sets I2C stop condition (Low -> High on SDA while SCL is High)
- **/
-static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
-{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-
- /* Stop condition must begin with data low and clock high */
- ixgbe_set_i2c_data(hw, &i2cctl, 0);
- ixgbe_raise_i2c_clk(hw, &i2cctl);
-
- /* Setup time for stop condition (4us) */
- udelay(IXGBE_I2C_T_SU_STO);
-
- ixgbe_set_i2c_data(hw, &i2cctl, 1);
-
-	/* Bus free time between stop and start (4.7us) */
- udelay(IXGBE_I2C_T_BUF);
-}
-
-/**
- * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C
- * @hw: pointer to hardware structure
- * @data: data byte to clock in
- *
- * Clocks in one byte data via I2C data/clock
- **/
-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
-{
- s32 i;
- bool bit = 0;
-
- for (i = 7; i >= 0; i--) {
- ixgbe_clock_in_i2c_bit(hw, &bit);
- *data |= bit << i;
- }
-
- return 0;
-}
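
Note the byte is assembled MSB first, and the loop only ORs bits in, so the caller must zero the destination beforehand (as ixgbe_read_i2c_byte_generic does with *data = 0). A standalone sketch of the same assembly over pre-sampled SDA values:

/* MSB-first bit assembly, mirroring ixgbe_clock_in_i2c_byte above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int bits[8] = { 1, 0, 1, 0, 0, 1, 0, 1 };  /* sampled SDA, MSB first */
	uint8_t data = 0;                          /* caller-zeroed, as in the driver */

	for (int i = 7; i >= 0; i--)
		data |= (uint8_t)(bits[7 - i] << i);

	printf("0x%02X\n", (unsigned)data);        /* prints 0xA5 */
	return 0;
}
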
-
-/**
- * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C
- * @hw: pointer to hardware structure
- * @data: data byte clocked out
- *
- * Clocks out one byte data via I2C data/clock
- **/
-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
-{
- s32 status = 0;
- s32 i;
- u32 i2cctl;
- bool bit = 0;
-
- for (i = 7; i >= 0; i--) {
- bit = (data >> i) & 0x1;
- status = ixgbe_clock_out_i2c_bit(hw, bit);
-
- if (status != 0)
- break;
- }
-
- /* Release SDA line (set high) */
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- i2cctl |= IXGBE_I2C_DATA_OUT;
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
- IXGBE_WRITE_FLUSH(hw);
-
- return status;
-}
-
-/**
- * ixgbe_get_i2c_ack - Polls for I2C ACK
- * @hw: pointer to hardware structure
- *
- * Polls the SDA line for the slave ACK (SDA driven low) while SCL is high
- **/
-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
-{
- s32 status = 0;
- u32 i = 0;
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- u32 timeout = 10;
- bool ack = 1;
-
- ixgbe_raise_i2c_clk(hw, &i2cctl);
-
- /* Minimum high period of clock is 4us */
- udelay(IXGBE_I2C_T_HIGH);
-
- /* Poll for ACK. Note that ACK in I2C spec is
- * transition from 1 to 0 */
- for (i = 0; i < timeout; i++) {
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- ack = ixgbe_get_i2c_data(&i2cctl);
-
- udelay(1);
- if (ack == 0)
- break;
- }
-
- if (ack == 1) {
- hw_dbg(hw, "I2C ack was not received.\n");
- status = IXGBE_ERR_I2C;
- }
-
- ixgbe_lower_i2c_clk(hw, &i2cctl);
-
- /* Minimum low period of clock is 4.7 us */
- udelay(IXGBE_I2C_T_LOW);
-
- return status;
-}
-
-/**
- * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
- * @hw: pointer to hardware structure
- * @data: read data value
- *
- * Clocks in one bit via I2C data/clock
- **/
-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
-{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-
- ixgbe_raise_i2c_clk(hw, &i2cctl);
-
- /* Minimum high period of clock is 4us */
- udelay(IXGBE_I2C_T_HIGH);
-
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- *data = ixgbe_get_i2c_data(&i2cctl);
-
- ixgbe_lower_i2c_clk(hw, &i2cctl);
-
- /* Minimum low period of clock is 4.7 us */
- udelay(IXGBE_I2C_T_LOW);
-
- return 0;
-}
-
-/**
- * ixgbe_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
- * @hw: pointer to hardware structure
- * @data: data value to write
- *
- * Clocks out one bit via I2C data/clock
- **/
-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
-{
- s32 status;
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-
- status = ixgbe_set_i2c_data(hw, &i2cctl, data);
- if (status == 0) {
- ixgbe_raise_i2c_clk(hw, &i2cctl);
-
- /* Minimum high period of clock is 4us */
- udelay(IXGBE_I2C_T_HIGH);
-
- ixgbe_lower_i2c_clk(hw, &i2cctl);
-
- /* Minimum low period of clock is 4.7 us.
- * This also takes care of the data hold time.
- */
- udelay(IXGBE_I2C_T_LOW);
- } else {
- status = IXGBE_ERR_I2C;
- hw_dbg(hw, "I2C data was not set to %X\n", data);
- }
-
- return status;
-}
-
-/**
- * ixgbe_raise_i2c_clk - Raises the I2C SCL clock
- * @hw: pointer to hardware structure
- * @i2cctl: Current value of I2CCTL register
- *
- * Raises the I2C clock line '0'->'1'
- **/
-static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
-{
- u32 i = 0;
- u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
- u32 i2cctl_r = 0;
-
- for (i = 0; i < timeout; i++) {
- *i2cctl |= IXGBE_I2C_CLK_OUT;
-
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
- IXGBE_WRITE_FLUSH(hw);
- /* SCL rise time (1000ns) */
- udelay(IXGBE_I2C_T_RISE);
-
- i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- if (i2cctl_r & IXGBE_I2C_CLK_IN)
- break;
- }
-}
-
-/**
- * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
- * @hw: pointer to hardware structure
- * @i2cctl: Current value of I2CCTL register
- *
- * Lowers the I2C clock line '1'->'0'
- **/
-static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
-{
-
- *i2cctl &= ~IXGBE_I2C_CLK_OUT;
-
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
- IXGBE_WRITE_FLUSH(hw);
-
- /* SCL fall time (300ns) */
- udelay(IXGBE_I2C_T_FALL);
-}
-
-/**
- * ixgbe_set_i2c_data - Sets the I2C data bit
- * @hw: pointer to hardware structure
- * @i2cctl: Current value of I2CCTL register
- * @data: I2C data value (0 or 1) to set
- *
- * Sets the I2C data bit
- **/
-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
-{
- s32 status = 0;
-
- if (data)
- *i2cctl |= IXGBE_I2C_DATA_OUT;
- else
- *i2cctl &= ~IXGBE_I2C_DATA_OUT;
-
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
- IXGBE_WRITE_FLUSH(hw);
-
- /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
- udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
-
- /* Verify data was set correctly */
- *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- if (data != ixgbe_get_i2c_data(i2cctl)) {
- status = IXGBE_ERR_I2C;
- hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
- }
-
- return status;
-}
-
-/**
- * ixgbe_get_i2c_data - Reads the I2C SDA data bit
- * @hw: pointer to hardware structure
- * @i2cctl: Current value of I2CCTL register
- *
- * Returns the I2C data bit value
- **/
-static bool ixgbe_get_i2c_data(u32 *i2cctl)
-{
- bool data;
-
- if (*i2cctl & IXGBE_I2C_DATA_IN)
- data = 1;
- else
- data = 0;
-
- return data;
-}
-
-/**
- * ixgbe_i2c_bus_clear - Clears the I2C bus
- * @hw: pointer to hardware structure
- *
- * Clears the I2C bus by sending nine clock pulses.
- * Used when data line is stuck low.
- **/
-void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
-{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- u32 i;
-
- ixgbe_i2c_start(hw);
-
- ixgbe_set_i2c_data(hw, &i2cctl, 1);
-
- for (i = 0; i < 9; i++) {
- ixgbe_raise_i2c_clk(hw, &i2cctl);
-
- /* Min high period of clock is 4us */
- udelay(IXGBE_I2C_T_HIGH);
-
- ixgbe_lower_i2c_clk(hw, &i2cctl);
-
-		/* Min low period of clock is 4.7us */
- udelay(IXGBE_I2C_T_LOW);
- }
-
- ixgbe_i2c_start(hw);
-
- /* Put the i2c bus back to default state */
- ixgbe_i2c_stop(hw);
-}
-
-/**
- * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
- * @hw: pointer to hardware structure
- *
- * Checks if the LASI temp alarm status was triggered due to overtemp
- **/
-s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
-{
- s32 status = 0;
- u16 phy_data = 0;
-
- if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
- goto out;
-
- /* Check that the LASI temp alarm status was triggered */
- hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);
-
- if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
- goto out;
-
- status = IXGBE_ERR_OVERTEMP;
-out:
- return status;
-}
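
The overtemp check reduces to a device-id gate plus a single bit test on the LASI status word. A host-side model with mocked values; 0x151C and 0x0008 match the IXGBE_DEV_ID_82599_T3_LOM and IXGBE_TN_LASI_STATUS_TEMP_ALARM definitions elsewhere in this tree:

/* Host-side model of the overtemp test above (register values mocked). */
#include <stdint.h>
#include <stdio.h>

#define DEV_ID_82599_T3_LOM 0x151C  /* IXGBE_DEV_ID_82599_T3_LOM */
#define TEMP_ALARM          0x0008  /* IXGBE_TN_LASI_STATUS_TEMP_ALARM */

int main(void)
{
	uint16_t device_id = DEV_ID_82599_T3_LOM;  /* pretend hardware */
	uint16_t phy_data = 0x0008;                /* pretend LASI readout */

	if (device_id == DEV_ID_82599_T3_LOM && (phy_data & TEMP_ALARM))
		printf("overtemp condition reported\n");
	else
		printf("no overtemp\n");
	return 0;
}
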
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.h
deleted file mode 100755
index bbe5a9e3..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IXGBE_PHY_H_
-#define _IXGBE_PHY_H_
-
-#include "ixgbe_type.h"
-#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
-
-/* EEPROM byte offsets */
-#define IXGBE_SFF_IDENTIFIER 0x0
-#define IXGBE_SFF_IDENTIFIER_SFP 0x3
-#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
-#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
-#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
-#define IXGBE_SFF_1GBE_COMP_CODES 0x6
-#define IXGBE_SFF_10GBE_COMP_CODES 0x3
-#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
-#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
-
-/* Bitmasks */
-#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
-#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
-#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
-#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
-#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
-#define IXGBE_SFF_1GBASET_CAPABLE 0x8
-#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
-#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
-#define IXGBE_I2C_EEPROM_READ_MASK 0x100
-#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
-#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
-#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
-#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
-#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
-
-/* Flow control defines */
-#define IXGBE_TAF_SYM_PAUSE 0x400
-#define IXGBE_TAF_ASM_PAUSE 0x800
-
-/* Bit-shift macros */
-#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
-#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
-#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8
-
-/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
-#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
-#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
-#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
-#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100
-
-/* I2C SDA and SCL timing parameters for standard mode */
-#define IXGBE_I2C_T_HD_STA 4
-#define IXGBE_I2C_T_LOW 5
-#define IXGBE_I2C_T_HIGH 4
-#define IXGBE_I2C_T_SU_STA 5
-#define IXGBE_I2C_T_HD_DATA 5
-#define IXGBE_I2C_T_SU_DATA 1
-#define IXGBE_I2C_T_RISE 1
-#define IXGBE_I2C_T_FALL 1
-#define IXGBE_I2C_T_SU_STO 4
-#define IXGBE_I2C_T_BUF 5
-
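
These constants are microsecond delays (they feed udelay() in ixgbe_phy.c above), sized for standard-mode I2C. A back-of-envelope estimate of the resulting SCL rate from the low/high/rise/fall values:

/* Rough SCL rate implied by the delays above (values in us, per udelay()). */
#include <stdio.h>

int main(void)
{
	int t_low = 5, t_high = 4, t_rise = 1, t_fall = 1;  /* IXGBE_I2C_T_* */
	int period = t_low + t_high + t_rise + t_fall;      /* ~11 us */

	printf("period ~%d us -> ~%d kHz SCL\n", period, 1000 / period);
	return 0;
}
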
-#define IXGBE_TN_LASI_STATUS_REG 0x9005
-#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
-
-s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
-bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
-enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
-s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
-s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
-s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 *phy_data);
-s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data);
-s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
-
-/* PHY specific */
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up);
-s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
- u16 *firmware_version);
-s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
- u16 *firmware_version);
-
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
-s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
- u16 *list_offset,
- u16 *data_offset);
-s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data);
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data);
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data);
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 eeprom_data);
-void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
-#endif /* _IXGBE_PHY_H_ */
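
A minimal sketch of how a caller might drive the byte-read hook declared above, with a mocked EEPROM standing in for the module at 0xA0; struct sfp_eeprom_ops and mock_read are hypothetical, while offsets 0x0 and 0x3 are the IXGBE_SFF_IDENTIFIER and IXGBE_SFF_10GBE_COMP_CODES offsets from this header:

/* Usage sketch: a mocked byte-read hook standing in for
 * hw->phy.ops.read_i2c_byte (struct and mock are hypothetical). */
#include <stdint.h>
#include <stdio.h>

typedef int32_t s32;
typedef uint8_t u8;

struct sfp_eeprom_ops {
	s32 (*read_byte)(u8 byte_offset, u8 *data);
};

/* Mock module EEPROM at device address 0xA0. */
static u8 mock_eeprom[256] = {
	[0x0] = 0x3,   /* IXGBE_SFF_IDENTIFIER -> SFP */
	[0x3] = 0x10,  /* IXGBE_SFF_10GBE_COMP_CODES -> 10GBASE-SR capable */
};

static s32 mock_read(u8 byte_offset, u8 *data)
{
	*data = mock_eeprom[byte_offset];
	return 0;
}

int main(void)
{
	struct sfp_eeprom_ops ops = { .read_byte = mock_read };
	u8 id = 0, comp_10g = 0;

	ops.read_byte(0x0, &id);
	ops.read_byte(0x3, &comp_10g);
	printf("identifier=0x%02X 10G comp codes=0x%02X\n",
	       (unsigned)id, (unsigned)comp_10g);
	return 0;
}
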
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_sriov.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_sriov.h
deleted file mode 100755
index b1cc9d04..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_sriov.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-
-#ifndef _IXGBE_SRIOV_H_
-#define _IXGBE_SRIOV_H_
-
-int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
- int entries, u16 *hash_list, u32 vf);
-void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
-int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
-void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
-void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
-void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
-void ixgbe_msg_task(struct ixgbe_adapter *adapter);
-int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
- int vf, unsigned char *mac_addr);
-void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
-void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
-#ifdef IFLA_VF_MAX
-int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
-int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
- u8 qos);
-int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
-#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
-int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
-#endif
-int ixgbe_ndo_get_vf_config(struct net_device *netdev,
- int vf, struct ifla_vf_info *ivi);
-#endif
-void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
-#ifdef CONFIG_PCI_IOV
-int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
-void ixgbe_enable_sriov(struct ixgbe_adapter *adapter);
-#endif
-int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
-#ifdef IFLA_VF_MAX
-void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
-#endif /* IFLA_VF_MAX */
-void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
-
-/*
- * These are defined in ixgbe_type.h on behalf of the VF driver
- * but we need them here unwrapped for the PF driver.
- */
-#define IXGBE_DEV_ID_82599_VF 0x10ED
-#define IXGBE_DEV_ID_X540_VF 0x1515
-
-#endif /* _IXGBE_SRIOV_H_ */
-
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_type.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_type.h
deleted file mode 100755
index 6b21c879..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_type.h
+++ /dev/null
@@ -1,3254 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IXGBE_TYPE_H_
-#define _IXGBE_TYPE_H_
-
-#include "ixgbe_osdep.h"
-
-
-/* Vendor ID */
-#define IXGBE_INTEL_VENDOR_ID 0x8086
-
-/* Device IDs */
-#define IXGBE_DEV_ID_82598 0x10B6
-#define IXGBE_DEV_ID_82598_BX 0x1508
-#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
-#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
-#define IXGBE_DEV_ID_82598AT 0x10C8
-#define IXGBE_DEV_ID_82598AT2 0x150B
-#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
-#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
-#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
-#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
-#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
-#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
-#define IXGBE_DEV_ID_82599_KX4 0x10F7
-#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
-#define IXGBE_DEV_ID_82599_KR 0x1517
-#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
-#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
-#define IXGBE_DEV_ID_82599_CX4 0x10F9
-#define IXGBE_DEV_ID_82599_SFP 0x10FB
-#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
-#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
-#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
-#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
-#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
-#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
-#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558
-#define IXGBE_DEV_ID_82599EN_SFP 0x1557
-#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
-#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
-#define IXGBE_DEV_ID_82599_LS 0x154F
-#define IXGBE_DEV_ID_X540T 0x1528
-
-/* General Registers */
-#define IXGBE_CTRL 0x00000
-#define IXGBE_STATUS 0x00008
-#define IXGBE_CTRL_EXT 0x00018
-#define IXGBE_ESDP 0x00020
-#define IXGBE_EODSDP 0x00028
-#define IXGBE_I2CCTL 0x00028
-#define IXGBE_PHY_GPIO 0x00028
-#define IXGBE_MAC_GPIO 0x00030
-#define IXGBE_PHYINT_STATUS0 0x00100
-#define IXGBE_PHYINT_STATUS1 0x00104
-#define IXGBE_PHYINT_STATUS2 0x00108
-#define IXGBE_LEDCTL 0x00200
-#define IXGBE_FRTIMER 0x00048
-#define IXGBE_TCPTIMER 0x0004C
-#define IXGBE_CORESPARE 0x00600
-#define IXGBE_EXVET 0x05078
-
-/* NVM Registers */
-#define IXGBE_EEC 0x10010
-#define IXGBE_EERD 0x10014
-#define IXGBE_EEWR 0x10018
-#define IXGBE_FLA 0x1001C
-#define IXGBE_EEMNGCTL 0x10110
-#define IXGBE_EEMNGDATA 0x10114
-#define IXGBE_FLMNGCTL 0x10118
-#define IXGBE_FLMNGDATA 0x1011C
-#define IXGBE_FLMNGCNT 0x10120
-#define IXGBE_FLOP 0x1013C
-#define IXGBE_GRC 0x10200
-#define IXGBE_SRAMREL 0x10210
-#define IXGBE_PHYDBG 0x10218
-
-/* General Receive Control */
-#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
-#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
-
-#define IXGBE_VPDDIAG0 0x10204
-#define IXGBE_VPDDIAG1 0x10208
-
-/* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN 0x00000001
-#define IXGBE_I2C_CLK_OUT 0x00000002
-#define IXGBE_I2C_DATA_IN 0x00000004
-#define IXGBE_I2C_DATA_OUT 0x00000008
-#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
-
-#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
-#define IXGBE_EMC_INTERNAL_DATA 0x00
-#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
-#define IXGBE_EMC_DIODE1_DATA 0x01
-#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19
-#define IXGBE_EMC_DIODE2_DATA 0x23
-#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A
-
-#define IXGBE_MAX_SENSORS 3
-
-struct ixgbe_thermal_diode_data {
- u8 location;
- u8 temp;
- u8 caution_thresh;
- u8 max_op_thresh;
-};
-
-struct ixgbe_thermal_sensor_data {
- struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
-};
-
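
The thermal data is a fixed array of three diode records. A standalone sketch iterating it with made-up readings:

/* Sketch iterating the thermal sensor array above (readings invented). */
#include <stdio.h>

typedef unsigned char u8;
#define IXGBE_MAX_SENSORS 3

struct ixgbe_thermal_diode_data {
	u8 location;
	u8 temp;
	u8 caution_thresh;
	u8 max_op_thresh;
};

struct ixgbe_thermal_sensor_data {
	struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
};

int main(void)
{
	struct ixgbe_thermal_sensor_data d = { .sensor = {
		{ .location = 1, .temp = 45, .caution_thresh = 90, .max_op_thresh = 100 },
		{ .location = 2, .temp = 52, .caution_thresh = 90, .max_op_thresh = 100 },
		{ .location = 3, .temp = 40, .caution_thresh = 90, .max_op_thresh = 100 },
	} };

	for (int i = 0; i < IXGBE_MAX_SENSORS; i++)
		printf("sensor %d: %d C (caution at %d C)\n",
		       d.sensor[i].location, d.sensor[i].temp,
		       d.sensor[i].caution_thresh);
	return 0;
}
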
-/* Interrupt Registers */
-#define IXGBE_EICR 0x00800
-#define IXGBE_EICS 0x00808
-#define IXGBE_EIMS 0x00880
-#define IXGBE_EIMC 0x00888
-#define IXGBE_EIAC 0x00810
-#define IXGBE_EIAM 0x00890
-#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
-#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
-#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
-#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
-/* 82599 EITR is only 12 bits, with the lower 3 always zero */
-/*
- * 82598 EITR is 16 bits but set the limits based on the max
- * supported by all ixgbe hardware
- */
-#define IXGBE_MAX_INT_RATE 488281
-#define IXGBE_MIN_INT_RATE 956
-#define IXGBE_MAX_EITR 0x00000FF8
-#define IXGBE_MIN_EITR 8
-#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
- (0x012300 + (((_i) - 24) * 4)))
-#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
-#define IXGBE_EITR_LLI_MOD 0x00008000
-#define IXGBE_EITR_CNT_WDIS 0x80000000
-#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
-#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */
-#define IXGBE_EITRSEL 0x00894
-#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
-#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
-#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
-#define IXGBE_GPIE 0x00898
-
-/* Flow Control Registers */
-#define IXGBE_FCADBUL 0x03210
-#define IXGBE_FCADBUH 0x03214
-#define IXGBE_FCAMACL 0x04328
-#define IXGBE_FCAMACH 0x0432C
-#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_PFCTOP 0x03008
-#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
-#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
-#define IXGBE_FCRTV 0x032A0
-#define IXGBE_FCCFG 0x03D00
-#define IXGBE_TFCS 0x0CE00
-
-/* Receive DMA Registers */
-#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
- (0x0D000 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
- (0x0D004 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
- (0x0D008 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
- (0x0D010 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
- (0x0D018 + (((_i) - 64) * 0x40)))
-#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
- (0x0D028 + (((_i) - 64) * 0x40)))
-#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
- (0x0D02C + (((_i) - 64) * 0x40)))
-#define IXGBE_RSCDBU 0x03028
-#define IXGBE_RDDCC 0x02F20
-#define IXGBE_RXMEMWRAP 0x03190
-#define IXGBE_STARCTRL 0x03024
-/*
- * Split and Replication Receive Control Registers
- * 00-15 : 0x02100 + n*4
- * 16-63 : 0x01014 + n*0x40
- * 64-127: 0x0D014 + (n-64)*0x40
- */
-#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
- (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
- (0x0D014 + (((_i) - 64) * 0x40))))
-/*
- * Rx DCA Control Register:
- * 00-15 : 0x02200 + n*4
- * 16-63 : 0x0100C + n*0x40
- * 64-127: 0x0D00C + (n-64)*0x40
- */
-#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
- (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
- (0x0D00C + (((_i) - 64) * 0x40))))
-#define IXGBE_RDRXCTL 0x02F00
-#define IXGBE_RDRXCTL_RSC_PUSH 0x80
-/* 8 of these 0x03C00 - 0x03C1C */
-#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
-#define IXGBE_RXCTRL 0x03000
-#define IXGBE_DROPEN 0x03D04
-#define IXGBE_RXPBSIZE_SHIFT 10
-
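
The SRRCTL and DCA_RXCTRL maps above are split across three address blocks, so the macros are nested conditionals. A quick host-side check of the SRRCTL arithmetic, one probe per block:

/* Sanity check of the three-way SRRCTL address map above. */
#include <stdio.h>

static unsigned srrctl(unsigned i)
{
	return (i <= 15) ? (0x02100 + i * 4)
	     : (i < 64)  ? (0x01014 + i * 0x40)
	                 : (0x0D014 + (i - 64) * 0x40);
}

int main(void)
{
	/* one probe per range: low block, middle block, high block */
	printf("SRRCTL[0]  = 0x%05X\n", srrctl(0));   /* 0x02100 */
	printf("SRRCTL[16] = 0x%05X\n", srrctl(16));  /* 0x01414 */
	printf("SRRCTL[64] = 0x%05X\n", srrctl(64));  /* 0x0D014 */
	return 0;
}
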
-/* Receive Registers */
-#define IXGBE_RXCSUM 0x05000
-#define IXGBE_RFCTL 0x05008
-#define IXGBE_DRECCCTL 0x02F08
-#define IXGBE_DRECCCTL_DISABLE 0
-#define IXGBE_DRECCCTL2 0x02F8C
-
-/* Multicast Table Array - 128 entries */
-#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
-#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x0A200 + ((_i) * 8)))
-#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x0A204 + ((_i) * 8)))
-#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
-#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
-/* Packet split receive type */
-#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
- (0x0EA00 + ((_i) * 4)))
-/* array of 4096 1-bit vlan filters */
-#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
-/* array of 4096 4-bit vlan vmdq indices */
-#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
-#define IXGBE_FCTRL 0x05080
-#define IXGBE_VLNCTRL 0x05088
-#define IXGBE_MCSTCTRL 0x05090
-#define IXGBE_MRQC 0x05818
-#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
-#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
-#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
-#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
-#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */
-#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */
-#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */
-#define IXGBE_RQTC 0x0EC70
-#define IXGBE_MTQC 0x08120
-#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
-#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
-#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
-#define IXGBE_VT_CTL 0x051B0
-#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */
-/* 64 Mailboxes, 16 DW each */
-#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i)))
-#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */
-#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */
-#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
-#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
-#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
-#define IXGBE_QDE 0x2F04
-#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */
-#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
-#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
-#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
-#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
-#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
-#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
-#define IXGBE_RXFECCERR0 0x051B8
-#define IXGBE_LLITHRESH 0x0EC90
-#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_IMIRVP 0x05AC0
-#define IXGBE_VMD_CTL 0x0581C
-#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
-#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
-
-/* Flow Director registers */
-#define IXGBE_FDIRCTRL 0x0EE00
-#define IXGBE_FDIRHKEY 0x0EE68
-#define IXGBE_FDIRSKEY 0x0EE6C
-#define IXGBE_FDIRDIP4M 0x0EE3C
-#define IXGBE_FDIRSIP4M 0x0EE40
-#define IXGBE_FDIRTCPM 0x0EE44
-#define IXGBE_FDIRUDPM 0x0EE48
-#define IXGBE_FDIRIP6M 0x0EE74
-#define IXGBE_FDIRM 0x0EE70
-
-/* Flow Director Stats registers */
-#define IXGBE_FDIRFREE 0x0EE38
-#define IXGBE_FDIRLEN 0x0EE4C
-#define IXGBE_FDIRUSTAT 0x0EE50
-#define IXGBE_FDIRFSTAT 0x0EE54
-#define IXGBE_FDIRMATCH 0x0EE58
-#define IXGBE_FDIRMISS 0x0EE5C
-
-/* Flow Director Programming registers */
-#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
-#define IXGBE_FDIRIPSA 0x0EE18
-#define IXGBE_FDIRIPDA 0x0EE1C
-#define IXGBE_FDIRPORT 0x0EE20
-#define IXGBE_FDIRVLAN 0x0EE24
-#define IXGBE_FDIRHASH 0x0EE28
-#define IXGBE_FDIRCMD 0x0EE2C
-
-/* Transmit DMA registers */
-#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of them (0-31)*/
-#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
-#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
-#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
-#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40))
-#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
-#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
-#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
-#define IXGBE_DTXCTL 0x07E00
-
-#define IXGBE_DMATXCTL 0x04A80
-#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
-#define IXGBE_PFDTXGSWC 0x08220
-#define IXGBE_DTXMXSZRQ 0x08100
-#define IXGBE_DTXTCPFLGL 0x04A88
-#define IXGBE_DTXTCPFLGH 0x04A8C
-#define IXGBE_LBDRPEN 0x0CA00
-#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
-
-#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
-#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
-#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
-#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
-
-#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
-
-/* Anti-spoofing defines */
-#define IXGBE_SPOOF_MACAS_MASK 0xFF
-#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
-#define IXGBE_SPOOF_VLANAS_SHIFT 8
-#define IXGBE_PFVFSPOOF_REG_COUNT 8
-/* 16 of these (0-15) */
-#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4))
-/* Tx DCA Control register : 128 of these (0-127) */
-#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
-#define IXGBE_TIPG 0x0CB00
-#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_MNGTXMAP 0x0CD10
-#define IXGBE_TIPG_FIBER_DEFAULT 3
-#define IXGBE_TXPBSIZE_SHIFT 10
-
-/* Wake up registers */
-#define IXGBE_WUC 0x05800
-#define IXGBE_WUFC 0x05808
-#define IXGBE_WUS 0x05810
-#define IXGBE_IPAV 0x05838
-#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
-#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
-
-#define IXGBE_WUPL 0x05900
-#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
-#define IXGBE_FHFT(_n)	(0x09000 + ((_n) * 0x100)) /* Flex host filter table */
-/* Ext Flexible Host Filter Table */
-#define IXGBE_FHFT_EXT(_n)	(0x09800 + ((_n) * 0x100))
-
-#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
-#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
-
-/* Each Flexible Filter is at most 128 (0x80) bytes in length */
-#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128
-#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
-#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */
-
-/* Definitions for power management and wakeup registers */
-/* Wake Up Control */
-#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
-#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
-#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
-
-/* Wake Up Filter Control */
-#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
-#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
-#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
-#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
-#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
-#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
-#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
-#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
-#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
-
-#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
-#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
-#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
-#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
-#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
-#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
-#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
-#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
-/* Mask for Ext. flex filters */
-#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000
-#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */
-#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
-
-/* Wake Up Status */
-#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC
-#define IXGBE_WUS_MAG IXGBE_WUFC_MAG
-#define IXGBE_WUS_EX IXGBE_WUFC_EX
-#define IXGBE_WUS_MC IXGBE_WUFC_MC
-#define IXGBE_WUS_BC IXGBE_WUFC_BC
-#define IXGBE_WUS_ARP IXGBE_WUFC_ARP
-#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4
-#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6
-#define IXGBE_WUS_MNG IXGBE_WUFC_MNG
-#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0
-#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1
-#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2
-#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3
-#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4
-#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5
-#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS
-
-/* Wake Up Packet Length */
-#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
-
-/* DCB registers */
-#define IXGBE_DCB_MAX_TRAFFIC_CLASS 8
-#define IXGBE_RMCS 0x03D00
-#define IXGBE_DPMCS 0x07F40
-#define IXGBE_PDPMCS 0x0CD00
-#define IXGBE_RUPPBMR 0x050A0
-#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
-#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
-#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
-
-
-/* Security Control Registers */
-#define IXGBE_SECTXCTRL 0x08800
-#define IXGBE_SECTXSTAT 0x08804
-#define IXGBE_SECTXBUFFAF 0x08808
-#define IXGBE_SECTXMINIFG 0x08810
-#define IXGBE_SECRXCTRL 0x08D00
-#define IXGBE_SECRXSTAT 0x08D04
-
-/* Security Bit Fields and Masks */
-#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001
-#define IXGBE_SECTXCTRL_TX_DIS 0x00000002
-#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004
-
-#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001
-#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002
-
-#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001
-#define IXGBE_SECRXCTRL_RX_DIS 0x00000002
-
-#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001
-#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002
-
-/* LinkSec (MacSec) Registers */
-#define IXGBE_LSECTXCAP 0x08A00
-#define IXGBE_LSECRXCAP 0x08F00
-#define IXGBE_LSECTXCTRL 0x08A04
-#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */
-#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */
-#define IXGBE_LSECTXSA 0x08A10
-#define IXGBE_LSECTXPN0 0x08A14
-#define IXGBE_LSECTXPN1 0x08A18
-#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
-#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
-#define IXGBE_LSECRXCTRL 0x08F04
-#define IXGBE_LSECRXSCL 0x08F08
-#define IXGBE_LSECRXSCH 0x08F0C
-#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
-#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
-#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
-#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */
-#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */
-#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */
-#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */
-#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */
-#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */
-#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */
-#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */
-#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */
-#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */
-#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */
-#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */
-#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */
-#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */
-#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */
-#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
-#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
-#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */
-#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */
-
-/* LinkSec (MacSec) Bit Fields and Masks */
-#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000
-#define IXGBE_LSECTXCAP_SUM_SHIFT 16
-#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000
-#define IXGBE_LSECRXCAP_SUM_SHIFT 16
-
-#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003
-#define IXGBE_LSECTXCTRL_DISABLE 0x0
-#define IXGBE_LSECTXCTRL_AUTH 0x1
-#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2
-#define IXGBE_LSECTXCTRL_AISCI 0x00000020
-#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
-#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8
-
-#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C
-#define IXGBE_LSECRXCTRL_EN_SHIFT 2
-#define IXGBE_LSECRXCTRL_DISABLE 0x0
-#define IXGBE_LSECRXCTRL_CHECK 0x1
-#define IXGBE_LSECRXCTRL_STRICT 0x2
-#define IXGBE_LSECRXCTRL_DROP 0x3
-#define IXGBE_LSECRXCTRL_PLSH 0x00000040
-#define IXGBE_LSECRXCTRL_RP 0x00000080
-#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33
-
-/* IpSec Registers */
-#define IXGBE_IPSTXIDX 0x08900
-#define IXGBE_IPSTXSALT 0x08904
-#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXIDX 0x08E00
-#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXSPI 0x08E14
-#define IXGBE_IPSRXIPIDX 0x08E18
-#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXSALT 0x08E2C
-#define IXGBE_IPSRXMOD 0x08E30
-
-#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
-
-/* DCB registers */
-#define IXGBE_RTRPCS 0x02430
-#define IXGBE_RTTDCS 0x04900
-#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
-#define IXGBE_RTTPCS 0x0CD00
-#define IXGBE_RTRUP2TC 0x03020
-#define IXGBE_RTTUP2TC 0x0C800
-#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDQSEL 0x04904
-#define IXGBE_RTTDT1C 0x04908
-#define IXGBE_RTTDT1S 0x0490C
-#define IXGBE_RTTDTECC 0x04990
-#define IXGBE_RTTDTECC_NO_BCN 0x00000100
-
-#define IXGBE_RTTBCNRC 0x04984
-#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
-#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
-#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
-#define IXGBE_RTTBCNRC_RF_INT_MASK \
- (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
-#define IXGBE_RTTBCNRM 0x04980
-
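
The RTTBCNRC masks above place a 14-bit field below bit 14 (RF_DEC) and another above it (RF_INT), which suggests a 14.14 fixed-point rate factor; that reading is an assumption beyond what the defines state. Composing such a value is then a shift-and-OR:

/* Composing a rate-factor field from the masks above; interpreting the two
 * fields as integer/fraction (14.14 fixed point) is an assumption. */
#include <stdio.h>

#define RF_DEC_MASK  0x00003FFFu  /* IXGBE_RTTBCNRC_RF_DEC_MASK */
#define RF_INT_SHIFT 14           /* IXGBE_RTTBCNRC_RF_INT_SHIFT */

int main(void)
{
	unsigned integer = 5;
	unsigned frac = 0x2000;   /* 0x2000/0x4000 = 0.5 */
	unsigned rf = (integer << RF_INT_SHIFT) | (frac & RF_DEC_MASK);

	printf("rate factor field: 0x%08X (5.5 in 14.14)\n", rf);
	return 0;
}
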
-/* FCoE DMA Context Registers */
-#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
-#define IXGBE_FCPTRH		0x02414 /* FC User Desc. PTR High */
-#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
-#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
-#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0*/
-#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4))
-#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
-#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
-#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
-#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
-#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
-#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
-#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
-#define IXGBE_FCBUFF_OFFSET_SHIFT 16
-#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
-#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
-#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
-#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
-#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
-/* FCoE SOF/EOF */
-#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */
-#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */
-#define IXGBE_REOFF 0x05158 /* Rx FC EOF */
-#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */
-/* FCoE Filter Context Registers */
-#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
-#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
-#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
-#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
-#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
-#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
-#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
-#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
-#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
-#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
-/* FCoE Receive Control */
-#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
-#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
-#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
-#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
-#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
-#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
-#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */
-#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */
-#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */
-#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
-#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
-/* FCoE Redirection */
-#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */
-#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */
-#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
-#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */
-#define IXGBE_FCRETASEL_ENA 0x2 /* FCoE FCRETASEL bit */
-#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */
-#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
-
-/* Stats registers */
-#define IXGBE_CRCERRS 0x04000
-#define IXGBE_ILLERRC 0x04004
-#define IXGBE_ERRBC 0x04008
-#define IXGBE_MSPDC 0x04010
-#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
-#define IXGBE_MLFC 0x04034
-#define IXGBE_MRFC 0x04038
-#define IXGBE_RLEC 0x04040
-#define IXGBE_LXONTXC 0x03F60
-#define IXGBE_LXONRXC 0x0CF60
-#define IXGBE_LXOFFTXC 0x03F68
-#define IXGBE_LXOFFRXC 0x0CF68
-#define IXGBE_LXONRXCNT 0x041A4
-#define IXGBE_LXOFFRXCNT 0x041A8
-#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
-#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
-#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
-#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
-#define IXGBE_PRC64 0x0405C
-#define IXGBE_PRC127 0x04060
-#define IXGBE_PRC255 0x04064
-#define IXGBE_PRC511 0x04068
-#define IXGBE_PRC1023 0x0406C
-#define IXGBE_PRC1522 0x04070
-#define IXGBE_GPRC 0x04074
-#define IXGBE_BPRC 0x04078
-#define IXGBE_MPRC 0x0407C
-#define IXGBE_GPTC 0x04080
-#define IXGBE_GORCL 0x04088
-#define IXGBE_GORCH 0x0408C
-#define IXGBE_GOTCL 0x04090
-#define IXGBE_GOTCH 0x04094
-#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
-#define IXGBE_RUC 0x040A4
-#define IXGBE_RFC 0x040A8
-#define IXGBE_ROC 0x040AC
-#define IXGBE_RJC 0x040B0
-#define IXGBE_MNGPRC 0x040B4
-#define IXGBE_MNGPDC 0x040B8
-#define IXGBE_MNGPTC 0x0CF90
-#define IXGBE_TORL 0x040C0
-#define IXGBE_TORH 0x040C4
-#define IXGBE_TPR 0x040D0
-#define IXGBE_TPT 0x040D4
-#define IXGBE_PTC64 0x040D8
-#define IXGBE_PTC127 0x040DC
-#define IXGBE_PTC255 0x040E0
-#define IXGBE_PTC511 0x040E4
-#define IXGBE_PTC1023 0x040E8
-#define IXGBE_PTC1522 0x040EC
-#define IXGBE_MPTC 0x040F0
-#define IXGBE_BPTC 0x040F4
-#define IXGBE_XEC 0x04120
-#define IXGBE_SSVPC 0x08780
-
-#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
-#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
- (0x08600 + ((_i) * 4)))
-#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
-
-#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
-#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
-#define IXGBE_FCCRC 0x05118 /* Num of Good Eth CRC w/ Bad FC CRC */
-#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */
-#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */
-#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */
-#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
-#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
-#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
-#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */
-#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */
-#define IXGBE_O2BGPTC 0x041C4
-#define IXGBE_O2BSPC 0x087B0
-#define IXGBE_B2OSPC 0x041C0
-#define IXGBE_B2OGPRC 0x02F90
-#define IXGBE_BUPRC 0x04180
-#define IXGBE_BMPRC 0x04184
-#define IXGBE_BBPRC 0x04188
-#define IXGBE_BUPTC 0x0418C
-#define IXGBE_BMPTC 0x04190
-#define IXGBE_BBPTC 0x04194
-#define IXGBE_BCRCERRS 0x04198
-#define IXGBE_BXONRXC 0x0419C
-#define IXGBE_BXOFFRXC 0x041E0
-#define IXGBE_BXONTXC 0x041E4
-#define IXGBE_BXOFFTXC 0x041E8
-#define IXGBE_PCRC8ECL 0x0E810
-#define IXGBE_PCRC8ECH 0x0E811
-#define IXGBE_PCRC8ECH_MASK 0x1F
-#define IXGBE_LDPCECL 0x0E820
-#define IXGBE_LDPCECH 0x0E821
-
-/* Management */
-#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_MANC 0x05820
-#define IXGBE_MFVAL 0x05824
-#define IXGBE_MANC2H 0x05860
-#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_MIPAF 0x058B0
-#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
-#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
-#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */
-#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_LSWFW 0x15014
-#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */
-#define IXGBE_BMCIPVAL 0x05060
-#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001
-#define IXGBE_BMCIP_IPADDR_VALID 0x00000002
-
-/* Management Bit Fields and Masks */
-#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */
-#define IXGBE_MANC_EN_BMC2OS_SHIFT 28
-
-/* Firmware Semaphore Register */
-#define IXGBE_FWSM_MODE_MASK 0xE
-
-/* ARC Subsystem registers */
-#define IXGBE_HICR 0x15F00
-#define IXGBE_FWSTS 0x15F0C
-#define IXGBE_HSMC0R 0x15F04
-#define IXGBE_HSMC1R 0x15F08
-#define IXGBE_SWSR 0x15F10
-#define IXGBE_HFDR 0x15FE8
-#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */
-
-#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */
-/* Driver sets this bit when done to put command in RAM */
-#define IXGBE_HICR_C 0x02
-#define IXGBE_HICR_SV 0x04 /* Status Validity */
-#define IXGBE_HICR_FW_RESET_ENABLE 0x40
-#define IXGBE_HICR_FW_RESET 0x80
-
-/* PCI-E registers */
-#define IXGBE_GCR 0x11000
-#define IXGBE_GTV 0x11004
-#define IXGBE_FUNCTAG 0x11008
-#define IXGBE_GLT 0x1100C
-#define IXGBE_PCIEPIPEADR 0x11004
-#define IXGBE_PCIEPIPEDAT 0x11008
-#define IXGBE_GSCL_1 0x11010
-#define IXGBE_GSCL_2 0x11014
-#define IXGBE_GSCL_3 0x11018
-#define IXGBE_GSCL_4 0x1101C
-#define IXGBE_GSCN_0 0x11020
-#define IXGBE_GSCN_1 0x11024
-#define IXGBE_GSCN_2 0x11028
-#define IXGBE_GSCN_3 0x1102C
-#define IXGBE_FACTPS 0x10150
-#define IXGBE_PCIEANACTL 0x11040
-#define IXGBE_SWSM 0x10140
-#define IXGBE_FWSM 0x10148
-#define IXGBE_GSSR 0x10160
-#define IXGBE_MREVID 0x11064
-#define IXGBE_DCA_ID 0x11070
-#define IXGBE_DCA_CTRL 0x11074
-#define IXGBE_SWFW_SYNC IXGBE_GSSR
-
-/* PCI-E registers 82599-Specific */
-#define IXGBE_GCR_EXT 0x11050
-#define IXGBE_GSCL_5_82599 0x11030
-#define IXGBE_GSCL_6_82599 0x11034
-#define IXGBE_GSCL_7_82599 0x11038
-#define IXGBE_GSCL_8_82599 0x1103C
-#define IXGBE_PHYADR_82599 0x11040
-#define IXGBE_PHYDAT_82599 0x11044
-#define IXGBE_PHYCTL_82599 0x11048
-#define IXGBE_PBACLR_82599 0x11068
-#define IXGBE_CIAA_82599 0x11088
-#define IXGBE_CIAD_82599 0x1108C
-#define IXGBE_PICAUSE 0x110B0
-#define IXGBE_PIENA 0x110B8
-#define IXGBE_CDQ_MBR_82599 0x110B4
-#define IXGBE_PCIESPARE 0x110BC
-#define IXGBE_MISC_REG_82599 0x110F0
-#define IXGBE_ECC_CTRL_0_82599 0x11100
-#define IXGBE_ECC_CTRL_1_82599 0x11104
-#define IXGBE_ECC_STATUS_82599 0x110E0
-#define IXGBE_BAR_CTRL_82599 0x110F4
-
-/* PCI Express Control */
-#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000
-#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000
-#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
-#define IXGBE_GCR_CAP_VER2 0x00040000
-
-#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
-#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000
-#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
-#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
-#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
-#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
- IXGBE_GCR_EXT_VT_MODE_64)
-/* Time Sync Registers */
-#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
-#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
-#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */
-#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */
-#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */
-#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */
-#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */
-#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */
-#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */
-#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */
-#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */
-#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */
-#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */
-#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */
-#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */
-#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */
-#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */
-#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */
-#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */
-#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
-#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
-#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
-#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
-#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
-#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
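
/*
 * Illustrative sketch (not part of the original header): SYSTIM is a
 * 64-bit counter split across the two 32-bit registers above, so a
 * coherent read takes SYSTIML first and SYSTIMH second.  rd32() and
 * struct ixgbe_hw are hypothetical stand-ins for the driver's MMIO
 * read helper and device handle.
 */
static inline u64 ixgbe_systim_read_sketch(struct ixgbe_hw *hw)
{
	u64 lo = rd32(hw, IXGBE_SYSTIML);	/* read the low word first */
	u64 hi = rd32(hw, IXGBE_SYSTIMH);	/* then the high word */

	return (hi << 32) | lo;
}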
-
-/* Diagnostic Registers */
-#define IXGBE_RDSTATCTL 0x02C20
-#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
-#define IXGBE_RDHMPN 0x02F08
-#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
-#define IXGBE_RDPROBE 0x02F20
-#define IXGBE_RDMAM 0x02F30
-#define IXGBE_RDMAD 0x02F34
-#define IXGBE_TDSTATCTL 0x07C20
-#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
-#define IXGBE_TDHMPN 0x07F08
-#define IXGBE_TDHMPN2 0x082FC
-#define IXGBE_TXDESCIC 0x082CC
-#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
-#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
-#define IXGBE_TDPROBE 0x07F20
-#define IXGBE_TXBUFCTRL 0x0C600
-#define IXGBE_TXBUFDATA0 0x0C610
-#define IXGBE_TXBUFDATA1 0x0C614
-#define IXGBE_TXBUFDATA2 0x0C618
-#define IXGBE_TXBUFDATA3 0x0C61C
-#define IXGBE_RXBUFCTRL 0x03600
-#define IXGBE_RXBUFDATA0 0x03610
-#define IXGBE_RXBUFDATA1 0x03614
-#define IXGBE_RXBUFDATA2 0x03618
-#define IXGBE_RXBUFDATA3 0x0361C
-#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_RFVAL 0x050A4
-#define IXGBE_MDFTC1 0x042B8
-#define IXGBE_MDFTC2 0x042C0
-#define IXGBE_MDFTFIFO1 0x042C4
-#define IXGBE_MDFTFIFO2 0x042C8
-#define IXGBE_MDFTS 0x042CC
-#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/
-#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/
-#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/
-#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/
-#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/
-#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/
-#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
-#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
-#define IXGBE_PCIEECCCTL 0x1106C
-#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
-#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
-#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
-#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-316C*/
-#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
-#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
-#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
-#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C16C*/
-#define IXGBE_PCIEECCCTL0 0x11100
-#define IXGBE_PCIEECCCTL1 0x11104
-#define IXGBE_RXDBUECC 0x03F70
-#define IXGBE_TXDBUECC 0x0CF70
-#define IXGBE_RXDBUEST 0x03F74
-#define IXGBE_TXDBUEST 0x0CF74
-#define IXGBE_PBTXECC 0x0C300
-#define IXGBE_PBRXECC 0x03300
-#define IXGBE_GHECCR 0x110B0
-
-/* MAC Registers */
-#define IXGBE_PCS1GCFIG 0x04200
-#define IXGBE_PCS1GLCTL 0x04208
-#define IXGBE_PCS1GLSTA 0x0420C
-#define IXGBE_PCS1GDBG0 0x04210
-#define IXGBE_PCS1GDBG1 0x04214
-#define IXGBE_PCS1GANA 0x04218
-#define IXGBE_PCS1GANLP 0x0421C
-#define IXGBE_PCS1GANNP 0x04220
-#define IXGBE_PCS1GANLPNP 0x04224
-#define IXGBE_HLREG0 0x04240
-#define IXGBE_HLREG1 0x04244
-#define IXGBE_PAP 0x04248
-#define IXGBE_MACA 0x0424C
-#define IXGBE_APAE 0x04250
-#define IXGBE_ARD 0x04254
-#define IXGBE_AIS 0x04258
-#define IXGBE_MSCA 0x0425C
-#define IXGBE_MSRWD 0x04260
-#define IXGBE_MLADD 0x04264
-#define IXGBE_MHADD 0x04268
-#define IXGBE_MAXFRS 0x04268
-#define IXGBE_TREG 0x0426C
-#define IXGBE_PCSS1 0x04288
-#define IXGBE_PCSS2 0x0428C
-#define IXGBE_XPCSS 0x04290
-#define IXGBE_MFLCN 0x04294
-#define IXGBE_SERDESC 0x04298
-#define IXGBE_MACS 0x0429C
-#define IXGBE_AUTOC 0x042A0
-#define IXGBE_LINKS 0x042A4
-#define IXGBE_LINKS2 0x04324
-#define IXGBE_AUTOC2 0x042A8
-#define IXGBE_AUTOC3 0x042AC
-#define IXGBE_ANLP1 0x042B0
-#define IXGBE_ANLP2 0x042B4
-#define IXGBE_MACC 0x04330
-#define IXGBE_ATLASCTL 0x04800
-#define IXGBE_MMNGC 0x042D0
-#define IXGBE_ANLPNP1 0x042D4
-#define IXGBE_ANLPNP2 0x042D8
-#define IXGBE_KRPCSFC 0x042E0
-#define IXGBE_KRPCSS 0x042E4
-#define IXGBE_FECS1 0x042E8
-#define IXGBE_FECS2 0x042EC
-#define IXGBE_SMADARCTL 0x14F10
-#define IXGBE_MPVC 0x04318
-#define IXGBE_SGMIIC 0x04314
-
-/* Statistics Registers */
-#define IXGBE_RXNFGPC 0x041B0
-#define IXGBE_RXNFGBCL 0x041B4
-#define IXGBE_RXNFGBCH 0x041B8
-#define IXGBE_RXDGPC 0x02F50
-#define IXGBE_RXDGBCL 0x02F54
-#define IXGBE_RXDGBCH 0x02F58
-#define IXGBE_RXDDGPC 0x02F5C
-#define IXGBE_RXDDGBCL 0x02F60
-#define IXGBE_RXDDGBCH 0x02F64
-#define IXGBE_RXLPBKGPC 0x02F68
-#define IXGBE_RXLPBKGBCL 0x02F6C
-#define IXGBE_RXLPBKGBCH 0x02F70
-#define IXGBE_RXDLPBKGPC 0x02F74
-#define IXGBE_RXDLPBKGBCL 0x02F78
-#define IXGBE_RXDLPBKGBCH 0x02F7C
-#define IXGBE_TXDGPC 0x087A0
-#define IXGBE_TXDGBCL 0x087A4
-#define IXGBE_TXDGBCH 0x087A8
-
-#define IXGBE_RXDSTATCTRL 0x02F40
-
-/* Copper Pond 2 link timeout */
-#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
-
-/* Omer CORECTL */
-#define IXGBE_CORECTL 0x014F00
-/* BARCTRL */
-#define IXGBE_BARCTRL 0x110F4
-#define IXGBE_BARCTRL_FLSIZE 0x0700
-#define IXGBE_BARCTRL_FLSIZE_SHIFT 8
-#define IXGBE_BARCTRL_CSRSIZE 0x2000
-
-/* RSCCTL Bit Masks */
-#define IXGBE_RSCCTL_RSCEN 0x01
-#define IXGBE_RSCCTL_MAXDESC_1 0x00
-#define IXGBE_RSCCTL_MAXDESC_4 0x04
-#define IXGBE_RSCCTL_MAXDESC_8 0x08
-#define IXGBE_RSCCTL_MAXDESC_16 0x0C
-
-/* RSCDBU Bit Masks */
-#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
-#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
-
-/* RDRXCTL Bit Masks */
-#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min THLD Size */
-#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */
-#define IXGBE_RDRXCTL_MVMEN 0x00000020
-#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
-#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
-#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
-#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
-#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must be 1 when RSC enabled */
-#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must be 1 when RSC enabled */
-
-/* RQTC Bit Masks and Shifts */
-#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
-#define IXGBE_RQTC_TC0_MASK (0x7 << 0)
-#define IXGBE_RQTC_TC1_MASK (0x7 << 4)
-#define IXGBE_RQTC_TC2_MASK (0x7 << 8)
-#define IXGBE_RQTC_TC3_MASK (0x7 << 12)
-#define IXGBE_RQTC_TC4_MASK (0x7 << 16)
-#define IXGBE_RQTC_TC5_MASK (0x7 << 20)
-#define IXGBE_RQTC_TC6_MASK (0x7 << 24)
-#define IXGBE_RQTC_TC7_MASK (0x7 << 28)
-
-/* PSRTYPE.RQPL Bit masks and shift */
-#define IXGBE_PSRTYPE_RQPL_MASK 0x7
-#define IXGBE_PSRTYPE_RQPL_SHIFT 29
-
-/* CTRL Bit Masks */
-#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
-#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
-#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
-#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
-
-/* FACTPS */
-#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
-
-/* MHADD Bit Masks */
-#define IXGBE_MHADD_MFS_MASK 0xFFFF0000
-#define IXGBE_MHADD_MFS_SHIFT 16
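
/*
 * Illustrative sketch: the max frame size lives in the upper half of
 * MHADD, so changing it is a read-modify-write with the mask and shift
 * above.  rd32()/wr32() and struct ixgbe_hw are hypothetical stand-ins
 * for the driver's MMIO helpers and device handle.
 */
static inline void ixgbe_set_mfs_sketch(struct ixgbe_hw *hw, u32 max_frame)
{
	u32 mhadd = rd32(hw, IXGBE_MHADD);

	mhadd &= ~IXGBE_MHADD_MFS_MASK;			/* clear the old size */
	mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;	/* insert the new one */
	wr32(hw, IXGBE_MHADD, mhadd);
}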
-
-/* Extended Device Control */
-#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */
-#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */
-#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
-#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
-
-/* Direct Cache Access (DCA) definitions */
-#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
-#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
-
-#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
-#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
-
-#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
-#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
-#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
-
-#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
-#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
-#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
-#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
-
-/* MSCA Bit Masks */
-#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Addr (new prot) */
-#define IXGBE_MSCA_NP_ADDR_SHIFT 0
-#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Dev Type (new prot) */
-#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old prot) */
-#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */
-#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/
-#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */
-#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
-#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
-#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (wr) */
-#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (rd) */
-#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (rd auto inc)*/
-#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
-#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
-#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new prot) */
-#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old prot) */
-#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */
-#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress ena */
-
-/* MSRWD bit masks */
-#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
-#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
-#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
-#define IXGBE_MSRWD_READ_DATA_SHIFT 16
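
/*
 * Illustrative sketch of a "new protocol" (clause 45) MDI register read
 * built from the MSCA/MSRWD fields above: issue an address cycle, then a
 * read cycle, and take the result from the upper half of MSRWD.  Polling
 * for IXGBE_MSCA_MDI_COMMAND to self-clear is elided; rd32()/wr32() and
 * struct ixgbe_hw are hypothetical stand-ins for the driver's MMIO
 * helpers and device handle.
 */
static inline u16 ixgbe_mdi_read_sketch(struct ixgbe_hw *hw, u32 phy_addr,
					u32 dev_type, u32 reg_addr)
{
	u32 cmd = (reg_addr & IXGBE_MSCA_NP_ADDR_MASK) |
		  (dev_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		  (phy_addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		  IXGBE_MSCA_NEW_PROTOCOL | IXGBE_MSCA_MDI_COMMAND;

	wr32(hw, IXGBE_MSCA, cmd | IXGBE_MSCA_ADDR_CYCLE);	/* address cycle */
	/* ... wait for IXGBE_MSCA_MDI_COMMAND to clear ... */
	wr32(hw, IXGBE_MSCA, cmd | IXGBE_MSCA_READ);		/* read cycle */
	/* ... wait again, then fetch the read data ... */
	return (u16)(rd32(hw, IXGBE_MSRWD) >> IXGBE_MSRWD_READ_DATA_SHIFT);
}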
-
-/* Atlas registers */
-#define IXGBE_ATLAS_PDN_LPBK 0x24
-#define IXGBE_ATLAS_PDN_10G 0xB
-#define IXGBE_ATLAS_PDN_1G 0xC
-#define IXGBE_ATLAS_PDN_AN 0xD
-
-/* Atlas bit masks */
-#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000
-#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10
-#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0
-#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
-#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
-
-/* Omer bit masks */
-#define IXGBE_CORECTL_WRITE_CMD 0x00010000
-
-/* Device Type definitions for new protocol MDIO commands */
-#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
-#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
-#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
-#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
-#define IXGBE_TWINAX_DEV 1
-
-#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
-
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Ctrl Reg */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
-
-#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
-#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
-#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
-#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
-#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */
-#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
-#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
-#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/
-#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
-#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
-#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
-#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */
-#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */
-#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */
-#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
-#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */
-#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
-
-#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
-
-/* MII clause 22/28 definitions */
-#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
-
-#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */
-#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
-#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
-#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */
-#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/
-#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
-#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
-#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */
-#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */
-#define IXGBE_MII_RESTART 0x200
-#define IXGBE_MII_AUTONEG_COMPLETE 0x20
-#define IXGBE_MII_AUTONEG_LINK_UP 0x04
-#define IXGBE_MII_AUTONEG_REG 0x0
-
-#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
-#define IXGBE_MAX_PHY_ADDR 32
-
-/* PHY IDs*/
-#define TN1010_PHY_ID 0x00A19410
-#define TNX_FW_REV 0xB
-#define X540_PHY_ID 0x01540200
-#define AQ_FW_REV 0x20
-#define QT2022_PHY_ID 0x0043A400
-#define ATH_PHY_ID 0x03429050
-
-/* PHY Types */
-#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
-
-/* Special PHY Init Routine */
-#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
-#define IXGBE_PHY_INIT_END_NL 0xFFFF
-#define IXGBE_CONTROL_MASK_NL 0xF000
-#define IXGBE_DATA_MASK_NL 0x0FFF
-#define IXGBE_CONTROL_SHIFT_NL 12
-#define IXGBE_DELAY_NL 0
-#define IXGBE_DATA_NL 1
-#define IXGBE_CONTROL_NL 0x000F
-#define IXGBE_CONTROL_EOL_NL 0x0FFF
-#define IXGBE_CONTROL_SOL_NL 0x0000
-
-/* General purpose Interrupt Enable */
-#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
-#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
-#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */
-#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
-#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
-#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
-#define IXGBE_GPIE_EIAME 0x40000000
-#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
-#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
-#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
-#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */
-#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
-#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */
-
-/* Packet Buffer Initialization */
-#define IXGBE_MAX_PACKET_BUFFERS 8
-
-#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */
-#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
-#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
-#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
-#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
-#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
-#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer */
-#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */
-
-#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
-#define IXGBE_MAX_PB 8
-
-/* Packet buffer allocation strategies */
-enum {
- PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */
-#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL
- PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */
-#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED
-};
-
-/* Transmit Flow Control status */
-#define IXGBE_TFCS_TXOFF 0x00000001
-#define IXGBE_TFCS_TXOFF0 0x00000100
-#define IXGBE_TFCS_TXOFF1 0x00000200
-#define IXGBE_TFCS_TXOFF2 0x00000400
-#define IXGBE_TFCS_TXOFF3 0x00000800
-#define IXGBE_TFCS_TXOFF4 0x00001000
-#define IXGBE_TFCS_TXOFF5 0x00002000
-#define IXGBE_TFCS_TXOFF6 0x00004000
-#define IXGBE_TFCS_TXOFF7 0x00008000
-
-/* TCP Timer */
-#define IXGBE_TCPTIMER_KS 0x00000100
-#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200
-#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400
-#define IXGBE_TCPTIMER_LOOP 0x00000800
-#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF
-
-/* HLREG0 Bit Masks */
-#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */
-#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */
-#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */
-#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */
-#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */
-#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */
-#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */
-#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */
-#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */
-#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */
-#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */
-#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */
-#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */
-#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */
-#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */
-
-/* VMD_CTL bitmasks */
-#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001
-#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002
-
-/* VT_CTL bitmasks */
-#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */
-#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */
-#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */
-#define IXGBE_VT_CTL_POOL_SHIFT 7
-#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
-
-/* VMOLR bitmasks */
-#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
-#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */
-#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */
-#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */
-#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */
-
-/* VFRE bitmask */
-#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
-
-#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
-
-/* RDHMPN and TDHMPN bitmasks */
-#define IXGBE_RDHMPN_RDICADDR 0x007FF800
-#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
-#define IXGBE_RDHMPN_RDICADDR_SHIFT 11
-#define IXGBE_TDHMPN_TDICADDR 0x003FF800
-#define IXGBE_TDHMPN_TDICRDREQ 0x00800000
-#define IXGBE_TDHMPN_TDICADDR_SHIFT 11
-
-#define IXGBE_RDMAM_MEM_SEL_SHIFT 13
-#define IXGBE_RDMAM_DWORD_SHIFT 9
-#define IXGBE_RDMAM_DESC_COMP_FIFO 1
-#define IXGBE_RDMAM_DFC_CMD_FIFO 2
-#define IXGBE_RDMAM_RSC_HEADER_ADDR 3
-#define IXGBE_RDMAM_TCN_STATUS_RAM 4
-#define IXGBE_RDMAM_WB_COLL_FIFO 5
-#define IXGBE_RDMAM_QSC_CNT_RAM 6
-#define IXGBE_RDMAM_QSC_FCOE_RAM 7
-#define IXGBE_RDMAM_QSC_QUEUE_CNT 8
-#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA
-#define IXGBE_RDMAM_QSC_RSC_RAM 0xB
-#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135
-#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4
-#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48
-#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7
-#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32
-#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4
-#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256
-#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9
-#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8
-#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4
-#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64
-#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4
-#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512
-#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5
-#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32
-#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4
-#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128
-#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8
-#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32
-#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8
-
-#define IXGBE_TXDESCIC_READY 0x80000000
-
-/* Receive Checksum Control */
-#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
-#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
-
-/* FCRTL Bit Masks */
-#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */
-#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */
-
-/* PAP bit masks*/
-#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
-
-/* RMCS Bit Masks */
-#define IXGBE_RMCS_RRM 0x00000002 /* Rx Recycle Mode enable */
-/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
-#define IXGBE_RMCS_RAC 0x00000004
-/* Deficit Fixed Prio ena */
-#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC
-#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx link FC ena */
-#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */
-#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
-
-/* FCCFG Bit Masks */
-#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */
-#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */
-
-/* Interrupt register bitmasks */
-
-/* Extended Interrupt Cause Read */
-#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
-#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */
-#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */
-#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
-#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
-#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
-#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
-#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
-#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
-#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
-#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
-#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
-#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */
-#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
-#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
-#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
-#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
-
-/* Extended Interrupt Cause Set */
-#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
-#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */
-#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
-#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
-#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
-#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */
-#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
-#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
-#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
-#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
-
-/* Extended Interrupt Mask Set */
-#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
-#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
-#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
-#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
-#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
-#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
-#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */
-#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
-#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
-#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
-#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
-
-/* Extended Interrupt Mask Clear */
-#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
-#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
-#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
-#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
-#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
-#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */
-#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
-#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
-#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
-#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
-
-#define IXGBE_EIMS_ENABLE_MASK ( \
- IXGBE_EIMS_RTX_QUEUE | \
- IXGBE_EIMS_LSC | \
- IXGBE_EIMS_TCP_TIMER | \
- IXGBE_EIMS_OTHER)
-
-/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
-#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
-#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
-#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
-#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
-#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
-#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
-#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
-#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
-#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
-#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */
-#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */
-#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */
-#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */
-#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */
-#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */
-#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */
-#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */
-#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass chk of ctrl bits */
-#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */
-#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */
-#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */
-#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */
-#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */
-
-#define IXGBE_MAX_FTQF_FILTERS 128
-#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003
-#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000
-#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001
-#define IXGBE_FTQF_PROTOCOL_SCTP 2
-#define IXGBE_FTQF_PRIORITY_MASK 0x00000007
-#define IXGBE_FTQF_PRIORITY_SHIFT 2
-#define IXGBE_FTQF_POOL_MASK 0x0000003F
-#define IXGBE_FTQF_POOL_SHIFT 8
-#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F
-#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25
-#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E
-#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D
-#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B
-#define IXGBE_FTQF_DEST_PORT_MASK 0x17
-#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F
-#define IXGBE_FTQF_POOL_MASK_EN 0x40000000
-#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000
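
/*
 * Illustrative sketch: assembling a 5-tuple FTQF word from the fields
 * above -- protocol select, priority, and the per-field compare mask
 * (by these definitions, IXGBE_FTQF_PROTOCOL_COMP_MASK masks out every
 * field except the protocol).  A sketch only, not the driver's code.
 */
static inline u32 ixgbe_ftqf_tcp_only_sketch(u32 priority)
{
	u32 ftqf = IXGBE_FTQF_PROTOCOL_TCP;	/* compare: protocol == TCP */

	ftqf |= (priority & IXGBE_FTQF_PRIORITY_MASK) <<
		IXGBE_FTQF_PRIORITY_SHIFT;
	/* ignore every 5-tuple field except the protocol */
	ftqf |= IXGBE_FTQF_PROTOCOL_COMP_MASK << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
	return ftqf | IXGBE_FTQF_QUEUE_ENABLE;
}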
-
-/* Interrupt clear mask */
-#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF
-
-/* Interrupt Vector Allocation Registers */
-#define IXGBE_IVAR_REG_NUM 25
-#define IXGBE_IVAR_REG_NUM_82599 64
-#define IXGBE_IVAR_TXRX_ENTRY 96
-#define IXGBE_IVAR_RX_ENTRY 64
-#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i))
-#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i))
-#define IXGBE_IVAR_TX_ENTRY 32
-
-#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */
-#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */
-
-#define IXGBE_MSIX_VECTOR(_i) (0 + (_i))
-
-#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
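
/*
 * Illustrative sketch (82598-style layout): each 32-bit IVAR register
 * carries four 8-bit allocation entries, and an entry is honored only
 * when IXGBE_IVAR_ALLOC_VAL is set in it.  'entry' would come from
 * IXGBE_IVAR_RX_QUEUE()/IXGBE_IVAR_TX_QUEUE() above; IXGBE_IVAR(_i) is
 * the register array defined elsewhere in this header, and rd32()/wr32()
 * are hypothetical MMIO helpers.
 */
static inline void ixgbe_set_ivar_sketch(struct ixgbe_hw *hw, u32 entry,
					 u32 msix_vector)
{
	u32 ivar = rd32(hw, IXGBE_IVAR(entry >> 2));

	ivar &= ~(0xFFu << (8 * (entry & 3)));		/* clear this entry */
	ivar |= (msix_vector | IXGBE_IVAR_ALLOC_VAL) << (8 * (entry & 3));
	wr32(hw, IXGBE_IVAR(entry >> 2), ivar);
}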
-
-/* ETYPE Queue Filter/Select Bit Masks */
-#define IXGBE_MAX_ETQF_FILTERS 8
-#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */
-#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */
-#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
-#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
-#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
-
-#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
-#define IXGBE_ETQS_RX_QUEUE_SHIFT 16
-#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */
-#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */
-
-/*
- * ETQF filter list: one static filter per filter consumer. This is
- * to avoid filter collisions later; add new filters here.
- *
- * Current filters:
- * EAPOL 802.1x (0x888e): Filter 0
- * FCoE (0x8906): Filter 2
- * 1588 (0x88f7): Filter 3
- * FIP (0x8914): Filter 4
- */
-#define IXGBE_ETQF_FILTER_EAPOL 0
-#define IXGBE_ETQF_FILTER_FCOE 2
-#define IXGBE_ETQF_FILTER_1588 3
-#define IXGBE_ETQF_FILTER_FIP 4
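
/*
 * Illustrative sketch: claiming the reserved 1588 slot above to steer
 * PTP traffic (EtherType 0x88F7, per the filter list comment) to one
 * Rx queue.  IXGBE_ETQF(_i) and IXGBE_ETQS(_i) are register arrays
 * defined elsewhere in this header; wr32() and struct ixgbe_hw are
 * hypothetical stand-ins for the driver's MMIO helper and handle.
 */
static inline void ixgbe_etqf_1588_sketch(struct ixgbe_hw *hw, u32 rx_queue)
{
	wr32(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
	     0x88F7 | IXGBE_ETQF_FILTER_EN | IXGBE_ETQF_1588);
	wr32(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_1588),
	     IXGBE_ETQS_QUEUE_EN | (rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT));
}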
-/* VLAN Control Bit Masks */
-#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
-#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
-#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */
-#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */
-#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */
-
-/* VLAN pool filtering masks */
-#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
-#define IXGBE_VLVF_ENTRIES 64
-#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
-/* Per VF Port VLAN insertion rules */
-#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
-#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
-
-#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
-
-/* STATUS Bit Masks */
-#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
-#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
-#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Ena Status */
-
-#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
-#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
-
-/* ESDP Bit Masks */
-#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
-#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
-#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
-#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
-#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
-#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
-#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
-#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
-#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */
-#define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */
-#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
-#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 IO mode */
-#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */
-
-
-/* LEDCTL Bit Masks */
-#define IXGBE_LED_IVRT_BASE 0x00000040
-#define IXGBE_LED_BLINK_BASE 0x00000080
-#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
-#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
-#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i))
-#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
-#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
-#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
-
-/* LED modes */
-#define IXGBE_LED_LINK_UP 0x0
-#define IXGBE_LED_LINK_10G 0x1
-#define IXGBE_LED_MAC 0x2
-#define IXGBE_LED_FILTER 0x3
-#define IXGBE_LED_LINK_ACTIVE 0x4
-#define IXGBE_LED_LINK_1G 0x5
-#define IXGBE_LED_ON 0xE
-#define IXGBE_LED_OFF 0xF
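
/*
 * Illustrative sketch: forcing LED 'index' solidly on by writing mode
 * IXGBE_LED_ON into that LED's byte of LEDCTL via the per-LED shift and
 * mask macros above.  IXGBE_LEDCTL is defined elsewhere in this header;
 * rd32()/wr32() and struct ixgbe_hw are hypothetical stand-ins.
 */
static inline void ixgbe_led_on_sketch(struct ixgbe_hw *hw, u32 index)
{
	u32 led_reg = rd32(hw, IXGBE_LEDCTL);

	led_reg &= ~IXGBE_LED_MODE_MASK(index);		/* clear current mode */
	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
	wr32(hw, IXGBE_LEDCTL, led_reg);
}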
-
-/* AUTOC Bit Masks */
-#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000
-#define IXGBE_AUTOC_KX4_SUPP 0x80000000
-#define IXGBE_AUTOC_KX_SUPP 0x40000000
-#define IXGBE_AUTOC_PAUSE 0x30000000
-#define IXGBE_AUTOC_ASM_PAUSE 0x20000000
-#define IXGBE_AUTOC_SYM_PAUSE 0x10000000
-#define IXGBE_AUTOC_RF 0x08000000
-#define IXGBE_AUTOC_PD_TMR 0x06000000
-#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
-#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000
-#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000
-#define IXGBE_AUTOC_FECA 0x00040000
-#define IXGBE_AUTOC_FECR 0x00020000
-#define IXGBE_AUTOC_KR_SUPP 0x00010000
-#define IXGBE_AUTOC_AN_RESTART 0x00001000
-#define IXGBE_AUTOC_FLU 0x00000001
-#define IXGBE_AUTOC_LMS_SHIFT 13
-#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-
-#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200
-#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
-#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180
-#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
-#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-
-#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000
-#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000
-#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
-#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-
-#define IXGBE_MACC_FLU 0x00000001
-#define IXGBE_MACC_FSV_10G 0x00030000
-#define IXGBE_MACC_FS 0x00040000
-#define IXGBE_MAC_RX2TX_LPBK 0x00000002
-
-/* LINKS Bit Masks */
-#define IXGBE_LINKS_KX_AN_COMP 0x80000000
-#define IXGBE_LINKS_UP 0x40000000
-#define IXGBE_LINKS_SPEED 0x20000000
-#define IXGBE_LINKS_MODE 0x18000000
-#define IXGBE_LINKS_RX_MODE 0x06000000
-#define IXGBE_LINKS_TX_MODE 0x01800000
-#define IXGBE_LINKS_XGXS_EN 0x00400000
-#define IXGBE_LINKS_SGMII_EN 0x02000000
-#define IXGBE_LINKS_PCS_1G_EN 0x00200000
-#define IXGBE_LINKS_1G_AN_EN 0x00100000
-#define IXGBE_LINKS_KX_AN_IDLE 0x00080000
-#define IXGBE_LINKS_1G_SYNC 0x00040000
-#define IXGBE_LINKS_10G_ALIGN 0x00020000
-#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000
-#define IXGBE_LINKS_TL_FAULT 0x00001000
-#define IXGBE_LINKS_SIGNAL 0x00000F00
-
-#define IXGBE_LINKS_SPEED_82599 0x30000000
-#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
-#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
-#define IXGBE_LINKS_SPEED_100_82599 0x10000000
-#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
-#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
-
-#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040
-
-/* PCS1GLSTA Bit Masks */
-#define IXGBE_PCS1GLSTA_LINK_OK 1
-#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
-#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000
-#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000
-#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000
-#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
-#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000
-
-#define IXGBE_PCS1GANA_SYM_PAUSE 0x80
-#define IXGBE_PCS1GANA_ASM_PAUSE 0x100
-
-/* PCS1GLCTL Bit Masks */
-#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg timeout ena */
-#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1
-#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20
-#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40
-#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
-#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
-
-/* ANLP1 Bit Masks */
-#define IXGBE_ANLP1_PAUSE 0x0C00
-#define IXGBE_ANLP1_SYM_PAUSE 0x0400
-#define IXGBE_ANLP1_ASM_PAUSE 0x0800
-#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
-
-/* SW Semaphore Register bitmasks */
-#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
-#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
-#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
-#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
-
-/* SW_FW_SYNC/GSSR definitions */
-#define IXGBE_GSSR_EEP_SM 0x0001
-#define IXGBE_GSSR_PHY0_SM 0x0002
-#define IXGBE_GSSR_PHY1_SM 0x0004
-#define IXGBE_GSSR_MAC_CSR_SM 0x0008
-#define IXGBE_GSSR_FLASH_SM 0x0010
-#define IXGBE_GSSR_SW_MNG_SM 0x0400
-
-/* FW Status register bitmask */
-#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
-
-/* EEC Register */
-#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */
-#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */
-#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */
-#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */
-#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */
-#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */
-#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */
-#define IXGBE_EEC_FWE_SHIFT 4
-#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */
-#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */
-#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
-#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
-#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
-#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
-#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
-/* EEPROM Addressing bits based on type (0-small, 1-large) */
-#define IXGBE_EEC_ADDR_SIZE 0x00000400
-#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
-#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD allows 14 bits for addr. */
-
-#define IXGBE_EEC_SIZE_SHIFT 11
-#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
-#define IXGBE_EEPROM_OPCODE_BITS 8
-
-/* Part Number String Length */
-#define IXGBE_PBANUM_LENGTH 11
-
-/* Checksum and EEPROM pointers */
-#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
-#define IXGBE_EEPROM_CHECKSUM 0x3F
-#define IXGBE_EEPROM_SUM 0xBABA
-#define IXGBE_PCIE_ANALOG_PTR 0x03
-#define IXGBE_ATLAS0_CONFIG_PTR 0x04
-#define IXGBE_PHY_PTR 0x04
-#define IXGBE_ATLAS1_CONFIG_PTR 0x05
-#define IXGBE_OPTION_ROM_PTR 0x05
-#define IXGBE_PCIE_GENERAL_PTR 0x06
-#define IXGBE_PCIE_CONFIG0_PTR 0x07
-#define IXGBE_PCIE_CONFIG1_PTR 0x08
-#define IXGBE_CORE0_PTR 0x09
-#define IXGBE_CORE1_PTR 0x0A
-#define IXGBE_MAC0_PTR 0x0B
-#define IXGBE_MAC1_PTR 0x0C
-#define IXGBE_CSR0_CONFIG_PTR 0x0D
-#define IXGBE_CSR1_CONFIG_PTR 0x0E
-#define IXGBE_FW_PTR 0x0F
-#define IXGBE_PBANUM0_PTR 0x15
-#define IXGBE_PBANUM1_PTR 0x16
-#define IXGBE_ALT_MAC_ADDR_PTR 0x37
-#define IXGBE_FREE_SPACE_PTR 0x3E
-
-/* External Thermal Sensor Config */
-#define IXGBE_ETS_CFG 0x26
-#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0
-#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6
-#define IXGBE_ETS_TYPE_MASK 0x0038
-#define IXGBE_ETS_TYPE_SHIFT 3
-#define IXGBE_ETS_TYPE_EMC 0x000
-#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007
-#define IXGBE_ETS_DATA_LOC_MASK 0x3C00
-#define IXGBE_ETS_DATA_LOC_SHIFT 10
-#define IXGBE_ETS_DATA_INDEX_MASK 0x0300
-#define IXGBE_ETS_DATA_INDEX_SHIFT 8
-#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF
-
-#define IXGBE_SAN_MAC_ADDR_PTR 0x28
-#define IXGBE_DEVICE_CAPS 0x2C
-#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
-#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
-#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
-#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
-#define IXGBE_MAX_MSIX_VECTORS_82598 0x13
-
-/* MSI-X capability fields masks */
-#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
-
-/* Legacy EEPROM word offsets */
-#define IXGBE_ISCSI_BOOT_CAPS 0x0033
-#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
-#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
-
-/* EEPROM Commands - SPI */
-#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
-#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
-#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
-#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
-#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
-#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
-/* EEPROM reset Write Enable latch */
-#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
-#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
-#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
-#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
-#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
-#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
-
-/* EEPROM Read Register */
-#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */
-#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */
-#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */
-#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
-#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */
-#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */
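
/*
 * Illustrative sketch of an EERD word read using the offsets above:
 * write the word address plus the START bit, poll for the done bit,
 * then take the data from the top 16 bits of the register.  IXGBE_EERD
 * is defined elsewhere in this header; the polling loop is elided and
 * rd32()/wr32()/struct ixgbe_hw are hypothetical stand-ins.
 */
static inline u16 ixgbe_eerd_read_sketch(struct ixgbe_hw *hw, u16 offset)
{
	wr32(hw, IXGBE_EERD, ((u32)offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
			     IXGBE_EEPROM_RW_REG_START);
	/* ... poll until the IXGBE_EEPROM_RW_REG_DONE bit is set ... */
	return (u16)(rd32(hw, IXGBE_EERD) >> IXGBE_EEPROM_RW_REG_DATA);
}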
-
-#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
-
-#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
-#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* words rd in burst */
-#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */
-
-#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
-#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */
-#endif
-
-#ifndef IXGBE_EERD_EEWR_ATTEMPTS
-/* Number of 5-microsecond intervals we wait for an EERD read or
- * EEWR write to complete */
-#define IXGBE_EERD_EEWR_ATTEMPTS 100000
-#endif
-
-#ifndef IXGBE_FLUDONE_ATTEMPTS
-/* # attempts we wait for flash update to complete */
-#define IXGBE_FLUDONE_ATTEMPTS 20000
-#endif
-
-#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */
-#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */
-#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */
-#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */
-
-#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
-#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
-#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
-#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
-#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
-#define IXGBE_FW_LESM_STATE_1 0x1
-#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
-#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
-#define IXGBE_FW_PATCH_VERSION_4 0x7
-#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
-#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */
-#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */
-#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */
-#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */
-#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */
-#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */
-
-#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */
-#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */
-#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */
-
-/* PCI Bus Info */
-#define IXGBE_PCI_DEVICE_STATUS 0xAA
-#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
-#define IXGBE_PCI_LINK_STATUS 0xB2
-#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
-#define IXGBE_PCI_LINK_WIDTH 0x3F0
-#define IXGBE_PCI_LINK_WIDTH_1 0x10
-#define IXGBE_PCI_LINK_WIDTH_2 0x20
-#define IXGBE_PCI_LINK_WIDTH_4 0x40
-#define IXGBE_PCI_LINK_WIDTH_8 0x80
-#define IXGBE_PCI_LINK_SPEED 0xF
-#define IXGBE_PCI_LINK_SPEED_2500 0x1
-#define IXGBE_PCI_LINK_SPEED_5000 0x2
-#define IXGBE_PCI_LINK_SPEED_8000 0x3
-#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
-#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
-#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
-
-/* Number of 100-microsecond intervals we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
-
-/* Check whether an address is multicast. This is a little-endian-specific check. */
-#define IXGBE_IS_MULTICAST(Address) \
- (bool)(((u8 *)(Address))[0] & ((u8)0x01))
-
-/* Check whether an address is broadcast. */
-#define IXGBE_IS_BROADCAST(Address) \
- ((((u8 *)(Address))[0] == ((u8)0xff)) && \
- (((u8 *)(Address))[1] == ((u8)0xff)))
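
/*
 * Illustrative usage sketch for the two classification macros above;
 * note a broadcast address also satisfies IXGBE_IS_MULTICAST (its first
 * byte has the low bit set), so test for broadcast first.
 */
static inline int ixgbe_addr_class_sketch(const u8 *mac)
{
	if (IXGBE_IS_BROADCAST(mac))
		return 2;		/* broadcast */
	if (IXGBE_IS_MULTICAST(mac))
		return 1;		/* multicast */
	return 0;			/* unicast */
}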
-
-/* RAH */
-#define IXGBE_RAH_VIND_MASK 0x003C0000
-#define IXGBE_RAH_VIND_SHIFT 18
-#define IXGBE_RAH_AV 0x80000000
-#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF
-
-/* Header split receive */
-#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
-#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
-#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
-#define IXGBE_RFCTL_RSC_DIS 0x00000010
-#define IXGBE_RFCTL_NFSW_DIS 0x00000040
-#define IXGBE_RFCTL_NFSR_DIS 0x00000080
-#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
-#define IXGBE_RFCTL_NFS_VER_SHIFT 8
-#define IXGBE_RFCTL_NFS_VER_2 0
-#define IXGBE_RFCTL_NFS_VER_3 1
-#define IXGBE_RFCTL_NFS_VER_4 2
-#define IXGBE_RFCTL_IPV6_DIS 0x00000400
-#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800
-#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000
-#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000
-#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
-
-/* Transmit Config masks */
-#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */
-#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
-#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
-/* Enable short packet padding to 64 bytes */
-#define IXGBE_TX_PAD_ENABLE 0x00000400
-#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */
-/* This allows for 16K packets + 4K for VLAN */
-#define IXGBE_MAX_FRAME_SZ 0x40040000
-
-#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
-#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */
-
-/* Receive Config masks */
-#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
-#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Desc Monitor Bypass */
-#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Ena specific Rx Queue */
-#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc wr-bk flushing */
-#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Supported by X540 only */
-#define IXGBE_RXDCTL_RLPML_EN 0x00008000
-#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
-
-#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
-#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */
-
-#define IXGBE_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
-#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
-#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00
-#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02
-#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
-#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
-#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */
-
-#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF
-#define IXGBE_RXMTRL_V1_SYNC_MSG 0x00
-#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG 0x01
-#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG 0x02
-#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03
-#define IXGBE_RXMTRL_V1_MGMT_MSG 0x04
-
-#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00
-#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000
-#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100
-#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200
-#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300
-#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800
-#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900
-#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00
-#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00
-#define IXGBE_RXMTRL_V2_SIGNALLING_MSG 0x0C00
-#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00
-
-#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
-#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
-#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
-#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
-#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
-#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
-/* Receive Priority Flow Control Enable */
-#define IXGBE_FCTRL_RPFCE 0x00004000
-#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
-#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */
-#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */
-#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
-#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
-#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Rx Priority FC bitmap mask */
-#define IXGBE_MFLCN_RPFCE_SHIFT 4 /* Rx Priority FC bitmap shift */
-
-/* Multiple Receive Queue Control */
-#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */
-#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */
-#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */
-#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */
-#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */
-#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */
-#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */
-#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */
-#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */
-#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */
-#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */
-#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000
-#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
-#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000
-#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
-#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
-#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
-
-/* Queue Drop Enable */
-#define IXGBE_QDE_ENABLE 0x00000001
-#define IXGBE_QDE_IDX_MASK 0x00007F00
-#define IXGBE_QDE_IDX_SHIFT 8
-#define IXGBE_QDE_WRITE 0x00010000
-#define IXGBE_QDE_READ 0x00020000
-
-#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
-#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
-#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
-#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
-#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
-#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
-#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
-#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
-#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-
-#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
-#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000
-#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
-/* Multiple Transmit Queue Command Register */
-#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
-#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
-#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 packet buffer */
-#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
-#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
-#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */
-#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
-
-/* Receive Descriptor bit definitions */
-#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
-#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
-#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
-#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
-#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
-#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
-#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
-#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
-#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
-#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
-#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
-#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
-#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
-#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
-#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */
-#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
-#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
-#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
-#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
-#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
-#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
-#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
-#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
-#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
-#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
-#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
-#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */
-#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
-#define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */
-#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */
-#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
-#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
-#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */
-#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */
-#define IXGBE_RXDADV_ERR_HBO 0x00800000 /* Header Buffer Overflow */
-#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
-#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
-#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
-#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
-#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
-#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
-#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
-#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
-#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
-#define IXGBE_RXD_PRI_SHIFT 13
-#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
-#define IXGBE_RXD_CFI_SHIFT 12
-
-#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
-#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
-#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
-#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
-#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */
-#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
-#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
-#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
-#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
-#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
-#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
-#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE1588 Time Stamp */
-
-/* PSRTYPE bit definitions */
-#define IXGBE_PSRTYPE_TCPHDR 0x00000010
-#define IXGBE_PSRTYPE_UDPHDR 0x00000020
-#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
-#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
-#define IXGBE_PSRTYPE_L2HDR 0x00001000
-
-/* SRRCTL bit definitions */
-#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
-#define IXGBE_SRRCTL_RDMTS_SHIFT 22
-#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
-#define IXGBE_SRRCTL_DROP_EN 0x10000000
-#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
-#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
-#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
-#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
-#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
-#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
-#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
-#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
-
-#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000
-#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
-
-#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
-#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
-#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
-#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
-#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
-#define IXGBE_RXDADV_RSCCNT_SHIFT 17
-#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
-#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
-#define IXGBE_RXDADV_SPH 0x8000
-
-/* RSS Hash results */
-#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000
-#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
-#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002
-#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
-#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004
-#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005
-#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
-#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
-#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
-#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
-
-/* RSS Packet Types as indicated in the receive descriptor. */
-#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000
-#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */
-#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */
-#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */
-#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */
-#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
-#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
-#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
-#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
-#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
-#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
-#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
-#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
-#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
-#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
-
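A sketch of decoding these packet-type bits from the pkt_info word of an advanced receive descriptor writeback, using only the masks defined above:

static int example_is_tcp_over_ipv4(u32 pkt_info)
{
	u32 want = IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP;

	/* when the ETQF bit is set, PKTTYPE carries an ETQF index instead */
	if (pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF)
		return 0;
	return (pkt_info & want) == want;
}
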
-/* Security Processing bit Indication */
-#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000
-#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
-
-/* Masks to determine if packets should be dropped due to frame errors */
-#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
- IXGBE_RXD_ERR_CE | \
- IXGBE_RXD_ERR_LE | \
- IXGBE_RXD_ERR_PE | \
- IXGBE_RXD_ERR_OSE | \
- IXGBE_RXD_ERR_USE)
-
-#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
- IXGBE_RXDADV_ERR_CE | \
- IXGBE_RXDADV_ERR_LE | \
- IXGBE_RXDADV_ERR_PE | \
- IXGBE_RXDADV_ERR_OSE | \
- IXGBE_RXDADV_ERR_USE)
-
-#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599 IXGBE_RXDADV_ERR_RXE
-
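For illustration, a drop decision over a descriptor's 32-bit status/error writeback word could combine the done bit with the frame-error mask along these lines (a sketch, not the actual receive path of any driver here):

static int example_should_drop(u32 staterr)
{
	/* error bits are only valid once the descriptor is done */
	if (!(staterr & IXGBE_RXDADV_STAT_DD))
		return 0;
	return (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0;
}
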
-/* Multicast bit mask */
-#define IXGBE_MCSTCTRL_MFE 0x4
-
-/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
-#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
-#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
-
-/* Vlan-specific macros */
-#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */
-#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */
-#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
-#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
-
-/* SR-IOV specific macros */
-#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
-#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
-#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
-#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
-/* Translated register #defines */
-#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P)))
-#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P)))
-#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * (P)))
-#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * (P)))
-#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * (P)))
-#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * (P)))
-#define IXGBE_PVTEICR(P) (0x00B00 + (4 * (P)))
-#define IXGBE_PVTEICS(P) (0x00C00 + (4 * (P)))
-#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * (P)))
-#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * (P)))
-#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * (P)))
-#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * (P)))
-#define IXGBE_PVTEITR(P) (((P) < 24) ? (0x00820 + ((P) * 4)) : \
- (0x012300 + (((P) - 24) * 4)))
-#define IXGBE_PVTIVAR(P) (0x12500 + (4 * (P)))
-#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * (P)))
-#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * (P)))
-#define IXGBE_VFPBACL(P) (0x110C8 + (4 * (P)))
-#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * (P))) \
- : (0x0D000 + (0x40 * ((P) - 64))))
-#define IXGBE_PVFRDBAH(P) ((P < 64) ? (0x01004 + (0x40 * (P))) \
- : (0x0D004 + (0x40 * ((P) - 64))))
-#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * (P))) \
- : (0x0D008 + (0x40 * ((P) - 64))))
-#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * (P))) \
- : (0x0D010 + (0x40 * ((P) - 64))))
-#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * (P))) \
- : (0x0D018 + (0x40 * ((P) - 64))))
-#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * (P))) \
- : (0x0D028 + (0x40 * ((P) - 64))))
-#define IXGBE_PVFSRRCTL(P) ((P < 64) ? (0x01014 + (0x40 * (P))) \
- : (0x0D014 + (0x40 * ((P) - 64))))
-#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P)))
-#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P)))
-#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P)))
-#define IXGBE_PVFTTDLEN(P) (0x06008 + (0x40 * (P)))
-#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P)))
-#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P)))
-#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
-#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
-#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
-#define IXGBE_PVFDCA_RXCTRL(P) (((P) < 64) ? (0x0100C + (0x40 * (P))) \
- : (0x0D00C + (0x40 * ((P) - 64))))
-#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * (P)))
-#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x)))
-#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x)))
-#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x)))
-#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x)))
-#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x)))
-#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x)))
-#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x)))
-
-#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
- (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
-#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
- (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
-
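As a worked example of the indexing scheme: with 2 queues per pool, queue 1 of VF 3 is absolute queue 2*3 + 1 = 7, so IXGBE_PVFTDWBALn(2, 3, 1) expands to IXGBE_PVFTDWBAL(7) = 0x06038 + 0x40 * 7 = 0x061F8.
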
-/* Little Endian defines */
-#ifndef __le16
-#define __le16 u16
-#endif
-#ifndef __le32
-#define __le32 u32
-#endif
-#ifndef __le64
-#define __le64 u64
-#endif
-#ifndef __be16
-/* Big Endian defines */
-#define __be16 u16
-#define __be32 u32
-#define __be64 u64
-#endif
-
-enum ixgbe_fdir_pballoc_type {
- IXGBE_FDIR_PBALLOC_NONE = 0,
- IXGBE_FDIR_PBALLOC_64K = 1,
- IXGBE_FDIR_PBALLOC_128K = 2,
- IXGBE_FDIR_PBALLOC_256K = 3,
-};
-
-/* Flow Director register values */
-#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001
-#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002
-#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003
-#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008
-#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010
-#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020
-#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
-#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
-#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
-#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
-#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
-#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
-#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28
-
-#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16
-#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16
-#define IXGBE_FDIRIP6M_DIPM_SHIFT 16
-#define IXGBE_FDIRM_VLANID 0x00000001
-#define IXGBE_FDIRM_VLANP 0x00000002
-#define IXGBE_FDIRM_POOL 0x00000004
-#define IXGBE_FDIRM_L4P 0x00000008
-#define IXGBE_FDIRM_FLEX 0x00000010
-#define IXGBE_FDIRM_DIPv6 0x00000020
-
-#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
-#define IXGBE_FDIRFREE_FREE_SHIFT 0
-#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000
-#define IXGBE_FDIRFREE_COLL_SHIFT 16
-#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F
-#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0
-#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000
-#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16
-#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF
-#define IXGBE_FDIRUSTAT_ADD_SHIFT 0
-#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000
-#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16
-#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF
-#define IXGBE_FDIRFSTAT_FADD_SHIFT 0
-#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00
-#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8
-#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16
-#define IXGBE_FDIRVLAN_FLEX_SHIFT 16
-#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15
-#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16
-
-#define IXGBE_FDIRCMD_CMD_MASK 0x00000003
-#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
-#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
-#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
-#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004
-#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008
-#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010
-#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020
-#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040
-#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060
-#define IXGBE_FDIRCMD_IPV6 0x00000080
-#define IXGBE_FDIRCMD_CLEARHT 0x00000100
-#define IXGBE_FDIRCMD_DROP 0x00000200
-#define IXGBE_FDIRCMD_INT 0x00000400
-#define IXGBE_FDIRCMD_LAST 0x00000800
-#define IXGBE_FDIRCMD_COLLISION 0x00001000
-#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
-#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
-#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
-#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
-#define IXGBE_FDIR_INIT_DONE_POLL 10
-#define IXGBE_FDIRCMD_CMD_POLL 10
-
-#define IXGBE_FDIR_DROP_QUEUE 127
-
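A minimal sketch of composing the FDIRCMD word for adding a TCP filter that steers matches to a given queue; the exact bit combination the driver programs depends on the filter mode, so treat this as illustrative only:

static u32 example_fdircmd_add_tcp(u8 rx_queue)
{
	u32 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW |
		      IXGBE_FDIRCMD_FILTER_UPDATE |
		      IXGBE_FDIRCMD_L4TYPE_TCP |
		      IXGBE_FDIRCMD_QUEUE_EN;

	return fdircmd | ((u32)rx_queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
}
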
-#define IXGBE_STATUS_OVERHEATING_BIT 20 /* STATUS overtemp bit num */
-
-/* Manageability Host Interface defines */
-#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
-#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
-#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
-
-/* CEM Support */
-#define FW_CEM_HDR_LEN 0x4
-#define FW_CEM_CMD_DRIVER_INFO 0xDD
-#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
-#define FW_CEM_CMD_RESERVED 0x0
-#define FW_CEM_UNUSED_VER 0x0
-#define FW_CEM_MAX_RETRIES 3
-#define FW_CEM_RESP_STATUS_SUCCESS 0x1
-
-/* Host Interface Command Structures */
-
-struct ixgbe_hic_hdr {
- u8 cmd;
- u8 buf_len;
- union {
- u8 cmd_resv;
- u8 ret_status;
- } cmd_or_resp;
- u8 checksum;
-};
-
-struct ixgbe_hic_drv_info {
- struct ixgbe_hic_hdr hdr;
- u8 port_num;
- u8 ver_sub;
- u8 ver_build;
- u8 ver_min;
- u8 ver_maj;
- u8 pad; /* end spacing to ensure length is mult. of dword */
- u16 pad2; /* end spacing to ensure length is mult. of dword2 */
-};
-
-/* Transmit Descriptor - Legacy */
-struct ixgbe_legacy_tx_desc {
- u64 buffer_addr; /* Address of the descriptor's data buffer */
- union {
- __le32 data;
- struct {
- __le16 length; /* Data buffer length */
- u8 cso; /* Checksum offset */
- u8 cmd; /* Descriptor control */
- } flags;
- } lower;
- union {
- __le32 data;
- struct {
- u8 status; /* Descriptor status */
- u8 css; /* Checksum start */
- __le16 vlan;
- } fields;
- } upper;
-};
-
-/* Transmit Descriptor - Advanced */
-union ixgbe_adv_tx_desc {
- struct {
- __le64 buffer_addr; /* Address of descriptor's data buf */
- __le32 cmd_type_len;
- __le32 olinfo_status;
- } read;
- struct {
- __le64 rsvd; /* Reserved */
- __le32 nxtseq_seed;
- __le32 status;
- } wb;
-};
-
-/* Receive Descriptor - Legacy */
-struct ixgbe_legacy_rx_desc {
- __le64 buffer_addr; /* Address of the descriptor's data buffer */
- __le16 length; /* Length of data DMAed into data buffer */
- __le16 csum; /* Packet checksum */
- u8 status; /* Descriptor status */
- u8 errors; /* Descriptor Errors */
- __le16 vlan;
-};
-
-/* Receive Descriptor - Advanced */
-union ixgbe_adv_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- } read;
- struct {
- struct {
- union {
- __le32 data;
- struct {
- __le16 pkt_info; /* RSS, Pkt type */
- __le16 hdr_info; /* Splithdr, hdrlen */
- } hs_rss;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- struct {
- __le16 ip_id; /* IP id */
- __le16 csum; /* Packet Checksum */
- } csum_ip;
- } hi_dword;
- } lower;
- struct {
- __le32 status_error; /* ext status/error */
- __le16 length; /* Packet length */
- __le16 vlan; /* VLAN tag */
- } upper;
- } wb; /* writeback */
-};
-
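A sketch of reading the writeback half of the union, assuming a little-endian host so the __le fields can be read directly:

static void example_parse_rx_writeback(const union ixgbe_adv_rx_desc *rxd,
				       u16 *len, u32 *rss_hash, u16 *vlan)
{
	u32 staterr = rxd->wb.upper.status_error;

	if (!(staterr & IXGBE_RXDADV_STAT_DD))
		return;	/* hardware has not written this descriptor back */

	*len      = rxd->wb.upper.length;
	*rss_hash = rxd->wb.lower.hi_dword.rss;
	*vlan     = (staterr & IXGBE_RXD_STAT_VP) ? rxd->wb.upper.vlan : 0;
}
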
-/* Context descriptors */
-struct ixgbe_adv_tx_context_desc {
- __le32 vlan_macip_lens;
- __le32 seqnum_seed;
- __le32 type_tucmd_mlhl;
- __le32 mss_l4len_idx;
-};
-
-/* Adv Transmit Descriptor Config Masks */
-#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
-#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */
-#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 time stamp */
-#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
-#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */
-#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
-#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Adv Context Desc */
-#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Adv Data Descriptor */
-#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
-#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
-#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
-#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
-#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext 1=Adv */
-#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
-#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
-#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
-#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */
-#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */
-#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
-#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
-#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
-#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
- IXGBE_ADVTXD_POPTS_SHIFT)
-#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
- IXGBE_ADVTXD_POPTS_SHIFT)
-#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
-/* 1st&Last TSO-full iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800
-#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
-#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
-#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
-#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
-#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
-#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
-#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
-#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
-#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
-#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* req Markers and CRC */
-#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
-#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
-#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 /* ESP Encrypt Enable */
-#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
-#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */
-#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */
-#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */
-#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */
-#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */
-#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */
-#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */
-#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */
-#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */
-#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
-#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
-
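Tying these masks back to the descriptor layout above, a single-buffer advanced data descriptor might be filled roughly as follows (a sketch of the common driver pattern, little-endian host assumed):

static void example_fill_adv_tx_desc(union ixgbe_adv_tx_desc *txd,
				     u64 dma_addr, u16 pkt_len)
{
	u32 cmd_type_len = pkt_len |			/* buffer length */
			   IXGBE_ADVTXD_DTYP_DATA |	/* advanced data desc */
			   IXGBE_ADVTXD_DCMD_DEXT |	/* descriptor extension */
			   IXGBE_ADVTXD_DCMD_IFCS |	/* insert FCS */
			   IXGBE_TXD_CMD_EOP |		/* single buffer */
			   IXGBE_TXD_CMD_RS;		/* report status */

	txd->read.buffer_addr   = dma_addr;
	txd->read.cmd_type_len  = cmd_type_len;
	txd->read.olinfo_status = (u32)pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT;
}
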
-/* Autonegotiation advertised speeds */
-typedef u32 ixgbe_autoneg_advertised;
-/* Link speed */
-typedef u32 ixgbe_link_speed;
-#define IXGBE_LINK_SPEED_UNKNOWN 0
-#define IXGBE_LINK_SPEED_100_FULL 0x0008
-#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
-#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
-#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
- IXGBE_LINK_SPEED_10GB_FULL)
-#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
- IXGBE_LINK_SPEED_1GB_FULL | \
- IXGBE_LINK_SPEED_10GB_FULL)
-
-
-/* Physical layer type */
-typedef u32 ixgbe_physical_layer;
-#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
-#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
-#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
-#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004
-#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
-#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
-#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
-#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
-#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
-#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
-#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
-#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000
-
-/* Flow Control Data Sheet defined values
- * Calculation and defines taken from 802.1bb Annex O
- */
-
-/* BitTimes (BT) conversion */
-#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024))
-#define IXGBE_B2BT(BT) (BT * 8)
-
-/* Calculate Delay to respond to PFC */
-#define IXGBE_PFC_D 672
-
-/* Calculate Cable Delay */
-#define IXGBE_CABLE_DC 5556 /* Delay Copper */
-#define IXGBE_CABLE_DO 5000 /* Delay Optical */
-
-/* Calculate Interface Delay X540 */
-#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */
-#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */
-#define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */
-
-#define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC)
-
-/* Calculate Interface Delay 82598, 82599 */
-#define IXGBE_PHY_D 12800
-#define IXGBE_MAC_D 4096
-#define IXGBE_XAUI_D (2 * 1024)
-
-#define IXGBE_ID (IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D)
-
-/* Calculate Delay incurred from higher layer */
-#define IXGBE_HD 6144
-
-/* Calculate PCI Bus delay for low thresholds */
-#define IXGBE_PCI_DELAY 10000
-
-/* Calculate X540 delay value in bit times */
-#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \
- ((36 * \
- (IXGBE_B2BT(_max_frame_link) + \
- IXGBE_PFC_D + \
- (2 * IXGBE_CABLE_DC) + \
- (2 * IXGBE_ID_X540) + \
- IXGBE_HD) / 25 + 1) + \
- 2 * IXGBE_B2BT(_max_frame_tc))
-
-/* Calculate 82599, 82598 delay value in bit times */
-#define IXGBE_DV(_max_frame_link, _max_frame_tc) \
- ((36 * \
- (IXGBE_B2BT(_max_frame_link) + \
- IXGBE_PFC_D + \
- (2 * IXGBE_CABLE_DC) + \
- (2 * IXGBE_ID) + \
- IXGBE_HD) / 25 + 1) + \
- 2 * IXGBE_B2BT(_max_frame_tc))
-
-/* Calculate low threshold delay values */
-#define IXGBE_LOW_DV_X540(_max_frame_tc) \
- (2 * IXGBE_B2BT(_max_frame_tc) + \
- (36 * IXGBE_PCI_DELAY / 25) + 1)
-#define IXGBE_LOW_DV(_max_frame_tc) \
- (2 * IXGBE_LOW_DV_X540(_max_frame_tc))
-
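As a worked example (using integer arithmetic, exactly as the macros are written): for a 1522-byte max frame on both the link and the traffic class on 82598/82599, IXGBE_B2BT(1522) = 12176 bit times and IXGBE_ID = 4096 + 2048 + 12800 = 18944, so

	IXGBE_DV(1522, 1522)
	  = 36 * (12176 + 672 + 2*5556 + 2*18944 + 6144) / 25 + 1 + 2*12176
	  = 97909 + 24352
	  = 122261 bit times,

and IXGBE_BT2KB(122261) = (122261 + 8191) / 8192 = 15, i.e. roughly 15 KB of packet buffer headroom for the high-water mark.
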
-/* Software ATR hash keys */
-#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
-#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
-
-/* Software ATR input stream values and masks */
-#define IXGBE_ATR_HASH_MASK 0x7fff
-#define IXGBE_ATR_L4TYPE_MASK 0x3
-#define IXGBE_ATR_L4TYPE_UDP 0x1
-#define IXGBE_ATR_L4TYPE_TCP 0x2
-#define IXGBE_ATR_L4TYPE_SCTP 0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
-enum ixgbe_atr_flow_type {
- IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
- IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
- IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
- IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
- IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
- IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
- IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
- IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
-};
-
-/* Flow Director ATR input struct. */
-union ixgbe_atr_input {
- /*
- * Byte layout in order, all values with MSB first:
- *
- * vm_pool - 1 byte
- * flow_type - 1 byte
- * vlan_id - 2 bytes
- * src_ip - 16 bytes
- * dst_ip - 16 bytes
- * src_port - 2 bytes
- * dst_port - 2 bytes
- * flex_bytes - 2 bytes
- * bkt_hash - 2 bytes
- */
- struct {
- u8 vm_pool;
- u8 flow_type;
- __be16 vlan_id;
- __be32 dst_ip[4];
- __be32 src_ip[4];
- __be16 src_port;
- __be16 dst_port;
- __be16 flex_bytes;
- __be16 bkt_hash;
- } formatted;
- __be32 dword_stream[11];
-};
-
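A sketch of populating the formatted view for a TCP/IPv4 flow. memset and the BSD htonl/htons are stand-ins for whatever byte-order helpers the surrounding code uses (cpu_to_be32/cpu_to_be16 in the kernel):

#include <string.h>	/* memset */
#include <arpa/inet.h>	/* htonl, htons */

static void example_fill_atr_tcpv4(union ixgbe_atr_input *in,
				   u32 src_ip, u32 dst_ip,
				   u16 src_port, u16 dst_port)
{
	memset(in, 0, sizeof(*in));	/* unused words must be zero */
	in->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
	in->formatted.src_ip[0] = htonl(src_ip);	/* IPv4 occupies word 0 */
	in->formatted.dst_ip[0] = htonl(dst_ip);
	in->formatted.src_port  = htons(src_port);
	in->formatted.dst_port  = htons(dst_port);
}
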
-/* Flow Director compressed ATR hash input struct */
-union ixgbe_atr_hash_dword {
- struct {
- u8 vm_pool;
- u8 flow_type;
- __be16 vlan_id;
- } formatted;
- __be32 ip;
- struct {
- __be16 src;
- __be16 dst;
- } port;
- __be16 flex_bytes;
- __be32 dword;
-};
-
-
-/*
- * Unavailable: The FCoE Boot Option ROM is not present in the flash.
- * Disabled: Present; boot order is not set for any targets on the port.
- * Enabled: Present; boot order is set for at least one target on the port.
- */
-enum ixgbe_fcoe_boot_status {
- ixgbe_fcoe_bootstatus_disabled = 0,
- ixgbe_fcoe_bootstatus_enabled = 1,
- ixgbe_fcoe_bootstatus_unavailable = 0xFFFF
-};
-
-enum ixgbe_eeprom_type {
- ixgbe_eeprom_uninitialized = 0,
- ixgbe_eeprom_spi,
- ixgbe_flash,
- ixgbe_eeprom_none /* No NVM support */
-};
-
-enum ixgbe_mac_type {
- ixgbe_mac_unknown = 0,
- ixgbe_mac_82598EB,
- ixgbe_mac_82599EB,
- ixgbe_mac_X540,
- ixgbe_num_macs
-};
-
-enum ixgbe_phy_type {
- ixgbe_phy_unknown = 0,
- ixgbe_phy_none,
- ixgbe_phy_tn,
- ixgbe_phy_aq,
- ixgbe_phy_cu_unknown,
- ixgbe_phy_qt,
- ixgbe_phy_xaui,
- ixgbe_phy_nl,
- ixgbe_phy_sfp_passive_tyco,
- ixgbe_phy_sfp_passive_unknown,
- ixgbe_phy_sfp_active_unknown,
- ixgbe_phy_sfp_avago,
- ixgbe_phy_sfp_ftl,
- ixgbe_phy_sfp_ftl_active,
- ixgbe_phy_sfp_unknown,
- ixgbe_phy_sfp_intel,
-	ixgbe_phy_sfp_unsupported, /* Enforce bit set with unsupported module */
- ixgbe_phy_generic
-};
-
-/*
- * SFP+ module type IDs:
- *
- * ID Module Type
- * =============
- * 0 SFP_DA_CU
- * 1 SFP_SR
- * 2 SFP_LR
- * 3 SFP_DA_CU_CORE0 - 82599-specific
- * 4 SFP_DA_CU_CORE1 - 82599-specific
- * 5 SFP_SR/LR_CORE0 - 82599-specific
- * 6 SFP_SR/LR_CORE1 - 82599-specific
- */
-enum ixgbe_sfp_type {
- ixgbe_sfp_type_da_cu = 0,
- ixgbe_sfp_type_sr = 1,
- ixgbe_sfp_type_lr = 2,
- ixgbe_sfp_type_da_cu_core0 = 3,
- ixgbe_sfp_type_da_cu_core1 = 4,
- ixgbe_sfp_type_srlr_core0 = 5,
- ixgbe_sfp_type_srlr_core1 = 6,
- ixgbe_sfp_type_da_act_lmt_core0 = 7,
- ixgbe_sfp_type_da_act_lmt_core1 = 8,
- ixgbe_sfp_type_1g_cu_core0 = 9,
- ixgbe_sfp_type_1g_cu_core1 = 10,
- ixgbe_sfp_type_1g_sx_core0 = 11,
- ixgbe_sfp_type_1g_sx_core1 = 12,
- ixgbe_sfp_type_not_present = 0xFFFE,
- ixgbe_sfp_type_unknown = 0xFFFF
-};
-
-enum ixgbe_media_type {
- ixgbe_media_type_unknown = 0,
- ixgbe_media_type_fiber,
- ixgbe_media_type_fiber_qsfp,
- ixgbe_media_type_fiber_lco,
- ixgbe_media_type_copper,
- ixgbe_media_type_backplane,
- ixgbe_media_type_cx4,
- ixgbe_media_type_virtual
-};
-
-/* Flow Control Settings */
-enum ixgbe_fc_mode {
- ixgbe_fc_none = 0,
- ixgbe_fc_rx_pause,
- ixgbe_fc_tx_pause,
- ixgbe_fc_full,
- ixgbe_fc_default
-};
-
-/* Smart Speed Settings */
-#define IXGBE_SMARTSPEED_MAX_RETRIES 3
-enum ixgbe_smart_speed {
- ixgbe_smart_speed_auto = 0,
- ixgbe_smart_speed_on,
- ixgbe_smart_speed_off
-};
-
-/* PCI bus types */
-enum ixgbe_bus_type {
- ixgbe_bus_type_unknown = 0,
- ixgbe_bus_type_pci,
- ixgbe_bus_type_pcix,
- ixgbe_bus_type_pci_express,
- ixgbe_bus_type_reserved
-};
-
-/* PCI bus speeds */
-enum ixgbe_bus_speed {
- ixgbe_bus_speed_unknown = 0,
- ixgbe_bus_speed_33 = 33,
- ixgbe_bus_speed_66 = 66,
- ixgbe_bus_speed_100 = 100,
- ixgbe_bus_speed_120 = 120,
- ixgbe_bus_speed_133 = 133,
- ixgbe_bus_speed_2500 = 2500,
- ixgbe_bus_speed_5000 = 5000,
- ixgbe_bus_speed_8000 = 8000,
- ixgbe_bus_speed_reserved
-};
-
-/* PCI bus widths */
-enum ixgbe_bus_width {
- ixgbe_bus_width_unknown = 0,
- ixgbe_bus_width_pcie_x1 = 1,
- ixgbe_bus_width_pcie_x2 = 2,
- ixgbe_bus_width_pcie_x4 = 4,
- ixgbe_bus_width_pcie_x8 = 8,
- ixgbe_bus_width_32 = 32,
- ixgbe_bus_width_64 = 64,
- ixgbe_bus_width_reserved
-};
-
-struct ixgbe_addr_filter_info {
- u32 num_mc_addrs;
- u32 rar_used_count;
- u32 mta_in_use;
- u32 overflow_promisc;
- bool user_set_promisc;
-};
-
-/* Bus parameters */
-struct ixgbe_bus_info {
- enum ixgbe_bus_speed speed;
- enum ixgbe_bus_width width;
- enum ixgbe_bus_type type;
-
- u16 func;
- u16 lan_id;
-};
-
-/* Flow control parameters */
-struct ixgbe_fc_info {
- u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */
- u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */
- u16 pause_time; /* Flow Control Pause timer */
- bool send_xon; /* Flow control send XON */
- bool strict_ieee; /* Strict IEEE mode */
- bool disable_fc_autoneg; /* Do not autonegotiate FC */
- bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
- enum ixgbe_fc_mode current_mode; /* FC mode in effect */
- enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
-};
-
-/* Statistics counters collected by the MAC */
-struct ixgbe_hw_stats {
- u64 crcerrs;
- u64 illerrc;
- u64 errbc;
- u64 mspdc;
- u64 mpctotal;
- u64 mpc[8];
- u64 mlfc;
- u64 mrfc;
- u64 rlec;
- u64 lxontxc;
- u64 lxonrxc;
- u64 lxofftxc;
- u64 lxoffrxc;
- u64 pxontxc[8];
- u64 pxonrxc[8];
- u64 pxofftxc[8];
- u64 pxoffrxc[8];
- u64 prc64;
- u64 prc127;
- u64 prc255;
- u64 prc511;
- u64 prc1023;
- u64 prc1522;
- u64 gprc;
- u64 bprc;
- u64 mprc;
- u64 gptc;
- u64 gorc;
- u64 gotc;
- u64 rnbc[8];
- u64 ruc;
- u64 rfc;
- u64 roc;
- u64 rjc;
- u64 mngprc;
- u64 mngpdc;
- u64 mngptc;
- u64 tor;
- u64 tpr;
- u64 tpt;
- u64 ptc64;
- u64 ptc127;
- u64 ptc255;
- u64 ptc511;
- u64 ptc1023;
- u64 ptc1522;
- u64 mptc;
- u64 bptc;
- u64 xec;
- u64 qprc[16];
- u64 qptc[16];
- u64 qbrc[16];
- u64 qbtc[16];
- u64 qprdc[16];
- u64 pxon2offc[8];
- u64 fdirustat_add;
- u64 fdirustat_remove;
- u64 fdirfstat_fadd;
- u64 fdirfstat_fremove;
- u64 fdirmatch;
- u64 fdirmiss;
- u64 fccrc;
- u64 fclast;
- u64 fcoerpdc;
- u64 fcoeprc;
- u64 fcoeptc;
- u64 fcoedwrc;
- u64 fcoedwtc;
- u64 fcoe_noddp;
- u64 fcoe_noddp_ext_buff;
- u64 ldpcec;
- u64 pcrc8ec;
- u64 b2ospc;
- u64 b2ogprc;
- u64 o2bgptc;
- u64 o2bspc;
-};
-
-/* forward declaration */
-struct ixgbe_hw;
-
-/* iterator type for walking multicast address lists */
-typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
- u32 *vmdq);
-
-/* Function pointer table */
-struct ixgbe_eeprom_operations {
- s32 (*init_params)(struct ixgbe_hw *);
- s32 (*read)(struct ixgbe_hw *, u16, u16 *);
- s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
- s32 (*write)(struct ixgbe_hw *, u16, u16);
- s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
- s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
- s32 (*update_checksum)(struct ixgbe_hw *);
- u16 (*calc_checksum)(struct ixgbe_hw *);
-};
-
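The per-MAC init code (for example ixgbe_init_ops_X540 later in this patch) fills these function pointers; callers then dispatch through the table. A guarded indirect call might look like:

static s32 example_eeprom_read_word(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	if (!hw->eeprom.ops.read)
		return IXGBE_NOT_IMPLEMENTED;	/* op not wired up */
	return hw->eeprom.ops.read(hw, offset, data);
}
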
-struct ixgbe_mac_operations {
- s32 (*init_hw)(struct ixgbe_hw *);
- s32 (*reset_hw)(struct ixgbe_hw *);
- s32 (*start_hw)(struct ixgbe_hw *);
- s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
- enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
- u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
- s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
- s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
- s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *);
- s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
- s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
- s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *);
- s32 (*stop_adapter)(struct ixgbe_hw *);
- s32 (*get_bus_info)(struct ixgbe_hw *);
- void (*set_lan_id)(struct ixgbe_hw *);
- s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
- s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
- s32 (*setup_sfp)(struct ixgbe_hw *);
- s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
- s32 (*disable_sec_rx_path)(struct ixgbe_hw *);
- s32 (*enable_sec_rx_path)(struct ixgbe_hw *);
- s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
- void (*release_swfw_sync)(struct ixgbe_hw *, u16);
-
- /* Link */
- void (*disable_tx_laser)(struct ixgbe_hw *);
- void (*enable_tx_laser)(struct ixgbe_hw *);
- void (*flap_tx_laser)(struct ixgbe_hw *);
- s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
- s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
- s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
- bool *);
-
- /* Packet Buffer manipulation */
- void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int);
-
- /* LED */
- s32 (*led_on)(struct ixgbe_hw *, u32);
- s32 (*led_off)(struct ixgbe_hw *, u32);
- s32 (*blink_led_start)(struct ixgbe_hw *, u32);
- s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
-
- /* RAR, Multicast, VLAN */
- s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
- s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
- s32 (*clear_rar)(struct ixgbe_hw *, u32);
- s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32);
- s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
- s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
- s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
- s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
- ixgbe_mc_addr_itr);
- s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
- ixgbe_mc_addr_itr, bool clear);
- s32 (*enable_mc)(struct ixgbe_hw *);
- s32 (*disable_mc)(struct ixgbe_hw *);
- s32 (*clear_vfta)(struct ixgbe_hw *);
- s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
- s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, bool *);
- s32 (*init_uta_tables)(struct ixgbe_hw *);
- void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
- void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
-
- /* Flow Control */
- s32 (*fc_enable)(struct ixgbe_hw *);
-
- /* Manageability interface */
- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
- s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
- s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
-};
-
-struct ixgbe_phy_operations {
- s32 (*identify)(struct ixgbe_hw *);
- s32 (*identify_sfp)(struct ixgbe_hw *);
- s32 (*init)(struct ixgbe_hw *);
- s32 (*reset)(struct ixgbe_hw *);
- s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
- s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
- s32 (*setup_link)(struct ixgbe_hw *);
- s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
- bool);
- s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
- s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
- s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
- s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
- s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
- s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
- void (*i2c_bus_clear)(struct ixgbe_hw *);
- s32 (*check_overtemp)(struct ixgbe_hw *);
-};
-
-struct ixgbe_eeprom_info {
- struct ixgbe_eeprom_operations ops;
- enum ixgbe_eeprom_type type;
- u32 semaphore_delay;
- u16 word_size;
- u16 address_bits;
- u16 word_page_size;
-};
-
-#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
-struct ixgbe_mac_info {
- struct ixgbe_mac_operations ops;
- enum ixgbe_mac_type type;
- u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
- /* prefix for World Wide Node Name (WWNN) */
- u16 wwnn_prefix;
- /* prefix for World Wide Port Name (WWPN) */
- u16 wwpn_prefix;
-#define IXGBE_MAX_MTA 128
- u32 mta_shadow[IXGBE_MAX_MTA];
- s32 mc_filter_type;
- u32 mcft_size;
- u32 vft_size;
- u32 num_rar_entries;
- u32 rar_highwater;
- u32 rx_pb_size;
- u32 max_tx_queues;
- u32 max_rx_queues;
- u32 orig_autoc;
- u8 san_mac_rar_index;
- u32 orig_autoc2;
- u16 max_msix_vectors;
- bool arc_subsystem_valid;
- bool orig_link_settings_stored;
- bool autotry_restart;
- u8 flags;
- struct ixgbe_thermal_sensor_data thermal_sensor_data;
-};
-
-struct ixgbe_phy_info {
- struct ixgbe_phy_operations ops;
- enum ixgbe_phy_type type;
- u32 addr;
- u32 id;
- enum ixgbe_sfp_type sfp_type;
- bool sfp_setup_needed;
- u32 revision;
- enum ixgbe_media_type media_type;
- bool reset_disable;
- ixgbe_autoneg_advertised autoneg_advertised;
- enum ixgbe_smart_speed smart_speed;
- bool smart_speed_active;
- bool multispeed_fiber;
- bool reset_if_overtemp;
- bool qsfp_shared_i2c_bus;
-};
-
-#include "ixgbe_mbx.h"
-
-struct ixgbe_mbx_operations {
- void (*init_params)(struct ixgbe_hw *hw);
- s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*check_for_msg)(struct ixgbe_hw *, u16);
- s32 (*check_for_ack)(struct ixgbe_hw *, u16);
- s32 (*check_for_rst)(struct ixgbe_hw *, u16);
-};
-
-struct ixgbe_mbx_stats {
- u32 msgs_tx;
- u32 msgs_rx;
-
- u32 acks;
- u32 reqs;
- u32 rsts;
-};
-
-struct ixgbe_mbx_info {
- struct ixgbe_mbx_operations ops;
- struct ixgbe_mbx_stats stats;
- u32 timeout;
- u32 udelay;
- u32 v2p_mailbox;
- u16 size;
-};
-
-struct ixgbe_hw {
- u8 __iomem *hw_addr;
- void *back;
- struct ixgbe_mac_info mac;
- struct ixgbe_addr_filter_info addr_ctrl;
- struct ixgbe_fc_info fc;
- struct ixgbe_phy_info phy;
- struct ixgbe_eeprom_info eeprom;
- struct ixgbe_bus_info bus;
- struct ixgbe_mbx_info mbx;
- u16 device_id;
- u16 vendor_id;
- u16 subsystem_device_id;
- u16 subsystem_vendor_id;
- u8 revision_id;
- bool adapter_stopped;
- bool force_full_reset;
- bool allow_unsupported_sfp;
-};
-
-#define ixgbe_call_func(hw, func, params, error) \
- (func != NULL) ? func params : error
-
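A sketch of the intended use of this NULL-guarding wrapper, matching the check_link signature in ixgbe_mac_operations above:

	ixgbe_link_speed speed;
	bool link_up;
	s32 ret = ixgbe_call_func(hw, hw->mac.ops.check_link,
				  (hw, &speed, &link_up, false),
				  IXGBE_NOT_IMPLEMENTED);
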
-
-/* Error Codes */
-#define IXGBE_ERR_EEPROM -1
-#define IXGBE_ERR_EEPROM_CHECKSUM -2
-#define IXGBE_ERR_PHY -3
-#define IXGBE_ERR_CONFIG -4
-#define IXGBE_ERR_PARAM -5
-#define IXGBE_ERR_MAC_TYPE -6
-#define IXGBE_ERR_UNKNOWN_PHY -7
-#define IXGBE_ERR_LINK_SETUP -8
-#define IXGBE_ERR_ADAPTER_STOPPED -9
-#define IXGBE_ERR_INVALID_MAC_ADDR -10
-#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
-#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
-#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
-#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
-#define IXGBE_ERR_RESET_FAILED -15
-#define IXGBE_ERR_SWFW_SYNC -16
-#define IXGBE_ERR_PHY_ADDR_INVALID -17
-#define IXGBE_ERR_I2C -18
-#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
-#define IXGBE_ERR_SFP_NOT_PRESENT -20
-#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
-#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
-#define IXGBE_ERR_FDIR_REINIT_FAILED -23
-#define IXGBE_ERR_EEPROM_VERSION -24
-#define IXGBE_ERR_NO_SPACE -25
-#define IXGBE_ERR_OVERTEMP -26
-#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
-#define IXGBE_ERR_FC_NOT_SUPPORTED -28
-#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
-#define IXGBE_ERR_PBA_SECTION -31
-#define IXGBE_ERR_INVALID_ARGUMENT -32
-#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
-#define IXGBE_ERR_OUT_OF_MEM -34
-
-#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
-
-#define UNREFERENCED_XPARAMETER
-
-#endif /* _IXGBE_TYPE_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.c
deleted file mode 100755
index efffe6f6..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.c
+++ /dev/null
@@ -1,938 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "ixgbe_x540.h"
-#include "ixgbe_type.h"
-#include "ixgbe_api.h"
-#include "ixgbe_common.h"
-#include "ixgbe_phy.h"
-
-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
-static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
-
-/**
- * ixgbe_init_ops_X540 - Inits func ptrs and MAC type
- * @hw: pointer to hardware structure
- *
- * Initialize the function pointers and assign the MAC type for X540.
- * Does not touch the hardware.
- **/
-s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
-{
- struct ixgbe_mac_info *mac = &hw->mac;
- struct ixgbe_phy_info *phy = &hw->phy;
- struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- s32 ret_val;
-
- ret_val = ixgbe_init_phy_ops_generic(hw);
- ret_val = ixgbe_init_ops_generic(hw);
-
-
- /* EEPROM */
- eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540;
- eeprom->ops.read = &ixgbe_read_eerd_X540;
- eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_X540;
- eeprom->ops.write = &ixgbe_write_eewr_X540;
- eeprom->ops.write_buffer = &ixgbe_write_eewr_buffer_X540;
- eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X540;
- eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X540;
- eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X540;
-
- /* PHY */
- phy->ops.init = &ixgbe_init_phy_ops_generic;
- phy->ops.reset = NULL;
-
- /* MAC */
- mac->ops.reset_hw = &ixgbe_reset_hw_X540;
- mac->ops.get_media_type = &ixgbe_get_media_type_X540;
- mac->ops.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_X540;
- mac->ops.read_analog_reg8 = NULL;
- mac->ops.write_analog_reg8 = NULL;
- mac->ops.start_hw = &ixgbe_start_hw_X540;
- mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
- mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
- mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
- mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
- mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
- mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540;
- mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync_X540;
- mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
- mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
-
- /* RAR, Multicast, VLAN */
- mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
- mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
- mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
- mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
- mac->rar_highwater = 1;
- mac->ops.set_vfta = &ixgbe_set_vfta_generic;
- mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
- mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
- mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
- mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
- mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
-
- /* Link */
- mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_generic;
- mac->ops.setup_link = &ixgbe_setup_mac_link_X540;
- mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
- mac->ops.check_link = &ixgbe_check_mac_link_generic;
-
- mac->mcft_size = 128;
- mac->vft_size = 128;
- mac->num_rar_entries = 128;
- mac->rx_pb_size = 384;
- mac->max_tx_queues = 128;
- mac->max_rx_queues = 128;
- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
-
- /*
- * FWSM register
- * ARC supported; valid only if manageability features are
- * enabled.
- */
- mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
- IXGBE_FWSM_MODE_MASK) ? true : false;
-
- //hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
-
- /* LEDs */
- mac->ops.blink_led_start = ixgbe_blink_led_start_X540;
- mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;
-
- /* Manageability interface */
- mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
-
- return ret_val;
-}
-
-/**
- * ixgbe_get_link_capabilities_X540 - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: true when autoneg or autotry is enabled
- *
- * Determines the link capabilities by reading the AUTOC register.
- **/
-s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
-{
- ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg);
-
- return 0;
-}
-
-/**
- * ixgbe_get_media_type_X540 - Get media type
- * @hw: pointer to hardware structure
- *
- * Returns the media type (fiber, copper, backplane)
- **/
-enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
-{
- return ixgbe_media_type_copper;
-}
-
-/**
- * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
- * @hw: pointer to hardware structure
- * @speed: new link speed
- * @autoneg: true if autonegotiation enabled
- * @autoneg_wait_to_complete: true when waiting for completion is needed
- **/
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
-{
- return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
- autoneg_wait_to_complete);
-}
-
-/**
- * ixgbe_reset_hw_X540 - Perform hardware reset
- * @hw: pointer to hardware structure
- *
- * Resets the hardware by resetting the transmit and receive units, masks
- * and clears all interrupts, and performs a reset.
- **/
-s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
-{
- s32 status = 0;
-
- /*
-	 * Userland DPDK takes ownership of the device.
-	 * The kernel driver here is used only as a simple path for ethtool,
-	 * so it won't actually reset the device.
- */
-#if 0
- u32 ctrl, i;
-
- /* Call adapter stop to disable tx/rx and clear interrupts */
- status = hw->mac.ops.stop_adapter(hw);
- if (status != 0)
- goto reset_hw_out;
-
- /* flush pending Tx transactions */
- ixgbe_clear_tx_pending(hw);
-
-mac_reset_top:
- ctrl = IXGBE_CTRL_RST;
- ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
- IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
- IXGBE_WRITE_FLUSH(hw);
-
- /* Poll for reset bit to self-clear indicating reset is complete */
- for (i = 0; i < 10; i++) {
- udelay(1);
- ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- if (!(ctrl & IXGBE_CTRL_RST_MASK))
- break;
- }
-
- if (ctrl & IXGBE_CTRL_RST_MASK) {
- status = IXGBE_ERR_RESET_FAILED;
- hw_dbg(hw, "Reset polling failed to complete.\n");
- }
- msleep(100);
-
- /*
- * Double resets are required for recovery from certain error
- * conditions. Between resets, it is necessary to stall to allow time
- * for any pending HW events to complete.
- */
- if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
- hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
- goto mac_reset_top;
- }
-
- /* Set the Rx packet buffer size. */
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
-
-#endif
-
- /* Store the permanent mac address */
- hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
- /*
- * Store MAC address from RAR0, clear receive address registers, and
- * clear the multicast table. Also reset num_rar_entries to 128,
- * since we modify this value when programming the SAN MAC address.
- */
- hw->mac.num_rar_entries = 128;
- hw->mac.ops.init_rx_addrs(hw);
-
- /* Store the permanent SAN mac address */
- hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
-
- /* Add the SAN MAC address to the RAR only if it's a valid address */
- if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
- hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
- hw->mac.san_addr, 0, IXGBE_RAH_AV);
-
- /* Save the SAN MAC RAR index */
- hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
-
- /* Reserve the last RAR for the SAN MAC address */
- hw->mac.num_rar_entries--;
- }
-
- /* Store the alternative WWNN/WWPN prefix */
- hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
- &hw->mac.wwpn_prefix);
-
-//reset_hw_out:
- return status;
-}
-
-/**
- * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
- * @hw: pointer to hardware structure
- *
- * Starts the hardware using the generic start_hw function
- * and the generation start_hw function.
- * Then performs revision-specific operations, if any.
- **/
-s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
-{
- s32 ret_val = 0;
-
- ret_val = ixgbe_start_hw_generic(hw);
- if (ret_val != 0)
- goto out;
-
- ret_val = ixgbe_start_hw_gen2(hw);
-
-out:
- return ret_val;
-}
-
-/**
- * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
- * @hw: pointer to hardware structure
- *
- * Determines physical layer capabilities of the current configuration.
- **/
-u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
-{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
- u16 ext_ability = 0;
-
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
- if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
- physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
- if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
- physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
- if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
- physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
-
- return physical_layer;
-}
-
-/**
- * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
- * @hw: pointer to hardware structure
- *
- * Initializes the EEPROM parameters ixgbe_eeprom_info within the
- * ixgbe_hw struct in order to set up EEPROM access.
- **/
-s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
-{
- struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- u32 eec;
- u16 eeprom_size;
-
- if (eeprom->type == ixgbe_eeprom_uninitialized) {
- eeprom->semaphore_delay = 10;
- eeprom->type = ixgbe_flash;
-
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
- eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
- eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
-
- hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
- eeprom->type, eeprom->word_size);
- }
-
- return 0;
-}
-
-/**
- * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
- * @data: word read from the EEPROM
- *
- * Reads a 16 bit word from the EEPROM using the EERD register.
- **/
-s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
-{
- s32 status = 0;
-
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
- 0)
- status = ixgbe_read_eerd_generic(hw, offset, data);
- else
- status = IXGBE_ERR_SWFW_SYNC;
-
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- return status;
-}
-
-/**
- * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
- * @words: number of words
- * @data: word(s) read from the EEPROM
- *
- * Reads 16-bit word(s) from the EEPROM using the EERD register.
- **/
-s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
- u16 offset, u16 words, u16 *data)
-{
- s32 status = 0;
-
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
- 0)
- status = ixgbe_read_eerd_buffer_generic(hw, offset,
- words, data);
- else
- status = IXGBE_ERR_SWFW_SYNC;
-
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- return status;
-}
-
-/**
- * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to write
- * @data: word to write to the EEPROM
- *
- * Writes a 16-bit word to the EEPROM using the EEWR register.
- **/
-s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
-{
- s32 status = 0;
-
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
- 0)
- status = ixgbe_write_eewr_generic(hw, offset, data);
- else
- status = IXGBE_ERR_SWFW_SYNC;
-
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- return status;
-}
-
-/**
- * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to write
- * @words: number of words
- * @data: word(s) to write to the EEPROM
- *
- * Writes 16-bit word(s) to the EEPROM using the EEWR register.
- **/
-s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
- u16 offset, u16 words, u16 *data)
-{
- s32 status = 0;
-
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
- 0)
- status = ixgbe_write_eewr_buffer_generic(hw, offset,
- words, data);
- else
- status = IXGBE_ERR_SWFW_SYNC;
-
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- return status;
-}
-
-/**
- * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
- * @hw: pointer to hardware structure
- *
- * This function does not use synchronization for EERD and EEWR. It can
- * be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540.
- **/
-u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
-{
- u16 i;
- u16 j;
- u16 checksum = 0;
- u16 length = 0;
- u16 pointer = 0;
- u16 word = 0;
-
- /*
- * Do not use hw->eeprom.ops.read because we do not want to take
- * the synchronization semaphores here. Instead use
- * ixgbe_read_eerd_generic
- */
-
- /* Include 0x0-0x3F in the checksum */
- for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
- if (ixgbe_read_eerd_generic(hw, i, &word) != 0) {
- hw_dbg(hw, "EEPROM read failed\n");
- break;
- }
- checksum += word;
- }
-
- /*
- * Include all data from pointers 0x3, 0x6-0xE. This excludes the
- * FW, PHY module, and PCIe Expansion/Option ROM pointers.
- */
- for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
- if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
- continue;
-
- if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) {
- hw_dbg(hw, "EEPROM read failed\n");
- break;
- }
-
- /* Skip pointer section if the pointer is invalid. */
- if (pointer == 0xFFFF || pointer == 0 ||
- pointer >= hw->eeprom.word_size)
- continue;
-
- if (ixgbe_read_eerd_generic(hw, pointer, &length) !=
- 0) {
- hw_dbg(hw, "EEPROM read failed\n");
- break;
- }
-
- /* Skip pointer section if length is invalid. */
- if (length == 0xFFFF || length == 0 ||
- (pointer + length) >= hw->eeprom.word_size)
- continue;
-
- for (j = pointer+1; j <= pointer+length; j++) {
- if (ixgbe_read_eerd_generic(hw, j, &word) !=
- 0) {
- hw_dbg(hw, "EEPROM read failed\n");
- break;
- }
- checksum += word;
- }
- }
-
- checksum = (u16)IXGBE_EEPROM_SUM - checksum;
-
- return checksum;
-}
-
-/**
- * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
- * @hw: pointer to hardware structure
- * @checksum_val: calculated checksum
- *
- * Performs checksum calculation and validates the EEPROM checksum. If the
- * caller does not need checksum_val, the value can be NULL.
- **/
-s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
- u16 *checksum_val)
-{
- s32 status;
- u16 checksum;
- u16 read_checksum = 0;
-
- /*
- * Read the first word from the EEPROM. If this times out or fails, do
- * not continue or we could be in for a very long wait while every
- * EEPROM read fails
- */
- status = hw->eeprom.ops.read(hw, 0, &checksum);
-
- if (status != 0) {
- hw_dbg(hw, "EEPROM read failed\n");
- goto out;
- }
-
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
- 0) {
- checksum = hw->eeprom.ops.calc_checksum(hw);
-
- /*
- * Do not use hw->eeprom.ops.read because we do not want to take
- * the synchronization semaphores twice here.
- */
- ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
- &read_checksum);
-
- /*
- * Verify read checksum from EEPROM is the same as
- * calculated checksum
- */
- if (read_checksum != checksum)
- status = IXGBE_ERR_EEPROM_CHECKSUM;
-
- /* If the user cares, return the calculated checksum */
- if (checksum_val)
- *checksum_val = checksum;
- } else {
- status = IXGBE_ERR_SWFW_SYNC;
- }
-
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-out:
- return status;
-}
-
-/**
- * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
- * @hw: pointer to hardware structure
- *
- * After writing EEPROM to shadow RAM using EEWR register, software calculates
- * checksum and updates the EEPROM and instructs the hardware to update
- * the flash.
- **/
-s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
-{
- s32 status;
- u16 checksum;
-
- /*
- * Read the first word from the EEPROM. If this times out or fails, do
- * not continue or we could be in for a very long wait while every
- * EEPROM read fails
- */
- status = hw->eeprom.ops.read(hw, 0, &checksum);
-
- if (status != 0)
- hw_dbg(hw, "EEPROM read failed\n");
-
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
- 0) {
- checksum = hw->eeprom.ops.calc_checksum(hw);
-
- /*
- * Do not use hw->eeprom.ops.write because we do not want to
- * take the synchronization semaphores twice here.
- */
- status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
- checksum);
-
- if (status == 0)
- status = ixgbe_update_flash_X540(hw);
- else
- status = IXGBE_ERR_SWFW_SYNC;
- }
-
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-
- return status;
-}
-
-/**
- * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
- * @hw: pointer to hardware structure
- *
- * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
- * EEPROM from shadow RAM to the flash device.
- **/
-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
-{
- u32 flup;
- s32 status = IXGBE_ERR_EEPROM;
-
- status = ixgbe_poll_flash_update_done_X540(hw);
- if (status == IXGBE_ERR_EEPROM) {
- hw_dbg(hw, "Flash update time out\n");
- goto out;
- }
-
- flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
-
- status = ixgbe_poll_flash_update_done_X540(hw);
- if (status == 0)
- hw_dbg(hw, "Flash update complete\n");
- else
- hw_dbg(hw, "Flash update time out\n");
-
- if (hw->revision_id == 0) {
- flup = IXGBE_READ_REG(hw, IXGBE_EEC);
-
- if (flup & IXGBE_EEC_SEC1VAL) {
- flup |= IXGBE_EEC_FLUP;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
- }
-
- status = ixgbe_poll_flash_update_done_X540(hw);
- if (status == 0)
- hw_dbg(hw, "Flash update complete\n");
- else
- hw_dbg(hw, "Flash update time out\n");
- }
-out:
- return status;
-}
-
-/**
- * ixgbe_poll_flash_update_done_X540 - Poll flash update status
- * @hw: pointer to hardware structure
- *
- * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
- * flash update is done.
- **/
-static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
-{
- u32 i;
- u32 reg;
- s32 status = IXGBE_ERR_EEPROM;
-
- for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
- reg = IXGBE_READ_REG(hw, IXGBE_EEC);
- if (reg & IXGBE_EEC_FLUDONE) {
- status = 0;
- break;
- }
- udelay(5);
- }
- return status;
-}
-
-/**
- * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
- * @hw: pointer to hardware structure
- * @mask: Mask to specify which semaphore to acquire
- *
- * Acquires the SWFW semaphore through the SW_FW_SYNC register for
- * the specified function (CSR, PHY0, PHY1, NVM, Flash)
- **/
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
-{
- u32 swfw_sync;
- u32 swmask = mask;
- u32 fwmask = mask << 5;
- u32 hwmask = 0;
- u32 timeout = 200;
- u32 i;
- s32 ret_val = 0;
-
- if (swmask == IXGBE_GSSR_EEP_SM)
- hwmask = IXGBE_GSSR_FLASH_SM;
-
- /* SW only mask doesn't have FW bit pair */
- if (swmask == IXGBE_GSSR_SW_MNG_SM)
- fwmask = 0;
-
- for (i = 0; i < timeout; i++) {
- /*
- * SW NVM semaphore bit is used for access to all
- * SW_FW_SYNC bits (not just NVM)
- */
- if (ixgbe_get_swfw_sync_semaphore(hw)) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
- goto out;
- }
-
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
- if (!(swfw_sync & (fwmask | swmask | hwmask))) {
- swfw_sync |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
- ixgbe_release_swfw_sync_semaphore(hw);
- msleep(5);
- goto out;
- } else {
- /*
- * Firmware currently using resource (fwmask), hardware
- * currently using resource (hwmask), or other software
- * thread currently using resource (swmask)
- */
- ixgbe_release_swfw_sync_semaphore(hw);
- msleep(5);
- }
- }
-
- /* Failed to get SW only semaphore */
- if (swmask == IXGBE_GSSR_SW_MNG_SM) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
- goto out;
- }
-
- /* If the resource is not released by the FW/HW the SW can assume that
- * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
- * of the requested resource(s) while ignoring the corresponding FW/HW
- * bits in the SW_FW_SYNC register.
- */
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
- if (swfw_sync & (fwmask | hwmask)) {
- if (ixgbe_get_swfw_sync_semaphore(hw)) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
- goto out;
- }
-
- swfw_sync |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
- ixgbe_release_swfw_sync_semaphore(hw);
- msleep(5);
- }
-
-out:
- return ret_val;
-}
-
-/**
- * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
- * @hw: pointer to hardware structure
- * @mask: Mask to specify which semaphore to release
- *
- * Releases the SWFW semaphore through the SW_FW_SYNC register
- * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
- **/
-void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
-{
- u32 swfw_sync;
- u32 swmask = mask;
-
- ixgbe_get_swfw_sync_semaphore(hw);
-
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
- swfw_sync &= ~swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
-
- ixgbe_release_swfw_sync_semaphore(hw);
- msleep(5);
-}
-
-/**
- * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
- * @hw: pointer to hardware structure
- *
- * Sets the hardware semaphores so SW/FW can gain control of shared resources
- **/
-static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
-{
- s32 status = IXGBE_ERR_EEPROM;
- u32 timeout = 2000;
- u32 i;
- u32 swsm;
-
- /* Get SMBI software semaphore between device drivers first */
- for (i = 0; i < timeout; i++) {
- /*
- * If the SMBI bit is 0 when we read it, then the bit will be
- * set and we have the semaphore
- */
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
- if (!(swsm & IXGBE_SWSM_SMBI)) {
- status = 0;
- break;
- }
- udelay(50);
- }
-
- /* Now get the semaphore between SW/FW through the REGSMP bit */
- if (status == 0) {
- for (i = 0; i < timeout; i++) {
- swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
- if (!(swsm & IXGBE_SWFW_REGSMP))
- break;
-
- udelay(50);
- }
-
- /*
- * Release semaphores and return error if SW NVM semaphore
- * was not granted because we don't have access to the EEPROM
- */
- if (i >= timeout) {
- hw_dbg(hw, "REGSMP Software NVM semaphore not "
- "granted.\n");
- ixgbe_release_swfw_sync_semaphore(hw);
- status = IXGBE_ERR_EEPROM;
- }
- } else {
- hw_dbg(hw, "Software semaphore SMBI between device drivers "
- "not granted.\n");
- }
-
- return status;
-}
-
-/**
- * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
- * @hw: pointer to hardware structure
- *
- * This function clears hardware semaphore bits.
- **/
-static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
-{
- u32 swsm;
-
- /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
-
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
- swsm &= ~IXGBE_SWSM_SMBI;
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
-
- swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
- swsm &= ~IXGBE_SWFW_REGSMP;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
-
- IXGBE_WRITE_FLUSH(hw);
-}
-
-/**
- * ixgbe_blink_led_start_X540 - Blink LED based on index.
- * @hw: pointer to hardware structure
- * @index: led number to blink
- *
- * Devices that implement the version 2 interface:
- * X540
- **/
-s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
-{
- u32 macc_reg;
- u32 ledctl_reg;
- ixgbe_link_speed speed;
- bool link_up;
-
- /*
- * Link should be up in order for the blink bit in the LED control
- * register to work. Force link and speed in the MAC if link is down.
- * This will be reversed when we stop the blinking.
- */
- hw->mac.ops.check_link(hw, &speed, &link_up, false);
- if (link_up == false) {
- macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
- macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
- IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
- }
- /* Set the LED to LINK_UP + BLINK. */
- ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
- ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
- ledctl_reg |= IXGBE_LED_BLINK(index);
- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
- IXGBE_WRITE_FLUSH(hw);
-
- return 0;
-}
-
-/**
- * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
- * @hw: pointer to hardware structure
- * @index: led number to stop blinking
- *
- * Devices that implement the version 2 interface:
- * X540
- **/
-s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
-{
- u32 macc_reg;
- u32 ledctl_reg;
-
- /* Restore the LED to its default value. */
- ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
- ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
- ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
- ledctl_reg &= ~IXGBE_LED_BLINK(index);
- IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
-
- /* Unforce link and speed in the MAC. */
- macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
- macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
- IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
- IXGBE_WRITE_FLUSH(hw);
-
- return 0;
-}
-
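The checksum logic removed above sums every EEPROM word below the checksum word, plus each valid pointed-to region, and stores a value that makes the grand total come out to IXGBE_EEPROM_SUM. A minimal host-side sketch of that scheme, with the pointer-region walk omitted for brevity; read_word() is a stand-in for ixgbe_read_eerd_generic(), and the constants mirror the driver's ixgbe_type.h definitions (IXGBE_EEPROM_CHECKSUM is 0x3F, IXGBE_EEPROM_SUM is 0xBABA):

#include <stdint.h>
#include <stdio.h>

#define EEPROM_WORDS     0x800
#define CHECKSUM_OFFSET  0x3F      /* mirrors IXGBE_EEPROM_CHECKSUM */
#define EEPROM_SUM       0xBABA    /* mirrors IXGBE_EEPROM_SUM */

static uint16_t eeprom[EEPROM_WORDS];

/* stand-in for ixgbe_read_eerd_generic() */
static int read_word(uint16_t offset, uint16_t *data)
{
	if (offset >= EEPROM_WORDS)
		return -1;
	*data = eeprom[offset];
	return 0;
}

/* Sum everything below the checksum word; the pointed-to regions walked
 * by the real driver are omitted here. The stored checksum is chosen so
 * that checksum + sum(words) == EEPROM_SUM. */
static uint16_t calc_checksum(void)
{
	uint16_t checksum = 0, word;
	uint16_t i;

	for (i = 0; i < CHECKSUM_OFFSET; i++)
		if (read_word(i, &word) == 0)
			checksum += word;

	return (uint16_t)(EEPROM_SUM - checksum);
}

int main(void)
{
	uint16_t stored;

	eeprom[0x10] = 0x1234;			/* arbitrary content */
	eeprom[CHECKSUM_OFFSET] = calc_checksum();

	read_word(CHECKSUM_OFFSET, &stored);
	printf("checksum %s\n", stored == calc_checksum() ? "ok" : "mismatch");
	return 0;
}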
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.h
deleted file mode 100755
index 77e8952d..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IXGBE_X540_H_
-#define _IXGBE_X540_H_
-
-#include "ixgbe_type.h"
-
-s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed, bool *autoneg);
-enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg, bool link_up_wait_to_complete);
-s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
-
-s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
-s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
-s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
- u16 *data);
-s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
- u16 *data);
-s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
-s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
-u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
-
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
-void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
-
-s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
-#endif /* _IXGBE_X540_H_ */
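The acquire/release prototypes above implement a two-level handshake: a register-access semaphore guards the read-modify-write of SW_FW_SYNC, whose per-resource bits record software/firmware ownership. A user-space model of just that pattern; the bit positions and names (REGSMP, SW_EEP, FW_EEP) are stand-ins for illustration, not the hardware layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REGSMP   (1u << 31)   /* stand-in register-access semaphore bit */
#define SW_EEP   (1u << 0)    /* stand-in software ownership bit */
#define FW_EEP   (1u << 5)    /* stand-in firmware ownership bit */

static uint32_t swfw_sync;    /* models the SW_FW_SYNC register */

static bool get_semaphore(void)
{
	int i;

	for (i = 0; i < 1000; i++) {
		if (!(swfw_sync & REGSMP)) {  /* bit clear => we now own it */
			swfw_sync |= REGSMP;
			return true;
		}
	}
	return false;                         /* timed out */
}

static void put_semaphore(void)
{
	swfw_sync &= ~REGSMP;
}

static bool acquire(uint32_t sw_bit, uint32_t fw_bit)
{
	if (!get_semaphore())
		return false;
	if (swfw_sync & (sw_bit | fw_bit)) {  /* resource already in use */
		put_semaphore();
		return false;
	}
	swfw_sync |= sw_bit;                  /* claim it for software */
	put_semaphore();
	return true;
}

static void release(uint32_t sw_bit)
{
	if (get_semaphore()) {
		swfw_sync &= ~sw_bit;
		put_semaphore();
	}
}

int main(void)
{
	if (acquire(SW_EEP, FW_EEP)) {
		puts("EEPROM semaphore held; safe to touch shared NVM");
		release(SW_EEP);
	}
	return 0;
}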
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c
deleted file mode 100755
index ca9f4224..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c
+++ /dev/null
@@ -1,1246 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "ixgbe.h"
-#include "kcompat.h"
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
-/* From lib/vsprintf.c */
-#include <asm/div64.h>
-
-static int skip_atoi(const char **s)
-{
- int i=0;
-
- while (isdigit(**s))
- i = i*10 + *((*s)++) - '0';
- return i;
-}
-
-#define _kc_ZEROPAD 1 /* pad with zero */
-#define _kc_SIGN 2 /* unsigned/signed long */
-#define _kc_PLUS 4 /* show plus */
-#define _kc_SPACE 8 /* space if plus */
-#define _kc_LEFT 16 /* left justified */
-#define _kc_SPECIAL 32 /* 0x */
-#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
-
-static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
-{
- char c,sign,tmp[66];
- const char *digits;
- const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
- const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
- int i;
-
- digits = (type & _kc_LARGE) ? large_digits : small_digits;
- if (type & _kc_LEFT)
- type &= ~_kc_ZEROPAD;
- if (base < 2 || base > 36)
- return 0;
- c = (type & _kc_ZEROPAD) ? '0' : ' ';
- sign = 0;
- if (type & _kc_SIGN) {
- if (num < 0) {
- sign = '-';
- num = -num;
- size--;
- } else if (type & _kc_PLUS) {
- sign = '+';
- size--;
- } else if (type & _kc_SPACE) {
- sign = ' ';
- size--;
- }
- }
- if (type & _kc_SPECIAL) {
- if (base == 16)
- size -= 2;
- else if (base == 8)
- size--;
- }
- i = 0;
- if (num == 0)
- tmp[i++]='0';
- else while (num != 0)
- tmp[i++] = digits[do_div(num,base)];
- if (i > precision)
- precision = i;
- size -= precision;
- if (!(type&(_kc_ZEROPAD+_kc_LEFT))) {
- while(size-->0) {
- if (buf <= end)
- *buf = ' ';
- ++buf;
- }
- }
- if (sign) {
- if (buf <= end)
- *buf = sign;
- ++buf;
- }
- if (type & _kc_SPECIAL) {
- if (base==8) {
- if (buf <= end)
- *buf = '0';
- ++buf;
- } else if (base==16) {
- if (buf <= end)
- *buf = '0';
- ++buf;
- if (buf <= end)
- *buf = digits[33];
- ++buf;
- }
- }
- if (!(type & _kc_LEFT)) {
- while (size-- > 0) {
- if (buf <= end)
- *buf = c;
- ++buf;
- }
- }
- while (i < precision--) {
- if (buf <= end)
- *buf = '0';
- ++buf;
- }
- while (i-- > 0) {
- if (buf <= end)
- *buf = tmp[i];
- ++buf;
- }
- while (size-- > 0) {
- if (buf <= end)
- *buf = ' ';
- ++buf;
- }
- return buf;
-}
-
-int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
-{
- int len;
- unsigned long long num;
- int i, base;
- char *str, *end, c;
- const char *s;
-
- int flags; /* flags to number() */
-
- int field_width; /* width of output field */
-	int precision;		/* min. # of digits for integers; max
-				   number of chars from string */
- int qualifier; /* 'h', 'l', or 'L' for integer fields */
- /* 'z' support added 23/7/1999 S.H. */
- /* 'z' changed to 'Z' --davidm 1/25/99 */
-
- str = buf;
- end = buf + size - 1;
-
- if (end < buf - 1) {
- end = ((void *) -1);
- size = end - buf + 1;
- }
-
- for (; *fmt ; ++fmt) {
- if (*fmt != '%') {
- if (str <= end)
- *str = *fmt;
- ++str;
- continue;
- }
-
- /* process flags */
- flags = 0;
- repeat:
- ++fmt; /* this also skips first '%' */
- switch (*fmt) {
- case '-': flags |= _kc_LEFT; goto repeat;
- case '+': flags |= _kc_PLUS; goto repeat;
- case ' ': flags |= _kc_SPACE; goto repeat;
- case '#': flags |= _kc_SPECIAL; goto repeat;
- case '0': flags |= _kc_ZEROPAD; goto repeat;
- }
-
- /* get field width */
- field_width = -1;
- if (isdigit(*fmt))
- field_width = skip_atoi(&fmt);
- else if (*fmt == '*') {
- ++fmt;
- /* it's the next argument */
- field_width = va_arg(args, int);
- if (field_width < 0) {
- field_width = -field_width;
- flags |= _kc_LEFT;
- }
- }
-
- /* get the precision */
- precision = -1;
- if (*fmt == '.') {
- ++fmt;
- if (isdigit(*fmt))
- precision = skip_atoi(&fmt);
- else if (*fmt == '*') {
- ++fmt;
- /* it's the next argument */
- precision = va_arg(args, int);
- }
- if (precision < 0)
- precision = 0;
- }
-
- /* get the conversion qualifier */
- qualifier = -1;
- if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
- qualifier = *fmt;
- ++fmt;
- }
-
- /* default base */
- base = 10;
-
- switch (*fmt) {
- case 'c':
- if (!(flags & _kc_LEFT)) {
- while (--field_width > 0) {
- if (str <= end)
- *str = ' ';
- ++str;
- }
- }
- c = (unsigned char) va_arg(args, int);
- if (str <= end)
- *str = c;
- ++str;
- while (--field_width > 0) {
- if (str <= end)
- *str = ' ';
- ++str;
- }
- continue;
-
- case 's':
- s = va_arg(args, char *);
- if (!s)
- s = "<NULL>";
-
- len = strnlen(s, precision);
-
- if (!(flags & _kc_LEFT)) {
- while (len < field_width--) {
- if (str <= end)
- *str = ' ';
- ++str;
- }
- }
- for (i = 0; i < len; ++i) {
- if (str <= end)
- *str = *s;
- ++str; ++s;
- }
- while (len < field_width--) {
- if (str <= end)
- *str = ' ';
- ++str;
- }
- continue;
-
- case 'p':
- if (field_width == -1) {
- field_width = 2*sizeof(void *);
- flags |= _kc_ZEROPAD;
- }
- str = number(str, end,
- (unsigned long) va_arg(args, void *),
- 16, field_width, precision, flags);
- continue;
-
-
- case 'n':
- /* FIXME:
- * What does C99 say about the overflow case here? */
- if (qualifier == 'l') {
- long * ip = va_arg(args, long *);
- *ip = (str - buf);
- } else if (qualifier == 'Z') {
- size_t * ip = va_arg(args, size_t *);
- *ip = (str - buf);
- } else {
- int * ip = va_arg(args, int *);
- *ip = (str - buf);
- }
- continue;
-
- case '%':
- if (str <= end)
- *str = '%';
- ++str;
- continue;
-
- /* integer number formats - set up the flags and "break" */
- case 'o':
- base = 8;
- break;
-
- case 'X':
- flags |= _kc_LARGE;
- case 'x':
- base = 16;
- break;
-
- case 'd':
- case 'i':
- flags |= _kc_SIGN;
- case 'u':
- break;
-
- default:
- if (str <= end)
- *str = '%';
- ++str;
- if (*fmt) {
- if (str <= end)
- *str = *fmt;
- ++str;
- } else {
- --fmt;
- }
- continue;
- }
- if (qualifier == 'L')
- num = va_arg(args, long long);
- else if (qualifier == 'l') {
- num = va_arg(args, unsigned long);
- if (flags & _kc_SIGN)
- num = (signed long) num;
- } else if (qualifier == 'Z') {
- num = va_arg(args, size_t);
- } else if (qualifier == 'h') {
- num = (unsigned short) va_arg(args, int);
- if (flags & _kc_SIGN)
- num = (signed short) num;
- } else {
- num = va_arg(args, unsigned int);
- if (flags & _kc_SIGN)
- num = (signed int) num;
- }
- str = number(str, end, num, base,
- field_width, precision, flags);
- }
- if (str <= end)
- *str = '\0';
- else if (size > 0)
- /* don't write out a null byte if the buf size is zero */
- *end = '\0';
- /* the trailing null byte doesn't count towards the total
- * ++str;
- */
- return str-buf;
-}
-
-int _kc_snprintf(char * buf, size_t size, const char *fmt, ...)
-{
- va_list args;
- int i;
-
- va_start(args, fmt);
- i = _kc_vsnprintf(buf,size,fmt,args);
- va_end(args);
- return i;
-}
-#endif /* < 2.4.8 */
-
-
-
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
-#ifdef CONFIG_PCI_IOV
-int __kc_pci_vfs_assigned(struct pci_dev *dev)
-{
- unsigned int vfs_assigned = 0;
-#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
- int pos;
- struct pci_dev *vfdev;
- unsigned short dev_id;
-
- /* only search if we are a PF */
- if (!dev->is_physfn)
- return 0;
-
- /* find SR-IOV capability */
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
- if (!pos)
- return 0;
-
-	/*
-	 * determine the device ID for the VFs, the vendor ID will be the
-	 * same as the PF so there is no need to check for that one
-	 */
- pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
-
- /* loop through all the VFs to see if we own any that are assigned */
- vfdev = pci_get_device(dev->vendor, dev_id, NULL);
- while (vfdev) {
-		/*
-		 * It is considered assigned if it is a virtual function with
-		 * our dev as the physical function and the assigned bit is set
-		 */
- if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
- (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
- vfs_assigned++;
-
- vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
- }
-
-#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
- return vfs_assigned;
-}
-
-#endif /* CONFIG_PCI_IOV */
-#endif /* 3.10.0 */
-
-
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
-
-/**************************************/
-/* PCI DMA MAPPING */
-
-#if defined(CONFIG_HIGHMEM)
-
-#ifndef PCI_DRAM_OFFSET
-#define PCI_DRAM_OFFSET 0
-#endif
-
-u64
-_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
- size_t size, int direction)
-{
- return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
- PCI_DRAM_OFFSET);
-}
-
-#else /* CONFIG_HIGHMEM */
-
-u64
-_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
- size_t size, int direction)
-{
- return pci_map_single(dev, (void *)page_address(page) + offset, size,
- direction);
-}
-
-#endif /* CONFIG_HIGHMEM */
-
-void
-_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
- int direction)
-{
- return pci_unmap_single(dev, dma_addr, size, direction);
-}
-
-#endif /* 2.4.13 => 2.4.3 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
-
-/**************************************/
-/* PCI DRIVER API */
-
-int
-_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
-{
- if (!pci_dma_supported(dev, mask))
- return -EIO;
- dev->dma_mask = mask;
- return 0;
-}
-
-int
-_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
-{
- int i;
-
- for (i = 0; i < 6; i++) {
- if (pci_resource_len(dev, i) == 0)
- continue;
-
- if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
- if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
- pci_release_regions(dev);
- return -EBUSY;
- }
- } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
- if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
- pci_release_regions(dev);
- return -EBUSY;
- }
- }
- }
- return 0;
-}
-
-void
-_kc_pci_release_regions(struct pci_dev *dev)
-{
- int i;
-
- for (i = 0; i < 6; i++) {
- if (pci_resource_len(dev, i) == 0)
- continue;
-
- if (pci_resource_flags(dev, i) & IORESOURCE_IO)
- release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
-
- else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
- release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
- }
-}
-
-/**************************************/
-/* NETWORK DRIVER API */
-
-struct net_device *
-_kc_alloc_etherdev(int sizeof_priv)
-{
- struct net_device *dev;
- int alloc_size;
-
- alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
- dev = kzalloc(alloc_size, GFP_KERNEL);
- if (!dev)
- return NULL;
-
- if (sizeof_priv)
- dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
- dev->name[0] = '\0';
- ether_setup(dev);
-
- return dev;
-}
-
-int
-_kc_is_valid_ether_addr(u8 *addr)
-{
- const char zaddr[6] = { 0, };
-
- return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
-}
-
-#endif /* 2.4.3 => 2.4.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
-
-int
-_kc_pci_set_power_state(struct pci_dev *dev, int state)
-{
- return 0;
-}
-
-int
-_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
-{
- return 0;
-}
-
-#endif /* 2.4.6 => 2.4.3 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
-void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
- int off, int size)
-{
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- frag->page = page;
- frag->page_offset = off;
- frag->size = size;
- skb_shinfo(skb)->nr_frags = i + 1;
-}
-
-/*
- * Original Copyright:
- * find_next_bit.c: fallback find next bit implementation
- *
- * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
-{
- const unsigned long *p = addr + BITOP_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG-1);
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset %= BITS_PER_LONG;
- if (offset) {
- tmp = *(p++);
- tmp &= (~0UL << offset);
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- }
- while (size & ~(BITS_PER_LONG-1)) {
- if ((tmp = *(p++)))
- goto found_middle;
- result += BITS_PER_LONG;
- size -= BITS_PER_LONG;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + ffs(tmp);
-}
-
-size_t _kc_strlcpy(char *dest, const char *src, size_t size)
-{
- size_t ret = strlen(src);
-
- if (size) {
- size_t len = (ret >= size) ? size - 1 : ret;
- memcpy(dest, src, len);
- dest[len] = '\0';
- }
- return ret;
-}
-
-#endif /* 2.6.0 => 2.4.6 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
-int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...)
-{
- va_list args;
- int i;
-
- va_start(args, fmt);
- i = vsnprintf(buf, size, fmt, args);
- va_end(args);
- return (i >= size) ? (size - 1) : i;
-}
-#endif /* < 2.6.4 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
-DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
-#endif /* < 2.6.10 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
-char *_kc_kstrdup(const char *s, unsigned int gfp)
-{
- size_t len;
- char *buf;
-
- if (!s)
- return NULL;
-
- len = strlen(s) + 1;
- buf = kmalloc(len, gfp);
- if (buf)
- memcpy(buf, s, len);
- return buf;
-}
-#endif /* < 2.6.13 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
-void *_kc_kzalloc(size_t size, int flags)
-{
- void *ret = kmalloc(size, flags);
- if (ret)
- memset(ret, 0, size);
- return ret;
-}
-#endif /* <= 2.6.13 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
-int _kc_skb_pad(struct sk_buff *skb, int pad)
-{
- int ntail;
-
-	/* If the skbuff is non-linear, tailroom is always zero. */
-	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
- memset(skb->data+skb->len, 0, pad);
- return 0;
- }
-
- ntail = skb->data_len + pad - (skb->end - skb->tail);
-	if (likely(skb_cloned(skb) || ntail > 0)) {
-		if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC))
-			goto free_skb;
-	}
-
-#ifdef MAX_SKB_FRAGS
- if (skb_is_nonlinear(skb) &&
- !__pskb_pull_tail(skb, skb->data_len))
- goto free_skb;
-
-#endif
- memset(skb->data + skb->len, 0, pad);
- return 0;
-
-free_skb:
- kfree_skb(skb);
- return -ENOMEM;
-}
-
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
-int _kc_pci_save_state(struct pci_dev *pdev)
-{
- struct adapter_struct *adapter = pci_get_drvdata(pdev);
- int size = PCI_CONFIG_SPACE_LEN, i;
- u16 pcie_cap_offset, pcie_link_status;
-
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
- /* no ->dev for 2.4 kernels */
- WARN_ON(pdev->dev.driver_data == NULL);
-#endif
- pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (pcie_cap_offset) {
- if (!pci_read_config_word(pdev,
- pcie_cap_offset + PCIE_LINK_STATUS,
- &pcie_link_status))
- size = PCIE_CONFIG_SPACE_LEN;
- }
- pci_config_space_ich8lan();
-#ifdef HAVE_PCI_ERS
- if (adapter->config_space == NULL)
-#else
- WARN_ON(adapter->config_space != NULL);
-#endif
- adapter->config_space = kmalloc(size, GFP_KERNEL);
- if (!adapter->config_space) {
- printk(KERN_ERR "Out of memory in pci_save_state\n");
- return -ENOMEM;
- }
- for (i = 0; i < (size / 4); i++)
- pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
- return 0;
-}
-
-void _kc_pci_restore_state(struct pci_dev *pdev)
-{
- struct adapter_struct *adapter = pci_get_drvdata(pdev);
- int size = PCI_CONFIG_SPACE_LEN, i;
- u16 pcie_cap_offset;
- u16 pcie_link_status;
-
- if (adapter->config_space != NULL) {
- pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (pcie_cap_offset &&
- !pci_read_config_word(pdev,
- pcie_cap_offset + PCIE_LINK_STATUS,
- &pcie_link_status))
- size = PCIE_CONFIG_SPACE_LEN;
-
- pci_config_space_ich8lan();
- for (i = 0; i < (size / 4); i++)
- pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
-#ifndef HAVE_PCI_ERS
- kfree(adapter->config_space);
- adapter->config_space = NULL;
-#endif
- }
-}
-#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
-
-#ifdef HAVE_PCI_ERS
-void _kc_free_netdev(struct net_device *netdev)
-{
- struct adapter_struct *adapter = netdev_priv(netdev);
-
- if (adapter->config_space != NULL)
- kfree(adapter->config_space);
-#ifdef CONFIG_SYSFS
- if (netdev->reg_state == NETREG_UNINITIALIZED) {
- kfree((char *)netdev - netdev->padded);
- } else {
- BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
- netdev->reg_state = NETREG_RELEASED;
- class_device_put(&netdev->class_dev);
- }
-#else
- kfree((char *)netdev - netdev->padded);
-#endif
-}
-#endif
-
-void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
-{
- void *p;
-
- p = kzalloc(len, gfp);
- if (p)
- memcpy(p, src, len);
- return p;
-}
-#endif /* <= 2.6.19 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
-/* hexdump code taken from lib/hexdump.c */
-static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
- int groupsize, unsigned char *linebuf,
- size_t linebuflen, bool ascii)
-{
- const u8 *ptr = buf;
- u8 ch;
- int j, lx = 0;
- int ascii_column;
-
- if (rowsize != 16 && rowsize != 32)
- rowsize = 16;
-
- if (!len)
- goto nil;
- if (len > rowsize) /* limit to one line at a time */
- len = rowsize;
- if ((len % groupsize) != 0) /* no mixed size output */
- groupsize = 1;
-
- switch (groupsize) {
- case 8: {
- const u64 *ptr8 = buf;
- int ngroups = len / groupsize;
-
- for (j = 0; j < ngroups; j++)
- lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
- "%s%16.16llx", j ? " " : "",
- (unsigned long long)*(ptr8 + j));
- ascii_column = 17 * ngroups + 2;
- break;
- }
-
- case 4: {
- const u32 *ptr4 = buf;
- int ngroups = len / groupsize;
-
- for (j = 0; j < ngroups; j++)
- lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
- "%s%8.8x", j ? " " : "", *(ptr4 + j));
- ascii_column = 9 * ngroups + 2;
- break;
- }
-
- case 2: {
- const u16 *ptr2 = buf;
- int ngroups = len / groupsize;
-
- for (j = 0; j < ngroups; j++)
- lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
- "%s%4.4x", j ? " " : "", *(ptr2 + j));
- ascii_column = 5 * ngroups + 2;
- break;
- }
-
- default:
- for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
- ch = ptr[j];
- linebuf[lx++] = hex_asc(ch >> 4);
- linebuf[lx++] = hex_asc(ch & 0x0f);
- linebuf[lx++] = ' ';
- }
- if (j)
- lx--;
-
- ascii_column = 3 * rowsize + 2;
- break;
- }
- if (!ascii)
- goto nil;
-
- while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
- linebuf[lx++] = ' ';
- for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
- linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
- : '.';
-nil:
- linebuf[lx++] = '\0';
-}
-
-void _kc_print_hex_dump(const char *level,
- const char *prefix_str, int prefix_type,
- int rowsize, int groupsize,
- const void *buf, size_t len, bool ascii)
-{
- const u8 *ptr = buf;
- int i, linelen, remaining = len;
- unsigned char linebuf[200];
-
- if (rowsize != 16 && rowsize != 32)
- rowsize = 16;
-
- for (i = 0; i < len; i += rowsize) {
- linelen = min(remaining, rowsize);
- remaining -= rowsize;
- _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
- linebuf, sizeof(linebuf), ascii);
-
- switch (prefix_type) {
- case DUMP_PREFIX_ADDRESS:
- printk("%s%s%*p: %s\n", level, prefix_str,
- (int)(2 * sizeof(void *)), ptr + i, linebuf);
- break;
- case DUMP_PREFIX_OFFSET:
- printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
- break;
- default:
- printk("%s%s%s\n", level, prefix_str, linebuf);
- break;
- }
- }
-}
-#endif /* < 2.6.22 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
-int ixgbe_dcb_netlink_register(void)
-{
- return 0;
-}
-
-int ixgbe_dcb_netlink_unregister(void)
-{
- return 0;
-}
-
-int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
-{
- return 0;
-}
-#endif /* < 2.6.23 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
-#ifdef NAPI
-struct net_device *napi_to_poll_dev(struct napi_struct *napi)
-{
- struct adapter_q_vector *q_vector = container_of(napi,
- struct adapter_q_vector,
- napi);
- return &q_vector->poll_dev;
-}
-
-int __kc_adapter_clean(struct net_device *netdev, int *budget)
-{
- int work_done;
- int work_to_do = min(*budget, netdev->quota);
- /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
- struct napi_struct *napi = netdev->priv;
- work_done = napi->poll(napi, work_to_do);
- *budget -= work_done;
- netdev->quota -= work_done;
- return (work_done >= work_to_do) ? 1 : 0;
-}
-#endif /* NAPI */
-#endif /* <= 2.6.24 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
-void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
-{
- struct pci_dev *parent = pdev->bus->self;
- u16 link_state;
- int pos;
-
- if (!parent)
- return;
-
- pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
- if (pos) {
- pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
- link_state &= ~state;
- pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
- }
-}
-#endif /* < 2.6.26 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
-#ifdef HAVE_TX_MQ
-void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
-{
- struct adapter_struct *adapter = netdev_priv(netdev);
- int i;
-
- netif_stop_queue(netdev);
- if (netif_is_multiqueue(netdev))
- for (i = 0; i < adapter->num_tx_queues; i++)
- netif_stop_subqueue(netdev, i);
-}
-void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
-{
- struct adapter_struct *adapter = netdev_priv(netdev);
- int i;
-
- netif_wake_queue(netdev);
- if (netif_is_multiqueue(netdev))
- for (i = 0; i < adapter->num_tx_queues; i++)
- netif_wake_subqueue(netdev, i);
-}
-void _kc_netif_tx_start_all_queues(struct net_device *netdev)
-{
- struct adapter_struct *adapter = netdev_priv(netdev);
- int i;
-
- netif_start_queue(netdev);
- if (netif_is_multiqueue(netdev))
- for (i = 0; i < adapter->num_tx_queues; i++)
- netif_start_subqueue(netdev, i);
-}
-#endif /* HAVE_TX_MQ */
-
-#ifndef __WARN_printf
-void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
-{
- va_list args;
-
- printk(KERN_WARNING "------------[ cut here ]------------\n");
- printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, line);
- va_start(args, fmt);
- vprintk(fmt, args);
- va_end(args);
-
- dump_stack();
-}
-#endif /* __WARN_printf */
-#endif /* < 2.6.27 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
-
-int
-_kc_pci_prepare_to_sleep(struct pci_dev *dev)
-{
- pci_power_t target_state;
- int error;
-
- target_state = pci_choose_state(dev, PMSG_SUSPEND);
-
- pci_enable_wake(dev, target_state, true);
-
- error = pci_set_power_state(dev, target_state);
-
- if (error)
- pci_enable_wake(dev, target_state, false);
-
- return error;
-}
-
-int
-_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
-{
- int err;
-
- err = pci_enable_wake(dev, PCI_D3cold, enable);
- if (err)
- goto out;
-
- err = pci_enable_wake(dev, PCI_D3hot, enable);
-
-out:
- return err;
-}
-#endif /* < 2.6.28 */
-
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
-void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
- int off, int size)
-{
- skb_fill_page_desc(skb, i, page, off, size);
- skb->len += size;
- skb->data_len += size;
- skb->truesize += size;
-}
-#endif /* < 3.4.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
-#ifdef HAVE_NETDEV_SELECT_QUEUE
-#include <net/ip.h>
-static u32 _kc_simple_tx_hashrnd;
-static u32 _kc_simple_tx_hashrnd_initialized;
-
-u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb)
-{
- u32 addr1, addr2, ports;
- u32 hash, ihl;
- u8 ip_proto = 0;
-
- if (unlikely(!_kc_simple_tx_hashrnd_initialized)) {
- get_random_bytes(&_kc_simple_tx_hashrnd, 4);
- _kc_simple_tx_hashrnd_initialized = 1;
- }
-
- switch (skb->protocol) {
- case htons(ETH_P_IP):
- if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
- ip_proto = ip_hdr(skb)->protocol;
- addr1 = ip_hdr(skb)->saddr;
- addr2 = ip_hdr(skb)->daddr;
- ihl = ip_hdr(skb)->ihl;
- break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- case htons(ETH_P_IPV6):
- ip_proto = ipv6_hdr(skb)->nexthdr;
- addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
- addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
- ihl = (40 >> 2);
- break;
-#endif
- default:
- return 0;
- }
-
-
- switch (ip_proto) {
- case IPPROTO_TCP:
- case IPPROTO_UDP:
- case IPPROTO_DCCP:
- case IPPROTO_ESP:
- case IPPROTO_AH:
- case IPPROTO_SCTP:
- case IPPROTO_UDPLITE:
- ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
- break;
-
- default:
- ports = 0;
- break;
- }
-
- hash = jhash_3words(addr1, addr2, ports, _kc_simple_tx_hashrnd);
-
- return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
-}
-#endif /* HAVE_NETDEV_SELECT_QUEUE */
-#endif /* < 2.6.30 */
-
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
-#ifdef HAVE_TX_MQ
-#ifndef CONFIG_NETDEVICES_MULTIQUEUE
-void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
-{
- unsigned int real_num = dev->real_num_tx_queues;
- struct Qdisc *qdisc;
- int i;
-
- if (unlikely(txq > dev->num_tx_queues))
- ;
- else if (txq > real_num)
- dev->real_num_tx_queues = txq;
-	else if (txq < real_num) {
- dev->real_num_tx_queues = txq;
- for (i = txq; i < dev->num_tx_queues; i++) {
- qdisc = netdev_get_tx_queue(dev, i)->qdisc;
- if (qdisc) {
- spin_lock_bh(qdisc_lock(qdisc));
- qdisc_reset(qdisc);
- spin_unlock_bh(qdisc_lock(qdisc));
- }
- }
- }
-}
-#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
-#endif /* HAVE_TX_MQ */
-#endif /* < 2.6.35 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
-static const u32 _kc_flags_dup_features =
- (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
-
-u32 _kc_ethtool_op_get_flags(struct net_device *dev)
-{
- return dev->features & _kc_flags_dup_features;
-}
-
-int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
-{
- if (data & ~supported)
- return -EINVAL;
-
- dev->features = ((dev->features & ~_kc_flags_dup_features) |
- (data & _kc_flags_dup_features));
- return 0;
-}
-#endif /* < 2.6.36 */
-
-/******************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
-u8 _kc_netdev_get_num_tc(struct net_device *dev)
-{
- struct adapter_struct *kc_adapter = netdev_priv(dev);
- if (kc_adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- return kc_adapter->tc;
- else
- return 0;
-}
-
-u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up)
-{
- struct adapter_struct *kc_adapter = netdev_priv(dev);
- int tc;
- u8 map;
-
- for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
- map = kc_adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap;
-
- if (map & (1 << up))
- return tc;
- }
-
- return 0;
-}
-#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
-#endif /* < 2.6.39 */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h
deleted file mode 100755
index e327d659..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h
+++ /dev/null
@@ -1,3143 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _KCOMPAT_H_
-#define _KCOMPAT_H_
-
-#ifndef LINUX_VERSION_CODE
-#include <linux/version.h>
-#else
-#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
-#endif
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/mii.h>
-#include <linux/vmalloc.h>
-#include <asm/io.h>
-#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
-
-/* NAPI enable/disable flags here */
-/* enable NAPI for ixgbe by default */
-#undef CONFIG_IXGBE_NAPI
-#define CONFIG_IXGBE_NAPI
-#define NAPI
-#ifdef CONFIG_IXGBE_NAPI
-#undef NAPI
-#define NAPI
-#endif /* CONFIG_IXGBE_NAPI */
-#ifdef IXGBE_NAPI
-#undef NAPI
-#define NAPI
-#endif /* IXGBE_NAPI */
-#ifdef IXGBE_NO_NAPI
-#undef NAPI
-#endif /* IXGBE_NO_NAPI */
-
-#define adapter_struct ixgbe_adapter
-#define adapter_q_vector ixgbe_q_vector
-
-/* and finally set defines so that the code sees the changes */
-#ifdef NAPI
-#ifndef CONFIG_IXGBE_NAPI
-#define CONFIG_IXGBE_NAPI
-#endif
-#else
-#undef CONFIG_IXGBE_NAPI
-#endif /* NAPI */
-
-/* packet split disable/enable */
-#ifdef DISABLE_PACKET_SPLIT
-#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
-#define CONFIG_IXGBE_DISABLE_PACKET_SPLIT
-#endif
-#endif /* DISABLE_PACKET_SPLIT */
-
-/* MSI compatibility code for all kernels and drivers */
-#ifdef DISABLE_PCI_MSI
-#undef CONFIG_PCI_MSI
-#endif
-#ifndef CONFIG_PCI_MSI
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
-struct msix_entry {
- u16 vector; /* kernel uses to write allocated vector */
- u16 entry; /* driver uses to specify entry, OS writes */
-};
-#endif
-#undef pci_enable_msi
-#define pci_enable_msi(a) -ENOTSUPP
-#undef pci_disable_msi
-#define pci_disable_msi(a) do {} while (0)
-#undef pci_enable_msix
-#define pci_enable_msix(a, b, c) -ENOTSUPP
-#undef pci_disable_msix
-#define pci_disable_msix(a) do {} while (0)
-#define msi_remove_pci_irq_vectors(a) do {} while (0)
-#endif /* CONFIG_PCI_MSI */
-#ifdef DISABLE_PM
-#undef CONFIG_PM
-#endif
-
-#ifdef DISABLE_NET_POLL_CONTROLLER
-#undef CONFIG_NET_POLL_CONTROLLER
-#endif
-
-#ifndef PMSG_SUSPEND
-#define PMSG_SUSPEND 3
-#endif
-
-/* generic boolean compatibility */
-#undef TRUE
-#undef FALSE
-#define TRUE true
-#define FALSE false
-#ifdef GCC_VERSION
-#if ( GCC_VERSION < 3000 )
-#define _Bool char
-#endif
-#else
-#define _Bool char
-#endif
-
-/* kernels less than 2.4.14 don't have this */
-#ifndef ETH_P_8021Q
-#define ETH_P_8021Q 0x8100
-#endif
-
-#ifndef module_param
-#define module_param(v,t,p) MODULE_PARM(v, "i");
-#endif
-
-#ifndef DMA_64BIT_MASK
-#define DMA_64BIT_MASK 0xffffffffffffffffULL
-#endif
-
-#ifndef DMA_32BIT_MASK
-#define DMA_32BIT_MASK 0x00000000ffffffffULL
-#endif
-
-#ifndef PCI_CAP_ID_EXP
-#define PCI_CAP_ID_EXP 0x10
-#endif
-
-#ifndef PCIE_LINK_STATE_L0S
-#define PCIE_LINK_STATE_L0S 1
-#endif
-#ifndef PCIE_LINK_STATE_L1
-#define PCIE_LINK_STATE_L1 2
-#endif
-
-#ifndef mmiowb
-#ifdef CONFIG_IA64
-#define mmiowb() asm volatile ("mf.a" ::: "memory")
-#else
-#define mmiowb()
-#endif
-#endif
-
-#ifndef SET_NETDEV_DEV
-#define SET_NETDEV_DEV(net, pdev)
-#endif
-
-#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
-#define free_netdev(x) kfree(x)
-#endif
-
-#ifdef HAVE_POLL_CONTROLLER
-#define CONFIG_NET_POLL_CONTROLLER
-#endif
-
-#ifndef SKB_DATAREF_SHIFT
-/* if we do not have the infrastructure to detect if skb_header is cloned,
- just return false in all cases */
-#define skb_header_cloned(x) 0
-#endif
-
-#ifndef NETIF_F_GSO
-#define gso_size tso_size
-#define gso_segs tso_segs
-#endif
-
-#ifndef NETIF_F_GRO
-#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
- vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
-#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
-#endif
-
-#ifndef NETIF_F_SCTP_CSUM
-#define NETIF_F_SCTP_CSUM 0
-#endif
-
-#ifndef NETIF_F_LRO
-#define NETIF_F_LRO (1 << 15)
-#endif
-
-#ifndef NETIF_F_NTUPLE
-#define NETIF_F_NTUPLE (1 << 27)
-#endif
-
-#ifndef IPPROTO_SCTP
-#define IPPROTO_SCTP 132
-#endif
-
-#ifndef CHECKSUM_PARTIAL
-#define CHECKSUM_PARTIAL CHECKSUM_HW
-#define CHECKSUM_COMPLETE CHECKSUM_HW
-#endif
-
-#ifndef __read_mostly
-#define __read_mostly
-#endif
-
-#ifndef MII_RESV1
-#define MII_RESV1 0x17 /* Reserved... */
-#endif
-
-#ifndef unlikely
-#define unlikely(_x) _x
-#define likely(_x) _x
-#endif
-
-#ifndef WARN_ON
-#define WARN_ON(x)
-#endif
-
-#ifndef PCI_DEVICE
-#define PCI_DEVICE(vend,dev) \
- .vendor = (vend), .device = (dev), \
- .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
-#endif
-
-#ifndef node_online
-#define node_online(node) ((node) == 0)
-#endif
-
-#ifndef num_online_cpus
-#define num_online_cpus() smp_num_cpus
-#endif
-
-#ifndef cpu_online
-#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
-#endif
-
-#ifndef _LINUX_RANDOM_H
-#include <linux/random.h>
-#endif
-
-#ifndef DECLARE_BITMAP
-#ifndef BITS_TO_LONGS
-#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
-#endif
-#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
-#endif
-
-#ifndef VLAN_HLEN
-#define VLAN_HLEN 4
-#endif
-
-#ifndef VLAN_ETH_HLEN
-#define VLAN_ETH_HLEN 18
-#endif
-
-#ifndef VLAN_ETH_FRAME_LEN
-#define VLAN_ETH_FRAME_LEN 1518
-#endif
-
-#if !defined(IXGBE_DCA) && !defined(IGB_DCA)
-#define dca_get_tag(b) 0
-#define dca_add_requester(a) -1
-#define dca_remove_requester(b) do { } while(0)
-#define DCA_PROVIDER_ADD 0x0001
-#define DCA_PROVIDER_REMOVE 0x0002
-#endif
-
-#ifndef DCA_GET_TAG_TWO_ARGS
-#define dca3_get_tag(a,b) dca_get_tag(b)
-#endif
-
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#if defined(__i386__) || defined(__x86_64__)
-#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#endif
-#endif
-
-/* taken from 2.6.24 definition in linux/kernel.h */
-#ifndef IS_ALIGNED
-#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0)
-#endif
-
-#ifndef NETIF_F_HW_VLAN_TX
-struct _kc_vlan_ethhdr {
- unsigned char h_dest[ETH_ALEN];
- unsigned char h_source[ETH_ALEN];
- __be16 h_vlan_proto;
- __be16 h_vlan_TCI;
- __be16 h_vlan_encapsulated_proto;
-};
-#define vlan_ethhdr _kc_vlan_ethhdr
-struct _kc_vlan_hdr {
- __be16 h_vlan_TCI;
- __be16 h_vlan_encapsulated_proto;
-};
-#define vlan_hdr _kc_vlan_hdr
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
-#define vlan_tx_tag_present(_skb) 0
-#define vlan_tx_tag_get(_skb) 0
-#endif
-#endif
-
-#ifndef VLAN_PRIO_SHIFT
-#define VLAN_PRIO_SHIFT 13
-#endif
-
-
-#ifndef __GFP_COLD
-#define __GFP_COLD 0
-#endif
-
-/*****************************************************************************/
-/* Installations with an ethtool version lacking eeprom, adapter id, or
- * statistics support */
-
-#ifndef ETH_GSTRING_LEN
-#define ETH_GSTRING_LEN 32
-#endif
-
-#ifndef ETHTOOL_GSTATS
-#define ETHTOOL_GSTATS 0x1d
-#undef ethtool_drvinfo
-#define ethtool_drvinfo k_ethtool_drvinfo
-struct k_ethtool_drvinfo {
- u32 cmd;
- char driver[32];
- char version[32];
- char fw_version[32];
- char bus_info[32];
- char reserved1[32];
- char reserved2[16];
- u32 n_stats;
- u32 testinfo_len;
- u32 eedump_len;
- u32 regdump_len;
-};
-
-struct ethtool_stats {
- u32 cmd;
- u32 n_stats;
- u64 data[0];
-};
-#endif /* ETHTOOL_GSTATS */
-
-#ifndef ETHTOOL_PHYS_ID
-#define ETHTOOL_PHYS_ID 0x1c
-#endif /* ETHTOOL_PHYS_ID */
-
-#ifndef ETHTOOL_GSTRINGS
-#define ETHTOOL_GSTRINGS 0x1b
-enum ethtool_stringset {
- ETH_SS_TEST = 0,
- ETH_SS_STATS,
-};
-struct ethtool_gstrings {
- u32 cmd; /* ETHTOOL_GSTRINGS */
-	u32	string_set;	/* string set id e.g. ETH_SS_TEST, etc. */
- u32 len; /* number of strings in the string set */
- u8 data[0];
-};
-#endif /* ETHTOOL_GSTRINGS */
-
-#ifndef ETHTOOL_TEST
-#define ETHTOOL_TEST 0x1a
-enum ethtool_test_flags {
- ETH_TEST_FL_OFFLINE = (1 << 0),
- ETH_TEST_FL_FAILED = (1 << 1),
-};
-struct ethtool_test {
- u32 cmd;
- u32 flags;
- u32 reserved;
- u32 len;
- u64 data[0];
-};
-#endif /* ETHTOOL_TEST */
-
-#ifndef ETHTOOL_GEEPROM
-#define ETHTOOL_GEEPROM 0xb
-#undef ETHTOOL_GREGS
-struct ethtool_eeprom {
- u32 cmd;
- u32 magic;
- u32 offset;
- u32 len;
- u8 data[0];
-};
-
-struct ethtool_value {
- u32 cmd;
- u32 data;
-};
-#endif /* ETHTOOL_GEEPROM */
-
-#ifndef ETHTOOL_GLINK
-#define ETHTOOL_GLINK 0xa
-#endif /* ETHTOOL_GLINK */
-
-#ifndef ETHTOOL_GWOL
-#define ETHTOOL_GWOL 0x5
-#define ETHTOOL_SWOL 0x6
-#define SOPASS_MAX 6
-struct ethtool_wolinfo {
- u32 cmd;
- u32 supported;
- u32 wolopts;
- u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
-};
-#endif /* ETHTOOL_GWOL */
-
-#ifndef ETHTOOL_GREGS
-#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */
-#define ethtool_regs _kc_ethtool_regs
-/* for passing big chunks of data */
-struct _kc_ethtool_regs {
- u32 cmd;
- u32 version; /* driver-specific, indicates different chips/revs */
- u32 len; /* bytes */
- u8 data[0];
-};
-#endif /* ETHTOOL_GREGS */
-
-#ifndef ETHTOOL_GMSGLVL
-#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
-#endif
-#ifndef ETHTOOL_SMSGLVL
-#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
-#endif
-#ifndef ETHTOOL_NWAY_RST
-#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */
-#endif
-#ifndef ETHTOOL_GLINK
-#define ETHTOOL_GLINK 0x0000000a /* Get link status */
-#endif
-#ifndef ETHTOOL_GEEPROM
-#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
-#endif
-#ifndef ETHTOOL_SEEPROM
-#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
-#endif
-#ifndef ETHTOOL_GCOALESCE
-#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
-/* for configuring coalescing parameters of chip */
-#define ethtool_coalesce _kc_ethtool_coalesce
-struct _kc_ethtool_coalesce {
- u32 cmd; /* ETHTOOL_{G,S}COALESCE */
-
- /* How many usecs to delay an RX interrupt after
- * a packet arrives. If 0, only rx_max_coalesced_frames
- * is used.
- */
- u32 rx_coalesce_usecs;
-
- /* How many packets to delay an RX interrupt after
- * a packet arrives. If 0, only rx_coalesce_usecs is
- * used. It is illegal to set both usecs and max frames
- * to zero as this would cause RX interrupts to never be
- * generated.
- */
- u32 rx_max_coalesced_frames;
-
- /* Same as above two parameters, except that these values
- * apply while an IRQ is being serviced by the host. Not
- * all cards support this feature and the values are ignored
- * in that case.
- */
- u32 rx_coalesce_usecs_irq;
- u32 rx_max_coalesced_frames_irq;
-
- /* How many usecs to delay a TX interrupt after
- * a packet is sent. If 0, only tx_max_coalesced_frames
- * is used.
- */
- u32 tx_coalesce_usecs;
-
- /* How many packets to delay a TX interrupt after
- * a packet is sent. If 0, only tx_coalesce_usecs is
- * used. It is illegal to set both usecs and max frames
- * to zero as this would cause TX interrupts to never be
- * generated.
- */
- u32 tx_max_coalesced_frames;
-
- /* Same as above two parameters, except that these values
- * apply while an IRQ is being serviced by the host. Not
- * all cards support this feature and the values are ignored
- * in that case.
- */
- u32 tx_coalesce_usecs_irq;
- u32 tx_max_coalesced_frames_irq;
-
- /* How many usecs to delay in-memory statistics
- * block updates. Some drivers do not have an in-memory
- * statistic block, and in such cases this value is ignored.
- * This value must not be zero.
- */
- u32 stats_block_coalesce_usecs;
-
- /* Adaptive RX/TX coalescing is an algorithm implemented by
- * some drivers to improve latency under low packet rates and
- * improve throughput under high packet rates. Some drivers
- * only implement one of RX or TX adaptive coalescing. Anything
- * not implemented by the driver causes these values to be
- * silently ignored.
- */
- u32 use_adaptive_rx_coalesce;
- u32 use_adaptive_tx_coalesce;
-
- /* When the packet rate (measured in packets per second)
- * is below pkt_rate_low, the {rx,tx}_*_low parameters are
- * used.
- */
- u32 pkt_rate_low;
- u32 rx_coalesce_usecs_low;
- u32 rx_max_coalesced_frames_low;
- u32 tx_coalesce_usecs_low;
- u32 tx_max_coalesced_frames_low;
-
- /* When the packet rate is below pkt_rate_high but above
- * pkt_rate_low (both measured in packets per second) the
- * normal {rx,tx}_* coalescing parameters are used.
- */
-
-	/* When the packet rate (measured in packets per second)
-	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
-	 * used.
-	 */
- u32 pkt_rate_high;
- u32 rx_coalesce_usecs_high;
- u32 rx_max_coalesced_frames_high;
- u32 tx_coalesce_usecs_high;
- u32 tx_max_coalesced_frames_high;
-
- /* How often to do adaptive coalescing packet rate sampling,
- * measured in seconds. Must not be zero.
- */
- u32 rate_sample_interval;
-};
-#endif /* ETHTOOL_GCOALESCE */
-
-#ifndef ETHTOOL_SCOALESCE
-#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
-#endif
-#ifndef ETHTOOL_GRINGPARAM
-#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
-/* for configuring RX/TX ring parameters */
-#define ethtool_ringparam _kc_ethtool_ringparam
-struct _kc_ethtool_ringparam {
- u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
-
- /* Read only attributes. These indicate the maximum number
- * of pending RX/TX ring entries the driver will allow the
- * user to set.
- */
- u32 rx_max_pending;
- u32 rx_mini_max_pending;
- u32 rx_jumbo_max_pending;
- u32 tx_max_pending;
-
- /* Values changeable by the user. The valid values are
- * in the range 1 to the "*_max_pending" counterpart above.
- */
- u32 rx_pending;
- u32 rx_mini_pending;
- u32 rx_jumbo_pending;
- u32 tx_pending;
-};
-#endif /* ETHTOOL_GRINGPARAM */
-
-#ifndef ETHTOOL_SRINGPARAM
-#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
-#endif
-#ifndef ETHTOOL_GPAUSEPARAM
-#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
-/* for configuring link flow control parameters */
-#define ethtool_pauseparam _kc_ethtool_pauseparam
-struct _kc_ethtool_pauseparam {
- u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
-
- /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
- * being true) the user may set 'autoneg' here non-zero to have the
- * pause parameters be auto-negotiated too. In such a case, the
- * {rx,tx}_pause values below determine what capabilities are
- * advertised.
- *
- * If 'autoneg' is zero or the link is not being auto-negotiated,
- * then {rx,tx}_pause force the driver to use/not-use pause
- * flow control.
- */
- u32 autoneg;
- u32 rx_pause;
- u32 tx_pause;
-};
-#endif /* ETHTOOL_GPAUSEPARAM */
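How a driver is expected to interpret this triple can be sketched in isolation; apply_pause is a made-up name and the printfs stand in for real MAC register writes.

#include <stdio.h>

typedef unsigned int u32;

static void apply_pause(u32 autoneg, u32 rx_pause, u32 tx_pause)
{
        if (autoneg)    /* rx/tx_pause are capabilities to advertise */
                printf("advertise: rx=%u tx=%u (outcome decided by autoneg)\n",
                       rx_pause, tx_pause);
        else            /* rx/tx_pause force the setting directly */
                printf("force: rx=%u tx=%u\n", rx_pause, tx_pause);
}

int main(void)
{
        apply_pause(1, 1, 1);   /* negotiate symmetric pause */
        apply_pause(0, 1, 0);   /* hard-enable RX pause only */
        return 0;
}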
-
-#ifndef ETHTOOL_SPAUSEPARAM
-#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
-#endif
-#ifndef ETHTOOL_GRXCSUM
-#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
-#endif
-#ifndef ETHTOOL_SRXCSUM
-#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
-#endif
-#ifndef ETHTOOL_GTXCSUM
-#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
-#endif
-#ifndef ETHTOOL_STXCSUM
-#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
-#endif
-#ifndef ETHTOOL_GSG
-#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
- * (ethtool_value) */
-#endif
-#ifndef ETHTOOL_SSG
-#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
- * (ethtool_value). */
-#endif
-#ifndef ETHTOOL_TEST
-#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */
-#endif
-#ifndef ETHTOOL_GSTRINGS
-#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
-#endif
-#ifndef ETHTOOL_PHYS_ID
-#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
-#endif
-#ifndef ETHTOOL_GSTATS
-#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
-#endif
-#ifndef ETHTOOL_GTSO
-#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
-#endif
-#ifndef ETHTOOL_STSO
-#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
-#endif
-
-#ifndef ETHTOOL_BUSINFO_LEN
-#define ETHTOOL_BUSINFO_LEN 32
-#endif
-
-#ifndef RHEL_RELEASE_CODE
-/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
-#define RHEL_RELEASE_CODE 0
-#endif
-#ifndef RHEL_RELEASE_VERSION
-#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
-#endif
-#ifndef AX_RELEASE_CODE
-#define AX_RELEASE_CODE 0
-#endif
-#ifndef AX_RELEASE_VERSION
-#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b))
-#endif
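Both *_RELEASE_VERSION macros pack major and minor into one integer so that plain integer comparison orders releases correctly: RHEL_RELEASE_VERSION(5,4) is (5 << 8) + 4 = 0x0504, which compares greater than 4.7's 0x0407. A quick standalone check:

#include <assert.h>

#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))

int main(void)
{
        assert(RHEL_RELEASE_VERSION(5,4) == 0x0504);
        assert(RHEL_RELEASE_VERSION(5,4) > RHEL_RELEASE_VERSION(4,7));
        assert(RHEL_RELEASE_VERSION(4,7) > RHEL_RELEASE_VERSION(4,5));
        return 0;
}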
-
-/* SuSE version macro is the same as Linux kernel version */
-#ifndef SLE_VERSION
-#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
-#endif
-#ifndef SLE_VERSION_CODE
-#ifdef CONFIG_SUSE_KERNEL
-/* SLES11 GA is 2.6.27 based */
-#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
-#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
-#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
-/* SLES11 SP1 is 2.6.32 based */
-#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
-#else
-#define SLE_VERSION_CODE 0
-#endif
-#else /* CONFIG_SUSE_KERNEL */
-#define SLE_VERSION_CODE 0
-#endif /* CONFIG_SUSE_KERNEL */
-#endif /* SLE_VERSION_CODE */
-
-#ifdef __KLOCWORK__
-#ifdef ARRAY_SIZE
-#undef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-#endif /* __KLOCWORK__ */
-
-/*****************************************************************************/
-/* 2.4.3 => 2.4.0 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
-
-/**************************************/
-/* PCI DRIVER API */
-
-#ifndef pci_set_dma_mask
-#define pci_set_dma_mask _kc_pci_set_dma_mask
-extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
-#endif
-
-#ifndef pci_request_regions
-#define pci_request_regions _kc_pci_request_regions
-extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
-#endif
-
-#ifndef pci_release_regions
-#define pci_release_regions _kc_pci_release_regions
-extern void _kc_pci_release_regions(struct pci_dev *pdev);
-#endif
-
-/**************************************/
-/* NETWORK DRIVER API */
-
-#ifndef alloc_etherdev
-#define alloc_etherdev _kc_alloc_etherdev
-extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
-#endif
-
-#ifndef is_valid_ether_addr
-#define is_valid_ether_addr _kc_is_valid_ether_addr
-extern int _kc_is_valid_ether_addr(u8 *addr);
-#endif
-
-/**************************************/
-/* MISCELLANEOUS */
-
-#ifndef INIT_TQUEUE
-#define INIT_TQUEUE(_tq, _routine, _data) \
- do { \
- INIT_LIST_HEAD(&(_tq)->list); \
- (_tq)->sync = 0; \
- (_tq)->routine = _routine; \
- (_tq)->data = _data; \
- } while (0)
-#endif
-
-#endif /* 2.4.3 => 2.4.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
-/* Generic MII registers. */
-#define MII_BMCR 0x00 /* Basic mode control register */
-#define MII_BMSR 0x01 /* Basic mode status register */
-#define MII_PHYSID1 0x02 /* PHYS ID 1 */
-#define MII_PHYSID2 0x03 /* PHYS ID 2 */
-#define MII_ADVERTISE 0x04 /* Advertisement control reg */
-#define MII_LPA 0x05 /* Link partner ability reg */
-#define MII_EXPANSION 0x06 /* Expansion register */
-/* Basic mode control register. */
-#define BMCR_FULLDPLX 0x0100 /* Full duplex */
-#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
-/* Basic mode status register. */
-#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
-#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
-#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
-#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
-#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
-#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
-/* Advertisement control register. */
-#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
-#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
-#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
-#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
-#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
-#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
- ADVERTISE_100HALF | ADVERTISE_100FULL)
-/* Expansion register for auto-negotiation. */
-#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */
-#endif
-
-/*****************************************************************************/
-/* 2.4.6 => 2.4.3 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
-
-#ifndef pci_set_power_state
-#define pci_set_power_state _kc_pci_set_power_state
-extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
-#endif
-
-#ifndef pci_enable_wake
-#define pci_enable_wake _kc_pci_enable_wake
-extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
-#endif
-
-#ifndef pci_disable_device
-#define pci_disable_device _kc_pci_disable_device
-extern void _kc_pci_disable_device(struct pci_dev *pdev);
-#endif
-
-/* PCI PM entry point syntax changed, so don't support suspend/resume */
-#undef CONFIG_PM
-
-#endif /* 2.4.6 => 2.4.3 */
-
-#ifndef HAVE_PCI_SET_MWI
-#define pci_set_mwi(X) pci_write_config_word(X, \
- PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
- PCI_COMMAND_INVALIDATE);
-#define pci_clear_mwi(X) pci_write_config_word(X, \
- PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
- ~PCI_COMMAND_INVALIDATE);
-#endif
-
-/*****************************************************************************/
-/* 2.4.10 => 2.4.9 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
-
-/**************************************/
-/* MODULE API */
-
-#ifndef MODULE_LICENSE
- #define MODULE_LICENSE(X)
-#endif
-
-/**************************************/
-/* OTHER */
-
-#undef min
-#define min(x,y) ({ \
- const typeof(x) _x = (x); \
- const typeof(y) _y = (y); \
- (void) (&_x == &_y); \
- _x < _y ? _x : _y; })
-
-#undef max
-#define max(x,y) ({ \
- const typeof(x) _x = (x); \
- const typeof(y) _y = (y); \
- (void) (&_x == &_y); \
- _x > _y ? _x : _y; })
-
-#define min_t(type,x,y) ({ \
- type _x = (x); \
- type _y = (y); \
- _x < _y ? _x : _y; })
-
-#define max_t(type,x,y) ({ \
- type _x = (x); \
- type _y = (y); \
- _x > _y ? _x : _y; })
-
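The otherwise dead (void) (&_x == &_y) comparison is the type check: comparing pointers to different pointee types draws a compiler warning, while min_t/max_t deliberately cast both operands and skip it. A minimal sketch (GNU C, the same dialect the macros already rely on):

#include <stdio.h>

#define min(x,y) ({ \
        const typeof(x) _x = (x); \
        const typeof(y) _y = (y); \
        (void) (&_x == &_y);    /* warns if x and y differ in type */ \
        _x < _y ? _x : _y; })

#define min_t(type,x,y) ({ \
        type _x = (x); \
        type _y = (y); \
        _x < _y ? _x : _y; })

int main(void)
{
        int a = 5;
        long b = 3;

        /* min(a, b) would trigger "comparison of distinct pointer types";
         * min_t silences it by casting both sides to one stated type. */
        printf("%ld\n", min_t(long, a, b));     /* prints 3 */
        return 0;
}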
-#ifndef list_for_each_safe
-#define list_for_each_safe(pos, n, head) \
- for (pos = (head)->next, n = pos->next; pos != (head); \
- pos = n, n = pos->next)
-#endif
-
-#ifndef ____cacheline_aligned_in_smp
-#ifdef CONFIG_SMP
-#define ____cacheline_aligned_in_smp ____cacheline_aligned
-#else
-#define ____cacheline_aligned_in_smp
-#endif /* CONFIG_SMP */
-#endif
-
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
-extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...);
-#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
-extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
-#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
-#else /* 2.4.8 => 2.4.9 */
-extern int snprintf(char * buf, size_t size, const char *fmt, ...);
-extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
-#endif
-#endif /* 2.4.10 => 2.4.9 */
-
-
-/*****************************************************************************/
-/* 2.4.12 => 2.4.10 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) )
-#ifndef HAVE_NETIF_MSG
-#define HAVE_NETIF_MSG 1
-enum {
- NETIF_MSG_DRV = 0x0001,
- NETIF_MSG_PROBE = 0x0002,
- NETIF_MSG_LINK = 0x0004,
- NETIF_MSG_TIMER = 0x0008,
- NETIF_MSG_IFDOWN = 0x0010,
- NETIF_MSG_IFUP = 0x0020,
- NETIF_MSG_RX_ERR = 0x0040,
- NETIF_MSG_TX_ERR = 0x0080,
- NETIF_MSG_TX_QUEUED = 0x0100,
- NETIF_MSG_INTR = 0x0200,
- NETIF_MSG_TX_DONE = 0x0400,
- NETIF_MSG_RX_STATUS = 0x0800,
- NETIF_MSG_PKTDATA = 0x1000,
- NETIF_MSG_HW = 0x2000,
- NETIF_MSG_WOL = 0x4000,
-};
-
-#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
-#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
-#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
-#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
-#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
-#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
-#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
-#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
-#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
-#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
-#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
-#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
-#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
-#endif /* !HAVE_NETIF_MSG */
-#endif /* 2.4.12 => 2.4.10 */
-
-/*****************************************************************************/
-/* 2.4.13 => 2.4.12 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
-
-/**************************************/
-/* PCI DMA MAPPING */
-
-#ifndef virt_to_page
- #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
-#endif
-
-#ifndef pci_map_page
-#define pci_map_page _kc_pci_map_page
-extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
-#endif
-
-#ifndef pci_unmap_page
-#define pci_unmap_page _kc_pci_unmap_page
-extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
-#endif
-
-/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
-
-#undef DMA_32BIT_MASK
-#define DMA_32BIT_MASK 0xffffffff
-#undef DMA_64BIT_MASK
-#define DMA_64BIT_MASK 0xffffffff
-
-/**************************************/
-/* OTHER */
-
-#ifndef cpu_relax
-#define cpu_relax() rep_nop()
-#endif
-
-struct vlan_ethhdr {
- unsigned char h_dest[ETH_ALEN];
- unsigned char h_source[ETH_ALEN];
- unsigned short h_vlan_proto;
- unsigned short h_vlan_TCI;
- unsigned short h_vlan_encapsulated_proto;
-};
-#endif /* 2.4.13 => 2.4.12 */
-
-/*****************************************************************************/
-/* 2.4.17 => 2.4.12 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
-
-#ifndef __devexit_p
- #define __devexit_p(x) &(x)
-#endif
-
-#endif /* 2.4.17 => 2.4.12 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) )
-#define NETIF_MSG_HW 0x2000
-#define NETIF_MSG_WOL 0x4000
-
-#ifndef netif_msg_hw
-#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
-#endif
-#ifndef netif_msg_wol
-#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
-#endif
-#endif /* 2.4.18 */
-
-/*****************************************************************************/
-/* 2.4.20 => 2.4.19 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
-
-/* we won't support NAPI on less than 2.4.20 */
-#ifdef NAPI
-#undef NAPI
-#undef CONFIG_IXGBE_NAPI
-#endif
-
-#endif /* 2.4.20 => 2.4.19 */
-
-/*****************************************************************************/
-/* 2.4.22 => 2.4.17 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
-#define pci_name(x) ((x)->slot_name)
-#endif
-
-/*****************************************************************************/
-/* 2.4.22 => 2.4.17 */
-
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
-#ifndef IXGBE_NO_LRO
-/* Don't enable LRO for these legacy kernels */
-#define IXGBE_NO_LRO
-#endif
-#endif
-
-/*****************************************************************************/
-/* 2.4.23 => 2.4.22 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
-/*****************************************************************************/
-#ifdef NAPI
-#ifndef netif_poll_disable
-#define netif_poll_disable(x) _kc_netif_poll_disable(x)
-static inline void _kc_netif_poll_disable(struct net_device *netdev)
-{
- while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
- /* No hurry */
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(1);
- }
-}
-#endif
-#ifndef netif_poll_enable
-#define netif_poll_enable(x) _kc_netif_poll_enable(x)
-static inline void _kc_netif_poll_enable(struct net_device *netdev)
-{
- clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
-}
-#endif
-#endif /* NAPI */
-#ifndef netif_tx_disable
-#define netif_tx_disable(x) _kc_netif_tx_disable(x)
-static inline void _kc_netif_tx_disable(struct net_device *dev)
-{
- spin_lock_bh(&dev->xmit_lock);
- netif_stop_queue(dev);
- spin_unlock_bh(&dev->xmit_lock);
-}
-#endif
-#else /* 2.4.23 => 2.4.22 */
-#define HAVE_SCTP
-#endif /* 2.4.23 => 2.4.22 */
-
-/*****************************************************************************/
-/* 2.6.4 => 2.6.0 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
- ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
- LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
-#define ETHTOOL_OPS_COMPAT
-#endif /* 2.6.4 => 2.6.0 */
-
-/*****************************************************************************/
-/* 2.5.71 => 2.4.x */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
-#define sk_protocol protocol
-#define pci_get_device pci_find_device
-#endif /* 2.5.71 => 2.4.x */
-
-/*****************************************************************************/
-/* < 2.4.27 or 2.6.0 <= 2.6.5 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
- ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
- LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
-
-#ifndef netif_msg_init
-#define netif_msg_init _kc_netif_msg_init
-static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
-{
- /* use default */
- if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
- return default_msg_enable_bits;
- if (debug_value == 0) /* no output */
- return 0;
- /* set low N bits */
-	return (1 << debug_value) - 1;
-}
-#endif
-
-#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
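The conversion from a numeric debug level to a message-enable bitmap is simply "set the lowest N bits". A standalone restatement of the same logic (msg_init is a local name for the sketch) showing the three cases, out of range, zero, and 1..31:

#include <stdio.h>

typedef unsigned int u32;

static u32 msg_init(int debug_value, int default_msg_enable_bits)
{
        if (debug_value < 0 || debug_value >= (int)(sizeof(u32) * 8))
                return default_msg_enable_bits; /* out of range: use default */
        if (debug_value == 0)
                return 0;                       /* no output */
        return (1u << debug_value) - 1;         /* lowest N bits set */
}

int main(void)
{
        /* 3 -> 0x7: NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK */
        printf("0x%x\n", msg_init(3, 0x33));
        printf("0x%x\n", msg_init(-1, 0x33));   /* falls back to 0x33 */
        return 0;
}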
-/*****************************************************************************/
-#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
- (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
- ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
-#define netdev_priv(x) x->priv
-#endif
-
-/*****************************************************************************/
-/* <= 2.5.0 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
-#include <linux/rtnetlink.h>
-#undef pci_register_driver
-#define pci_register_driver pci_module_init
-
-/*
- * Most of the dma compat code is copied/modified from the 2.4.37
- * /include/linux/libata-compat.h header file
- */
-/* These definitions mirror those in pci.h, so they can be used
- * interchangeably with their PCI_ counterparts */
-enum dma_data_direction {
- DMA_BIDIRECTIONAL = 0,
- DMA_TO_DEVICE = 1,
- DMA_FROM_DEVICE = 2,
- DMA_NONE = 3,
-};
-
-struct device {
- struct pci_dev pdev;
-};
-
-static inline struct pci_dev *to_pci_dev (struct device *dev)
-{
- return (struct pci_dev *) dev;
-}
-static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
-{
- return (struct device *) pdev;
-}
-
-#define pdev_printk(lvl, pdev, fmt, args...) \
- printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
-#define dev_err(dev, fmt, args...) \
- pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
-#define dev_info(dev, fmt, args...) \
- pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
-#define dev_warn(dev, fmt, args...) \
- pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
-
-/* NOTE: dangerous! we ignore the 'gfp' argument */
-#define dma_alloc_coherent(dev,sz,dma,gfp) \
- pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
-#define dma_free_coherent(dev,sz,addr,dma_addr) \
- pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
-
-#define dma_map_page(dev,a,b,c,d) \
- pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
-#define dma_unmap_page(dev,a,b,c) \
- pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
-
-#define dma_map_single(dev,a,b,c) \
- pci_map_single(to_pci_dev(dev),(a),(b),(c))
-#define dma_unmap_single(dev,a,b,c) \
- pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
-
-#define dma_sync_single(dev,a,b,c) \
- pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
-
-/* for range just sync everything, that's all the pci API can do */
-#define dma_sync_single_range(dev,addr,off,sz,dir) \
- pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
-
-#define dma_set_mask(dev,mask) \
- pci_set_dma_mask(to_pci_dev(dev),(mask))
-
-/* hlist_* code - doubly linked lists */
-struct hlist_head {
- struct hlist_node *first;
-};
-
-struct hlist_node {
- struct hlist_node *next, **pprev;
-};
-
-static inline void __hlist_del(struct hlist_node *n)
-{
- struct hlist_node *next = n->next;
- struct hlist_node **pprev = n->pprev;
- *pprev = next;
- if (next)
- next->pprev = pprev;
-}
-
-static inline void hlist_del(struct hlist_node *n)
-{
- __hlist_del(n);
- n->next = NULL;
- n->pprev = NULL;
-}
-
-static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
-{
- struct hlist_node *first = h->first;
- n->next = first;
- if (first)
- first->pprev = &n->next;
- h->first = n;
- n->pprev = &h->first;
-}
-
-static inline int hlist_empty(const struct hlist_head *h)
-{
- return !h->first;
-}
-#define HLIST_HEAD_INIT { .first = NULL }
-#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
-#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
-static inline void INIT_HLIST_NODE(struct hlist_node *h)
-{
- h->next = NULL;
- h->pprev = NULL;
-}
-#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
-
-#define hlist_for_each_entry(tpos, pos, head, member) \
- for (pos = (head)->first; \
- pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
-
-#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
- for (pos = (head)->first; \
- pos && ({ n = pos->next; 1; }) && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = n)
-
-#ifndef might_sleep
-#define might_sleep()
-#endif
-#else
-static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
-{
- return &pdev->dev;
-}
-#endif /* <= 2.5.0 */
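The pprev field stores the address of the previous node's next pointer (or of head->first), which is what lets __hlist_del unlink a node in O(1) without a reference to the list head. The shims compile unchanged in user space, so the invariants are easy to exercise:

#include <assert.h>
#include <stddef.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
        struct hlist_node *first = h->first;
        n->next = first;
        if (first)
                first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}

static void hlist_del(struct hlist_node *n)
{
        struct hlist_node *next = n->next, **pprev = n->pprev;
        *pprev = next;                  /* works for first and interior nodes */
        if (next)
                next->pprev = pprev;
}

int main(void)
{
        struct hlist_head head = { NULL };
        struct hlist_node a, b;

        hlist_add_head(&a, &head);
        hlist_add_head(&b, &head);      /* list is now b -> a */
        assert(head.first == &b && b.next == &a);

        hlist_del(&b);                  /* no head pointer needed */
        assert(head.first == &a && a.pprev == &head.first);
        return 0;
}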
-
-/*****************************************************************************/
-/* 2.5.28 => 2.4.23 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
-
-static inline void _kc_synchronize_irq(void)
-{
- synchronize_irq();
-}
-#undef synchronize_irq
-#define synchronize_irq(X) _kc_synchronize_irq()
-
-#include <linux/tqueue.h>
-#define work_struct tq_struct
-#undef INIT_WORK
-#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
-#undef container_of
-#define container_of list_entry
-#define schedule_work schedule_task
-#define flush_scheduled_work flush_scheduled_tasks
-#define cancel_work_sync(x) flush_scheduled_work()
-
-#endif /* 2.5.28 => 2.4.23 */
-
-/*****************************************************************************/
-/* 2.6.0 => 2.5.28 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
-#undef get_cpu
-#define get_cpu() smp_processor_id()
-#undef put_cpu
-#define put_cpu() do { } while(0)
-#define MODULE_INFO(version, _version)
-#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
-#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
-#endif
-#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
-
-#define dma_set_coherent_mask(dev,mask) 1
-
-#undef dev_put
-#define dev_put(dev) __dev_put(dev)
-
-#ifndef skb_fill_page_desc
-#define skb_fill_page_desc _kc_skb_fill_page_desc
-extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
-#endif
-
-#undef ALIGN
-#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
-
-#ifndef page_count
-#define page_count(p) atomic_read(&(p)->count)
-#endif
-
-#ifdef MAX_NUMNODES
-#undef MAX_NUMNODES
-#endif
-#define MAX_NUMNODES 1
-
-/* find_first_bit and find_next_bit are not defined for most
- * 2.4 kernels (except for the Red Hat 2.4.21 kernels).
- */
-#include <linux/bitops.h>
-#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
-#undef find_next_bit
-#define find_next_bit _kc_find_next_bit
-extern unsigned long _kc_find_next_bit(const unsigned long *addr,
- unsigned long size,
- unsigned long offset);
-#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
-
-
-#ifndef netdev_name
-static inline const char *_kc_netdev_name(const struct net_device *dev)
-{
- if (strchr(dev->name, '%'))
- return "(unregistered net_device)";
- return dev->name;
-}
-#define netdev_name(netdev) _kc_netdev_name(netdev)
-#endif /* netdev_name */
-
-#ifndef strlcpy
-#define strlcpy _kc_strlcpy
-extern size_t _kc_strlcpy(char *dest, const char *src, size_t size);
-#endif /* strlcpy */
-
-#endif /* 2.6.0 => 2.5.28 */
-
-/*****************************************************************************/
-/* 2.6.4 => 2.6.0 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
-#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
-#endif /* 2.6.4 => 2.6.0 */
-
-/*****************************************************************************/
-/* 2.6.5 => 2.6.0 */
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
-#define dma_sync_single_for_cpu dma_sync_single
-#define dma_sync_single_for_device dma_sync_single
-#define dma_sync_single_range_for_cpu dma_sync_single_range
-#define dma_sync_single_range_for_device dma_sync_single_range
-#ifndef pci_dma_mapping_error
-#define pci_dma_mapping_error _kc_pci_dma_mapping_error
-static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
-{
- return dma_addr == 0;
-}
-#endif
-#endif /* 2.6.5 => 2.6.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
-extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
-#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
-#endif /* < 2.6.4 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
-/* taken from 2.6 include/linux/bitmap.h */
-#undef bitmap_zero
-#define bitmap_zero _kc_bitmap_zero
-static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
-{
- if (nbits <= BITS_PER_LONG)
- *dst = 0UL;
- else {
- int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0, len);
- }
-}
-#define random_ether_addr _kc_random_ether_addr
-static inline void _kc_random_ether_addr(u8 *addr)
-{
- get_random_bytes(addr, ETH_ALEN);
- addr[0] &= 0xfe; /* clear multicast */
- addr[0] |= 0x02; /* set local assignment */
-}
-#define page_to_nid(x) 0
-
-#endif /* < 2.6.6 */
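The two bit operations turn any six random bytes into a valid station address: bit 0 of the first octet is the group/multicast flag (cleared) and bit 1 is the locally-administered flag (set, so the address cannot clash with a vendor-assigned OUI). A user-space sketch with rand() standing in for get_random_bytes:

#include <stdio.h>
#include <stdlib.h>

#define ETH_ALEN 6

int main(void)
{
        unsigned char addr[ETH_ALEN];
        int i;

        for (i = 0; i < ETH_ALEN; i++)
                addr[i] = rand() & 0xff;        /* stand-in for get_random_bytes */

        addr[0] &= 0xfe;        /* clear multicast bit */
        addr[0] |= 0x02;        /* set locally-administered bit */

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
        return 0;
}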
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
-#undef if_mii
-#define if_mii _kc_if_mii
-static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
-{
- return (struct mii_ioctl_data *) &rq->ifr_ifru;
-}
-
-#ifndef __force
-#define __force
-#endif
-#endif /* < 2.6.7 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
-#ifndef PCI_EXP_DEVCTL
-#define PCI_EXP_DEVCTL 8
-#endif
-#ifndef PCI_EXP_DEVCTL_CERE
-#define PCI_EXP_DEVCTL_CERE 0x0001
-#endif
-#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
- schedule_timeout((x * HZ)/1000 + 2); \
- } while (0)
-
-#endif /* < 2.6.8 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
-#include <net/dsfield.h>
-#define __iomem
-
-#ifndef kcalloc
-#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
-extern void *_kc_kzalloc(size_t size, int flags);
-#endif
-#define MSEC_PER_SEC 1000L
-static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
-{
-#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
- return (MSEC_PER_SEC / HZ) * j;
-#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
- return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
-#else
- return (j * MSEC_PER_SEC) / HZ;
-#endif
-}
-static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
-{
- if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
- return MAX_JIFFY_OFFSET;
-#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
- return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
-#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
- return m * (HZ / MSEC_PER_SEC);
-#else
- return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
-#endif
-}
-
-#define msleep_interruptible _kc_msleep_interruptible
-static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
-{
- unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
-
- while (timeout && !signal_pending(current)) {
- __set_current_state(TASK_INTERRUPTIBLE);
- timeout = schedule_timeout(timeout);
- }
- return _kc_jiffies_to_msecs(timeout);
-}
-
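With a typical HZ of 100 the first branch of both functions applies, since 100 divides 1000; note that msecs_to_jiffies rounds up, so a sleep never comes up short. A standalone check of the arithmetic (HZ is assumed here, not read from any kernel config):

#include <assert.h>

#define HZ 100                  /* assumed tick rate for the example */
#define MSEC_PER_SEC 1000L

static unsigned int jiffies_to_msecs(unsigned long j)
{
        return (MSEC_PER_SEC / HZ) * j;         /* HZ divides 1000 */
}

static unsigned long msecs_to_jiffies(unsigned int m)
{
        return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
}

int main(void)
{
        assert(jiffies_to_msecs(5) == 50);      /* 5 ticks = 50 ms */
        assert(msecs_to_jiffies(1) == 1);       /* 1 ms still costs a tick */
        assert(msecs_to_jiffies(25) == 3);      /* rounded up from 2.5 */
        return 0;
}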
-/* Basic mode control register. */
-#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
-
-#ifndef __le16
-#define __le16 u16
-#endif
-#ifndef __le32
-#define __le32 u32
-#endif
-#ifndef __le64
-#define __le64 u64
-#endif
-#ifndef __be16
-#define __be16 u16
-#endif
-#ifndef __be32
-#define __be32 u32
-#endif
-#ifndef __be64
-#define __be64 u64
-#endif
-
-static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
-{
- return (struct vlan_ethhdr *)skb->mac.raw;
-}
-
-/* Wake-On-Lan options. */
-#define WAKE_PHY (1 << 0)
-#define WAKE_UCAST (1 << 1)
-#define WAKE_MCAST (1 << 2)
-#define WAKE_BCAST (1 << 3)
-#define WAKE_ARP (1 << 4)
-#define WAKE_MAGIC (1 << 5)
-#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
-
-#define skb_header_pointer _kc_skb_header_pointer
-static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
- int offset, int len, void *buffer)
-{
- int hlen = skb_headlen(skb);
-
- if (hlen - offset >= len)
- return skb->data + offset;
-
-#ifdef MAX_SKB_FRAGS
- if (skb_copy_bits(skb, offset, buffer, len) < 0)
- return NULL;
-
- return buffer;
-#else
- return NULL;
-#endif
-}
-
-#ifndef NETDEV_TX_OK
-#define NETDEV_TX_OK 0
-#endif
-#ifndef NETDEV_TX_BUSY
-#define NETDEV_TX_BUSY 1
-#endif
-#ifndef NETDEV_TX_LOCKED
-#define NETDEV_TX_LOCKED -1
-#endif
-
-#ifndef __bitwise
-#define __bitwise
-#endif
-#endif /* < 2.6.9 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
-#ifdef module_param_array_named
-#undef module_param_array_named
-#define module_param_array_named(name, array, type, nump, perm) \
- static struct kparam_array __param_arr_##name \
- = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
- sizeof(array[0]), array }; \
- module_param_call(name, param_array_set, param_array_get, \
- &__param_arr_##name, perm)
-#endif /* module_param_array_named */
-/*
- * num_online_nodes is broken for all kernels earlier than 2.6.10. This is
- * needed to support the Node module parameter of ixgbe.
- */
-#undef num_online_nodes
-#define num_online_nodes(n) 1
-extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
-#undef node_online_map
-#define node_online_map _kcompat_node_online_map
-#endif /* < 2.6.10 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
-#define PCI_D0 0
-#define PCI_D1 1
-#define PCI_D2 2
-#define PCI_D3hot 3
-#define PCI_D3cold 4
-typedef int pci_power_t;
-#define pci_choose_state(pdev,state) state
-#define PMSG_SUSPEND 3
-#define PCI_EXP_LNKCTL 16
-
-#undef NETIF_F_LLTX
-
-#ifndef ARCH_HAS_PREFETCH
-#define prefetch(X)
-#endif
-
-#ifndef NET_IP_ALIGN
-#define NET_IP_ALIGN 2
-#endif
-
-#define KC_USEC_PER_SEC 1000000L
-#define usecs_to_jiffies _kc_usecs_to_jiffies
-static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
-{
-#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
- return (KC_USEC_PER_SEC / HZ) * j;
-#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
- return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
-#else
- return (j * KC_USEC_PER_SEC) / HZ;
-#endif
-}
-static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
-{
- if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
- return MAX_JIFFY_OFFSET;
-#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
- return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
-#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
- return m * (HZ / KC_USEC_PER_SEC);
-#else
- return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
-#endif
-}
-#endif /* < 2.6.11 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
-#include <linux/reboot.h>
-#define USE_REBOOT_NOTIFIER
-
-/* Generic MII registers. */
-#define MII_CTRL1000 0x09 /* 1000BASE-T control */
-#define MII_STAT1000 0x0a /* 1000BASE-T status */
-/* Advertisement control register. */
-#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
-#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
-/* 1000BASE-T Control register */
-#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
-#ifndef is_zero_ether_addr
-#define is_zero_ether_addr _kc_is_zero_ether_addr
-static inline int _kc_is_zero_ether_addr(const u8 *addr)
-{
- return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
-}
-#endif /* is_zero_ether_addr */
-#ifndef is_multicast_ether_addr
-#define is_multicast_ether_addr _kc_is_multicast_ether_addr
-static inline int _kc_is_multicast_ether_addr(const u8 *addr)
-{
- return addr[0] & 0x01;
-}
-#endif /* is_multicast_ether_addr */
-#endif /* < 2.6.12 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
-#ifndef kstrdup
-#define kstrdup _kc_kstrdup
-extern char *_kc_kstrdup(const char *s, unsigned int gfp);
-#endif
-#endif /* < 2.6.13 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
-#define pm_message_t u32
-#ifndef kzalloc
-#define kzalloc _kc_kzalloc
-extern void *_kc_kzalloc(size_t size, int flags);
-#endif
-
-/* Generic MII registers. */
-#define MII_ESTATUS 0x0f /* Extended Status */
-/* Basic mode status register. */
-#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
-/* Extended status register. */
-#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
-#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
-
-#define ADVERTISED_Pause (1 << 13)
-#define ADVERTISED_Asym_Pause (1 << 14)
-
-#if (!(RHEL_RELEASE_CODE && \
- (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \
- (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))))
-#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t))
-#define gfp_t unsigned
-#else
-typedef unsigned gfp_t;
-#endif
-#endif /* !RHEL4.3->RHEL5.0 */
-
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
-#ifdef CONFIG_X86_64
-#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
- dma_sync_single_for_cpu(dev, dma_handle, size, dir)
-#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
- dma_sync_single_for_device(dev, dma_handle, size, dir)
-#endif
-#endif
-#endif /* < 2.6.14 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
-#ifndef vmalloc_node
-#define vmalloc_node(a,b) vmalloc(a)
-#endif /* vmalloc_node*/
-
-#define setup_timer(_timer, _function, _data) \
-do { \
- (_timer)->function = _function; \
- (_timer)->data = _data; \
- init_timer(_timer); \
-} while (0)
-#ifndef device_can_wakeup
-#define device_can_wakeup(dev) (1)
-#endif
-#ifndef device_set_wakeup_enable
-#define device_set_wakeup_enable(dev, val) do{}while(0)
-#endif
-#ifndef device_init_wakeup
-#define device_init_wakeup(dev,val) do {} while (0)
-#endif
-static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
-{
- const u16 *a = (const u16 *) addr1;
- const u16 *b = (const u16 *) addr2;
-
- return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
-}
-#undef compare_ether_addr
-#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2)
-#endif /* < 2.6.15 */
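The helper compares a six-byte MAC as three 16-bit words: XOR each pair and OR the results, nonzero iff the addresses differ, with no early-exit branches. It assumes 2-byte alignment, which Ethernet headers guarantee. A standalone restatement (cmp_ether is a local name for the sketch):

#include <assert.h>

typedef unsigned char u8;
typedef unsigned short u16;

static unsigned cmp_ether(const u8 *addr1, const u8 *addr2)
{
        const u16 *a = (const u16 *)addr1;      /* needs 2-byte alignment */
        const u16 *b = (const u16 *)addr2;

        return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
}

int main(void)
{
        u16 x[3] = { 0x1122, 0x3344, 0x5566 };  /* u16 storage keeps alignment */
        u16 y[3] = { 0x1122, 0x3344, 0x5566 };
        u16 z[3] = { 0x1122, 0x3344, 0x5567 };

        assert(cmp_ether((u8 *)x, (u8 *)y) == 0);
        assert(cmp_ether((u8 *)x, (u8 *)z) != 0);
        return 0;
}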
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
-#undef DEFINE_MUTEX
-#define DEFINE_MUTEX(x) DECLARE_MUTEX(x)
-#define mutex_lock(x) down_interruptible(x)
-#define mutex_unlock(x) up(x)
-
-#ifndef ____cacheline_internodealigned_in_smp
-#ifdef CONFIG_SMP
-#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp
-#else
-#define ____cacheline_internodealigned_in_smp
-#endif /* CONFIG_SMP */
-#endif /* ____cacheline_internodealigned_in_smp */
-#undef HAVE_PCI_ERS
-#else /* 2.6.16 and above */
-#undef HAVE_PCI_ERS
-#define HAVE_PCI_ERS
-#endif /* < 2.6.16 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
-#ifndef first_online_node
-#define first_online_node 0
-#endif
-#ifndef NET_SKB_PAD
-#define NET_SKB_PAD 16
-#endif
-#endif /* < 2.6.17 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
-
-#ifndef IRQ_HANDLED
-#define irqreturn_t void
-#define IRQ_HANDLED
-#define IRQ_NONE
-#endif
-
-#ifndef IRQF_PROBE_SHARED
-#ifdef SA_PROBEIRQ
-#define IRQF_PROBE_SHARED SA_PROBEIRQ
-#else
-#define IRQF_PROBE_SHARED 0
-#endif
-#endif
-
-#ifndef IRQF_SHARED
-#define IRQF_SHARED SA_SHIRQ
-#endif
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
-#ifndef FIELD_SIZEOF
-#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
-#endif
-
-#ifndef skb_is_gso
-#ifdef NETIF_F_TSO
-#define skb_is_gso _kc_skb_is_gso
-static inline int _kc_skb_is_gso(const struct sk_buff *skb)
-{
- return skb_shinfo(skb)->gso_size;
-}
-#else
-#define skb_is_gso(a) 0
-#endif
-#endif
-
-#ifndef resource_size_t
-#define resource_size_t unsigned long
-#endif
-
-#ifdef skb_pad
-#undef skb_pad
-#endif
-#define skb_pad(x,y) _kc_skb_pad(x, y)
-int _kc_skb_pad(struct sk_buff *skb, int pad);
-#ifdef skb_padto
-#undef skb_padto
-#endif
-#define skb_padto(x,y) _kc_skb_padto(x, y)
-static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
-{
- unsigned int size = skb->len;
- if(likely(size >= len))
- return 0;
- return _kc_skb_pad(skb, len - size);
-}
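The padto pattern, extend only when the frame is shorter than the requested minimum, is the classic guard against runt frames on transmit. The same control flow on a plain buffer (pad_to and the 60-byte ETH_ZLEN-style minimum are illustrative, not the kernel API):

#include <assert.h>
#include <string.h>

/* zero-extend buf from len to want bytes; no-op when already long enough */
static unsigned int pad_to(unsigned char *buf, unsigned int len,
                           unsigned int want)
{
        if (len >= want)
                return len;
        memset(buf + len, 0, want - len);
        return want;
}

int main(void)
{
        unsigned char frame[64] = { 0xde, 0xad };

        assert(pad_to(frame, 2, 60) == 60);     /* short frame padded */
        assert(pad_to(frame, 64, 60) == 64);    /* long frame untouched */
        return 0;
}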
-
-#ifndef DECLARE_PCI_UNMAP_ADDR
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
- dma_addr_t ADDR_NAME
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
- u32 LEN_NAME
-#define pci_unmap_addr(PTR, ADDR_NAME) \
- ((PTR)->ADDR_NAME)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
- (((PTR)->ADDR_NAME) = (VAL))
-#define pci_unmap_len(PTR, LEN_NAME) \
- ((PTR)->LEN_NAME)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
- (((PTR)->LEN_NAME) = (VAL))
-#endif /* DECLARE_PCI_UNMAP_ADDR */
-#endif /* < 2.6.18 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
-
-#ifndef DIV_ROUND_UP
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-#endif
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
-#if (!((RHEL_RELEASE_CODE && \
- ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
- RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
- (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))) || \
- (AX_RELEASE_CODE && AX_RELEASE_CODE > AX_RELEASE_VERSION(3,0))))
-typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
-#endif
-#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
-#undef CONFIG_INET_LRO
-#undef CONFIG_INET_LRO_MODULE
-#undef CONFIG_FCOE
-#undef CONFIG_FCOE_MODULE
-#endif
-typedef irqreturn_t (*new_handler_t)(int, void*);
-static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
-#else /* 2.4.x */
-typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
-typedef void (*new_handler_t)(int, void*);
-static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
-#endif /* >= 2.5.x */
-{
- irq_handler_t new_handler = (irq_handler_t) handler;
- return request_irq(irq, new_handler, flags, devname, dev_id);
-}
-
-#undef request_irq
-#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
-
-#define irq_handler_t new_handler_t
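Kernel 2.6.19 dropped the struct pt_regs * argument from interrupt handlers; the shim above lets a driver written for the two-argument prototype register on older kernels by casting the handler pointer. A user-space model of the cast (old_request_irq and my_isr are invented for the sketch; the real shim calls through the three-argument type and relies on the calling convention ignoring the extra argument):

#include <stdio.h>

struct pt_regs;                         /* opaque, never dereferenced */
typedef void (*irq_handler_t)(int, void *, struct pt_regs *);
typedef void (*new_handler_t)(int, void *);

static irq_handler_t registered;        /* stand-in for the kernel's table */

static int old_request_irq(int irq, irq_handler_t h, void *dev)
{
        (void)irq; (void)dev;
        registered = h;
        return 0;
}

static void my_isr(int irq, void *dev)  /* new-style, two arguments */
{
        (void)dev;
        printf("handled irq %d\n", irq);
}

int main(void)
{
        /* the compat cast: register a 2-arg handler through the 3-arg type */
        old_request_irq(7, (irq_handler_t)my_isr, NULL);
        /* to stay within defined C here, cast back before calling; the
         * kernel instead invokes it with three args and wins the ABI bet */
        ((new_handler_t)registered)(7, NULL);
        return 0;
}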
-/* pci_restore_state and pci_save_state handle MSI/PCIE from 2.6.19 */
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
-#define PCIE_CONFIG_SPACE_LEN 256
-#define PCI_CONFIG_SPACE_LEN 64
-#define PCIE_LINK_STATUS 0x12
-#define pci_config_space_ich8lan() do {} while(0)
-#undef pci_save_state
-extern int _kc_pci_save_state(struct pci_dev *);
-#define pci_save_state(pdev) _kc_pci_save_state(pdev)
-#undef pci_restore_state
-extern void _kc_pci_restore_state(struct pci_dev *);
-#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
-#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
-
-#ifdef HAVE_PCI_ERS
-#undef free_netdev
-extern void _kc_free_netdev(struct net_device *);
-#define free_netdev(netdev) _kc_free_netdev(netdev)
-#endif
-static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
-{
- return 0;
-}
-#define pci_disable_pcie_error_reporting(dev) do {} while (0)
-#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
-
-extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
-#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
-#ifndef bool
-#define bool _Bool
-#define true 1
-#define false 0
-#endif
-#else /* 2.6.19 */
-#include <linux/aer.h>
-#include <linux/string.h>
-#endif /* < 2.6.19 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
-#undef INIT_WORK
-#define INIT_WORK(_work, _func) \
-do { \
- INIT_LIST_HEAD(&(_work)->entry); \
- (_work)->pending = 0; \
- (_work)->func = (void (*)(void *))_func; \
- (_work)->data = _work; \
- init_timer(&(_work)->timer); \
-} while (0)
-#endif
-
-#ifndef PCI_VDEVICE
-#define PCI_VDEVICE(ven, dev) \
- PCI_VENDOR_ID_##ven, (dev), \
- PCI_ANY_ID, PCI_ANY_ID, 0, 0
-#endif
-
-#ifndef round_jiffies
-#define round_jiffies(x) x
-#endif
-
-#define csum_offset csum
-
-#define HAVE_EARLY_VMALLOC_NODE
-#define dev_to_node(dev) -1
-#undef set_dev_node
-/* remove compiler warning with b=b, for unused variable */
-#define set_dev_node(a, b) do { (b) = (b); } while(0)
-
-#if (!(RHEL_RELEASE_CODE && \
- (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
- (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
- (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \
- !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
-typedef __u16 __bitwise __sum16;
-typedef __u32 __bitwise __wsum;
-#endif
-
-#if (!(RHEL_RELEASE_CODE && \
- (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
- (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
- (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \
- !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
-static inline __wsum csum_unfold(__sum16 n)
-{
- return (__force __wsum)n;
-}
-#endif
-
-#else /* < 2.6.20 */
-#define HAVE_DEVICE_NUMA_NODE
-#endif /* < 2.6.20 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
-#define to_net_dev(class) container_of(class, struct net_device, class_dev)
-#define NETDEV_CLASS_DEV
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
-#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
-#define vlan_group_set_device(vg, id, dev) \
- do { \
- if (vg) vg->vlan_devices[id] = dev; \
- } while (0)
-#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
-#define pci_channel_offline(pdev) (pdev->error_state && \
- pdev->error_state != pci_channel_io_normal)
-#define pci_request_selected_regions(pdev, bars, name) \
- pci_request_regions(pdev, name)
-#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev);
-#endif /* < 2.6.21 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
-#define tcp_hdr(skb) (skb->h.th)
-#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
-#define skb_transport_offset(skb) (skb->h.raw - skb->data)
-#define skb_transport_header(skb) (skb->h.raw)
-#define ipv6_hdr(skb) (skb->nh.ipv6h)
-#define ip_hdr(skb) (skb->nh.iph)
-#define skb_network_offset(skb) (skb->nh.raw - skb->data)
-#define skb_network_header(skb) (skb->nh.raw)
-#define skb_tail_pointer(skb) skb->tail
-#define skb_reset_tail_pointer(skb) \
- do { \
- skb->tail = skb->data; \
- } while (0)
-#define skb_copy_to_linear_data(skb, from, len) \
- memcpy(skb->data, from, len)
-#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
- memcpy(skb->data + offset, from, len)
-#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
-#define pci_register_driver pci_module_init
-#define skb_mac_header(skb) skb->mac.raw
-
-#ifdef NETIF_F_MULTI_QUEUE
-#ifndef alloc_etherdev_mq
-#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
-#endif
-#endif /* NETIF_F_MULTI_QUEUE */
-
-#ifndef ETH_FCS_LEN
-#define ETH_FCS_LEN 4
-#endif
-#define cancel_work_sync(x) flush_scheduled_work()
-#ifndef udp_hdr
-#define udp_hdr _udp_hdr
-static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
-{
- return (struct udphdr *)skb_transport_header(skb);
-}
-#endif
-
-#ifdef cpu_to_be16
-#undef cpu_to_be16
-#endif
-#define cpu_to_be16(x) __constant_htons(x)
-
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
-enum {
- DUMP_PREFIX_NONE,
- DUMP_PREFIX_ADDRESS,
- DUMP_PREFIX_OFFSET
-};
-#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
-#ifndef hex_asc
-#define hex_asc(x) "0123456789abcdef"[x]
-#endif
-#include <linux/ctype.h>
-extern void _kc_print_hex_dump(const char *level, const char *prefix_str,
- int prefix_type, int rowsize, int groupsize,
- const void *buf, size_t len, bool ascii);
-#define print_hex_dump(lvl, s, t, r, g, b, l, a) \
- _kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
-#else /* 2.6.22 */
-#define ETH_TYPE_TRANS_SETS_DEV
-#define HAVE_NETDEV_STATS_IN_NETDEV
-#endif /* < 2.6.22 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
-#endif /* > 2.6.22 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
-#define netif_subqueue_stopped(_a, _b) 0
-#ifndef PTR_ALIGN
-#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
-#endif
-
-#ifndef CONFIG_PM_SLEEP
-#define CONFIG_PM_SLEEP CONFIG_PM
-#endif
-
-#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
-#define HAVE_ETHTOOL_GET_PERM_ADDR
-#endif /* 2.6.14 through 2.6.22 */
-#endif /* < 2.6.23 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
-#ifndef ETH_FLAG_LRO
-#define ETH_FLAG_LRO NETIF_F_LRO
-#endif
-
-/* if GRO is supported then the napi struct must already exist */
-#ifndef NETIF_F_GRO
-/* NAPI API changes in 2.6.24 break everything */
-struct napi_struct {
- /* used to look up the real NAPI polling routine */
- int (*poll)(struct napi_struct *, int);
- struct net_device *dev;
- int weight;
-};
-#endif
-
-#ifdef NAPI
-extern int __kc_adapter_clean(struct net_device *, int *);
-extern struct net_device *napi_to_poll_dev(struct napi_struct *napi);
-#define netif_napi_add(_netdev, _napi, _poll, _weight) \
- do { \
- struct napi_struct *__napi = (_napi); \
- struct net_device *poll_dev = napi_to_poll_dev(__napi); \
- poll_dev->poll = &(__kc_adapter_clean); \
- poll_dev->priv = (_napi); \
- poll_dev->weight = (_weight); \
- set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); \
- set_bit(__LINK_STATE_START, &poll_dev->state);\
- dev_hold(poll_dev); \
- __napi->poll = &(_poll); \
- __napi->weight = (_weight); \
- __napi->dev = (_netdev); \
- } while (0)
-#define netif_napi_del(_napi) \
- do { \
- struct net_device *poll_dev = napi_to_poll_dev(_napi); \
- WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); \
- dev_put(poll_dev); \
- memset(poll_dev, 0, sizeof(struct net_device));\
- } while (0)
-#define napi_schedule_prep(_napi) \
- (netif_running((_napi)->dev) && netif_rx_schedule_prep(napi_to_poll_dev(_napi)))
-#define napi_schedule(_napi) \
- do { \
- if (napi_schedule_prep(_napi)) \
- __netif_rx_schedule(napi_to_poll_dev(_napi)); \
- } while (0)
-#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
-#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
-#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
-#ifndef NETIF_F_GRO
-#define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi))
-#else
-#define napi_complete(_napi) \
- do { \
- napi_gro_flush(_napi); \
- netif_rx_complete(napi_to_poll_dev(_napi)); \
- } while (0)
-#endif /* NETIF_F_GRO */
-#else /* NAPI */
-#define netif_napi_add(_netdev, _napi, _poll, _weight) \
- do { \
- struct napi_struct *__napi = _napi; \
- _netdev->poll = &(_poll); \
- _netdev->weight = (_weight); \
- __napi->poll = &(_poll); \
- __napi->weight = (_weight); \
- __napi->dev = (_netdev); \
- } while (0)
-#define netif_napi_del(_a) do {} while (0)
-#endif /* NAPI */
-
-#undef dev_get_by_name
-#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
-#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
-#ifndef DMA_BIT_MASK
-#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
-#endif
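The n == 64 special case matters: 1ULL << 64 is undefined behavior in C, so the full-width mask has to be spelled out rather than computed. A quick check (DMA_64BIT_MASK is defined locally so the sketch stands alone):

#include <assert.h>

#define DMA_64BIT_MASK 0xffffffffffffffffULL
#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL << (n)) - 1))

int main(void)
{
        assert(DMA_BIT_MASK(32) == 0xffffffffULL);
        assert(DMA_BIT_MASK(40) == 0xffffffffffULL);
        assert(DMA_BIT_MASK(64) == DMA_64BIT_MASK);     /* no UB shift taken */
        return 0;
}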
-
-#ifdef NETIF_F_TSO6
-#define skb_is_gso_v6 _kc_skb_is_gso_v6
-static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
-{
- return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
-}
-#endif /* NETIF_F_TSO6 */
-
-#ifndef KERN_CONT
-#define KERN_CONT ""
-#endif
-#else /* < 2.6.24 */
-#define HAVE_ETHTOOL_GET_SSET_COUNT
-#define HAVE_NETDEV_NAPI_LIST
-#endif /* < 2.6.24 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
-#include <linux/pm_qos_params.h>
-#else /* >= 3.2.0 */
-#include <linux/pm_qos.h>
-#endif /* else >= 3.2.0 */
-#endif /* > 2.6.24 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
-#define PM_QOS_CPU_DMA_LATENCY 1
-
-#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
-#include <linux/latency.h>
-#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
-#define pm_qos_add_requirement(pm_qos_class, name, value) \
- set_acceptable_latency(name, value)
-#define pm_qos_remove_requirement(pm_qos_class, name) \
- remove_acceptable_latency(name)
-#define pm_qos_update_requirement(pm_qos_class, name, value) \
- modify_acceptable_latency(name, value)
-#else
-#define PM_QOS_DEFAULT_VALUE -1
-#define pm_qos_add_requirement(pm_qos_class, name, value)
-#define pm_qos_remove_requirement(pm_qos_class, name)
-#define pm_qos_update_requirement(pm_qos_class, name, value) { \
- if (value != PM_QOS_DEFAULT_VALUE) { \
- printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
- pci_name(adapter->pdev)); \
- } \
-}
-
-#endif /* > 2.6.18 */
-
-#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
-
-#ifndef DEFINE_PCI_DEVICE_TABLE
-#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
-#endif /* DEFINE_PCI_DEVICE_TABLE */
-
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
-#ifndef IXGBE_PROCFS
-#define IXGBE_PROCFS
-#endif /* IXGBE_PROCFS */
-#endif /* >= 2.6.0 */
-
-
-#else /* < 2.6.25 */
-
-#ifndef IXGBE_SYSFS
-#define IXGBE_SYSFS
-#endif /* IXGBE_SYSFS */
-
-
-#endif /* < 2.6.25 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
-#ifndef clamp_t
-#define clamp_t(type, val, min, max) ({ \
- type __val = (val); \
- type __min = (min); \
- type __max = (max); \
- __val = __val < __min ? __min : __val; \
- __val > __max ? __max : __val; })
-#endif /* clamp_t */
-#ifdef NETIF_F_TSO
-#ifdef NETIF_F_TSO6
-#define netif_set_gso_max_size(_netdev, size) \
- do { \
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { \
- _netdev->features &= ~NETIF_F_TSO; \
- _netdev->features &= ~NETIF_F_TSO6; \
- } else { \
- _netdev->features |= NETIF_F_TSO; \
- _netdev->features |= NETIF_F_TSO6; \
- } \
- } while (0)
-#else /* NETIF_F_TSO6 */
-#define netif_set_gso_max_size(_netdev, size) \
- do { \
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
- _netdev->features &= ~NETIF_F_TSO; \
- else \
- _netdev->features |= NETIF_F_TSO; \
- } while (0)
-#endif /* NETIF_F_TSO6 */
-#else
-#define netif_set_gso_max_size(_netdev, size) do {} while (0)
-#endif /* NETIF_F_TSO */
-#undef kzalloc_node
-#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
-
-extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
-#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
-#else /* < 2.6.26 */
-#include <linux/pci-aspm.h>
-#define HAVE_NETDEV_VLAN_FEATURES
-#endif /* < 2.6.26 */
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
-static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
- __u32 speed)
-{
- ep->speed = (__u16)speed;
- /* ep->speed_hi = (__u16)(speed >> 16); */
-}
-#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
-
-static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
-{
- /* no speed_hi before 2.6.27, and probably no need for it yet */
- return (__u32)ep->speed;
-}
-#define ethtool_cmd_speed _kc_ethtool_cmd_speed
-
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
-#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM))
-#define ANCIENT_PM 1
-#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \
- defined(CONFIG_PM_SLEEP))
-#define NEWER_PM 1
-#endif
-#if defined(ANCIENT_PM) || defined(NEWER_PM)
-#undef device_set_wakeup_enable
-#define device_set_wakeup_enable(dev, val) \
- do { \
- u16 pmc = 0; \
- int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
- if (pm) { \
- pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
- &pmc); \
- } \
- (dev)->power.can_wakeup = !!(pmc >> 11); \
- (dev)->power.should_wakeup = (val && (pmc >> 11)); \
- } while (0)
-#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
-#endif /* 2.6.15 through 2.6.27 */
-#ifndef netif_napi_del
-#define netif_napi_del(_a) do {} while (0)
-#ifdef NAPI
-#ifdef CONFIG_NETPOLL
-#undef netif_napi_del
-#define netif_napi_del(_a) list_del(&(_a)->dev_list);
-#endif
-#endif
-#endif /* netif_napi_del */
-#ifdef dma_mapping_error
-#undef dma_mapping_error
-#endif
-#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
-
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
-#define HAVE_TX_MQ
-#endif
-
-#ifdef HAVE_TX_MQ
-extern void _kc_netif_tx_stop_all_queues(struct net_device *);
-extern void _kc_netif_tx_wake_all_queues(struct net_device *);
-extern void _kc_netif_tx_start_all_queues(struct net_device *);
-#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
-#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
-#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
-#undef netif_stop_subqueue
-#define netif_stop_subqueue(_ndev,_qi) do { \
- if (netif_is_multiqueue((_ndev))) \
- netif_stop_subqueue((_ndev), (_qi)); \
- else \
- netif_stop_queue((_ndev)); \
- } while (0)
-#undef netif_start_subqueue
-#define netif_start_subqueue(_ndev,_qi) do { \
- if (netif_is_multiqueue((_ndev))) \
- netif_start_subqueue((_ndev), (_qi)); \
- else \
- netif_start_queue((_ndev)); \
- } while (0)
-#else /* HAVE_TX_MQ */
-#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
-#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
-#define netif_tx_start_all_queues(a) netif_start_queue(a)
-#else
-#define netif_tx_start_all_queues(a) do {} while (0)
-#endif
-#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
-#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
-#endif /* HAVE_TX_MQ */
-#ifndef NETIF_F_MULTI_QUEUE
-#define NETIF_F_MULTI_QUEUE 0
-#define netif_is_multiqueue(a) 0
-#define netif_wake_subqueue(a, b)
-#endif /* NETIF_F_MULTI_QUEUE */
-
-#ifndef __WARN_printf
-extern void __kc_warn_slowpath(const char *file, const int line,
- const char *fmt, ...) __attribute__((format(printf, 3, 4)));
-#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg)
-#endif /* __WARN_printf */
-
-#ifndef WARN
-#define WARN(condition, format...) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) \
- __WARN_printf(format); \
- unlikely(__ret_warn_on); \
-})
-#endif /* WARN */
-#else /* < 2.6.27 */
-#define HAVE_TX_MQ
-#define HAVE_NETDEV_SELECT_QUEUE
-#endif /* < 2.6.27 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
-#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \
- pci_resource_len(pdev, bar))
-#define pci_wake_from_d3 _kc_pci_wake_from_d3
-#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
-extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
-extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
-#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
-#ifndef __skb_queue_head_init
-static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
-{
- list->prev = list->next = (struct sk_buff *)list;
- list->qlen = 0;
-}
-#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
-#endif
-#endif /* < 2.6.28 */
-
-#ifndef skb_add_rx_frag
-#define skb_add_rx_frag _kc_skb_add_rx_frag
-extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int);
-#endif
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
-#ifndef swap
-#define swap(a, b) \
- do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
-#endif
-#define pci_request_selected_regions_exclusive(pdev, bars, name) \
- pci_request_selected_regions(pdev, bars, name)
-#ifndef CONFIG_NR_CPUS
-#define CONFIG_NR_CPUS 1
-#endif /* CONFIG_NR_CPUS */
-#ifndef pcie_aspm_enabled
-#define pcie_aspm_enabled() (1)
-#endif /* pcie_aspm_enabled */
-#else /* < 2.6.29 */
-#ifndef HAVE_NET_DEVICE_OPS
-#define HAVE_NET_DEVICE_OPS
-#endif
-#ifdef CONFIG_DCB
-#define HAVE_PFC_MODE_ENABLE
-#endif /* CONFIG_DCB */
-#endif /* < 2.6.29 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
-#define skb_rx_queue_recorded(a) false
-#define skb_get_rx_queue(a) 0
-#undef CONFIG_FCOE
-#undef CONFIG_FCOE_MODULE
-extern u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb);
-#define skb_tx_hash(n, s) _kc_skb_tx_hash(n, s)
-#define skb_record_rx_queue(a, b) do {} while (0)
-#ifndef CONFIG_PCI_IOV
-#undef pci_enable_sriov
-#define pci_enable_sriov(a, b) -ENOTSUPP
-#undef pci_disable_sriov
-#define pci_disable_sriov(a) do {} while (0)
-#endif /* CONFIG_PCI_IOV */
-#ifndef pr_cont
-#define pr_cont(fmt, ...) \
- printk(KERN_CONT fmt, ##__VA_ARGS__)
-#endif /* pr_cont */
-#else
-#define HAVE_ASPM_QUIRKS
-#endif /* < 2.6.30 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
-#define ETH_P_1588 0x88F7
-#define ETH_P_FIP 0x8914
-#ifndef netdev_uc_count
-#define netdev_uc_count(dev) ((dev)->uc_count)
-#endif
-#ifndef netdev_for_each_uc_addr
-#define netdev_for_each_uc_addr(uclist, dev) \
- for (uclist = dev->uc_list; uclist; uclist = uclist->next)
-#endif
-#else
-#ifndef HAVE_NETDEV_STORAGE_ADDRESS
-#define HAVE_NETDEV_STORAGE_ADDRESS
-#endif
-#ifndef HAVE_NETDEV_HW_ADDR
-#define HAVE_NETDEV_HW_ADDR
-#endif
-#ifndef HAVE_TRANS_START_IN_QUEUE
-#define HAVE_TRANS_START_IN_QUEUE
-#endif
-#endif /* < 2.6.31 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
-#undef netdev_tx_t
-#define netdev_tx_t int
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-#ifndef NETIF_F_FCOE_MTU
-#define NETIF_F_FCOE_MTU (1 << 26)
-#endif
-#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
-
-#ifndef pm_runtime_get_sync
-#define pm_runtime_get_sync(dev) do {} while (0)
-#endif
-#ifndef pm_runtime_put
-#define pm_runtime_put(dev) do {} while (0)
-#endif
-#ifndef pm_runtime_put_sync
-#define pm_runtime_put_sync(dev) do {} while (0)
-#endif
-#ifndef pm_runtime_resume
-#define pm_runtime_resume(dev) do {} while (0)
-#endif
-#ifndef pm_schedule_suspend
-#define pm_schedule_suspend(dev, t) do {} while (0)
-#endif
-#ifndef pm_runtime_set_suspended
-#define pm_runtime_set_suspended(dev) do {} while (0)
-#endif
-#ifndef pm_runtime_disable
-#define pm_runtime_disable(dev) do {} while (0)
-#endif
-#ifndef pm_runtime_put_noidle
-#define pm_runtime_put_noidle(dev) do {} while (0)
-#endif
-#ifndef pm_runtime_set_active
-#define pm_runtime_set_active(dev) do {} while (0)
-#endif
-#ifndef pm_runtime_enable
-#define pm_runtime_enable(dev) do {} while (0)
-#endif
-#ifndef pm_runtime_get_noresume
-#define pm_runtime_get_noresume(dev) do {} while (0)
-#endif
-#else /* < 2.6.32 */
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
-#define HAVE_NETDEV_OPS_FCOE_ENABLE
-#endif
-#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
-#ifdef CONFIG_DCB
-#ifndef HAVE_DCBNL_OPS_GETAPP
-#define HAVE_DCBNL_OPS_GETAPP
-#endif
-#endif /* CONFIG_DCB */
-#include <linux/pm_runtime.h>
-/* IOV bad DMA target workarounds require at least this kernel rev support */
-#define HAVE_PCIE_TYPE
-#endif /* < 2.6.32 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
-#ifndef pci_pcie_cap
-#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
-#endif
-#ifndef IPV4_FLOW
-#define IPV4_FLOW 0x10
-#endif /* IPV4_FLOW */
-#ifndef IPV6_FLOW
-#define IPV6_FLOW 0x11
-#endif /* IPV6_FLOW */
-/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
-#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \
- (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) )
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
-#define HAVE_NETDEV_OPS_FCOE_GETWWN
-#endif
-#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
-#endif /* RHEL6 or SLES11 SP1 */
-#ifndef __percpu
-#define __percpu
-#endif /* __percpu */
-#else /* < 2.6.33 */
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
-#define HAVE_NETDEV_OPS_FCOE_GETWWN
-#endif
-#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
-#define HAVE_ETHTOOL_SFP_DISPLAY_PORT
-#endif /* < 2.6.33 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
-#ifndef ETH_FLAG_NTUPLE
-#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
-#endif
-
-#ifndef netdev_mc_count
-#define netdev_mc_count(dev) ((dev)->mc_count)
-#endif
-#ifndef netdev_mc_empty
-#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
-#endif
-#ifndef netdev_for_each_mc_addr
-#define netdev_for_each_mc_addr(mclist, dev) \
- for (mclist = dev->mc_list; mclist; mclist = mclist->next)
-#endif
-#ifndef netdev_uc_count
-#define netdev_uc_count(dev) ((dev)->uc.count)
-#endif
-#ifndef netdev_uc_empty
-#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0)
-#endif
-#ifndef netdev_for_each_uc_addr
-#define netdev_for_each_uc_addr(ha, dev) \
- list_for_each_entry(ha, &dev->uc.list, list)
-#endif
-#ifndef dma_set_coherent_mask
-#define dma_set_coherent_mask(dev,mask) \
- pci_set_consistent_dma_mask(to_pci_dev(dev),(mask))
-#endif
-#ifndef pci_dev_run_wake
-#define pci_dev_run_wake(pdev) (0)
-#endif
-
-/* netdev logging taken from include/linux/netdevice.h */
-#ifndef netdev_name
-static inline const char *_kc_netdev_name(const struct net_device *dev)
-{
- if (dev->reg_state != NETREG_REGISTERED)
- return "(unregistered net_device)";
- return dev->name;
-}
-#define netdev_name(netdev) _kc_netdev_name(netdev)
-#endif /* netdev_name */
-
-#undef netdev_printk
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
-#define netdev_printk(level, netdev, format, args...) \
-do { \
- struct adapter_struct *kc_adapter = netdev_priv(netdev);\
- struct pci_dev *pdev = kc_adapter->pdev; \
- printk("%s %s: " format, level, pci_name(pdev), \
- ##args); \
-} while(0)
-#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
-#define netdev_printk(level, netdev, format, args...) \
-do { \
- struct adapter_struct *kc_adapter = netdev_priv(netdev);\
- struct pci_dev *pdev = kc_adapter->pdev; \
- struct device *dev = pci_dev_to_dev(pdev); \
- dev_printk(level, dev, "%s: " format, \
- netdev_name(netdev), ##args); \
-} while(0)
-#else /* 2.6.21 => 2.6.34 */
-#define netdev_printk(level, netdev, format, args...) \
- dev_printk(level, (netdev)->dev.parent, \
- "%s: " format, \
- netdev_name(netdev), ##args)
-#endif /* <2.6.0 <2.6.21 <2.6.34 */
-#undef netdev_emerg
-#define netdev_emerg(dev, format, args...) \
- netdev_printk(KERN_EMERG, dev, format, ##args)
-#undef netdev_alert
-#define netdev_alert(dev, format, args...) \
- netdev_printk(KERN_ALERT, dev, format, ##args)
-#undef netdev_crit
-#define netdev_crit(dev, format, args...) \
- netdev_printk(KERN_CRIT, dev, format, ##args)
-#undef netdev_err
-#define netdev_err(dev, format, args...) \
- netdev_printk(KERN_ERR, dev, format, ##args)
-#undef netdev_warn
-#define netdev_warn(dev, format, args...) \
- netdev_printk(KERN_WARNING, dev, format, ##args)
-#undef netdev_notice
-#define netdev_notice(dev, format, args...) \
- netdev_printk(KERN_NOTICE, dev, format, ##args)
-#undef netdev_info
-#define netdev_info(dev, format, args...) \
- netdev_printk(KERN_INFO, dev, format, ##args)
-#undef netdev_dbg
-#if defined(DEBUG)
-#define netdev_dbg(__dev, format, args...) \
- netdev_printk(KERN_DEBUG, __dev, format, ##args)
-#elif defined(CONFIG_DYNAMIC_DEBUG)
-#define netdev_dbg(__dev, format, args...) \
-do { \
- dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
- netdev_name(__dev), ##args); \
-} while (0)
-#else /* DEBUG */
-#define netdev_dbg(__dev, format, args...) \
-({ \
- if (0) \
- netdev_printk(KERN_DEBUG, __dev, format, ##args); \
- 0; \
-})
-#endif /* DEBUG */
-
-#undef netif_printk
-#define netif_printk(priv, type, level, dev, fmt, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- netdev_printk(level, (dev), fmt, ##args); \
-} while (0)
-
-#undef netif_emerg
-#define netif_emerg(priv, type, dev, fmt, args...) \
- netif_level(emerg, priv, type, dev, fmt, ##args)
-#undef netif_alert
-#define netif_alert(priv, type, dev, fmt, args...) \
- netif_level(alert, priv, type, dev, fmt, ##args)
-#undef netif_crit
-#define netif_crit(priv, type, dev, fmt, args...) \
- netif_level(crit, priv, type, dev, fmt, ##args)
-#undef netif_err
-#define netif_err(priv, type, dev, fmt, args...) \
- netif_level(err, priv, type, dev, fmt, ##args)
-#undef netif_warn
-#define netif_warn(priv, type, dev, fmt, args...) \
- netif_level(warn, priv, type, dev, fmt, ##args)
-#undef netif_notice
-#define netif_notice(priv, type, dev, fmt, args...) \
- netif_level(notice, priv, type, dev, fmt, ##args)
-#undef netif_info
-#define netif_info(priv, type, dev, fmt, args...) \
- netif_level(info, priv, type, dev, fmt, ##args)
-
-#ifdef SET_SYSTEM_SLEEP_PM_OPS
-#define HAVE_SYSTEM_SLEEP_PM_OPS
-#endif
-
-#ifndef for_each_set_bit
-#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
-#endif /* for_each_set_bit */
-
-#ifndef DEFINE_DMA_UNMAP_ADDR
-#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
-#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
-#define dma_unmap_addr pci_unmap_addr
-#define dma_unmap_addr_set pci_unmap_addr_set
-#define dma_unmap_len pci_unmap_len
-#define dma_unmap_len_set pci_unmap_len_set
-#endif /* DEFINE_DMA_UNMAP_ADDR */
-#else /* < 2.6.34 */
-#define HAVE_SYSTEM_SLEEP_PM_OPS
-#ifndef HAVE_SET_RX_MODE
-#define HAVE_SET_RX_MODE
-#endif
-
-#endif /* < 2.6.34 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
-#ifndef numa_node_id
-#define numa_node_id() 0
-#endif
-#ifdef HAVE_TX_MQ
-#include <net/sch_generic.h>
-#ifndef CONFIG_NETDEVICES_MULTIQUEUE
-void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
-#define netif_set_real_num_tx_queues _kc_netif_set_real_num_tx_queues
-#else /* CONFIG_NETDEVICES_MULTIQUEUE */
-#define netif_set_real_num_tx_queues(_netdev, _count) \
- do { \
- (_netdev)->egress_subqueue_count = _count; \
- } while (0)
-#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
-#else
-#define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0)
-#endif /* HAVE_TX_MQ */
-#ifndef ETH_FLAG_RXHASH
-#define ETH_FLAG_RXHASH (1<<28)
-#endif /* ETH_FLAG_RXHASH */
-#else /* < 2.6.35 */
-#define HAVE_PM_QOS_REQUEST_LIST
-#define HAVE_IRQ_AFFINITY_HINT
-#endif /* < 2.6.35 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
-extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
-#define ethtool_op_set_flags _kc_ethtool_op_set_flags
-extern u32 _kc_ethtool_op_get_flags(struct net_device *);
-#define ethtool_op_get_flags _kc_ethtool_op_get_flags
-
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#ifdef NET_IP_ALIGN
-#undef NET_IP_ALIGN
-#endif
-#define NET_IP_ALIGN 0
-#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
-
-#ifdef NET_SKB_PAD
-#undef NET_SKB_PAD
-#endif
-
-#if (L1_CACHE_BYTES > 32)
-#define NET_SKB_PAD L1_CACHE_BYTES
-#else
-#define NET_SKB_PAD 32
-#endif
-
-static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
- unsigned int length)
-{
- struct sk_buff *skb;
-
- skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
- if (skb) {
-#if (NET_IP_ALIGN + NET_SKB_PAD)
- skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
-#endif
- skb->dev = dev;
- }
- return skb;
-}
-
-#ifdef netdev_alloc_skb_ip_align
-#undef netdev_alloc_skb_ip_align
-#endif
-#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
-
-#undef netif_level
-#define netif_level(level, priv, type, dev, fmt, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- netdev_##level(dev, fmt, ##args); \
-} while (0)
-
-#undef usleep_range
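-/* approximation: sleeps for "min" microseconds rounded up to whole
- * milliseconds; the "max" bound is ignored */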
-#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
-
-#else /* < 2.6.36 */
-#define HAVE_PM_QOS_REQUEST_ACTIVE
-#define HAVE_8021P_SUPPORT
-#define HAVE_NDO_GET_STATS64
-#endif /* < 2.6.36 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
-#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
-#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
-#endif
-#ifndef VLAN_N_VID
-#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN
-#endif /* VLAN_N_VID */
-#ifndef ETH_FLAG_TXVLAN
-#define ETH_FLAG_TXVLAN (1 << 7)
-#endif /* ETH_FLAG_TXVLAN */
-#ifndef ETH_FLAG_RXVLAN
-#define ETH_FLAG_RXVLAN (1 << 8)
-#endif /* ETH_FLAG_RXVLAN */
-
-static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb)
-{
- WARN_ON(skb->ip_summed != CHECKSUM_NONE);
-}
-#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb)
-
-static inline void *_kc_vzalloc_node(unsigned long size, int node)
-{
- void *addr = vmalloc_node(size, node);
- if (addr)
- memset(addr, 0, size);
- return addr;
-}
-#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node)
-
-static inline void *_kc_vzalloc(unsigned long size)
-{
- void *addr = vmalloc(size);
- if (addr)
- memset(addr, 0, size);
- return addr;
-}
-#define vzalloc(_size) _kc_vzalloc(_size)
-
-#ifndef vlan_get_protocol
-static inline __be16 __kc_vlan_get_protocol(const struct sk_buff *skb)
-{
- if (vlan_tx_tag_present(skb) ||
- skb->protocol != cpu_to_be16(ETH_P_8021Q))
- return skb->protocol;
-
- if (skb_headlen(skb) < sizeof(struct vlan_ethhdr))
- return 0;
-
- return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto;
-}
-#define vlan_get_protocol(_skb) __kc_vlan_get_protocol(_skb)
-#endif
-#ifdef HAVE_HW_TIME_STAMP
-#define SKBTX_HW_TSTAMP (1 << 0)
-#define SKBTX_IN_PROGRESS (1 << 2)
-#define SKB_SHARED_TX_IS_UNION
-#endif
-#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) )
-#ifndef HAVE_VLAN_RX_REGISTER
-#define HAVE_VLAN_RX_REGISTER
-#endif
-#endif /* > 2.4.18 */
-#endif /* < 2.6.37 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
-#define skb_checksum_start_offset(skb) skb_transport_offset(skb)
-#else /* 2.6.22 -> 2.6.37 */
-static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
-{
- return skb->csum_start - skb_headroom(skb);
-}
-#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb)
-#endif /* 2.6.22 -> 2.6.37 */
-#ifdef CONFIG_DCB
-#ifndef IEEE_8021QAZ_MAX_TCS
-#define IEEE_8021QAZ_MAX_TCS 8
-#endif
-#ifndef DCB_CAP_DCBX_HOST
-#define DCB_CAP_DCBX_HOST 0x01
-#endif
-#ifndef DCB_CAP_DCBX_LLD_MANAGED
-#define DCB_CAP_DCBX_LLD_MANAGED 0x02
-#endif
-#ifndef DCB_CAP_DCBX_VER_CEE
-#define DCB_CAP_DCBX_VER_CEE 0x04
-#endif
-#ifndef DCB_CAP_DCBX_VER_IEEE
-#define DCB_CAP_DCBX_VER_IEEE 0x08
-#endif
-#ifndef DCB_CAP_DCBX_STATIC
-#define DCB_CAP_DCBX_STATIC 0x10
-#endif
-#endif /* CONFIG_DCB */
-#else /* < 2.6.38 */
-#endif /* < 2.6.38 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
-#ifndef skb_queue_reverse_walk_safe
-#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
- for (skb = (queue)->prev, tmp = skb->prev; \
- skb != (struct sk_buff *)(queue); \
- skb = tmp, tmp = skb->prev)
-#endif
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
-extern u8 _kc_netdev_get_num_tc(struct net_device *dev);
-#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev)
-extern u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up);
-#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up)
-#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0)
-#else /* RHEL6.1 or greater */
-#ifndef HAVE_MQPRIO
-#define HAVE_MQPRIO
-#endif /* HAVE_MQPRIO */
-#ifdef CONFIG_DCB
-#ifndef HAVE_DCBNL_IEEE
-#define HAVE_DCBNL_IEEE
-#ifndef IEEE_8021QAZ_TSA_STRICT
-#define IEEE_8021QAZ_TSA_STRICT 0
-#endif
-#ifndef IEEE_8021QAZ_TSA_ETS
-#define IEEE_8021QAZ_TSA_ETS 2
-#endif
-#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE
-#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
-#endif
-#endif
-#endif /* CONFIG_DCB */
-#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
-#else /* < 2.6.39 */
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
-#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET
-#endif
-#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
-#ifndef HAVE_MQPRIO
-#define HAVE_MQPRIO
-#endif
-#ifndef HAVE_SETUP_TC
-#define HAVE_SETUP_TC
-#endif
-#ifdef CONFIG_DCB
-#ifndef HAVE_DCBNL_IEEE
-#define HAVE_DCBNL_IEEE
-#endif
-#endif /* CONFIG_DCB */
-#ifndef HAVE_NDO_SET_FEATURES
-#define HAVE_NDO_SET_FEATURES
-#endif
-#endif /* < 2.6.39 */
-
-/*****************************************************************************/
-/* use < 2.6.40 because Fedora 15 shipped a kernel update versioned
- * 2.6.40.x that back-ported 3.0 features such as ethtool set_phys_id.
- */
-#undef ETHTOOL_GRXRINGS
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) )
-#ifdef ETHTOOL_GRXRINGS
-#ifndef FLOW_EXT
-#define FLOW_EXT 0x80000000
-union _kc_ethtool_flow_union {
- struct ethtool_tcpip4_spec tcp_ip4_spec;
- struct ethtool_usrip4_spec usr_ip4_spec;
- __u8 hdata[60];
-};
-struct _kc_ethtool_flow_ext {
- __be16 vlan_etype;
- __be16 vlan_tci;
- __be32 data[2];
-};
-struct _kc_ethtool_rx_flow_spec {
- __u32 flow_type;
- union _kc_ethtool_flow_union h_u;
- struct _kc_ethtool_flow_ext h_ext;
- union _kc_ethtool_flow_union m_u;
- struct _kc_ethtool_flow_ext m_ext;
- __u64 ring_cookie;
- __u32 location;
-};
-#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec
-#endif /* FLOW_EXT */
-#endif
-
-#define pci_disable_link_state_locked pci_disable_link_state
-
-#ifndef PCI_LTR_VALUE_MASK
-#define PCI_LTR_VALUE_MASK 0x000003ff
-#endif
-#ifndef PCI_LTR_SCALE_MASK
-#define PCI_LTR_SCALE_MASK 0x00001c00
-#endif
-#ifndef PCI_LTR_SCALE_SHIFT
-#define PCI_LTR_SCALE_SHIFT 10
-#endif
-
-#else /* < 2.6.40 */
-#define HAVE_ETHTOOL_SET_PHYS_ID
-#endif /* < 2.6.40 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
-#ifndef __netdev_alloc_skb_ip_align
-#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l)
-#endif /* __netdev_alloc_skb_ip_align */
-#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app)
-#define dcb_ieee_delapp(dev, app) 0
-#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority)
-#else /* < 3.1.0 */
-#ifndef HAVE_DCBNL_IEEE_DELAPP
-#define HAVE_DCBNL_IEEE_DELAPP
-#endif
-#endif /* < 3.1.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
-#ifdef ETHTOOL_GRXRINGS
-#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
-#endif /* ETHTOOL_GRXRINGS */
-
-#ifndef skb_frag_size
-#define skb_frag_size(frag) _kc_skb_frag_size(frag)
-static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag)
-{
- return frag->size;
-}
-#endif /* skb_frag_size */
-
-#ifndef skb_frag_size_sub
-#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta)
-static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta)
-{
- frag->size -= delta;
-}
-#endif /* skb_frag_size_sub */
-
-#ifndef skb_frag_page
-#define skb_frag_page(frag) _kc_skb_frag_page(frag)
-static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag)
-{
- return frag->page;
-}
-#endif /* skb_frag_page */
-
-#ifndef skb_frag_address
-#define skb_frag_address(frag) _kc_skb_frag_address(frag)
-static inline void *_kc_skb_frag_address(const skb_frag_t *frag)
-{
- return page_address(skb_frag_page(frag)) + frag->page_offset;
-}
-#endif /* skb_frag_address */
-
-#ifndef skb_frag_dma_map
-#define skb_frag_dma_map(dev,frag,offset,size,dir) \
- _kc_skb_frag_dma_map(dev,frag,offset,size,dir)
-static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
- const skb_frag_t *frag,
- size_t offset, size_t size,
- enum dma_data_direction dir)
-{
- return dma_map_page(dev, skb_frag_page(frag),
- frag->page_offset + offset, size, dir);
-}
-#endif /* skb_frag_dma_map */
-
-#ifndef __skb_frag_unref
-#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag)
-static inline void __kc_skb_frag_unref(skb_frag_t *frag)
-{
- put_page(skb_frag_page(frag));
-}
-#endif /* __skb_frag_unref */
-#else /* < 3.2.0 */
-#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
-#define HAVE_PCI_DEV_FLAGS_ASSIGNED
-#define HAVE_VF_SPOOFCHK_CONFIGURE
-#endif
-#endif /* < 3.2.0 */
-
-#if (RHEL_RELEASE_CODE && \
- (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \
- (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
-#undef ixgbe_get_netdev_tc_txq
-#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc])
-#endif
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) )
-typedef u32 kni_netdev_features_t;
-#else /* ! < 3.3.0 */
-typedef netdev_features_t kni_netdev_features_t;
-#define HAVE_INT_NDO_VLAN_RX_ADD_VID
-#ifdef ETHTOOL_SRXNTUPLE
-#undef ETHTOOL_SRXNTUPLE
-#endif
-#endif /* < 3.3.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
-#ifndef NETIF_F_RXFCS
-#define NETIF_F_RXFCS 0
-#endif /* NETIF_F_RXFCS */
-#ifndef NETIF_F_RXALL
-#define NETIF_F_RXALL 0
-#endif /* NETIF_F_RXALL */
-
-#define NUMTCS_RETURNS_U8
-
-
-#endif /* < 3.4.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
-static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
-{
- return !compare_ether_addr(addr1, addr2);
-}
-#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2))
-#else
-#define HAVE_FDB_OPS
-#endif /* < 3.5.0 */
-
-/*****************************************************************************/
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) )
-#define NETIF_F_HW_VLAN_TX NETIF_F_HW_VLAN_CTAG_TX
-#define NETIF_F_HW_VLAN_RX NETIF_F_HW_VLAN_CTAG_RX
-#define NETIF_F_HW_VLAN_FILTER NETIF_F_HW_VLAN_CTAG_FILTER
-#endif /* >= 3.10.0 */
-
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
-#ifdef CONFIG_PCI_IOV
-extern int __kc_pci_vfs_assigned(struct pci_dev *dev);
-#else
-static inline int __kc_pci_vfs_assigned(struct pci_dev *dev)
-{
- return 0;
-}
-#endif
-#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev)
-
-#endif
-
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0) )
-#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))
-#endif /* >= 3.16.0 */
-
-#endif /* _KCOMPAT_H_ */
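The header above repeats a single idiom: compare LINUX_VERSION_CODE against the release that introduced a feature, backport the feature under a _kc_ name on older kernels, and advertise it with a HAVE_* define on newer ones. A minimal sketch of the pattern (all names below are hypothetical, not part of the header):

#include <linux/version.h>

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
/* old kernel: supply the helper ourselves under a _kc_ name */
static inline int _kc_example_helper(int x)
{
	return x + 1; /* stand-in for the real backported logic */
}
#define example_helper(x) _kc_example_helper(x)
#else /* >= 2.6.36 */
/* new kernel: the helper exists; just record the capability */
#define HAVE_EXAMPLE_HELPER
#endif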
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_dev.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_dev.h
deleted file mode 100755
index e79e4721..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_dev.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/*-
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation
- */
-
-#ifndef _KNI_DEV_H_
-#define _KNI_DEV_H_
-
-#include <linux/if.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
-#ifdef RTE_KNI_VHOST
-#include <net/sock.h>
-#endif
-
-#include <exec-env/rte_kni_common.h>
-#define KNI_KTHREAD_RESCHEDULE_INTERVAL 5 /* us */
-
-/**
- * A structure describing the private information for a kni device.
- */
-
-struct kni_dev {
- /* kni list */
- struct list_head list;
-
- struct net_device_stats stats;
- int status;
- uint16_t group_id; /* Group ID of a group of KNI devices */
- unsigned core_id; /* Core ID to bind */
- char name[RTE_KNI_NAMESIZE]; /* Network device name */
- struct task_struct *pthread;
-
- /* wait queue for req/resp */
- wait_queue_head_t wq;
- struct mutex sync_lock;
-
- /* PCI device id */
- uint16_t device_id;
-
- /* kni device */
- struct net_device *net_dev;
- struct net_device *lad_dev;
- struct pci_dev *pci_dev;
-
- /* queue for packets to be sent out */
- void *tx_q;
-
- /* queue for the packets received */
- void *rx_q;
-
- /* queue of allocated mbufs that can be used to store sk_buff data */
- void *alloc_q;
-
- /* free queue for the mbufs to be freed */
- void *free_q;
-
- /* request queue */
- void *req_q;
-
- /* response queue */
- void *resp_q;
-
- void *sync_kva;
- void *sync_va;
-
- void *mbuf_kva;
- void *mbuf_va;
-
- /* mbuf size */
- unsigned mbuf_size;
-
- /* synchro for request processing */
- unsigned long synchro;
-
-#ifdef RTE_KNI_VHOST
- struct kni_vhost_queue *vhost_queue;
- volatile enum {
- BE_STOP = 0x1,
- BE_START = 0x2,
- BE_FINISH = 0x4,
- } vq_status;
-#endif
-};
-
-#define KNI_ERR(args...) printk(KERN_DEBUG "KNI: Error: " args)
-#define KNI_PRINT(args...) printk(KERN_DEBUG "KNI: " args)
-#ifdef RTE_KNI_KO_DEBUG
- #define KNI_DBG(args...) printk(KERN_DEBUG "KNI: " args)
-#else
- #define KNI_DBG(args...)
-#endif
-
-#ifdef RTE_KNI_VHOST
-unsigned int
-kni_poll(struct file *file, struct socket *sock, poll_table * wait);
-int kni_chk_vhost_rx(struct kni_dev *kni);
-int kni_vhost_init(struct kni_dev *kni);
-int kni_vhost_backend_release(struct kni_dev *kni);
-
-struct kni_vhost_queue {
- struct sock sk;
- struct socket *sock;
- int vnet_hdr_sz;
- struct kni_dev *kni;
- int sockfd;
- unsigned int flags;
- struct sk_buff *cache;
- struct rte_kni_fifo *fifo;
-};
-
-#endif
-
-#ifdef RTE_KNI_VHOST_DEBUG_RX
- #define KNI_DBG_RX(args...) printk(KERN_DEBUG "KNI RX: " args)
-#else
- #define KNI_DBG_RX(args...)
-#endif
-
-#ifdef RTE_KNI_VHOST_DEBUG_TX
- #define KNI_DBG_TX(args...) printk(KERN_DEBUG "KNI TX: " args)
-#else
- #define KNI_DBG_TX(args...)
-#endif
-
-#endif
-
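The fifos above carry the whole user/kernel data path: rx_q and free_q move packets into the kernel (buffers are recycled through free_q), tx_q and alloc_q move packets back out, and req_q/resp_q exchange control messages staged in the shared sync buffer. A rough, hypothetical sketch of a control round trip as seen from the kernel side; the real logic is kni_net_process_request() in kni_net.c further below, which also takes sync_lock and waits on the wq wait queue instead of polling:

/* hypothetical sketch only -- locking and error handling omitted */
static int example_process_request(struct kni_dev *kni,
                                   struct rte_kni_request *req)
{
	void *resp_va = NULL;

	memcpy(kni->sync_kva, req, sizeof(*req));    /* stage the request */
	kni_fifo_put(kni->req_q, &kni->sync_va, 1);  /* userspace polls req_q */

	/* userspace writes its answer into the sync buffer and enqueues
	 * the token on resp_q to signal completion */
	while (kni_fifo_get(kni->resp_q, &resp_va, 1) != 1)
		schedule_timeout_interruptible(1);

	memcpy(req, kni->sync_kva, sizeof(*req));    /* read the response */
	return 0;
}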
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_ethtool.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_ethtool.c
deleted file mode 100755
index 06b6d463..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_ethtool.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*-
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation
- */
-
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include "kni_dev.h"
-
-static int
-kni_check_if_running(struct net_device *dev)
-{
- struct kni_dev *priv = netdev_priv(dev);
- if (priv->lad_dev)
- return 0;
- else
- return -EOPNOTSUPP;
-}
-
-static void
-kni_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- struct kni_dev *priv = netdev_priv(dev);
- priv->lad_dev->ethtool_ops->get_drvinfo(priv->lad_dev, info);
-}
-
-static int
-kni_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
- struct kni_dev *priv = netdev_priv(dev);
- return priv->lad_dev->ethtool_ops->get_settings(priv->lad_dev, ecmd);
-}
-
-static int
-kni_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
- struct kni_dev *priv = netdev_priv(dev);
- return priv->lad_dev->ethtool_ops->set_settings(priv->lad_dev, ecmd);
-}
-
-static void
-kni_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- struct kni_dev *priv = netdev_priv(dev);
- priv->lad_dev->ethtool_ops->get_wol(priv->lad_dev, wol);
-}
-
-static int
-kni_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- struct kni_dev *priv = netdev_priv(dev);
- return priv->lad_dev->ethtool_ops->set_wol(priv->lad_dev, wol);
-}
-
-static int
-kni_nway_reset(struct net_device *dev)
-{
- struct kni_dev *priv = netdev_priv(dev);
- return priv->lad_dev->ethtool_ops->nway_reset(priv->lad_dev);
-}
-
-static int
-kni_get_eeprom_len(struct net_device *dev)
-{
- struct kni_dev *priv = netdev_priv(dev);
- return priv->lad_dev->ethtool_ops->get_eeprom_len(priv->lad_dev);
-}
-
-static int
-kni_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
- u8 *bytes)
-{
- struct kni_dev *priv = netdev_priv(dev);
- return priv->lad_dev->ethtool_ops->get_eeprom(priv->lad_dev, eeprom,
- bytes);
-}
-
-static int
-kni_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
- u8 *bytes)
-{
- struct kni_dev *priv = netdev_priv(dev);
- return priv->lad_dev->ethtool_ops->set_eeprom(priv->lad_dev, eeprom,
- bytes);
-}
-
-static void
-kni_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
-{
- struct kni_dev *priv = netdev_priv(dev);
- priv->lad_dev->ethtool_ops->get_ringparam(priv->lad_dev, ring);
-}
-
-static int
-kni_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
-{
- struct kni_dev *priv = netdev_priv(dev);
- return priv->lad_dev->ethtool_ops->set_ringparam(priv->lad_dev, ring);
-}
-
-static void
-kni_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
-{
- struct kni_dev *priv = netdev_priv(dev);
- priv->lad_dev->ethtool_ops->get_pauseparam(priv->lad_dev, pause);
-}
-
-static int
-kni_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
-{
- struct kni_dev *priv = netdev_priv(dev);
- return priv->lad_dev->ethtool_ops->set_pauseparam(priv->lad_dev,
- pause);
-}
-
-static u32
-kni_get_msglevel(struct net_device *dev)
-{
- struct kni_dev *priv = netdev_priv(dev);
- return priv->lad_dev->ethtool_ops->get_msglevel(priv->lad_dev);
-}
-
-static void
-kni_set_msglevel(struct net_device *dev, u32 data)
-{
- struct kni_dev *priv = netdev_priv(dev);
- priv->lad_dev->ethtool_ops->set_msglevel(priv->lad_dev, data);
-}
-
-static int
-kni_get_regs_len(struct net_device *dev)
-{
- struct kni_dev *priv = netdev_priv(dev);
- return priv->lad_dev->ethtool_ops->get_regs_len(priv->lad_dev);
-}
-
-static void
-kni_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
-{
- struct kni_dev *priv = netdev_priv(dev);
- priv->lad_dev->ethtool_ops->get_regs(priv->lad_dev, regs, p);
-}
-
-static void
-kni_get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
- struct kni_dev *priv = netdev_priv(dev);
- priv->lad_dev->ethtool_ops->get_strings(priv->lad_dev, stringset,
- data);
-}
-
-static int
-kni_get_sset_count(struct net_device *dev, int sset)
-{
- struct kni_dev *priv = netdev_priv(dev);
- return priv->lad_dev->ethtool_ops->get_sset_count(priv->lad_dev, sset);
-}
-
-static void
-kni_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats,
- u64 *data)
-{
- struct kni_dev *priv = netdev_priv(dev);
- priv->lad_dev->ethtool_ops->get_ethtool_stats(priv->lad_dev, stats,
- data);
-}
-
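-/* Every op below delegates to the underlying PMD net_device (lad_dev);
- * .begin runs before each op and fails the whole call with -EOPNOTSUPP
- * when no lad_dev is attached.
- */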
-struct ethtool_ops kni_ethtool_ops = {
- .begin = kni_check_if_running,
- .get_drvinfo = kni_get_drvinfo,
- .get_settings = kni_get_settings,
- .set_settings = kni_set_settings,
- .get_regs_len = kni_get_regs_len,
- .get_regs = kni_get_regs,
- .get_wol = kni_get_wol,
- .set_wol = kni_set_wol,
- .nway_reset = kni_nway_reset,
- .get_link = ethtool_op_get_link,
- .get_eeprom_len = kni_get_eeprom_len,
- .get_eeprom = kni_get_eeprom,
- .set_eeprom = kni_set_eeprom,
- .get_ringparam = kni_get_ringparam,
- .set_ringparam = kni_set_ringparam,
- .get_pauseparam = kni_get_pauseparam,
- .set_pauseparam = kni_set_pauseparam,
- .get_msglevel = kni_get_msglevel,
- .set_msglevel = kni_set_msglevel,
- .get_strings = kni_get_strings,
- .get_sset_count = kni_get_sset_count,
- .get_ethtool_stats = kni_get_ethtool_stats,
-};
-
-void
-kni_set_ethtool_ops(struct net_device *netdev)
-{
- netdev->ethtool_ops = &kni_ethtool_ops;
-}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_fifo.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_fifo.h
deleted file mode 100755
index 3ea750e2..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_fifo.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*-
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation
- */
-
-#ifndef _KNI_FIFO_H_
-#define _KNI_FIFO_H_
-
-#include <exec-env/rte_kni_common.h>
-
-/**
- * Add up to num elements to the fifo. Return the number actually written
- */
-static inline unsigned
-kni_fifo_put(struct rte_kni_fifo *fifo, void **data, unsigned num)
-{
- unsigned i = 0;
- unsigned fifo_write = fifo->write;
- unsigned fifo_read = fifo->read;
- unsigned new_write = fifo_write;
-
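- /* fifo->len is assumed to be a power of two: the (len - 1) masks
- * below implement index wrap-around */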
- for (i = 0; i < num; i++) {
- new_write = (new_write + 1) & (fifo->len - 1);
-
- if (new_write == fifo_read)
- break;
- fifo->buffer[fifo_write] = data[i];
- fifo_write = new_write;
- }
- fifo->write = fifo_write;
-
- return i;
-}
-
-/**
- * Get up to num elements from the fifo. Return the number actually read
- */
-static inline unsigned
-kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
-{
- unsigned i = 0;
- unsigned new_read = fifo->read;
- unsigned fifo_write = fifo->write;
-
- for (i = 0; i < num; i++) {
- if (new_read == fifo_write)
- break;
-
- data[i] = fifo->buffer[new_read];
- new_read = (new_read + 1) & (fifo->len - 1);
- }
- fifo->read = new_read;
-
- return i;
-}
-
-/**
- * Get the number of elements in the fifo
- */
-static inline unsigned
-kni_fifo_count(struct rte_kni_fifo *fifo)
-{
- return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
-}
-
-/**
- * Get the number of free entries in the fifo
- */
-static inline unsigned
-kni_fifo_free_count(struct rte_kni_fifo *fifo)
-{
- return (fifo->read - fifo->write - 1) & (fifo->len - 1);
-}
-
-#ifdef RTE_KNI_VHOST
-/**
- * Initializes the kni fifo structure
- */
-static inline void
-kni_fifo_init(struct rte_kni_fifo *fifo, unsigned size)
-{
- fifo->write = 0;
- fifo->read = 0;
- fifo->len = size;
- fifo->elem_size = sizeof(void *);
-}
-#endif
-
-#endif /* _KNI_FIFO_H_ */
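A minimal, hypothetical caller of the fifo above, mirroring how kni_net.c (below) drains rx_q and recycles buffers through free_q. Note the implementation relies on fifo->len being a power of two (hence the len - 1 masks) and on a single producer and single consumer per fifo, since write and read are updated without locks:

/* hypothetical usage sketch, not part of the original source */
static void example_drain(struct rte_kni_fifo *rx_q,
                          struct rte_kni_fifo *free_q)
{
	void *burst[32];
	unsigned i, n;

	n = kni_fifo_get(rx_q, burst, 32); /* dequeue up to 32 mbuf pointers */
	for (i = 0; i < n; i++) {
		/* ... process burst[i] ... */
	}
	kni_fifo_put(free_q, burst, n); /* hand the buffers back */
}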
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_misc.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_misc.c
deleted file mode 100755
index 868b3254..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_misc.c
+++ /dev/null
@@ -1,606 +0,0 @@
-/*-
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation
- */
-
-#include <linux/module.h>
-#include <linux/miscdevice.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
-#include <linux/kthread.h>
-#include <linux/rwsem.h>
-
-#include <exec-env/rte_kni_common.h>
-#include "kni_dev.h"
-#include <rte_config.h>
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Kernel Module for managing kni devices");
-
-#define KNI_RX_LOOP_NUM 1000
-
-#define KNI_MAX_DEVICES 32
-
-extern void kni_net_rx(struct kni_dev *kni);
-extern void kni_net_init(struct net_device *dev);
-extern void kni_net_config_lo_mode(char *lo_str);
-extern void kni_net_poll_resp(struct kni_dev *kni);
-extern void kni_set_ethtool_ops(struct net_device *netdev);
-
-extern int ixgbe_kni_probe(struct pci_dev *pdev, struct net_device **lad_dev);
-extern void ixgbe_kni_remove(struct pci_dev *pdev);
-extern int igb_kni_probe(struct pci_dev *pdev, struct net_device **lad_dev);
-extern void igb_kni_remove(struct pci_dev *pdev);
-
-static int kni_open(struct inode *inode, struct file *file);
-static int kni_release(struct inode *inode, struct file *file);
-static int kni_ioctl(struct inode *inode, unsigned int ioctl_num,
- unsigned long ioctl_param);
-static int kni_compat_ioctl(struct inode *inode, unsigned int ioctl_num,
- unsigned long ioctl_param);
-static int kni_dev_remove(struct kni_dev *dev);
-
-static int __init kni_parse_kthread_mode(void);
-
-/* KNI processing for single kernel thread mode */
-static int kni_thread_single(void *unused);
-/* KNI processing for multiple kernel thread mode */
-static int kni_thread_multiple(void *param);
-
-static struct file_operations kni_fops = {
- .owner = THIS_MODULE,
- .open = kni_open,
- .release = kni_release,
- .unlocked_ioctl = (void *)kni_ioctl,
- .compat_ioctl = (void *)kni_compat_ioctl,
-};
-
-static struct miscdevice kni_misc = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = KNI_DEVICE,
- .fops = &kni_fops,
-};
-
-/* loopback mode */
-static char *lo_mode = NULL;
-
-/* Kernel thread mode */
-static char *kthread_mode = NULL;
-static unsigned multiple_kthread_on = 0;
-
-#define KNI_DEV_IN_USE_BIT_NUM 0 /* Bit number for device in use */
-
-static volatile unsigned long device_in_use; /* device in use flag */
-static struct task_struct *kni_kthread;
-
-/* kni list lock */
-static DECLARE_RWSEM(kni_list_lock);
-
-/* kni list */
-static struct list_head kni_list_head = LIST_HEAD_INIT(kni_list_head);
-
-static int __init
-kni_init(void)
-{
- KNI_PRINT("######## DPDK kni module loading ########\n");
-
- if (kni_parse_kthread_mode() < 0) {
- KNI_ERR("Invalid parameter for kthread_mode\n");
- return -EINVAL;
- }
-
- if (misc_register(&kni_misc) != 0) {
- KNI_ERR("Misc registration failed\n");
- return -EPERM;
- }
-
- /* Clear the bit of device in use */
- clear_bit(KNI_DEV_IN_USE_BIT_NUM, &device_in_use);
-
- /* Configure the lo mode according to the input parameter */
- kni_net_config_lo_mode(lo_mode);
-
- KNI_PRINT("######## DPDK kni module loaded ########\n");
-
- return 0;
-}
-
-static void __exit
-kni_exit(void)
-{
- misc_deregister(&kni_misc);
- KNI_PRINT("####### DPDK kni module unloaded #######\n");
-}
-
-static int __init
-kni_parse_kthread_mode(void)
-{
- if (!kthread_mode)
- return 0;
-
- if (strcmp(kthread_mode, "single") == 0)
- return 0;
- else if (strcmp(kthread_mode, "multiple") == 0)
- multiple_kthread_on = 1;
- else
- return -1;
-
- return 0;
-}
-
-static int
-kni_open(struct inode *inode, struct file *file)
-{
- /* kni device can be opened by one user only, test and set bit */
- if (test_and_set_bit(KNI_DEV_IN_USE_BIT_NUM, &device_in_use))
- return -EBUSY;
-
- /* Create kernel thread for single mode */
- if (multiple_kthread_on == 0) {
- KNI_PRINT("Single kernel thread for all KNI devices\n");
- /* Create kernel thread for RX */
- kni_kthread = kthread_run(kni_thread_single, NULL,
- "kni_single");
- if (IS_ERR(kni_kthread)) {
- KNI_ERR("Unable to create kernel threaed\n");
- return PTR_ERR(kni_kthread);
- }
- } else
- KNI_PRINT("Multiple kernel thread mode enabled\n");
-
- KNI_PRINT("/dev/kni opened\n");
-
- return 0;
-}
-
-static int
-kni_release(struct inode *inode, struct file *file)
-{
- struct kni_dev *dev, *n;
-
- /* Stop kernel thread for single mode */
- if (multiple_kthread_on == 0) {
- /* Stop kernel thread */
- kthread_stop(kni_kthread);
- kni_kthread = NULL;
- }
-
- down_write(&kni_list_lock);
- list_for_each_entry_safe(dev, n, &kni_list_head, list) {
- /* Stop kernel thread for multiple mode */
- if (multiple_kthread_on && dev->pthread != NULL) {
- kthread_stop(dev->pthread);
- dev->pthread = NULL;
- }
-
-#ifdef RTE_KNI_VHOST
- kni_vhost_backend_release(dev);
-#endif
- kni_dev_remove(dev);
- list_del(&dev->list);
- }
- up_write(&kni_list_lock);
-
- /* Clear the bit of device in use */
- clear_bit(KNI_DEV_IN_USE_BIT_NUM, &device_in_use);
-
- KNI_PRINT("/dev/kni closed\n");
-
- return 0;
-}
-
-static int
-kni_thread_single(void *unused)
-{
- int j;
- struct kni_dev *dev, *n;
-
- while (!kthread_should_stop()) {
- down_read(&kni_list_lock);
- for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
- list_for_each_entry_safe(dev, n,
- &kni_list_head, list) {
-#ifdef RTE_KNI_VHOST
- kni_chk_vhost_rx(dev);
-#else
- kni_net_rx(dev);
-#endif
- kni_net_poll_resp(dev);
- }
- }
- up_read(&kni_list_lock);
- /* reschedule out for a while */
- schedule_timeout_interruptible(usecs_to_jiffies( \
- KNI_KTHREAD_RESCHEDULE_INTERVAL));
- }
-
- return 0;
-}
-
-static int
-kni_thread_multiple(void *param)
-{
- int j;
- struct kni_dev *dev = (struct kni_dev *)param;
-
- while (!kthread_should_stop()) {
- for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
-#ifdef RTE_KNI_VHOST
- kni_chk_vhost_rx(dev);
-#else
- kni_net_rx(dev);
-#endif
- kni_net_poll_resp(dev);
- }
- schedule_timeout_interruptible(usecs_to_jiffies( \
- KNI_KTHREAD_RESCHEDULE_INTERVAL));
- }
-
- return 0;
-}
-
-static int
-kni_dev_remove(struct kni_dev *dev)
-{
- if (!dev)
- return -ENODEV;
-
- switch (dev->device_id) {
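- /* x-macro trick: re-including rte_pci_dev_ids.h with the DECL macro
- * defined expands to a "case" label per supported device id */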
- #define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) case (dev):
- #include <rte_pci_dev_ids.h>
- igb_kni_remove(dev->pci_dev);
- break;
- #define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) case (dev):
- #include <rte_pci_dev_ids.h>
- ixgbe_kni_remove(dev->pci_dev);
- break;
- default:
- break;
- }
-
- if (dev->net_dev) {
- unregister_netdev(dev->net_dev);
- free_netdev(dev->net_dev);
- }
-
- return 0;
-}
-
-static int
-kni_check_param(struct kni_dev *kni, struct rte_kni_device_info *dev)
-{
- if (!kni || !dev)
- return -1;
-
- /* Check if network name has been used */
- if (!strncmp(kni->name, dev->name, RTE_KNI_NAMESIZE)) {
- KNI_ERR("KNI name %s duplicated\n", dev->name);
- return -1;
- }
-
- return 0;
-}
-
-static int
-kni_ioctl_create(unsigned int ioctl_num, unsigned long ioctl_param)
-{
- int ret;
- struct rte_kni_device_info dev_info;
- struct pci_dev *pci = NULL;
- struct pci_dev *found_pci = NULL;
- struct net_device *net_dev = NULL;
- struct net_device *lad_dev = NULL;
- struct kni_dev *kni, *dev, *n;
- struct net *net;
-
- printk(KERN_INFO "KNI: Creating kni...\n");
- /* Reject ioctl payloads larger than our dev_info buffer */
- if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
- return -EINVAL;
-
- /* Copy kni info from user space */
- ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info));
- if (ret) {
- KNI_ERR("copy_from_user in kni_ioctl_create");
- return -EIO;
- }
-
- /**
- * For multiple kernel thread mode, check that the cpu core id
- * requested for binding is online.
- */
- if (multiple_kthread_on && dev_info.force_bind &&
- !cpu_online(dev_info.core_id)) {
- KNI_ERR("cpu %u is not online\n", dev_info.core_id);
- return -EINVAL;
- }
-
- /* Check if it has been created */
- down_read(&kni_list_lock);
- list_for_each_entry_safe(dev, n, &kni_list_head, list) {
- if (kni_check_param(dev, &dev_info) < 0) {
- up_read(&kni_list_lock);
- return -EINVAL;
- }
- }
- up_read(&kni_list_lock);
-
- net_dev = alloc_netdev(sizeof(struct kni_dev), dev_info.name,
-#ifdef NET_NAME_UNKNOWN
- NET_NAME_UNKNOWN,
-#endif
- kni_net_init);
- if (net_dev == NULL) {
- KNI_ERR("error allocating device \"%s\"\n", dev_info.name);
- return -EBUSY;
- }
-
- net = get_net_ns_by_pid(current->pid);
- if (IS_ERR(net)) {
- free_netdev(net_dev);
- return PTR_ERR(net);
- }
- dev_net_set(net_dev, net);
- put_net(net);
-
- kni = netdev_priv(net_dev);
-
- kni->net_dev = net_dev;
- kni->group_id = dev_info.group_id;
- kni->core_id = dev_info.core_id;
- strncpy(kni->name, dev_info.name, RTE_KNI_NAMESIZE);
-
- /* Translate user space info into kernel space info */
- kni->tx_q = phys_to_virt(dev_info.tx_phys);
- kni->rx_q = phys_to_virt(dev_info.rx_phys);
- kni->alloc_q = phys_to_virt(dev_info.alloc_phys);
- kni->free_q = phys_to_virt(dev_info.free_phys);
-
- kni->req_q = phys_to_virt(dev_info.req_phys);
- kni->resp_q = phys_to_virt(dev_info.resp_phys);
- kni->sync_va = dev_info.sync_va;
- kni->sync_kva = phys_to_virt(dev_info.sync_phys);
-
- kni->mbuf_kva = phys_to_virt(dev_info.mbuf_phys);
- kni->mbuf_va = dev_info.mbuf_va;
-
-#ifdef RTE_KNI_VHOST
- kni->vhost_queue = NULL;
- kni->vq_status = BE_STOP;
-#endif
- kni->mbuf_size = dev_info.mbuf_size;
-
- KNI_PRINT("tx_phys: 0x%016llx, tx_q addr: 0x%p\n",
- (unsigned long long) dev_info.tx_phys, kni->tx_q);
- KNI_PRINT("rx_phys: 0x%016llx, rx_q addr: 0x%p\n",
- (unsigned long long) dev_info.rx_phys, kni->rx_q);
- KNI_PRINT("alloc_phys: 0x%016llx, alloc_q addr: 0x%p\n",
- (unsigned long long) dev_info.alloc_phys, kni->alloc_q);
- KNI_PRINT("free_phys: 0x%016llx, free_q addr: 0x%p\n",
- (unsigned long long) dev_info.free_phys, kni->free_q);
- KNI_PRINT("req_phys: 0x%016llx, req_q addr: 0x%p\n",
- (unsigned long long) dev_info.req_phys, kni->req_q);
- KNI_PRINT("resp_phys: 0x%016llx, resp_q addr: 0x%p\n",
- (unsigned long long) dev_info.resp_phys, kni->resp_q);
- KNI_PRINT("mbuf_phys: 0x%016llx, mbuf_kva: 0x%p\n",
- (unsigned long long) dev_info.mbuf_phys, kni->mbuf_kva);
- KNI_PRINT("mbuf_va: 0x%p\n", dev_info.mbuf_va);
- KNI_PRINT("mbuf_size: %u\n", kni->mbuf_size);
-
- KNI_DBG("PCI: %02x:%02x.%02x %04x:%04x\n",
- dev_info.bus,
- dev_info.devid,
- dev_info.function,
- dev_info.vendor_id,
- dev_info.device_id);
-
- pci = pci_get_device(dev_info.vendor_id, dev_info.device_id, NULL);
-
- /* Support Ethtool */
- while (pci) {
- KNI_PRINT("pci_bus: %02x:%02x:%02x \n",
- pci->bus->number,
- PCI_SLOT(pci->devfn),
- PCI_FUNC(pci->devfn));
-
- if ((pci->bus->number == dev_info.bus) &&
- (PCI_SLOT(pci->devfn) == dev_info.devid) &&
- (PCI_FUNC(pci->devfn) == dev_info.function)) {
- found_pci = pci;
- switch (dev_info.device_id) {
- #define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) case (dev):
- #include <rte_pci_dev_ids.h>
- ret = igb_kni_probe(found_pci, &lad_dev);
- break;
- #define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) \
- case (dev):
- #include <rte_pci_dev_ids.h>
- ret = ixgbe_kni_probe(found_pci, &lad_dev);
- break;
- default:
- ret = -1;
- break;
- }
-
- KNI_DBG("PCI found: pci=0x%p, lad_dev=0x%p\n",
- pci, lad_dev);
- if (ret == 0) {
- kni->lad_dev = lad_dev;
- kni_set_ethtool_ops(kni->net_dev);
- } else {
- KNI_ERR("Device not supported by ethtool");
- kni->lad_dev = NULL;
- }
-
- kni->pci_dev = found_pci;
- kni->device_id = dev_info.device_id;
- break;
- }
- pci = pci_get_device(dev_info.vendor_id,
- dev_info.device_id, pci);
- }
- if (pci)
- pci_dev_put(pci);
-
- ret = register_netdev(net_dev);
- if (ret) {
- KNI_ERR("error %i registering device \"%s\"\n",
- ret, dev_info.name);
- kni_dev_remove(kni);
- return -ENODEV;
- }
-
-#ifdef RTE_KNI_VHOST
- kni_vhost_init(kni);
-#endif
-
- /**
- * Create a new kernel thread for multiple mode, set its core affinity,
- * and finally wake it up.
- */
- if (multiple_kthread_on) {
- kni->pthread = kthread_create(kni_thread_multiple,
- (void *)kni,
- "kni_%s", kni->name);
- if (IS_ERR(kni->pthread)) {
- kni_dev_remove(kni);
- return -ECANCELED;
- }
- if (dev_info.force_bind)
- kthread_bind(kni->pthread, kni->core_id);
- wake_up_process(kni->pthread);
- }
-
- down_write(&kni_list_lock);
- list_add(&kni->list, &kni_list_head);
- up_write(&kni_list_lock);
-
- return 0;
-}
-
-static int
-kni_ioctl_release(unsigned int ioctl_num, unsigned long ioctl_param)
-{
- int ret = -EINVAL;
- struct kni_dev *dev, *n;
- struct rte_kni_device_info dev_info;
-
- if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
- return -EINVAL;
-
- ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info));
- if (ret) {
- KNI_ERR("copy_from_user in kni_ioctl_release");
- return -EIO;
- }
-
- /* Release the network device according to its name */
- if (strlen(dev_info.name) == 0)
- return ret;
-
- down_write(&kni_list_lock);
- list_for_each_entry_safe(dev, n, &kni_list_head, list) {
- if (strncmp(dev->name, dev_info.name, RTE_KNI_NAMESIZE) != 0)
- continue;
-
- if (multiple_kthread_on && dev->pthread != NULL) {
- kthread_stop(dev->pthread);
- dev->pthread = NULL;
- }
-
-#ifdef RTE_KNI_VHOST
- kni_vhost_backend_release(dev);
-#endif
- kni_dev_remove(dev);
- list_del(&dev->list);
- ret = 0;
- break;
- }
- up_write(&kni_list_lock);
- printk(KERN_INFO "KNI: %s release kni named %s\n",
- (ret == 0 ? "Successfully" : "Unsuccessfully"), dev_info.name);
-
- return ret;
-}
-
-static int
-kni_ioctl(struct inode *inode,
- unsigned int ioctl_num,
- unsigned long ioctl_param)
-{
- int ret = -EINVAL;
-
- KNI_DBG("IOCTL num=0x%0x param=0x%0lx \n", ioctl_num, ioctl_param);
-
- /*
- * Switch according to the ioctl called
- */
- switch (_IOC_NR(ioctl_num)) {
- case _IOC_NR(RTE_KNI_IOCTL_TEST):
- /* For test only, not used */
- break;
- case _IOC_NR(RTE_KNI_IOCTL_CREATE):
- ret = kni_ioctl_create(ioctl_num, ioctl_param);
- break;
- case _IOC_NR(RTE_KNI_IOCTL_RELEASE):
- ret = kni_ioctl_release(ioctl_num, ioctl_param);
- break;
- default:
- KNI_DBG("IOCTL default \n");
- break;
- }
-
- return ret;
-}
-
-static int
-kni_compat_ioctl(struct inode *inode,
- unsigned int ioctl_num,
- unsigned long ioctl_param)
-{
- /* 32-bit applications on a 64-bit OS are to be supported later */
- KNI_PRINT("Not implemented.\n");
-
- return -EINVAL;
-}
-
-module_init(kni_init);
-module_exit(kni_exit);
-
-module_param(lo_mode, charp, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(lo_mode,
-"KNI loopback mode (default=lo_mode_none):\n"
-" lo_mode_none Kernel loopback disabled\n"
-" lo_mode_fifo Enable kernel loopback with fifo\n"
-" lo_mode_fifo_skb Enable kernel loopback with fifo and skb buffer\n"
-"\n"
-);
-
-module_param(kthread_mode, charp, S_IRUGO);
-MODULE_PARM_DESC(kthread_mode,
-"Kernel thread mode (default=single):\n"
-" single Single kernel thread mode enabled.\n"
-" multiple Multiple kernel thread mode enabled.\n"
-"\n"
-);
-
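Both parameters above are read once at module load time; for example (module name as produced by the DPDK build assumed):

    insmod rte_kni.ko kthread_mode=multiple lo_mode=lo_mode_fifo_skb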
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_net.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_net.c
deleted file mode 100755
index dd95db5b..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_net.c
+++ /dev/null
@@ -1,687 +0,0 @@
-/*-
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation
- */
-
-/*
- * This code is inspired from the book "Linux Device Drivers" by
- * Alessandro Rubini and Jonathan Corbet, published by O'Reilly & Associates
- */
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h> /* eth_type_trans */
-#include <linux/skbuff.h>
-#include <linux/kthread.h>
-#include <linux/delay.h>
-
-#include <rte_config.h>
-#include <exec-env/rte_kni_common.h>
-#include <kni_fifo.h>
-#include "kni_dev.h"
-
-#define WD_TIMEOUT 5 /* jiffies */
-
-#define MBUF_BURST_SZ 32
-
-#define KNI_WAIT_RESPONSE_TIMEOUT 300 /* 3 seconds */
-
-/* typedef for rx function */
-typedef void (*kni_net_rx_t)(struct kni_dev *kni);
-
-static int kni_net_tx(struct sk_buff *skb, struct net_device *dev);
-static void kni_net_rx_normal(struct kni_dev *kni);
-static void kni_net_rx_lo_fifo(struct kni_dev *kni);
-static void kni_net_rx_lo_fifo_skb(struct kni_dev *kni);
-static int kni_net_process_request(struct kni_dev *kni,
- struct rte_kni_request *req);
-
-/* kni rx function pointer, with default to normal rx */
-static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;
-
-/*
- * Open and close
- */
-static int
-kni_net_open(struct net_device *dev)
-{
- int ret;
- struct rte_kni_request req;
- struct kni_dev *kni = netdev_priv(dev);
-
- if (kni->lad_dev)
- memcpy(dev->dev_addr, kni->lad_dev->dev_addr, ETH_ALEN);
- else
- /*
- * Generate a random MAC address (eth_random_addr() is the
- * newer name for this helper in recent Linux kernels).
- */
- random_ether_addr(dev->dev_addr);
-
- netif_start_queue(dev);
-
- memset(&req, 0, sizeof(req));
- req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF;
-
- /* Setting if_up to non-zero means up */
- req.if_up = 1;
- ret = kni_net_process_request(kni, &req);
-
- return (ret == 0 ? req.result : ret);
-}
-
-static int
-kni_net_release(struct net_device *dev)
-{
- int ret;
- struct rte_kni_request req;
- struct kni_dev *kni = netdev_priv(dev);
-
- netif_stop_queue(dev); /* can't transmit any more */
-
- memset(&req, 0, sizeof(req));
- req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF;
-
- /* Setting if_up to 0 means down */
- req.if_up = 0;
- ret = kni_net_process_request(kni, &req);
-
- return (ret == 0 ? req.result : ret);
-}
-
-/*
- * Configuration changes (passed on by ifconfig)
- */
-static int
-kni_net_config(struct net_device *dev, struct ifmap *map)
-{
- if (dev->flags & IFF_UP) /* can't act on a running interface */
- return -EBUSY;
-
- /* ignore other fields */
- return 0;
-}
-
-/*
- * RX: normal working mode
- */
-static void
-kni_net_rx_normal(struct kni_dev *kni)
-{
- unsigned ret;
- uint32_t len;
- unsigned i, num, num_rq, num_fq;
- struct rte_kni_mbuf *kva;
- struct rte_kni_mbuf *va[MBUF_BURST_SZ];
- void *data_kva;
-
- struct sk_buff *skb;
- struct net_device *dev = kni->net_dev;
-
- /* Get the number of entries in rx_q */
- num_rq = kni_fifo_count(kni->rx_q);
-
- /* Get the number of free entries in free_q */
- num_fq = kni_fifo_free_count(kni->free_q);
-
- /* Calculate the number of entries to dequeue from rx_q */
- num = min(num_rq, num_fq);
- num = min(num, (unsigned)MBUF_BURST_SZ);
-
- /* Return if rx_q is empty or free_q has no free entry */
- if (num == 0)
- return;
-
- /* Burst dequeue from rx_q */
- ret = kni_fifo_get(kni->rx_q, (void **)va, num);
- if (ret == 0)
- return; /* Failing should not happen */
-
- /* Transfer received packets to netif */
- for (i = 0; i < num; i++) {
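- /* rebase the userspace mbuf pointer into the kernel mapping:
- * kva = va - mbuf_va + mbuf_kva */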
- kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
- len = kva->data_len;
- data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va
- + kni->mbuf_kva;
-
- skb = dev_alloc_skb(len + 2);
- if (!skb) {
- KNI_ERR("Out of mem, dropping pkts\n");
- /* Update statistics */
- kni->stats.rx_dropped++;
- }
- else {
- /* Align IP on 16B boundary */
- skb_reserve(skb, 2);
- memcpy(skb_put(skb, len), data_kva, len);
- skb->dev = dev;
- skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- /* Call netif interface */
- netif_rx(skb);
-
- /* Update statistics */
- kni->stats.rx_bytes += len;
- kni->stats.rx_packets++;
- }
- }
-
- /* Burst enqueue mbufs into free_q */
- ret = kni_fifo_put(kni->free_q, (void **)va, num);
- if (ret != num)
- /* Failing should not happen */
- KNI_ERR("Fail to enqueue entries into free_q\n");
-}
-
-/*
- * RX: loopback with enqueue/dequeue fifos.
- */
-static void
-kni_net_rx_lo_fifo(struct kni_dev *kni)
-{
- unsigned ret;
- uint32_t len;
- unsigned i, num, num_rq, num_tq, num_aq, num_fq;
- struct rte_kni_mbuf *kva;
- struct rte_kni_mbuf *va[MBUF_BURST_SZ];
-	void *data_kva;
-
- struct rte_kni_mbuf *alloc_kva;
- struct rte_kni_mbuf *alloc_va[MBUF_BURST_SZ];
- void *alloc_data_kva;
-
- /* Get the number of entries in rx_q */
- num_rq = kni_fifo_count(kni->rx_q);
-
-	/* Get the number of free entries in tx_q */
- num_tq = kni_fifo_free_count(kni->tx_q);
-
- /* Get the number of entries in alloc_q */
- num_aq = kni_fifo_count(kni->alloc_q);
-
- /* Get the number of free entries in free_q */
- num_fq = kni_fifo_free_count(kni->free_q);
-
- /* Calculate the number of entries to be dequeued from rx_q */
- num = min(num_rq, num_tq);
- num = min(num, num_aq);
- num = min(num, num_fq);
- num = min(num, (unsigned)MBUF_BURST_SZ);
-
- /* Return if no entry to dequeue from rx_q */
- if (num == 0)
- return;
-
- /* Burst dequeue from rx_q */
- ret = kni_fifo_get(kni->rx_q, (void **)va, num);
- if (ret == 0)
- return; /* Failing should not happen */
-
- /* Dequeue entries from alloc_q */
- ret = kni_fifo_get(kni->alloc_q, (void **)alloc_va, num);
- if (ret) {
- num = ret;
-		/* Copy each received mbuf into a freshly dequeued alloc_q mbuf */
- for (i = 0; i < num; i++) {
- kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
- len = kva->pkt_len;
- data_kva = kva->buf_addr + kva->data_off -
- kni->mbuf_va + kni->mbuf_kva;
-
- alloc_kva = (void *)alloc_va[i] - kni->mbuf_va +
- kni->mbuf_kva;
- alloc_data_kva = alloc_kva->buf_addr +
- alloc_kva->data_off - kni->mbuf_va +
- kni->mbuf_kva;
- memcpy(alloc_data_kva, data_kva, len);
- alloc_kva->pkt_len = len;
- alloc_kva->data_len = len;
-
- kni->stats.tx_bytes += len;
- kni->stats.rx_bytes += len;
- }
-
- /* Burst enqueue mbufs into tx_q */
- ret = kni_fifo_put(kni->tx_q, (void **)alloc_va, num);
- if (ret != num)
- /* Failing should not happen */
- KNI_ERR("Fail to enqueue mbufs into tx_q\n");
- }
-
- /* Burst enqueue mbufs into free_q */
- ret = kni_fifo_put(kni->free_q, (void **)va, num);
- if (ret != num)
- /* Failing should not happen */
- KNI_ERR("Fail to enqueue mbufs into free_q\n");
-
- /**
-	 * Update statistics; enqueue/dequeue failure is impossible
-	 * because all queue counts were checked up front.
- */
- kni->stats.tx_packets += num;
- kni->stats.rx_packets += num;
-}
-
-/*
- * RX: loopback with enqueue/dequeue fifos and sk buffer copies.
- */
-static void
-kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
-{
- unsigned ret;
- uint32_t len;
- unsigned i, num_rq, num_fq, num;
- struct rte_kni_mbuf *kva;
- struct rte_kni_mbuf *va[MBUF_BURST_SZ];
-	void *data_kva;
-
- struct sk_buff *skb;
- struct net_device *dev = kni->net_dev;
-
- /* Get the number of entries in rx_q */
- num_rq = kni_fifo_count(kni->rx_q);
-
- /* Get the number of free entries in free_q */
- num_fq = kni_fifo_free_count(kni->free_q);
-
- /* Calculate the number of entries to dequeue from rx_q */
- num = min(num_rq, num_fq);
- num = min(num, (unsigned)MBUF_BURST_SZ);
-
- /* Return if no entry to dequeue from rx_q */
- if (num == 0)
- return;
-
- /* Burst dequeue mbufs from rx_q */
- ret = kni_fifo_get(kni->rx_q, (void **)va, num);
- if (ret == 0)
- return;
-
- /* Copy mbufs to sk buffer and then call tx interface */
- for (i = 0; i < num; i++) {
- kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
- len = kva->data_len;
- data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va +
- kni->mbuf_kva;
-
- skb = dev_alloc_skb(len + 2);
- if (skb == NULL)
- KNI_ERR("Out of mem, dropping pkts\n");
- else {
- /* Align IP on 16B boundary */
- skb_reserve(skb, 2);
- memcpy(skb_put(skb, len), data_kva, len);
- skb->dev = dev;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- dev_kfree_skb(skb);
- }
-
- /* Simulate real usage, allocate/copy skb twice */
- skb = dev_alloc_skb(len + 2);
- if (skb == NULL) {
- KNI_ERR("Out of mem, dropping pkts\n");
- kni->stats.rx_dropped++;
- }
- else {
- /* Align IP on 16B boundary */
- skb_reserve(skb, 2);
- memcpy(skb_put(skb, len), data_kva, len);
- skb->dev = dev;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- kni->stats.rx_bytes += len;
- kni->stats.rx_packets++;
-
- /* call tx interface */
- kni_net_tx(skb, dev);
- }
- }
-
- /* enqueue all the mbufs from rx_q into free_q */
-	ret = kni_fifo_put(kni->free_q, (void **)va, num);
- if (ret != num)
- /* Failing should not happen */
- KNI_ERR("Fail to enqueue mbufs into free_q\n");
-}
-
-/* rx interface */
-void
-kni_net_rx(struct kni_dev *kni)
-{
- /**
-	 * No NULL check is needed: kni_net_rx_func always has a
-	 * default value.
- */
- (*kni_net_rx_func)(kni);
-}
-
-/*
- * Transmit a packet (called by the kernel)
- */
-#ifdef RTE_KNI_VHOST
-static int
-kni_net_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct kni_dev *kni = netdev_priv(dev);
-
- dev_kfree_skb(skb);
- kni->stats.tx_dropped++;
-
- return NETDEV_TX_OK;
-}
-#else
-static int
-kni_net_tx(struct sk_buff *skb, struct net_device *dev)
-{
- int len = 0;
- unsigned ret;
- struct kni_dev *kni = netdev_priv(dev);
- struct rte_kni_mbuf *pkt_kva = NULL;
- struct rte_kni_mbuf *pkt_va = NULL;
-
- dev->trans_start = jiffies; /* save the timestamp */
-
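-	/*
-	 * TX path: copy the skb into an mbuf taken from alloc_q and hand
-	 * it to userspace via tx_q; the skb itself is always freed here.
-	 */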
-	/* Drop oversized packets that do not fit into an mbuf */
- if (skb->len > kni->mbuf_size)
- goto drop;
-
- /**
- * Check if it has at least one free entry in tx_q and
- * one entry in alloc_q.
- */
- if (kni_fifo_free_count(kni->tx_q) == 0 ||
- kni_fifo_count(kni->alloc_q) == 0) {
- /**
- * If no free entry in tx_q or no entry in alloc_q,
-	 * drop the skb and return.
- */
- goto drop;
- }
-
- /* dequeue a mbuf from alloc_q */
- ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
- if (likely(ret == 1)) {
- void *data_kva;
-
- pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
- data_kva = pkt_kva->buf_addr + pkt_kva->data_off - kni->mbuf_va
- + kni->mbuf_kva;
-
- len = skb->len;
- memcpy(data_kva, skb->data, len);
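-		/*
-		 * Pad short frames up to the minimum Ethernet frame length
-		 * (ETH_ZLEN) so the receiver never sees a runt packet.
-		 */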
- if (unlikely(len < ETH_ZLEN)) {
- memset(data_kva + len, 0, ETH_ZLEN - len);
- len = ETH_ZLEN;
- }
- pkt_kva->pkt_len = len;
- pkt_kva->data_len = len;
-
- /* enqueue mbuf into tx_q */
- ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
- if (unlikely(ret != 1)) {
- /* Failing should not happen */
- KNI_ERR("Fail to enqueue mbuf into tx_q\n");
- goto drop;
- }
- } else {
- /* Failing should not happen */
- KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
- goto drop;
- }
-
- /* Free skb and update statistics */
- dev_kfree_skb(skb);
- kni->stats.tx_bytes += len;
- kni->stats.tx_packets++;
-
- return NETDEV_TX_OK;
-
-drop:
- /* Free skb and update statistics */
- dev_kfree_skb(skb);
- kni->stats.tx_dropped++;
-
- return NETDEV_TX_OK;
-}
-#endif
-
-/*
- * Deal with a transmit timeout.
- */
-static void
-kni_net_tx_timeout(struct net_device *dev)
-{
- struct kni_dev *kni = netdev_priv(dev);
-
- KNI_DBG("Transmit timeout at %ld, latency %ld\n", jiffies,
- jiffies - dev->trans_start);
-
- kni->stats.tx_errors++;
- netif_wake_queue(dev);
- return;
-}
-
-/*
- * Ioctl commands
- */
-static int
-kni_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- KNI_DBG("kni_net_ioctl %d\n",
- ((struct kni_dev *)netdev_priv(dev))->group_id);
-
- return 0;
-}
-
-static int
-kni_net_change_mtu(struct net_device *dev, int new_mtu)
-{
- int ret;
- struct rte_kni_request req;
- struct kni_dev *kni = netdev_priv(dev);
-
- KNI_DBG("kni_net_change_mtu new mtu %d to be set\n", new_mtu);
-
- memset(&req, 0, sizeof(req));
- req.req_id = RTE_KNI_REQ_CHANGE_MTU;
- req.new_mtu = new_mtu;
- ret = kni_net_process_request(kni, &req);
- if (ret == 0 && req.result == 0)
- dev->mtu = new_mtu;
-
- return (ret == 0 ? req.result : ret);
-}
-
-/*
- * Wake up the request waiter once the userspace application has posted a response
- */
-void
-kni_net_poll_resp(struct kni_dev *kni)
-{
- if (kni_fifo_count(kni->resp_q))
- wake_up_interruptible(&kni->wq);
-}
-
-/*
- * Send a request to the userspace application and wait for its
- * response: the request is copied into the shared sync buffer, its
- * pointer is enqueued on req_q, and the caller sleeps until the
- * response appears on resp_q or a 3 second timeout expires.
- */
-static int
-kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
-{
- int ret = -1;
- void *resp_va;
- unsigned num;
- int ret_val;
-
- if (!kni || !req) {
- KNI_ERR("No kni instance or request\n");
- return -EINVAL;
- }
-
- mutex_lock(&kni->sync_lock);
-
- /* Construct data */
- memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
- num = kni_fifo_put(kni->req_q, &kni->sync_va, 1);
- if (num < 1) {
- KNI_ERR("Cannot send to req_q\n");
- ret = -EBUSY;
- goto fail;
- }
-
- ret_val = wait_event_interruptible_timeout(kni->wq,
- kni_fifo_count(kni->resp_q), 3 * HZ);
- if (signal_pending(current) || ret_val <= 0) {
- ret = -ETIME;
- goto fail;
- }
- num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1);
- if (num != 1 || resp_va != kni->sync_va) {
- /* This should never happen */
- KNI_ERR("No data in resp_q\n");
- ret = -ENODATA;
- goto fail;
- }
-
- memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
- ret = 0;
-
-fail:
- mutex_unlock(&kni->sync_lock);
- return ret;
-}
-
-/*
- * Return statistics to the caller
- */
-static struct net_device_stats *
-kni_net_stats(struct net_device *dev)
-{
- struct kni_dev *kni = netdev_priv(dev);
- return &kni->stats;
-}
-
-/*
- * Fill the eth header
- */
-static int
-kni_net_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, const void *daddr,
- const void *saddr, unsigned int len)
-{
- struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
-
- memcpy(eth->h_source, saddr ? saddr : dev->dev_addr, dev->addr_len);
- memcpy(eth->h_dest, daddr ? daddr : dev->dev_addr, dev->addr_len);
- eth->h_proto = htons(type);
-
- return (dev->hard_header_len);
-}
-
-
-/*
- * Re-fill the eth header
- */
-static int
-kni_net_rebuild_header(struct sk_buff *skb)
-{
- struct net_device *dev = skb->dev;
- struct ethhdr *eth = (struct ethhdr *) skb->data;
-
- memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
- memcpy(eth->h_dest, dev->dev_addr, dev->addr_len);
-
- return 0;
-}
-
-/**
- * kni_net_set_mac - Change the Ethernet Address of the KNI NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- **/
-static int kni_net_set_mac(struct net_device *netdev, void *p)
-{
- struct sockaddr *addr = p;
- if (!is_valid_ether_addr((unsigned char *)(addr->sa_data)))
- return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- return 0;
-}
-
-static const struct header_ops kni_net_header_ops = {
- .create = kni_net_header,
- .rebuild = kni_net_rebuild_header,
- .cache = NULL, /* disable caching */
-};
-
-static const struct net_device_ops kni_net_netdev_ops = {
- .ndo_open = kni_net_open,
- .ndo_stop = kni_net_release,
- .ndo_set_config = kni_net_config,
- .ndo_start_xmit = kni_net_tx,
- .ndo_change_mtu = kni_net_change_mtu,
- .ndo_do_ioctl = kni_net_ioctl,
- .ndo_get_stats = kni_net_stats,
- .ndo_tx_timeout = kni_net_tx_timeout,
- .ndo_set_mac_address = kni_net_set_mac,
-};
-
-void
-kni_net_init(struct net_device *dev)
-{
- struct kni_dev *kni = netdev_priv(dev);
-
- KNI_DBG("kni_net_init\n");
-
- init_waitqueue_head(&kni->wq);
- mutex_init(&kni->sync_lock);
-
- ether_setup(dev); /* assign some of the fields */
- dev->netdev_ops = &kni_net_netdev_ops;
- dev->header_ops = &kni_net_header_ops;
- dev->watchdog_timeo = WD_TIMEOUT;
-}
-
-void
-kni_net_config_lo_mode(char *lo_str)
-{
- if (!lo_str) {
- KNI_PRINT("loopback disabled");
- return;
- }
-
- if (!strcmp(lo_str, "lo_mode_none"))
- KNI_PRINT("loopback disabled");
- else if (!strcmp(lo_str, "lo_mode_fifo")) {
- KNI_PRINT("loopback mode=lo_mode_fifo enabled");
- kni_net_rx_func = kni_net_rx_lo_fifo;
- } else if (!strcmp(lo_str, "lo_mode_fifo_skb")) {
- KNI_PRINT("loopback mode=lo_mode_fifo_skb enabled");
- kni_net_rx_func = kni_net_rx_lo_fifo_skb;
- } else
-		KNI_PRINT("Unknown parameter, loopback disabled");
-}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_vhost.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_vhost.c
deleted file mode 100755
index 7141f833..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_vhost.c
+++ /dev/null
@@ -1,811 +0,0 @@
-/*-
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation
- */
-
-#include <linux/module.h>
-#include <linux/net.h>
-#include <net/sock.h>
-#include <linux/virtio_net.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/nsproxy.h>
-#include <linux/sched.h>
-#include <linux/if_tun.h>
-#include <linux/version.h>
-
-#include "compat.h"
-#include "kni_dev.h"
-#include "kni_fifo.h"
-
-#define RX_BURST_SZ 4
-
-extern void put_unused_fd(unsigned int fd);
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
-extern struct file*
-sock_alloc_file(struct socket *sock,
- int flags, const char *dname);
-
-extern int get_unused_fd_flags(unsigned flags);
-
-extern void fd_install(unsigned int fd, struct file *file);
-
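-/*
- * Kernels >= 3.7 removed the old sock_map_fd() helper (see the #else
- * branch below); emulate it by reserving an fd, allocating a socket
- * file, and installing the pair.
- */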
-static int kni_sock_map_fd(struct socket *sock)
-{
- struct file *file;
- int fd = get_unused_fd_flags(0);
- if (fd < 0)
- return fd;
-
- file = sock_alloc_file(sock, 0, NULL);
- if (IS_ERR(file)) {
- put_unused_fd(fd);
- return PTR_ERR(file);
- }
- fd_install(fd, file);
- return fd;
-}
-#else
-#define kni_sock_map_fd(s) sock_map_fd(s, 0)
-#endif
-
-static struct proto kni_raw_proto = {
- .name = "kni_vhost",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct kni_vhost_queue),
-};
-
-static inline int
-kni_vhost_net_tx(struct kni_dev *kni, struct iovec *iov,
- unsigned offset, unsigned len)
-{
- struct rte_kni_mbuf *pkt_kva = NULL;
- struct rte_kni_mbuf *pkt_va = NULL;
- int ret;
-
- KNI_DBG_TX("tx offset=%d, len=%d, iovlen=%d\n",
- offset, len, (int)iov->iov_len);
-
- /**
- * Check if it has at least one free entry in tx_q and
- * one entry in alloc_q.
- */
- if (kni_fifo_free_count(kni->tx_q) == 0 ||
- kni_fifo_count(kni->alloc_q) == 0) {
- /**
- * If no free entry in tx_q or no entry in alloc_q,
-	 * drop the packet and return.
- */
- goto drop;
- }
-
- /* dequeue a mbuf from alloc_q */
- ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
- if (likely(ret == 1)) {
- void *data_kva;
-
- pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
- data_kva = pkt_kva->buf_addr + pkt_kva->data_off
- - kni->mbuf_va + kni->mbuf_kva;
-
- memcpy_fromiovecend(data_kva, iov, offset, len);
- if (unlikely(len < ETH_ZLEN)) {
- memset(data_kva + len, 0, ETH_ZLEN - len);
- len = ETH_ZLEN;
- }
- pkt_kva->pkt_len = len;
- pkt_kva->data_len = len;
-
- /* enqueue mbuf into tx_q */
- ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
- if (unlikely(ret != 1)) {
- /* Failing should not happen */
- KNI_ERR("Fail to enqueue mbuf into tx_q\n");
- goto drop;
- }
- } else {
- /* Failing should not happen */
- KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
- goto drop;
- }
-
- /* update statistics */
- kni->stats.tx_bytes += len;
- kni->stats.tx_packets++;
-
- return 0;
-
-drop:
- /* update statistics */
- kni->stats.tx_dropped++;
-
- return 0;
-}
-
-static inline int
-kni_vhost_net_rx(struct kni_dev *kni, struct iovec *iov,
- unsigned offset, unsigned len)
-{
- uint32_t pkt_len;
- struct rte_kni_mbuf *kva;
- struct rte_kni_mbuf *va;
-	void *data_kva;
- struct sk_buff *skb;
- struct kni_vhost_queue *q = kni->vhost_queue;
-
- if (unlikely(q == NULL))
- return 0;
-
- /* ensure at least one entry in free_q */
- if (unlikely(kni_fifo_free_count(kni->free_q) == 0))
- return 0;
-
- skb = skb_dequeue(&q->sk.sk_receive_queue);
- if (unlikely(skb == NULL))
- return 0;
-
- kva = (struct rte_kni_mbuf*)skb->data;
-
- /* free skb to cache */
- skb->data = NULL;
- if (unlikely(1 != kni_fifo_put(q->fifo, (void **)&skb, 1)))
- /* Failing should not happen */
- KNI_ERR("Fail to enqueue entries into rx cache fifo\n");
-
- pkt_len = kva->data_len;
- if (unlikely(pkt_len > len))
- goto drop;
-
- KNI_DBG_RX("rx offset=%d, len=%d, pkt_len=%d, iovlen=%d\n",
- offset, len, pkt_len, (int)iov->iov_len);
-
- data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va + kni->mbuf_kva;
- if (unlikely(memcpy_toiovecend(iov, data_kva, offset, pkt_len)))
- goto drop;
-
- /* Update statistics */
- kni->stats.rx_bytes += pkt_len;
- kni->stats.rx_packets++;
-
- /* enqueue mbufs into free_q */
- va = (void*)kva - kni->mbuf_kva + kni->mbuf_va;
- if (unlikely(1 != kni_fifo_put(kni->free_q, (void **)&va, 1)))
- /* Failing should not happen */
- KNI_ERR("Fail to enqueue entries into free_q\n");
-
- KNI_DBG_RX("receive done %d\n", pkt_len);
-
- return pkt_len;
-
-drop:
- /* Update drop statistics */
- kni->stats.rx_dropped++;
-
- return 0;
-}
-
-static unsigned int
-kni_sock_poll(struct file *file, struct socket *sock, poll_table * wait)
-{
- struct kni_vhost_queue *q =
- container_of(sock->sk, struct kni_vhost_queue, sk);
- struct kni_dev *kni;
- unsigned int mask = 0;
-
- if (unlikely(q == NULL || q->kni == NULL))
- return POLLERR;
-
- kni = q->kni;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
- KNI_DBG("start kni_poll on group %d, wq 0x%16llx\n",
- kni->group_id, (uint64_t)sock->wq);
-#else
- KNI_DBG("start kni_poll on group %d, wait at 0x%16llx\n",
- kni->group_id, (uint64_t)&sock->wait);
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
- poll_wait(file, &sock->wq->wait, wait);
-#else
- poll_wait(file, &sock->wait, wait);
-#endif
-
- if (kni_fifo_count(kni->rx_q) > 0)
- mask |= POLLIN | POLLRDNORM;
-
- if (sock_writeable(&q->sk) ||
- (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock->flags) &&
- sock_writeable(&q->sk)))
- mask |= POLLOUT | POLLWRNORM;
-
- return mask;
-}
-
-static inline void
-kni_vhost_enqueue(struct kni_dev *kni, struct kni_vhost_queue *q,
- struct sk_buff *skb, struct rte_kni_mbuf *va)
-{
- struct rte_kni_mbuf *kva;
-
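-	/*
-	 * No data copy here: the skb's data pointer is aimed directly at
-	 * the mbuf's kernel virtual address; kni_vhost_net_rx() later
-	 * copies the payload from there into the reader's iovec.
-	 */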
- kva = (void *)(va) - kni->mbuf_va + kni->mbuf_kva;
- (skb)->data = (unsigned char*)kva;
- (skb)->len = kva->data_len;
- skb_queue_tail(&q->sk.sk_receive_queue, skb);
-}
-
-static inline void
-kni_vhost_enqueue_burst(struct kni_dev *kni, struct kni_vhost_queue *q,
- struct sk_buff **skb, struct rte_kni_mbuf **va)
-{
- int i;
- for (i = 0; i < RX_BURST_SZ; skb++, va++, i++)
- kni_vhost_enqueue(kni, q, *skb, *va);
-}
-
-int
-kni_chk_vhost_rx(struct kni_dev *kni)
-{
- struct kni_vhost_queue *q = kni->vhost_queue;
- unsigned nb_in, nb_mbuf, nb_skb;
- const unsigned BURST_MASK = RX_BURST_SZ - 1;
- unsigned nb_burst, nb_backlog, i;
- struct sk_buff *skb[RX_BURST_SZ];
- struct rte_kni_mbuf *va[RX_BURST_SZ];
-
- if (unlikely(BE_STOP & kni->vq_status)) {
- kni->vq_status |= BE_FINISH;
- return 0;
- }
-
- if (unlikely(q == NULL))
- return 0;
-
- nb_skb = kni_fifo_count(q->fifo);
- nb_mbuf = kni_fifo_count(kni->rx_q);
-
- nb_in = min(nb_mbuf, nb_skb);
- nb_in = min(nb_in, (unsigned)RX_BURST_SZ);
- nb_burst = (nb_in & ~BURST_MASK);
- nb_backlog = (nb_in & BURST_MASK);
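-	/*
-	 * RX_BURST_SZ is a power of two, so BURST_MASK splits nb_in into
-	 * full bursts of RX_BURST_SZ entries plus a per-packet backlog.
-	 */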
-
- /* enqueue skb_queue per BURST_SIZE bulk */
- if (0 != nb_burst) {
- if (unlikely(RX_BURST_SZ != kni_fifo_get(
- kni->rx_q, (void **)&va,
- RX_BURST_SZ)))
- goto except;
-
- if (unlikely(RX_BURST_SZ != kni_fifo_get(
- q->fifo, (void **)&skb,
- RX_BURST_SZ)))
- goto except;
-
- kni_vhost_enqueue_burst(kni, q, skb, va);
- }
-
- /* all leftover, do one by one */
- for (i = 0; i < nb_backlog; ++i) {
- if (unlikely(1 != kni_fifo_get(
- kni->rx_q,(void **)&va, 1)))
- goto except;
-
- if (unlikely(1 != kni_fifo_get(
- q->fifo, (void **)&skb, 1)))
- goto except;
-
- kni_vhost_enqueue(kni, q, *skb, *va);
- }
-
- /* Ondemand wake up */
- if ((nb_in == RX_BURST_SZ) || (nb_skb == 0) ||
- ((nb_mbuf < RX_BURST_SZ) && (nb_mbuf != 0))) {
- wake_up_interruptible_poll(sk_sleep(&q->sk),
- POLLIN | POLLRDNORM | POLLRDBAND);
- KNI_DBG_RX("RX CHK KICK nb_mbuf %d, nb_skb %d, nb_in %d\n",
- nb_mbuf, nb_skb, nb_in);
- }
-
- return 0;
-
-except:
- /* Failing should not happen */
-	KNI_ERR("Failed to enqueue into fifo; this should not happen\n");
- BUG_ON(1);
-
- return 0;
-}
-
-static int
-kni_sock_sndmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len)
-{
- struct kni_vhost_queue *q =
- container_of(sock->sk, struct kni_vhost_queue, sk);
- int vnet_hdr_len = 0;
- unsigned long len = total_len;
-
- if (unlikely(q == NULL || q->kni == NULL))
- return 0;
-
- KNI_DBG_TX("kni_sndmsg len %ld, flags 0x%08x, nb_iov %d\n",
- len, q->flags, (int)m->msg_iovlen);
-
-#ifdef RTE_KNI_VHOST_VNET_HDR_EN
- if (likely(q->flags & IFF_VNET_HDR)) {
- vnet_hdr_len = q->vnet_hdr_sz;
- if (unlikely(len < vnet_hdr_len))
- return -EINVAL;
- len -= vnet_hdr_len;
- }
-#endif
-
- if (unlikely(len < ETH_HLEN + q->vnet_hdr_sz))
- return -EINVAL;
-
- return kni_vhost_net_tx(q->kni, m->msg_iov, vnet_hdr_len, len);
-}
-
-static int
-kni_sock_rcvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t len, int flags)
-{
- int vnet_hdr_len = 0;
- int pkt_len = 0;
- struct kni_vhost_queue *q =
- container_of(sock->sk, struct kni_vhost_queue, sk);
- static struct virtio_net_hdr
- __attribute__ ((unused)) vnet_hdr = {
- .flags = 0,
- .gso_type = VIRTIO_NET_HDR_GSO_NONE
- };
-
- if (unlikely(q == NULL || q->kni == NULL))
- return 0;
-
-#ifdef RTE_KNI_VHOST_VNET_HDR_EN
- if (likely(q->flags & IFF_VNET_HDR)) {
- vnet_hdr_len = q->vnet_hdr_sz;
-		if (len < (size_t)vnet_hdr_len)
-			return -EINVAL;
-		len -= vnet_hdr_len;
- }
-#endif
-
- if (unlikely(0 == (pkt_len = kni_vhost_net_rx(q->kni,
- m->msg_iov, vnet_hdr_len, len))))
- return 0;
-
-#ifdef RTE_KNI_VHOST_VNET_HDR_EN
- /* no need to copy hdr when no pkt received */
- if (unlikely(memcpy_toiovecend(m->msg_iov,
- (void *)&vnet_hdr, 0, vnet_hdr_len)))
- return -EFAULT;
-#endif
- KNI_DBG_RX("kni_rcvmsg expect_len %ld, flags 0x%08x, pkt_len %d\n",
- (unsigned long)len, q->flags, pkt_len);
-
- return (pkt_len + vnet_hdr_len);
-}
-
-/* dummy tap like ioctl */
-static int
-kni_sock_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- struct ifreq __user *ifr = argp;
- unsigned int __user *up = argp;
- struct kni_vhost_queue *q =
- container_of(sock->sk, struct kni_vhost_queue, sk);
- struct kni_dev *kni;
- unsigned int u;
- int __user *sp = argp;
- int s;
- int ret;
-
- KNI_DBG("tap ioctl cmd 0x%08x\n", cmd);
-
- switch (cmd) {
- case TUNSETIFF:
- KNI_DBG("TUNSETIFF\n");
- /* ignore the name, just look at flags */
- if (get_user(u, &ifr->ifr_flags))
- return -EFAULT;
-
- ret = 0;
- if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))
- ret = -EINVAL;
- else
- q->flags = u;
-
- return ret;
-
- case TUNGETIFF:
- KNI_DBG("TUNGETIFF\n");
- rcu_read_lock_bh();
- kni = rcu_dereference_bh(q->kni);
- if (kni)
- dev_hold(kni->net_dev);
- rcu_read_unlock_bh();
-
- if (!kni)
- return -ENOLINK;
-
- ret = 0;
- if (copy_to_user(&ifr->ifr_name, kni->net_dev->name, IFNAMSIZ) ||
- put_user(q->flags, &ifr->ifr_flags))
- ret = -EFAULT;
- dev_put(kni->net_dev);
- return ret;
-
- case TUNGETFEATURES:
- KNI_DBG("TUNGETFEATURES\n");
- u = IFF_TAP | IFF_NO_PI;
-#ifdef RTE_KNI_VHOST_VNET_HDR_EN
- u |= IFF_VNET_HDR;
-#endif
- if (put_user(u, up))
- return -EFAULT;
- return 0;
-
- case TUNSETSNDBUF:
- KNI_DBG("TUNSETSNDBUF\n");
- if (get_user(u, up))
- return -EFAULT;
-
- q->sk.sk_sndbuf = u;
- return 0;
-
- case TUNGETVNETHDRSZ:
- s = q->vnet_hdr_sz;
- if (put_user(s, sp))
- return -EFAULT;
- KNI_DBG("TUNGETVNETHDRSZ %d\n", s);
- return 0;
-
- case TUNSETVNETHDRSZ:
- if (get_user(s, sp))
- return -EFAULT;
- if (s < (int)sizeof(struct virtio_net_hdr))
- return -EINVAL;
-
- KNI_DBG("TUNSETVNETHDRSZ %d\n", s);
- q->vnet_hdr_sz = s;
- return 0;
-
- case TUNSETOFFLOAD:
- KNI_DBG("TUNSETOFFLOAD %lx\n", arg);
-#ifdef RTE_KNI_VHOST_VNET_HDR_EN
-		/* no offloads are supported yet */
- if (!(q->flags & IFF_VNET_HDR))
- return -EINVAL;
-
- return 0;
-#else
- return -EINVAL;
-#endif
-
- default:
- KNI_DBG("NOT SUPPORT\n");
- return -EINVAL;
- }
-}
-
-static int
-kni_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg)
-{
-	/* 32-bit apps on a 64-bit OS are not supported yet */
- KNI_PRINT("Not implemented.\n");
-
- return -EINVAL;
-}
-
-#define KNI_VHOST_WAIT_WQ_SAFE() \
-do { \
- while ((BE_FINISH | BE_STOP) == kni->vq_status) \
- msleep(1); \
-} while (0)
-
-
-static int
-kni_sock_release(struct socket *sock)
-{
- struct kni_vhost_queue *q =
- container_of(sock->sk, struct kni_vhost_queue, sk);
- struct kni_dev *kni;
-
- if (q == NULL)
- return 0;
-
- if (NULL != (kni = q->kni)) {
- kni->vq_status = BE_STOP;
- KNI_VHOST_WAIT_WQ_SAFE();
- kni->vhost_queue = NULL;
- q->kni = NULL;
- }
-
-	q->sockfd = -1;
-
- sk_set_socket(&q->sk, NULL);
- sock->sk = NULL;
-
- sock_put(&q->sk);
-
- KNI_DBG("dummy sock release done\n");
-
- return 0;
-}
-
-int
-kni_sock_getname(struct socket *sock,
- struct sockaddr *addr,
- int *sockaddr_len, int peer)
-{
- KNI_DBG("dummy sock getname\n");
- ((struct sockaddr_ll*)addr)->sll_family = AF_PACKET;
- return 0;
-}
-
-static const struct proto_ops kni_socket_ops = {
- .getname = kni_sock_getname,
- .sendmsg = kni_sock_sndmsg,
- .recvmsg = kni_sock_rcvmsg,
- .release = kni_sock_release,
- .poll = kni_sock_poll,
- .ioctl = kni_sock_ioctl,
- .compat_ioctl = kni_sock_compat_ioctl,
-};
-
-static void
-kni_sk_write_space(struct sock *sk)
-{
- wait_queue_head_t *wqueue;
-
- if (!sock_writeable(sk) ||
- !test_and_clear_bit(SOCK_ASYNC_NOSPACE,
- &sk->sk_socket->flags))
- return;
- wqueue = sk_sleep(sk);
- if (wqueue && waitqueue_active(wqueue))
- wake_up_interruptible_poll(
- wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
-}
-
-static void
-kni_sk_destruct(struct sock *sk)
-{
- struct kni_vhost_queue *q =
- container_of(sk, struct kni_vhost_queue, sk);
-
- if (!q)
- return;
-
- /* make sure there's no packet in buffer */
- while (skb_dequeue(&sk->sk_receive_queue) != NULL)
- ;
-
- mb();
-
- if (q->fifo != NULL) {
- kfree(q->fifo);
- q->fifo = NULL;
- }
-
- if (q->cache != NULL) {
- kfree(q->cache);
- q->cache = NULL;
- }
-}
-
-static int
-kni_vhost_backend_init(struct kni_dev *kni)
-{
- struct kni_vhost_queue *q;
- struct net *net = current->nsproxy->net_ns;
- int err, i, sockfd;
- struct rte_kni_fifo *fifo;
- struct sk_buff *elem;
-
- if (kni->vhost_queue != NULL)
- return -1;
-
- if (!(q = (struct kni_vhost_queue *)sk_alloc(
- net, AF_UNSPEC, GFP_KERNEL, &kni_raw_proto)))
- return -ENOMEM;
-
- err = sock_create_lite(AF_UNSPEC, SOCK_RAW, IPPROTO_RAW, &q->sock);
- if (err)
- goto free_sk;
-
- sockfd = kni_sock_map_fd(q->sock);
- if (sockfd < 0) {
- err = sockfd;
- goto free_sock;
- }
-
- /* cache init */
- q->cache = (struct sk_buff*)
- kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(struct sk_buff),
- GFP_KERNEL);
- if (!q->cache)
- goto free_fd;
-
- fifo = (struct rte_kni_fifo*)
- kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(void *)
- + sizeof(struct rte_kni_fifo), GFP_KERNEL);
- if (!fifo)
- goto free_cache;
-
- kni_fifo_init(fifo, RTE_KNI_VHOST_MAX_CACHE_SIZE);
-
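-	/*
-	 * Preload the private fifo with the preallocated sk_buff shells;
-	 * they are recycled for every received packet instead of being
-	 * freshly allocated each time.
-	 */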
- for (i = 0; i < RTE_KNI_VHOST_MAX_CACHE_SIZE; i++) {
- elem = &q->cache[i];
- kni_fifo_put(fifo, (void**)&elem, 1);
- }
- q->fifo = fifo;
-
- /* store sockfd in vhost_queue */
- q->sockfd = sockfd;
-
- /* init socket */
- q->sock->type = SOCK_RAW;
- q->sock->state = SS_CONNECTED;
- q->sock->ops = &kni_socket_ops;
- sock_init_data(q->sock, &q->sk);
-
- /* init sock data */
- q->sk.sk_write_space = kni_sk_write_space;
- q->sk.sk_destruct = kni_sk_destruct;
- q->flags = IFF_NO_PI | IFF_TAP;
- q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
-#ifdef RTE_KNI_VHOST_VNET_HDR_EN
- q->flags |= IFF_VNET_HDR;
-#endif
-
- /* bind kni_dev with vhost_queue */
- q->kni = kni;
- kni->vhost_queue = q;
-
- wmb();
-
- kni->vq_status = BE_START;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
- KNI_DBG("backend init sockfd=%d, sock->wq=0x%16llx,"
- "sk->sk_wq=0x%16llx",
- q->sockfd, (uint64_t)q->sock->wq,
- (uint64_t)q->sk.sk_wq);
-#else
- KNI_DBG("backend init sockfd=%d, sock->wait at 0x%16llx,"
- "sk->sk_sleep=0x%16llx",
- q->sockfd, (uint64_t)&q->sock->wait,
- (uint64_t)q->sk.sk_sleep);
-#endif
-
- return 0;
-
-free_cache:
- kfree(q->cache);
- q->cache = NULL;
-
-free_fd:
- put_unused_fd(sockfd);
-
-free_sock:
- q->kni = NULL;
- kni->vhost_queue = NULL;
- kni->vq_status |= BE_FINISH;
- sock_release(q->sock);
- q->sock->ops = NULL;
- q->sock = NULL;
-
-free_sk:
- sk_free((struct sock*)q);
-
- return err;
-}
-
-/* kni vhost sysfs: sock_en enables the vhost backend, sock_fd exposes its fd */
-static ssize_t
-show_sock_fd(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct net_device *net_dev = container_of(dev, struct net_device, dev);
- struct kni_dev *kni = netdev_priv(net_dev);
- int sockfd = -1;
- if (kni->vhost_queue != NULL)
- sockfd = kni->vhost_queue->sockfd;
- return snprintf(buf, 10, "%d\n", sockfd);
-}
-
-static ssize_t
-show_sock_en(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct net_device *net_dev = container_of(dev, struct net_device, dev);
- struct kni_dev *kni = netdev_priv(net_dev);
- return snprintf(buf, 10, "%u\n", (kni->vhost_queue == NULL ? 0 : 1));
-}
-
-static ssize_t
-set_sock_en(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct net_device *net_dev = container_of(dev, struct net_device, dev);
- struct kni_dev *kni = netdev_priv(net_dev);
- unsigned long en;
- int err = 0;
-
- if (0 != kstrtoul(buf, 0, &en))
- return -EINVAL;
-
- if (en)
- err = kni_vhost_backend_init(kni);
-
- return err ? err : count;
-}
-
-static DEVICE_ATTR(sock_fd, S_IRUGO | S_IRUSR, show_sock_fd, NULL);
-static DEVICE_ATTR(sock_en, S_IRUGO | S_IWUSR, show_sock_en, set_sock_en);
-static struct attribute *dev_attrs[] = {
- &dev_attr_sock_fd.attr,
- &dev_attr_sock_en.attr,
- NULL,
-};
-
-static const struct attribute_group dev_attr_grp = {
- .attrs = dev_attrs,
-};
-
-int
-kni_vhost_backend_release(struct kni_dev *kni)
-{
- struct kni_vhost_queue *q = kni->vhost_queue;
-
- if (q == NULL)
- return 0;
-
-	/* detach from kni */
- q->kni = NULL;
-
- KNI_DBG("release backend done\n");
-
- return 0;
-}
-
-int
-kni_vhost_init(struct kni_dev *kni)
-{
- struct net_device *dev = kni->net_dev;
-
- if (sysfs_create_group(&dev->dev.kobj, &dev_attr_grp))
- sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
-
- kni->vq_status = BE_STOP;
-
- KNI_DBG("kni_vhost_init done\n");
-
- return 0;
-}
-
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/Makefile b/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/Makefile
deleted file mode 100755
index 9d22fb97..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/Makefile
+++ /dev/null
@@ -1,56 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# module name and path
-#
-MODULE = rte_dom0_mm
-
-#
-# CFLAGS
-#
-MODULE_CFLAGS += -I$(SRCDIR) --param max-inline-insns-single=50
-MODULE_CFLAGS += -I$(RTE_OUTPUT)/include
-MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
-MODULE_CFLAGS += -Wall -Werror
-
-# this lib needs main eal
-DEPDIRS-y += lib/librte_eal/linuxapp/eal
-
-#
-# all source are stored in SRCS-y
-#
-
-SRCS-y += dom0_mm_misc.c
-
-include $(RTE_SDK)/mk/rte.module.mk
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c b/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c
deleted file mode 100755
index 543bf574..00000000
--- a/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c
+++ /dev/null
@@ -1,781 +0,0 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/miscdevice.h>
-#include <linux/fs.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/version.h>
-
-#include <xen/xen.h>
-#include <xen/page.h>
-#include <xen/xen-ops.h>
-#include <xen/interface/memory.h>
-
-#include <rte_config.h>
-#include <exec-env/rte_dom0_common.h>
-
-#include "compat.h"
-#include "dom0_mm_dev.h"
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Kernel Module for supporting DPDK running on Xen Dom0");
-
-static struct dom0_mm_dev dom0_dev;
-static struct kobject *dom0_kobj = NULL;
-
-static struct memblock_info *rsv_mm_info;
-
-/* Default configuration for reserved memory size (2048 MB). */
-static uint32_t rsv_memsize = 2048;
-
-static int dom0_open(struct inode *inode, struct file *file);
-static int dom0_release(struct inode *inode, struct file *file);
-static int dom0_ioctl(struct file *file, unsigned int ioctl_num,
- unsigned long ioctl_param);
-static int dom0_mmap(struct file *file, struct vm_area_struct *vma);
-static int dom0_memory_free(uint32_t size);
-static int dom0_memory_release(struct dom0_mm_data *mm_data);
-
-static const struct file_operations data_fops = {
- .owner = THIS_MODULE,
- .open = dom0_open,
- .release = dom0_release,
- .mmap = dom0_mmap,
- .unlocked_ioctl = (void *)dom0_ioctl,
-};
-
-static ssize_t
-show_memsize_rsvd(struct device *dev, struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, 10, "%u\n", dom0_dev.used_memsize);
-}
-
-static ssize_t
-show_memsize(struct device *dev, struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, 10, "%u\n", dom0_dev.config_memsize);
-}
-
-static ssize_t
-store_memsize(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int err = 0;
- unsigned long mem_size;
-
- if (0 != kstrtoul(buf, 0, &mem_size))
- return -EINVAL;
-
- mutex_lock(&dom0_dev.data_lock);
- if (0 == mem_size) {
- err = -EINVAL;
- goto fail;
- } else if (mem_size > (rsv_memsize - dom0_dev.used_memsize)) {
-		XEN_ERR("failed to configure memory size\n");
- err = -EINVAL;
- goto fail;
- } else
- dom0_dev.config_memsize = mem_size;
-
-fail:
- mutex_unlock(&dom0_dev.data_lock);
- return err ? err : count;
-}
-
-static DEVICE_ATTR(memsize, S_IRUGO | S_IWUSR, show_memsize, store_memsize);
-static DEVICE_ATTR(memsize_rsvd, S_IRUGO, show_memsize_rsvd, NULL);
-
-static struct attribute *dev_attrs[] = {
- &dev_attr_memsize.attr,
- &dev_attr_memsize_rsvd.attr,
- NULL,
-};
-
-/* the memory size unit is MB */
-static const struct attribute_group dev_attr_grp = {
- .name = "memsize-mB",
- .attrs = dev_attrs,
-};
-
-
-static void
-sort_viraddr(struct memblock_info *mb, int cnt)
-{
- int i,j;
- uint64_t tmp_pfn;
- uint64_t tmp_viraddr;
-
-	/* bubble-sort blocks by pfn, keeping pfn and virtual address paired */
- for(i = 0; i < cnt; i ++) {
- for(j = cnt - 1; j > i; j--) {
- if(mb[j].pfn < mb[j - 1].pfn) {
- tmp_pfn = mb[j - 1].pfn;
- mb[j - 1].pfn = mb[j].pfn;
- mb[j].pfn = tmp_pfn;
-
- tmp_viraddr = mb[j - 1].vir_addr;
- mb[j - 1].vir_addr = mb[j].vir_addr;
- mb[j].vir_addr = tmp_viraddr;
- }
- }
- }
-}
-
-static int
-dom0_find_memdata(const char * mem_name)
-{
- unsigned i;
- int idx = -1;
- for(i = 0; i< NUM_MEM_CTX; i++) {
- if(dom0_dev.mm_data[i] == NULL)
- continue;
- if (!strncmp(dom0_dev.mm_data[i]->name, mem_name,
- sizeof(char) * DOM0_NAME_MAX)) {
- idx = i;
- break;
- }
- }
-
- return idx;
-}
-
-static int
-dom0_find_mempos(void)
-{
- unsigned i;
- int idx = -1;
-
- for(i = 0; i< NUM_MEM_CTX; i++) {
- if(dom0_dev.mm_data[i] == NULL){
- idx = i;
- break;
- }
- }
-
- return idx;
-}
-
-static int
-dom0_memory_release(struct dom0_mm_data *mm_data)
-{
- int idx;
- uint32_t num_block, block_id;
-
- /* each memory block is 2M */
- num_block = mm_data->mem_size / SIZE_PER_BLOCK;
- if (num_block == 0)
- return -EINVAL;
-
- /* reset global memory data */
- idx = dom0_find_memdata(mm_data->name);
- if (idx >= 0) {
- dom0_dev.used_memsize -= mm_data->mem_size;
- dom0_dev.mm_data[idx] = NULL;
- dom0_dev.num_mem_ctx--;
- }
-
- /* reset these memory blocks status as free */
- for (idx = 0; idx < num_block; idx++) {
- block_id = mm_data->block_num[idx];
- rsv_mm_info[block_id].used = 0;
- }
-
- memset(mm_data, 0, sizeof(struct dom0_mm_data));
- vfree(mm_data);
- return 0;
-}
-
-static int
-dom0_memory_free(uint32_t rsv_size)
-{
- uint64_t vstart, vaddr;
- uint32_t i, num_block, size;
-
- if (!xen_pv_domain())
- return -1;
-
- /* each memory block is 2M */
- num_block = rsv_size / SIZE_PER_BLOCK;
- if (num_block == 0)
- return -EINVAL;
-
-	/* free all 4M blocks and destroy their contiguous regions */
- for (i = 0; i < dom0_dev.num_bigblock * 2; i += 2) {
- vstart = rsv_mm_info[i].vir_addr;
- if (vstart) {
- #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
- if (rsv_mm_info[i].exchange_flag)
- xen_destroy_contiguous_region(vstart,
- DOM0_CONTIG_NUM_ORDER);
- if (rsv_mm_info[i + 1].exchange_flag)
- xen_destroy_contiguous_region(vstart +
- DOM0_MEMBLOCK_SIZE,
- DOM0_CONTIG_NUM_ORDER);
- #else
- if (rsv_mm_info[i].exchange_flag)
- xen_destroy_contiguous_region(rsv_mm_info[i].pfn
- * PAGE_SIZE,
- DOM0_CONTIG_NUM_ORDER);
- if (rsv_mm_info[i + 1].exchange_flag)
- xen_destroy_contiguous_region(rsv_mm_info[i].pfn
- * PAGE_SIZE + DOM0_MEMBLOCK_SIZE,
- DOM0_CONTIG_NUM_ORDER);
- #endif
-
- size = DOM0_MEMBLOCK_SIZE * 2;
- vaddr = vstart;
- while (size > 0) {
- ClearPageReserved(virt_to_page(vaddr));
- vaddr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- free_pages(vstart, MAX_NUM_ORDER);
- }
- }
-
-	/* free the remaining 2M blocks and destroy their contiguous regions */
- for (; i < num_block; i++) {
- vstart = rsv_mm_info[i].vir_addr;
- if (vstart) {
- if (rsv_mm_info[i].exchange_flag)
- xen_destroy_contiguous_region(vstart,
- DOM0_CONTIG_NUM_ORDER);
-
- size = DOM0_MEMBLOCK_SIZE;
- vaddr = vstart;
- while (size > 0) {
- ClearPageReserved(virt_to_page(vaddr));
- vaddr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- free_pages(vstart, DOM0_CONTIG_NUM_ORDER);
- }
- }
-
- memset(rsv_mm_info, 0, sizeof(struct memblock_info) * num_block);
- vfree(rsv_mm_info);
- rsv_mm_info = NULL;
-
- return 0;
-}
-
-static void
-find_free_memory(uint32_t count, struct dom0_mm_data *mm_data)
-{
- uint32_t i = 0;
- uint32_t j = 0;
-
- while ((i < count) && (j < rsv_memsize / SIZE_PER_BLOCK)) {
- if (rsv_mm_info[j].used == 0) {
- mm_data->block_info[i].pfn = rsv_mm_info[j].pfn;
- mm_data->block_info[i].vir_addr =
- rsv_mm_info[j].vir_addr;
- mm_data->block_info[i].mfn = rsv_mm_info[j].mfn;
- mm_data->block_info[i].exchange_flag =
- rsv_mm_info[j].exchange_flag;
- mm_data->block_num[i] = j;
- rsv_mm_info[j].used = 1;
- i++;
- }
- j++;
- }
-}
-
-/**
- * Find all memory segments in which physical addresses are contiguous.
- */
-static void
-find_memseg(int count, struct dom0_mm_data * mm_data)
-{
- int i = 0;
- int j, k, idx = 0;
- uint64_t zone_len, pfn, num_block;
-
- while(i < count) {
- if (mm_data->block_info[i].exchange_flag == 0) {
- i++;
- continue;
- }
- k = 0;
- pfn = mm_data->block_info[i].pfn;
- mm_data->seg_info[idx].pfn = pfn;
- mm_data->seg_info[idx].mfn[k] = mm_data->block_info[i].mfn;
-
- for (j = i + 1; j < count; j++) {
-
- /* ignore exchange fail memory block */
- if (mm_data->block_info[j].exchange_flag == 0)
- break;
-
- if (mm_data->block_info[j].pfn !=
- (mm_data->block_info[j - 1].pfn +
- DOM0_MEMBLOCK_SIZE / PAGE_SIZE))
- break;
- ++k;
- mm_data->seg_info[idx].mfn[k] = mm_data->block_info[j].mfn;
- }
-
- num_block = j - i;
- zone_len = num_block * DOM0_MEMBLOCK_SIZE;
- mm_data->seg_info[idx].size = zone_len;
-
- XEN_PRINT("memseg id=%d, size=0x%llx\n", idx, zone_len);
-		i += num_block;
- idx++;
- if (idx == DOM0_NUM_MEMSEG)
- break;
- }
- mm_data->num_memseg = idx;
-}
-
-static int
-dom0_memory_reserve(uint32_t rsv_size)
-{
- uint64_t pfn, vstart, vaddr;
- uint32_t i, num_block, size, allocated_size = 0;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
- dma_addr_t dma_handle;
-#endif
-
-	/* each memory block is 2M */
- num_block = rsv_size / SIZE_PER_BLOCK;
-
- rsv_mm_info = vmalloc(sizeof(struct memblock_info) * num_block);
- if (!rsv_mm_info) {
- XEN_ERR("Unable to allocate device memory information\n");
- return -ENOMEM;
- }
- memset(rsv_mm_info, 0, sizeof(struct memblock_info) * num_block);
-
-	/* first try to allocate 4M (two 2M blocks) at a time */
- for (i = 0; i < num_block; i += 2) {
- vstart = (unsigned long)
- __get_free_pages(GFP_ATOMIC, MAX_NUM_ORDER);
- if (vstart == 0)
- break;
-
- dom0_dev.num_bigblock = i / 2 + 1;
- allocated_size = SIZE_PER_BLOCK * (i + 2);
-
- /* size of 4M */
- size = DOM0_MEMBLOCK_SIZE * 2;
-
- vaddr = vstart;
- while (size > 0) {
- SetPageReserved(virt_to_page(vaddr));
- vaddr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
-
- pfn = virt_to_pfn(vstart);
- rsv_mm_info[i].pfn = pfn;
- rsv_mm_info[i].vir_addr = vstart;
- rsv_mm_info[i + 1].pfn =
- pfn + DOM0_MEMBLOCK_SIZE / PAGE_SIZE;
- rsv_mm_info[i + 1].vir_addr =
- vstart + DOM0_MEMBLOCK_SIZE;
- }
-
-	/* if 4M allocation failed, fall back to allocating 2M at a time */
- for (; i < num_block; i++) {
- vstart = (unsigned long)
- __get_free_pages(GFP_ATOMIC, DOM0_CONTIG_NUM_ORDER);
- if (vstart == 0) {
- XEN_ERR("allocate memory fail.\n");
- dom0_memory_free(allocated_size);
- return -ENOMEM;
- }
-
- allocated_size += SIZE_PER_BLOCK;
-
- size = DOM0_MEMBLOCK_SIZE;
- vaddr = vstart;
- while (size > 0) {
- SetPageReserved(virt_to_page(vaddr));
- vaddr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- pfn = virt_to_pfn(vstart);
- rsv_mm_info[i].pfn = pfn;
- rsv_mm_info[i].vir_addr = vstart;
- }
-
- sort_viraddr(rsv_mm_info, num_block);
-
- for (i = 0; i< num_block; i++) {
-
- /*
-		 * This API exchanges MFNs to obtain a block of physically
-		 * contiguous addresses; the maximum block size is 2M.
- */
- #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
- if (xen_create_contiguous_region(rsv_mm_info[i].vir_addr,
- DOM0_CONTIG_NUM_ORDER, 0) == 0) {
- #else
- if (xen_create_contiguous_region(rsv_mm_info[i].pfn * PAGE_SIZE,
- DOM0_CONTIG_NUM_ORDER, 0, &dma_handle) == 0) {
- #endif
- rsv_mm_info[i].exchange_flag = 1;
- rsv_mm_info[i].mfn =
- pfn_to_mfn(rsv_mm_info[i].pfn);
- rsv_mm_info[i].used = 0;
- } else {
-			XEN_ERR("memory exchange failed\n");
- rsv_mm_info[i].exchange_flag = 0;
- dom0_dev.fail_times++;
- if (dom0_dev.fail_times > MAX_EXCHANGE_FAIL_TIME) {
- dom0_memory_free(rsv_size);
- return -EFAULT;
- }
- }
- }
-
- return 0;
-}
-
-static int
-dom0_prepare_memsegs(struct memory_info *meminfo, struct dom0_mm_data *mm_data)
-{
- uint32_t num_block;
- int idx;
-
-	/* copy the name and find a free memory-data slot */
- memcpy(mm_data->name, meminfo->name, DOM0_NAME_MAX);
- mm_data->name[DOM0_NAME_MAX - 1] = '\0';
- idx = dom0_find_mempos();
- if (idx < 0)
- return -1;
-
- num_block = meminfo->size / SIZE_PER_BLOCK;
-	/* find free memory and build the new memory segments */
- find_free_memory(num_block, mm_data);
- find_memseg(num_block, mm_data);
-
- /* update private memory data */
- mm_data->refcnt++;
- mm_data->mem_size = meminfo->size;
-
- /* update global memory data */
- dom0_dev.mm_data[idx] = mm_data;
- dom0_dev.num_mem_ctx++;
- dom0_dev.used_memsize += mm_data->mem_size;
-
- return 0;
-}
-
-static int
-dom0_check_memory (struct memory_info *meminfo)
-{
- int idx;
- uint64_t mem_size;
-
- /* round memory size to the next even number. */
- if (meminfo->size % 2)
- ++meminfo->size;
-
- mem_size = meminfo->size;
- if (dom0_dev.num_mem_ctx > NUM_MEM_CTX) {
- XEN_ERR("Memory data space is full in Dom0 driver\n");
- return -1;
- }
- idx = dom0_find_memdata(meminfo->name);
- if (idx >= 0) {
-		XEN_ERR("Memory data name %s already exists in the Dom0 driver.\n",
- meminfo->name);
- return -1;
- }
- if ((dom0_dev.used_memsize + mem_size) > rsv_memsize) {
- XEN_ERR("Total size can't be larger than reserved size.\n");
- return -1;
- }
-
- return 0;
-}
-
-static int __init
-dom0_init(void)
-{
- if (!xen_domain())
- return -ENODEV;
-
- if (rsv_memsize > DOM0_CONFIG_MEMSIZE) {
- XEN_ERR("The reserved memory size cannot be greater than %d\n",
- DOM0_CONFIG_MEMSIZE);
- return -EINVAL;
- }
-
- /* Setup the misc device */
- dom0_dev.miscdev.minor = MISC_DYNAMIC_MINOR;
- dom0_dev.miscdev.name = "dom0_mm";
- dom0_dev.miscdev.fops = &data_fops;
-
- /* register misc char device */
- if (misc_register(&dom0_dev.miscdev) != 0) {
- XEN_ERR("Misc device registration failed\n");
- return -EPERM;
- }
-
- mutex_init(&dom0_dev.data_lock);
- dom0_kobj = kobject_create_and_add("dom0-mm", mm_kobj);
-
- if (!dom0_kobj) {
- XEN_ERR("dom0-mm object creation failed\n");
- misc_deregister(&dom0_dev.miscdev);
- return -ENOMEM;
- }
-
- if (sysfs_create_group(dom0_kobj, &dev_attr_grp)) {
- kobject_put(dom0_kobj);
- misc_deregister(&dom0_dev.miscdev);
- return -EPERM;
- }
-
- if (dom0_memory_reserve(rsv_memsize) < 0) {
- sysfs_remove_group(dom0_kobj, &dev_attr_grp);
- kobject_put(dom0_kobj);
- misc_deregister(&dom0_dev.miscdev);
- return -ENOMEM;
- }
-
- XEN_PRINT("####### DPDK Xen Dom0 module loaded #######\n");
-
- return 0;
-}
-
-static void __exit
-dom0_exit(void)
-{
- if (rsv_mm_info != NULL)
- dom0_memory_free(rsv_memsize);
-
- sysfs_remove_group(dom0_kobj, &dev_attr_grp);
- kobject_put(dom0_kobj);
- misc_deregister(&dom0_dev.miscdev);
-
- XEN_PRINT("####### DPDK Xen Dom0 module unloaded #######\n");
-}
-
-static int
-dom0_open(struct inode *inode, struct file *file)
-{
- file->private_data = NULL;
-
- XEN_PRINT(KERN_INFO "/dev/dom0_mm opened\n");
- return 0;
-}
-
-static int
-dom0_release(struct inode *inode, struct file *file)
-{
- int ret = 0;
- struct dom0_mm_data *mm_data = file->private_data;
-
- if (mm_data == NULL)
- return ret;
-
- mutex_lock(&dom0_dev.data_lock);
- if (--mm_data->refcnt == 0)
- ret = dom0_memory_release(mm_data);
- mutex_unlock(&dom0_dev.data_lock);
-
- file->private_data = NULL;
- XEN_PRINT(KERN_INFO "/dev/dom0_mm closed\n");
- return ret;
-}
-
-static int
-dom0_mmap(struct file *file, struct vm_area_struct *vm)
-{
- int status = 0;
- uint32_t idx = vm->vm_pgoff;
- uint64_t pfn, size = vm->vm_end - vm->vm_start;
- struct dom0_mm_data *mm_data = file->private_data;
-
- if(mm_data == NULL)
- return -EINVAL;
-
- mutex_lock(&dom0_dev.data_lock);
- if (idx >= mm_data->num_memseg) {
- mutex_unlock(&dom0_dev.data_lock);
- return -EINVAL;
- }
-
- if (size > mm_data->seg_info[idx].size){
- mutex_unlock(&dom0_dev.data_lock);
- return -EINVAL;
- }
-
- XEN_PRINT("mmap memseg idx =%d,size = 0x%llx\n", idx, size);
-
- pfn = mm_data->seg_info[idx].pfn;
- mutex_unlock(&dom0_dev.data_lock);
-
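-	/* map the whole physically contiguous segment into the user VMA */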
- status = remap_pfn_range(vm, vm->vm_start, pfn, size, PAGE_SHARED);
-
- return status;
-}
-static int
-dom0_ioctl(struct file *file,
- unsigned int ioctl_num,
- unsigned long ioctl_param)
-{
- int idx, ret;
- char name[DOM0_NAME_MAX] = {0};
- struct memory_info meminfo;
- struct dom0_mm_data *mm_data = file->private_data;
-
-	XEN_PRINT("IOCTL num=0x%0x param=0x%0lx\n", ioctl_num, ioctl_param);
-
- /**
- * Switch according to the ioctl called
- */
-	switch (_IOC_NR(ioctl_num)) {
- case _IOC_NR(RTE_DOM0_IOCTL_PREPARE_MEMSEG):
- ret = copy_from_user(&meminfo, (void *)ioctl_param,
- sizeof(struct memory_info));
- if (ret)
- return -EFAULT;
-
- if (mm_data != NULL) {
- XEN_ERR("Cannot create memory segment for the same"
- " file descriptor\n");
- return -EINVAL;
- }
-
- /* Allocate private data */
- mm_data = vmalloc(sizeof(struct dom0_mm_data));
- if (!mm_data) {
- XEN_ERR("Unable to allocate device private data\n");
- return -ENOMEM;
- }
- memset(mm_data, 0, sizeof(struct dom0_mm_data));
-
- mutex_lock(&dom0_dev.data_lock);
-		/* check if we can allocate memory */
- if (dom0_check_memory(&meminfo) < 0) {
- mutex_unlock(&dom0_dev.data_lock);
- vfree(mm_data);
- return -EINVAL;
- }
-
-		/* allocate memory and create the memory segments */
- if (dom0_prepare_memsegs(&meminfo, mm_data) < 0) {
-			XEN_ERR("failed to create memory segment.\n");
- mutex_unlock(&dom0_dev.data_lock);
- return -EIO;
- }
-
- file->private_data = mm_data;
- mutex_unlock(&dom0_dev.data_lock);
- break;
-
-	/* support multiple processes in terms of memory mapping */
- case _IOC_NR(RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG):
- ret = copy_from_user(name, (void *)ioctl_param,
- sizeof(char) * DOM0_NAME_MAX);
- if (ret)
- return -EFAULT;
-
- mutex_lock(&dom0_dev.data_lock);
- idx = dom0_find_memdata(name);
- if (idx < 0) {
- mutex_unlock(&dom0_dev.data_lock);
- return -EINVAL;
- }
-
- mm_data = dom0_dev.mm_data[idx];
- mm_data->refcnt++;
- file->private_data = mm_data;
- mutex_unlock(&dom0_dev.data_lock);
- break;
-
- case _IOC_NR(RTE_DOM0_IOCTL_GET_NUM_MEMSEG):
- ret = copy_to_user((void *)ioctl_param, &mm_data->num_memseg,
- sizeof(int));
- if (ret)
- return -EFAULT;
- break;
-
- case _IOC_NR(RTE_DOM0_IOCTL_GET_MEMSEG_INFO):
- ret = copy_to_user((void *)ioctl_param,
- &mm_data->seg_info[0],
- sizeof(struct memseg_info) *
- mm_data->num_memseg);
- if (ret)
- return -EFAULT;
- break;
- default:
-		XEN_PRINT("IOCTL default\n");
- break;
- }
-
- return 0;
-}
-
-module_init(dom0_init);
-module_exit(dom0_exit);
-
-module_param(rsv_memsize, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rsv_memsize, "Xen-dom0 reserved memory size (MB).");
diff --git a/src/dpdk_lib18/librte_ether/Makefile b/src/dpdk_lib18/librte_ether/Makefile
deleted file mode 100755
index a461c312..00000000
--- a/src/dpdk_lib18/librte_ether/Makefile
+++ /dev/null
@@ -1,54 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = libethdev.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-SRCS-y += rte_ethdev.c
-
-#
-# Export include files
-#
-SYMLINK-y-include += rte_ether.h
-SYMLINK-y-include += rte_ethdev.h
-SYMLINK-y-include += rte_eth_ctrl.h
-
-# this lib depends upon:
-DEPDIRS-y += lib/librte_eal lib/librte_mempool lib/librte_ring lib/librte_mbuf
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_ether/rte_eth_ctrl.h b/src/dpdk_lib18/librte_ether/rte_eth_ctrl.h
deleted file mode 100755
index d9cdb379..00000000
--- a/src/dpdk_lib18/librte_ether/rte_eth_ctrl.h
+++ /dev/null
@@ -1,459 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_ETH_CTRL_H_
-#define _RTE_ETH_CTRL_H_
-
-/**
- * @file
- *
- * Ethernet device features and related data structures used
- * by control APIs should be defined in this file.
- *
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * Feature filter types
- */
-enum rte_filter_type {
- RTE_ETH_FILTER_NONE = 0,
- RTE_ETH_FILTER_MACVLAN,
- RTE_ETH_FILTER_ETHERTYPE,
- RTE_ETH_FILTER_TUNNEL,
- RTE_ETH_FILTER_FDIR,
- RTE_ETH_FILTER_MAX
-};
-
-/**
- * Generic operations on filters
- */
-enum rte_filter_op {
- RTE_ETH_FILTER_NOP = 0,
-	/**< used to check whether the filter type is supported */
- RTE_ETH_FILTER_ADD, /**< add filter entry */
- RTE_ETH_FILTER_UPDATE, /**< update filter entry */
- RTE_ETH_FILTER_DELETE, /**< delete filter entry */
- RTE_ETH_FILTER_FLUSH, /**< flush all entries */
- RTE_ETH_FILTER_GET, /**< get filter entry */
- RTE_ETH_FILTER_SET, /**< configurations */
- RTE_ETH_FILTER_INFO, /**< retrieve information */
- RTE_ETH_FILTER_STATS, /**< retrieve statistics */
- RTE_ETH_FILTER_OP_MAX
-};
-
-/**
- * MAC filter type
- */
-enum rte_mac_filter_type {
- RTE_MAC_PERFECT_MATCH = 1, /**< exact match of MAC addr. */
- RTE_MACVLAN_PERFECT_MATCH,
- /**< exact match of MAC addr and VLAN ID. */
- RTE_MAC_HASH_MATCH, /**< hash match of MAC addr. */
- RTE_MACVLAN_HASH_MATCH,
- /**< hash match of MAC addr and exact match of VLAN ID. */
-};
-
-/**
- * MAC filter info
- */
-struct rte_eth_mac_filter {
- uint8_t is_vf; /**< 1 for VF, 0 for port dev */
-	uint16_t dst_id;           /**< VF ID, available when is_vf is 1 */
- enum rte_mac_filter_type filter_type; /**< MAC filter type */
- struct ether_addr mac_addr;
-};
-
-/**
- * Define all structures for Ethertype Filter type.
- */
-
-#define RTE_ETHTYPE_FLAGS_MAC 0x0001 /**< If set, compare mac */
-#define RTE_ETHTYPE_FLAGS_DROP 0x0002 /**< If set, drop packet when match */
-
-/**
- * A structure used to define the ethertype filter entry
- * to support RTE_ETH_FILTER_ETHERTYPE with RTE_ETH_FILTER_ADD,
- * RTE_ETH_FILTER_DELETE and RTE_ETH_FILTER_GET operations.
- */
-struct rte_eth_ethertype_filter {
-	struct ether_addr mac_addr;   /**< MAC address to match. */
-	uint16_t ether_type;          /**< Ether type to match */
-	uint16_t flags;               /**< Flags from RTE_ETHTYPE_FLAGS_* */
-	uint16_t queue;               /**< Queue assigned to when matched */
-};
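
A usage sketch: installing such an ethertype filter through the generic
filter_ctrl entry point. It assumes the DPDK 1.8 rte_eth_dev_filter_ctrl()
API declared in rte_ethdev.h; the helper name and queue choice are
illustrative, not part of the library.

#include <string.h>
#include <rte_ethdev.h>

/* Steer all ARP frames received on port_id to the given RX queue. */
static int
add_arp_to_queue(uint8_t port_id, uint16_t queue)
{
	struct rte_eth_ethertype_filter filter;

	memset(&filter, 0, sizeof(filter));
	filter.ether_type = 0x0806;  /* ARP ethertype */
	filter.flags = 0;            /* no MAC compare, no drop on match */
	filter.queue = queue;

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
				       RTE_ETH_FILTER_ADD, &filter);
}
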
-
-/**
- * Tunneled type.
- */
-enum rte_eth_tunnel_type {
- RTE_TUNNEL_TYPE_NONE = 0,
- RTE_TUNNEL_TYPE_VXLAN,
- RTE_TUNNEL_TYPE_GENEVE,
- RTE_TUNNEL_TYPE_TEREDO,
- RTE_TUNNEL_TYPE_NVGRE,
- RTE_TUNNEL_TYPE_MAX,
-};
-
-/**
- * filter type of tunneling packet
- */
-#define ETH_TUNNEL_FILTER_OMAC 0x01 /**< filter by outer MAC addr */
-#define ETH_TUNNEL_FILTER_OIP 0x02 /**< filter by outer IP Addr */
-#define ETH_TUNNEL_FILTER_TENID 0x04 /**< filter by tenant ID */
-#define ETH_TUNNEL_FILTER_IMAC 0x08 /**< filter by inner MAC addr */
-#define ETH_TUNNEL_FILTER_IVLAN 0x10 /**< filter by inner VLAN ID */
-#define ETH_TUNNEL_FILTER_IIP 0x20 /**< filter by inner IP addr */
-
-#define RTE_TUNNEL_FILTER_IMAC_IVLAN (ETH_TUNNEL_FILTER_IMAC | \
- ETH_TUNNEL_FILTER_IVLAN)
-#define RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID (ETH_TUNNEL_FILTER_IMAC | \
- ETH_TUNNEL_FILTER_IVLAN | \
- ETH_TUNNEL_FILTER_TENID)
-#define RTE_TUNNEL_FILTER_IMAC_TENID (ETH_TUNNEL_FILTER_IMAC | \
- ETH_TUNNEL_FILTER_TENID)
-#define RTE_TUNNEL_FILTER_OMAC_TENID_IMAC (ETH_TUNNEL_FILTER_OMAC | \
- ETH_TUNNEL_FILTER_TENID | \
- ETH_TUNNEL_FILTER_IMAC)
-
-/**
- * Select IPv4 or IPv6 for tunnel filters.
- */
-enum rte_tunnel_iptype {
- RTE_TUNNEL_IPTYPE_IPV4 = 0, /**< IPv4. */
- RTE_TUNNEL_IPTYPE_IPV6, /**< IPv6. */
-};
-
-/**
- * Tunneling Packet filter configuration.
- */
-struct rte_eth_tunnel_filter_conf {
- struct ether_addr *outer_mac; /**< Outer MAC address filter. */
- struct ether_addr *inner_mac; /**< Inner MAC address filter. */
- uint16_t inner_vlan; /**< Inner VLAN filter. */
- enum rte_tunnel_iptype ip_type; /**< IP address type. */
- union {
- uint32_t ipv4_addr; /**< IPv4 source address to match. */
- uint32_t ipv6_addr[4]; /**< IPv6 source address to match. */
- } ip_addr; /**< IPv4/IPv6 source address to match (union of above). */
-
- uint16_t filter_type; /**< Filter type. */
- enum rte_eth_tunnel_type tunnel_type; /**< Tunnel Type. */
-	uint32_t tenant_id;     /**< Tenant number. */
-	uint16_t queue_id;      /**< Queue number. */
-};
-
-#define RTE_ETH_FDIR_MAX_FLEXLEN 16 /**< Max length of flexbytes. */
-
-/**
- * Flow type
- */
-enum rte_eth_flow_type {
- RTE_ETH_FLOW_TYPE_NONE = 0,
- RTE_ETH_FLOW_TYPE_UDPV4,
- RTE_ETH_FLOW_TYPE_TCPV4,
- RTE_ETH_FLOW_TYPE_SCTPV4,
- RTE_ETH_FLOW_TYPE_IPV4_OTHER,
- RTE_ETH_FLOW_TYPE_FRAG_IPV4,
- RTE_ETH_FLOW_TYPE_UDPV6,
- RTE_ETH_FLOW_TYPE_TCPV6,
- RTE_ETH_FLOW_TYPE_SCTPV6,
- RTE_ETH_FLOW_TYPE_IPV6_OTHER,
- RTE_ETH_FLOW_TYPE_FRAG_IPV6,
- RTE_ETH_FLOW_TYPE_MAX = 64,
-};
-
-/**
- * A structure used to define the input for IPV4 flow
- */
-struct rte_eth_ipv4_flow {
- uint32_t src_ip; /**< IPv4 source address to match. */
- uint32_t dst_ip; /**< IPv4 destination address to match. */
-	uint8_t  l4_proto;   /**< IPv4 protocol to match. */
-};
-
-/**
- * A structure used to define the input for IPV4 UDP flow
- */
-struct rte_eth_udpv4_flow {
- struct rte_eth_ipv4_flow ip; /**< IPv4 fields to match. */
- uint16_t src_port; /**< UDP source port to match. */
- uint16_t dst_port; /**< UDP destination port to match. */
-};
-
-/**
- * A structure used to define the input for IPV4 TCP flow
- */
-struct rte_eth_tcpv4_flow {
- struct rte_eth_ipv4_flow ip; /**< IPv4 fields to match. */
- uint16_t src_port; /**< TCP source port to match. */
- uint16_t dst_port; /**< TCP destination port to match. */
-};
-
-/**
- * A structure used to define the input for IPV4 SCTP flow
- */
-struct rte_eth_sctpv4_flow {
- struct rte_eth_ipv4_flow ip; /**< IPv4 fields to match. */
- uint32_t verify_tag; /**< Verify tag to match */
-};
-
-/**
- * A structure used to define the input for IPV6 flow
- */
-struct rte_eth_ipv6_flow {
- uint32_t src_ip[4]; /**< IPv6 source address to match. */
- uint32_t dst_ip[4]; /**< IPv6 destination address to match. */
-};
-
-/**
- * A structure used to define the input for IPV6 UDP flow
- */
-struct rte_eth_udpv6_flow {
- struct rte_eth_ipv6_flow ip; /**< IPv6 fields to match. */
- uint16_t src_port; /**< UDP source port to match. */
- uint16_t dst_port; /**< UDP destination port to match. */
-};
-
-/**
- * A structure used to define the input for IPV6 TCP flow
- */
-struct rte_eth_tcpv6_flow {
- struct rte_eth_ipv6_flow ip; /**< IPv6 fields to match. */
- uint16_t src_port; /**< TCP source port to match. */
- uint16_t dst_port; /**< TCP destination port to match. */
-};
-
-/**
- * A structure used to define the input for IPV6 SCTP flow
- */
-struct rte_eth_sctpv6_flow {
- struct rte_eth_ipv6_flow ip; /**< IPv6 fields to match. */
- uint32_t verify_tag; /**< Verify tag to match */
-};
-
-/**
- * A union that contains the inputs for all flow types
- */
-union rte_eth_fdir_flow {
- struct rte_eth_udpv4_flow udp4_flow;
- struct rte_eth_tcpv4_flow tcp4_flow;
- struct rte_eth_sctpv4_flow sctp4_flow;
- struct rte_eth_ipv4_flow ip4_flow;
- struct rte_eth_udpv6_flow udp6_flow;
- struct rte_eth_tcpv6_flow tcp6_flow;
- struct rte_eth_sctpv6_flow sctp6_flow;
- struct rte_eth_ipv6_flow ipv6_flow;
-};
-
-/**
- * A structure used to contain the extended input of a flow
- */
-struct rte_eth_fdir_flow_ext {
- uint16_t vlan_tci;
- uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
- /**< It is filled by the flexible payload to match. */
-};
-
-/**
- * A structure used to define the input for a flow director filter entry
- */
-struct rte_eth_fdir_input {
- enum rte_eth_flow_type flow_type; /**< Type of flow */
-	union rte_eth_fdir_flow flow;
-	/**< Flow fields to match, dependent on flow_type */
-	uint8_t ttl; /**< TTL field to match */
- struct rte_eth_fdir_flow_ext flow_ext;
- /**< Additional fields to match */
-};
-
-/**
- * Behavior to be taken if an FDIR filter matches
- */
-enum rte_eth_fdir_behavior {
- RTE_ETH_FDIR_ACCEPT = 0,
- RTE_ETH_FDIR_REJECT,
-};
-
-/**
- * Flow director report status
- * It defines what will be reported if FDIR entry is matched.
- */
-enum rte_eth_fdir_status {
- RTE_ETH_FDIR_NO_REPORT_STATUS = 0, /**< Report nothing. */
- RTE_ETH_FDIR_REPORT_ID, /**< Only report FD ID. */
- RTE_ETH_FDIR_REPORT_ID_FLEX_4, /**< Report FD ID and 4 flex bytes. */
- RTE_ETH_FDIR_REPORT_FLEX_8, /**< Report 8 flex bytes. */
-};
-
-/**
- * A structure used to define the action taken when a packet matches an FDIR filter.
- */
-struct rte_eth_fdir_action {
-	uint16_t rx_queue;        /**< Queue to assign the packet to on FDIR match. */
-	enum rte_eth_fdir_behavior behavior;     /**< Behavior to be taken */
- enum rte_eth_fdir_status report_status; /**< Status report option */
- uint8_t flex_off;
- /**< If report_status is RTE_ETH_FDIR_REPORT_ID_FLEX_4 or
- RTE_ETH_FDIR_REPORT_FLEX_8, flex_off specifies where the reported
-	     flex bytes start in the flexible payload. */
-};
-
-/**
- * A structure used to define the flow director filter entry via the filter_ctrl API.
- * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_ADD and
- * RTE_ETH_FILTER_DELETE operations.
- */
-struct rte_eth_fdir_filter {
- uint32_t soft_id;
-	/**< ID, a unique value is required when dealing with an FDIR entry */
-	struct rte_eth_fdir_input input;    /**< Input set */
-	struct rte_eth_fdir_action action;  /**< Action taken on match */
-};
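
The sketch below builds a perfect-match filter for a single TCPv4 flow. It
assumes the same rte_eth_dev_filter_ctrl() entry point, that the underlying
PMD implements RTE_ETH_FILTER_FDIR, and that addresses and ports are supplied
in network byte order; the soft_id and queue values are illustrative.

#include <string.h>
#include <rte_ethdev.h>

/* Direct one TCPv4 flow to rx_queue; addresses/ports in network byte order. */
static int
add_tcp4_fdir_filter(uint8_t port_id, uint32_t src_ip_be, uint32_t dst_ip_be,
		     uint16_t dst_port_be, uint16_t rx_queue)
{
	struct rte_eth_fdir_filter f;

	memset(&f, 0, sizeof(f));
	f.soft_id = 1;  /* any value unique among the application's entries */
	f.input.flow_type = RTE_ETH_FLOW_TYPE_TCPV4;
	f.input.flow.tcp4_flow.ip.src_ip = src_ip_be;
	f.input.flow.tcp4_flow.ip.dst_ip = dst_ip_be;
	f.input.flow.tcp4_flow.dst_port = dst_port_be;
	f.action.rx_queue = rx_queue;
	f.action.behavior = RTE_ETH_FDIR_ACCEPT;
	f.action.report_status = RTE_ETH_FDIR_REPORT_ID;

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_ADD, &f);
}
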
-
-/**
- * Payload type
- */
-enum rte_eth_payload_type {
- RTE_ETH_PAYLOAD_UNKNOWN = 0,
- RTE_ETH_L2_PAYLOAD,
- RTE_ETH_L3_PAYLOAD,
- RTE_ETH_L4_PAYLOAD,
- RTE_ETH_PAYLOAD_MAX = 8,
-};
-
-/**
 * A structure used to select bytes extracted from the protocol layers into
 * the flexible payload for filtering
- */
-struct rte_eth_flex_payload_cfg {
- enum rte_eth_payload_type type; /**< Payload type */
- uint16_t src_offset[RTE_ETH_FDIR_MAX_FLEXLEN];
-	/**< Offset in bytes from the beginning of the packet's payload.
-	     src_offset[i] indicates flexbyte i's offset in the original
-	     packet payload. This value should be less than
-	     flex_payload_limit in struct rte_eth_fdir_info. */
-};
-
-/**
- * A structure used to define FDIR masks for flexible payload
- * for each flow type
- */
-struct rte_eth_fdir_flex_mask {
- enum rte_eth_flow_type flow_type; /**< Flow type */
- uint8_t mask[RTE_ETH_FDIR_MAX_FLEXLEN];
- /**< Mask for the whole flexible payload */
-};
-
-/**
 * A structure used to define all flexible payload related settings,
 * including the flex payload and flex masks
- */
-struct rte_eth_fdir_flex_conf {
-	uint16_t nb_payloads;  /**< The number of following payload configurations */
-	uint16_t nb_flexmasks; /**< The number of following masks */
- struct rte_eth_flex_payload_cfg flex_set[RTE_ETH_PAYLOAD_MAX];
- /**< Flex payload configuration for each payload type */
- struct rte_eth_fdir_flex_mask flex_mask[RTE_ETH_FLOW_TYPE_MAX];
- /**< Flex mask configuration for each flow type */
-};
-
-/**
- * Flow Director setting modes: none, signature or perfect.
- */
-enum rte_fdir_mode {
- RTE_FDIR_MODE_NONE = 0, /**< Disable FDIR support. */
- RTE_FDIR_MODE_SIGNATURE, /**< Enable FDIR signature filter mode. */
- RTE_FDIR_MODE_PERFECT, /**< Enable FDIR perfect filter mode. */
-};
-
-/**
- * A structure used to get the information of flow director filter.
- * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_INFO operation.
- * It includes the mode, flexible payload configuration information,
 * capabilities and supported flow types, and flexible payload characteristics.
 * It can be retrieved to help tune configurations for a specific device.
- */
-struct rte_eth_fdir_info {
- enum rte_fdir_mode mode; /**< Flow director mode */
- struct rte_eth_fdir_flex_conf flex_conf;
- /**< Flex payload configuration information */
- uint32_t guarant_spc; /**< Guaranteed spaces.*/
- uint32_t best_spc; /**< Best effort spaces.*/
- uint32_t flow_types_mask[RTE_ETH_FLOW_TYPE_MAX / sizeof(uint32_t)];
- /**< Bit mask for every supported flow type. */
- uint32_t max_flexpayload; /**< Total flex payload in bytes. */
- uint32_t flex_payload_unit;
-	/**< Flexible payload unit in bytes. Size and alignments of all flex
-	     payload segments should be multiples of this value. */
- uint32_t max_flex_payload_segment_num;
-	/**< Max number of continuous flexible payload segments.
-	     Each segment should be a multiple of flex_payload_unit. */
- uint16_t flex_payload_limit;
- /**< Maximum src_offset in bytes allowed. It indicates that
- src_offset[i] in struct rte_eth_flex_payload_cfg should be
- less than this value. */
- uint32_t flex_bitmask_unit;
-	/**< Flex bitmask unit in bytes. The size of flex bitmasks should
-	     be a multiple of this value. */
- uint32_t max_flex_bitmask_num;
- /**< Max supported size of flex bitmasks in flex_bitmask_unit */
-};
-
-/**
- * A structure used to define the statistics of flow director.
- * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_STATS operation.
- */
-struct rte_eth_fdir_stats {
- uint32_t collision; /**< Number of filters with collision. */
- uint32_t free; /**< Number of free filters. */
- uint32_t maxhash;
- /**< The lookup hash value of the added filter that updated the value
- of the MAXLEN field */
- uint32_t maxlen; /**< Longest linked list of filters. */
- uint64_t add; /**< Number of added filters. */
- uint64_t remove; /**< Number of removed filters. */
-	uint64_t   add;         /* placeholder removed */
- uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
- uint32_t best_cnt; /**< Number of filters in best effort spaces. */
-};
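
For completeness, a minimal sketch of querying these counters through the same
filter_ctrl entry point (again assuming the DPDK 1.8 rte_eth_dev_filter_ctrl()
API; the helper name is illustrative):

#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

/* Query and print the flow director counters of one port. */
static void
print_fdir_stats(uint8_t port_id)
{
	struct rte_eth_fdir_stats st;

	memset(&st, 0, sizeof(st));
	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				    RTE_ETH_FILTER_STATS, &st) == 0)
		printf("fdir: add=%" PRIu64 " remove=%" PRIu64
		       " free=%" PRIu32 " collision=%" PRIu32 "\n",
		       st.add, st.remove, st.free, st.collision);
}
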
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_ETH_CTRL_H_ */
diff --git a/src/dpdk_lib18/librte_ether/rte_ethdev.c b/src/dpdk_lib18/librte_ether/rte_ethdev.c
deleted file mode 100755
index 38cf247e..00000000
--- a/src/dpdk_lib18/librte_ether/rte_ethdev.c
+++ /dev/null
@@ -1,3271 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <sys/types.h>
-#include <sys/queue.h>
-#include <ctype.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdarg.h>
-#include <errno.h>
-#include <stdint.h>
-#include <inttypes.h>
-#include <netinet/in.h>
-
-#include <rte_byteorder.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_interrupts.h>
-#include <rte_pci.h>
-#include <rte_memory.h>
-#include <rte_memcpy.h>
-#include <rte_memzone.h>
-#include <rte_launch.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_atomic.h>
-#include <rte_branch_prediction.h>
-#include <rte_common.h>
-#include <rte_ring.h>
-#include <rte_mempool.h>
-#include <rte_malloc.h>
-#include <rte_mbuf.h>
-#include <rte_errno.h>
-#include <rte_spinlock.h>
-#include <rte_string_fns.h>
-
-#include "rte_ether.h"
-#include "rte_ethdev.h"
-
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-#define PMD_DEBUG_TRACE(fmt, args...) do { \
- RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
- } while (0)
-#else
-#define PMD_DEBUG_TRACE(fmt, args...)
-#endif
-
-/* Macros for restricting functions to the primary instance only */
-#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
- PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
- return (retval); \
- } \
-} while(0)
-#define PROC_PRIMARY_OR_RET() do { \
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
- PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
- return; \
- } \
-} while(0)
-
-/* Macros to check for invalid function pointers in dev_ops structure */
-#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
- if ((func) == NULL) { \
- PMD_DEBUG_TRACE("Function not supported\n"); \
- return (retval); \
- } \
-} while(0)
-#define FUNC_PTR_OR_RET(func) do { \
- if ((func) == NULL) { \
- PMD_DEBUG_TRACE("Function not supported\n"); \
- return; \
- } \
-} while(0)
-
-static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
-struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
-static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
-static uint8_t nb_ports = 0;
-
-/* spinlock for eth device callbacks */
-static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
-
-/* store statistics names and their offsets in the stats structure */
-struct rte_eth_xstats_name_off {
- char name[RTE_ETH_XSTATS_NAME_SIZE];
- unsigned offset;
-};
-
-static struct rte_eth_xstats_name_off rte_stats_strings[] = {
- {"rx_packets", offsetof(struct rte_eth_stats, ipackets)},
- {"tx_packets", offsetof(struct rte_eth_stats, opackets)},
- {"rx_bytes", offsetof(struct rte_eth_stats, ibytes)},
- {"tx_bytes", offsetof(struct rte_eth_stats, obytes)},
- {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
- {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
- {"rx_crc_errors", offsetof(struct rte_eth_stats, ibadcrc)},
- {"rx_bad_length_errors", offsetof(struct rte_eth_stats, ibadlen)},
- {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
- {"alloc_rx_buff_failed", offsetof(struct rte_eth_stats, rx_nombuf)},
- {"fdir_match", offsetof(struct rte_eth_stats, fdirmatch)},
- {"fdir_miss", offsetof(struct rte_eth_stats, fdirmiss)},
- {"tx_flow_control_xon", offsetof(struct rte_eth_stats, tx_pause_xon)},
- {"rx_flow_control_xon", offsetof(struct rte_eth_stats, rx_pause_xon)},
- {"tx_flow_control_xoff", offsetof(struct rte_eth_stats, tx_pause_xoff)},
- {"rx_flow_control_xoff", offsetof(struct rte_eth_stats, rx_pause_xoff)},
-};
-#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
-
-static struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
- {"rx_packets", offsetof(struct rte_eth_stats, q_ipackets)},
- {"rx_bytes", offsetof(struct rte_eth_stats, q_ibytes)},
-};
-#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \
- sizeof(rte_rxq_stats_strings[0]))
-
-static struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
- {"tx_packets", offsetof(struct rte_eth_stats, q_opackets)},
- {"tx_bytes", offsetof(struct rte_eth_stats, q_obytes)},
- {"tx_errors", offsetof(struct rte_eth_stats, q_errors)},
-};
-#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
- sizeof(rte_txq_stats_strings[0]))
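
These name/offset tables let generic code locate any basic counter by name.
A minimal illustrative helper (not part of the library, and relying on the
file-local table above) using the same pattern as rte_eth_xstats_get() below:

/* Sketch: resolve one generic counter by name via the offset table. */
static uint64_t
stat_by_name(const struct rte_eth_stats *stats, const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_NB_STATS; i++)
		if (strcmp(rte_stats_strings[i].name, name) == 0)
			return *(const uint64_t *)((const char *)stats +
					rte_stats_strings[i].offset);
	return 0;	/* unknown name */
}
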
-
-
-/**
- * The user application callback description.
- *
 * It contains the callback address to be registered by the user application,
 * a pointer to the callback's parameters, and the event type.
- */
-struct rte_eth_dev_callback {
- TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
- rte_eth_dev_cb_fn cb_fn; /**< Callback address */
- void *cb_arg; /**< Parameter for callback */
- enum rte_eth_event_type event; /**< Interrupt event type */
- uint32_t active; /**< Callback is executing */
-};
-
-enum {
- STAT_QMAP_TX = 0,
- STAT_QMAP_RX
-};
-
-static inline void
-rte_eth_dev_data_alloc(void)
-{
- const unsigned flags = 0;
- const struct rte_memzone *mz;
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY){
- mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
- RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
- rte_socket_id(), flags);
- } else
- mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
- if (mz == NULL)
- rte_panic("Cannot allocate memzone for ethernet port data\n");
-
- rte_eth_dev_data = mz->addr;
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- memset(rte_eth_dev_data, 0,
- RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
-}
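
The reserve-or-lookup sequence on a named memzone seen here is the standard
way a primary process and its secondaries share state. A generic sketch of
the same pattern (the helper name is illustrative):

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_memzone.h>

/* Reserve (primary) or look up (secondary) a named shared region. */
static void *
shared_state(const char *name, size_t len)
{
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		mz = rte_memzone_reserve(name, len, rte_socket_id(), 0);
	else
		mz = rte_memzone_lookup(name);
	return (mz != NULL) ? mz->addr : NULL;
}
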
-
-static struct rte_eth_dev *
-rte_eth_dev_allocated(const char *name)
-{
- unsigned i;
-
- for (i = 0; i < nb_ports; i++) {
- if (strcmp(rte_eth_devices[i].data->name, name) == 0)
- return &rte_eth_devices[i];
- }
- return NULL;
-}
-
-struct rte_eth_dev *
-rte_eth_dev_allocate(const char *name)
-{
- struct rte_eth_dev *eth_dev;
-
- if (nb_ports == RTE_MAX_ETHPORTS) {
- PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
- return NULL;
- }
-
- if (rte_eth_dev_data == NULL)
- rte_eth_dev_data_alloc();
-
- if (rte_eth_dev_allocated(name) != NULL) {
- PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n", name);
- return NULL;
- }
-
- eth_dev = &rte_eth_devices[nb_ports];
- eth_dev->data = &rte_eth_dev_data[nb_ports];
- snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
- eth_dev->data->port_id = nb_ports++;
- return eth_dev;
-}
-
-static int
-rte_eth_dev_init(struct rte_pci_driver *pci_drv,
- struct rte_pci_device *pci_dev)
-{
- struct eth_driver *eth_drv;
- struct rte_eth_dev *eth_dev;
- char ethdev_name[RTE_ETH_NAME_MAX_LEN];
-
- int diag;
-
- eth_drv = (struct eth_driver *)pci_drv;
-
- /* Create unique Ethernet device name using PCI address */
- snprintf(ethdev_name, RTE_ETH_NAME_MAX_LEN, "%d:%d.%d",
- pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
-
- eth_dev = rte_eth_dev_allocate(ethdev_name);
- if (eth_dev == NULL)
- return -ENOMEM;
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY){
- eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
- eth_drv->dev_private_size,
- RTE_CACHE_LINE_SIZE);
- if (eth_dev->data->dev_private == NULL)
- rte_panic("Cannot allocate memzone for private port data\n");
- }
- eth_dev->pci_dev = pci_dev;
- eth_dev->driver = eth_drv;
- eth_dev->data->rx_mbuf_alloc_failed = 0;
-
- /* init user callbacks */
- TAILQ_INIT(&(eth_dev->callbacks));
-
- /*
- * Set the default MTU.
- */
- eth_dev->data->mtu = ETHER_MTU;
-
- /* Invoke PMD device initialization function */
- diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
- if (diag == 0)
- return (0);
-
-	PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x)"
- " failed\n", pci_drv->name,
- (unsigned) pci_dev->id.vendor_id,
- (unsigned) pci_dev->id.device_id);
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eth_dev->data->dev_private);
- nb_ports--;
- return diag;
-}
-
-/**
- * Register an Ethernet [Poll Mode] driver.
- *
- * Function invoked by the initialization function of an Ethernet driver
- * to simultaneously register itself as a PCI driver and as an Ethernet
- * Poll Mode Driver.
- * Invokes the rte_eal_pci_register() function to register the *pci_drv*
- * structure embedded in the *eth_drv* structure, after having stored the
- * address of the rte_eth_dev_init() function in the *devinit* field of
- * the *pci_drv* structure.
- * During the PCI probing phase, the rte_eth_dev_init() function is
- * invoked for each PCI [Ethernet device] matching the embedded PCI
- * identifiers provided by the driver.
- */
-void
-rte_eth_driver_register(struct eth_driver *eth_drv)
-{
- eth_drv->pci_drv.devinit = rte_eth_dev_init;
- rte_eal_pci_register(&eth_drv->pci_drv);
-}
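
For context, a PMD would typically register itself as in the hedged sketch
below; pci_id_dummy_map, eth_dummy_dev_init and struct dummy_adapter are
placeholders for a real driver's symbols. In DPDK 1.8 the init hook itself
would then be exposed through a struct rte_driver and PMD_REGISTER_DRIVER.

#include <rte_common.h>
#include <rte_ethdev.h>

static struct eth_driver rte_dummy_pmd = {
	.pci_drv = {
		.name = "rte_dummy_pmd",
		.id_table = pci_id_dummy_map,          /* hypothetical */
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_dummy_dev_init,            /* hypothetical */
	.dev_private_size = sizeof(struct dummy_adapter),
};

static int
rte_dummy_pmd_init(const char *name __rte_unused,
		   const char *params __rte_unused)
{
	rte_eth_driver_register(&rte_dummy_pmd);
	return 0;
}
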
-
-int
-rte_eth_dev_socket_id(uint8_t port_id)
-{
- if (port_id >= nb_ports)
- return -1;
- return rte_eth_devices[port_id].pci_dev->numa_node;
-}
-
-uint8_t
-rte_eth_dev_count(void)
-{
- return (nb_ports);
-}
-
-static int
-rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
-{
- uint16_t old_nb_queues = dev->data->nb_rx_queues;
- void **rxq;
- unsigned i;
-
- if (dev->data->rx_queues == NULL) { /* first time configuration */
- dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
- sizeof(dev->data->rx_queues[0]) * nb_queues,
- RTE_CACHE_LINE_SIZE);
- if (dev->data->rx_queues == NULL) {
- dev->data->nb_rx_queues = 0;
- return -(ENOMEM);
- }
- } else { /* re-configure */
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
-
- rxq = dev->data->rx_queues;
-
- for (i = nb_queues; i < old_nb_queues; i++)
- (*dev->dev_ops->rx_queue_release)(rxq[i]);
- rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
- RTE_CACHE_LINE_SIZE);
- if (rxq == NULL)
- return -(ENOMEM);
-
- if (nb_queues > old_nb_queues)
- memset(rxq + old_nb_queues, 0,
- sizeof(rxq[0]) * (nb_queues - old_nb_queues));
-
- dev->data->rx_queues = rxq;
-
- }
- dev->data->nb_rx_queues = nb_queues;
- return (0);
-}
-
-int
-rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
-{
- struct rte_eth_dev *dev;
-
- /* This function is only safe when called from the primary process
- * in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -EINVAL;
- }
-
- dev = &rte_eth_devices[port_id];
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
- return -EINVAL;
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
-
- return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
-
-}
-
-int
-rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
-{
- struct rte_eth_dev *dev;
-
- /* This function is only safe when called from the primary process
- * in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -EINVAL;
- }
-
- dev = &rte_eth_devices[port_id];
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
- return -EINVAL;
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
-
- return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
-
-}
-
-int
-rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
-{
- struct rte_eth_dev *dev;
-
- /* This function is only safe when called from the primary process
- * in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -EINVAL;
- }
-
- dev = &rte_eth_devices[port_id];
- if (tx_queue_id >= dev->data->nb_tx_queues) {
- PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
- return -EINVAL;
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
-
- return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
-
-}
-
-int
-rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
-{
- struct rte_eth_dev *dev;
-
- /* This function is only safe when called from the primary process
- * in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -EINVAL;
- }
-
- dev = &rte_eth_devices[port_id];
- if (tx_queue_id >= dev->data->nb_tx_queues) {
- PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
- return -EINVAL;
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
-
- return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
-
-}
-
-static int
-rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
-{
- uint16_t old_nb_queues = dev->data->nb_tx_queues;
- void **txq;
- unsigned i;
-
- if (dev->data->tx_queues == NULL) { /* first time configuration */
- dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
- sizeof(dev->data->tx_queues[0]) * nb_queues,
- RTE_CACHE_LINE_SIZE);
- if (dev->data->tx_queues == NULL) {
- dev->data->nb_tx_queues = 0;
- return -(ENOMEM);
- }
- } else { /* re-configure */
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
-
- txq = dev->data->tx_queues;
-
- for (i = nb_queues; i < old_nb_queues; i++)
- (*dev->dev_ops->tx_queue_release)(txq[i]);
- txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
- RTE_CACHE_LINE_SIZE);
- if (txq == NULL)
- return -(ENOMEM);
-
- if (nb_queues > old_nb_queues)
- memset(txq + old_nb_queues, 0,
- sizeof(txq[0]) * (nb_queues - old_nb_queues));
-
- dev->data->tx_queues = txq;
-
- }
- dev->data->nb_tx_queues = nb_queues;
- return (0);
-}
-
-static int
-rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
- const struct rte_eth_conf *dev_conf)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-
- if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
- /* check multi-queue mode */
- if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ||
- (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
- (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
- (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
- /* SRIOV only works in VMDq enable mode */
- PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
- " SRIOV active, "
- "wrong VMDQ mq_mode rx %u tx %u\n",
- port_id,
- dev_conf->rxmode.mq_mode,
- dev_conf->txmode.mq_mode);
- return (-EINVAL);
- }
-
- switch (dev_conf->rxmode.mq_mode) {
- case ETH_MQ_RX_VMDQ_RSS:
- case ETH_MQ_RX_VMDQ_DCB:
- case ETH_MQ_RX_VMDQ_DCB_RSS:
-			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
- PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
- " SRIOV active, "
- "unsupported VMDQ mq_mode rx %u\n",
- port_id, dev_conf->rxmode.mq_mode);
- return (-EINVAL);
- default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
-			/* if no mq mode is configured, use the default scheme */
- dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
- if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
- RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
- break;
- }
-
- switch (dev_conf->txmode.mq_mode) {
- case ETH_MQ_TX_VMDQ_DCB:
-			/* DCB VMDQ in SRIOV mode, not implemented yet */
- PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
- " SRIOV active, "
- "unsupported VMDQ mq_mode tx %u\n",
- port_id, dev_conf->txmode.mq_mode);
- return (-EINVAL);
- default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
-			/* if no mq mode is configured, use the default scheme */
- dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
- if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
- RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
- break;
- }
-
- /* check valid queue number */
- if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
- (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
- PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
-				"number of queues must be less than or equal to %d\n",
- port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
- return (-EINVAL);
- }
- } else {
-		/* For VMDQ+DCB mode check our configuration before we go further */
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
- const struct rte_eth_vmdq_dcb_conf *conf;
-
- if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
- PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
- "!= %d\n",
- port_id, ETH_VMDQ_DCB_NUM_QUEUES);
- return (-EINVAL);
- }
- conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
- if (! (conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
- PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
- "nb_queue_pools must be %d or %d\n",
- port_id, ETH_16_POOLS, ETH_32_POOLS);
- return (-EINVAL);
- }
- }
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
- const struct rte_eth_vmdq_dcb_tx_conf *conf;
-
- if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
- PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
- "!= %d\n",
- port_id, ETH_VMDQ_DCB_NUM_QUEUES);
- return (-EINVAL);
- }
- conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
- if (! (conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
- PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
-						"nb_queue_pools must be %d or %d\n",
- port_id, ETH_16_POOLS, ETH_32_POOLS);
- return (-EINVAL);
- }
- }
-
- /* For DCB mode check our configuration before we go further */
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
- const struct rte_eth_dcb_rx_conf *conf;
-
- if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
- PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
- "!= %d\n",
- port_id, ETH_DCB_NUM_QUEUES);
- return (-EINVAL);
- }
- conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
- if (! (conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
- PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
-						"nb_tcs must be %d or %d\n",
- port_id, ETH_4_TCS, ETH_8_TCS);
- return (-EINVAL);
- }
- }
-
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
- const struct rte_eth_dcb_tx_conf *conf;
-
- if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
- PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
- "!= %d\n",
- port_id, ETH_DCB_NUM_QUEUES);
- return (-EINVAL);
- }
- conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
- if (! (conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
- PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
-						"nb_tcs must be %d or %d\n",
- port_id, ETH_4_TCS, ETH_8_TCS);
- return (-EINVAL);
- }
- }
- }
- return 0;
-}
-
-int
-rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
- const struct rte_eth_conf *dev_conf)
-{
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
- int diag;
-
- /* This function is only safe when called from the primary process
- * in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
-
- if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-EINVAL);
- }
- dev = &rte_eth_devices[port_id];
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
-
- if (dev->data->dev_started) {
- PMD_DEBUG_TRACE(
- "port %d must be stopped to allow configuration\n", port_id);
- return (-EBUSY);
- }
-
- /*
- * Check that the numbers of RX and TX queues are not greater
- * than the maximum number of RX and TX queues supported by the
- * configured device.
- */
- (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
- if (nb_rx_q > dev_info.max_rx_queues) {
- PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
- port_id, nb_rx_q, dev_info.max_rx_queues);
- return (-EINVAL);
- }
- if (nb_rx_q == 0) {
- PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
- return (-EINVAL);
- }
-
- if (nb_tx_q > dev_info.max_tx_queues) {
- PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
- port_id, nb_tx_q, dev_info.max_tx_queues);
- return (-EINVAL);
- }
- if (nb_tx_q == 0) {
- PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
- return (-EINVAL);
- }
-
- /* Copy the dev_conf parameter into the dev structure */
- memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
-
- /*
- * If link state interrupt is enabled, check that the
- * device supports it.
- */
- if (dev_conf->intr_conf.lsc == 1) {
- const struct rte_pci_driver *pci_drv = &dev->driver->pci_drv;
-
- if (!(pci_drv->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
- PMD_DEBUG_TRACE("driver %s does not support lsc\n",
- pci_drv->name);
- return (-EINVAL);
- }
- }
-
- /*
- * If jumbo frames are enabled, check that the maximum RX packet
- * length is supported by the configured device.
- */
- if (dev_conf->rxmode.jumbo_frame == 1) {
- if (dev_conf->rxmode.max_rx_pkt_len >
- dev_info.max_rx_pktlen) {
- PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
- " > max valid value %u\n",
- port_id,
- (unsigned)dev_conf->rxmode.max_rx_pkt_len,
- (unsigned)dev_info.max_rx_pktlen);
- return (-EINVAL);
- }
- else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
- PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
- " < min valid value %u\n",
- port_id,
- (unsigned)dev_conf->rxmode.max_rx_pkt_len,
- (unsigned)ETHER_MIN_LEN);
- return (-EINVAL);
- }
- } else {
- if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
- dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
- /* Use default value */
- dev->data->dev_conf.rxmode.max_rx_pkt_len =
- ETHER_MAX_LEN;
- }
-
-	/* multiple queue mode checking */
- diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
- if (diag != 0) {
- PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
- port_id, diag);
- return diag;
- }
-
- /*
- * Setup new number of RX/TX queues and reconfigure device.
- */
- diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
- if (diag != 0) {
- PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
- port_id, diag);
- return diag;
- }
-
- diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
- if (diag != 0) {
- PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
- port_id, diag);
- rte_eth_dev_rx_queue_config(dev, 0);
- return diag;
- }
-
- diag = (*dev->dev_ops->dev_configure)(dev);
- if (diag != 0) {
- PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
- port_id, diag);
- rte_eth_dev_rx_queue_config(dev, 0);
- rte_eth_dev_tx_queue_config(dev, 0);
- return diag;
- }
-
- return 0;
-}
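
Putting the checks above together, the expected application sequence is
configure-while-stopped, per-queue setup, then start. A minimal sketch; the
queue and descriptor counts are illustrative, and passing NULL for the queue
configs selects the PMD defaults as the setup functions below show:

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Bring up one port with a single RX and a single TX queue. */
static int
port_bringup(uint8_t port_id, struct rte_mempool *mp)
{
	static const struct rte_eth_conf conf;	/* zeroed: default modes */
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;
	ret = rte_eth_rx_queue_setup(port_id, 0, 128,
			rte_eth_dev_socket_id(port_id),
			NULL /* default rxconf */, mp);
	if (ret != 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id),
			NULL /* default txconf */);
	if (ret != 0)
		return ret;
	return rte_eth_dev_start(port_id);
}
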
-
-static void
-rte_eth_dev_config_restore(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
- struct ether_addr addr;
- uint16_t i;
- uint32_t pool = 0;
-
- dev = &rte_eth_devices[port_id];
-
- rte_eth_dev_info_get(port_id, &dev_info);
-
- if (RTE_ETH_DEV_SRIOV(dev).active)
- pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
-
- /* replay MAC address configuration */
- for (i = 0; i < dev_info.max_mac_addrs; i++) {
- addr = dev->data->mac_addrs[i];
-
- /* skip zero address */
- if (is_zero_ether_addr(&addr))
- continue;
-
- /* add address to the hardware */
- if (*dev->dev_ops->mac_addr_add &&
- dev->data->mac_pool_sel[i] & (1ULL << pool))
- (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
- else {
- PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
- port_id);
-			/* exit the loop but do not return an error */
- break;
- }
- }
-
- /* replay promiscuous configuration */
- if (rte_eth_promiscuous_get(port_id) == 1)
- rte_eth_promiscuous_enable(port_id);
- else if (rte_eth_promiscuous_get(port_id) == 0)
- rte_eth_promiscuous_disable(port_id);
-
- /* replay allmulticast configuration */
- if (rte_eth_allmulticast_get(port_id) == 1)
- rte_eth_allmulticast_enable(port_id);
- else if (rte_eth_allmulticast_get(port_id) == 0)
- rte_eth_allmulticast_disable(port_id);
-}
-
-int
-rte_eth_dev_start(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
- int diag;
-
- /* This function is only safe when called from the primary process
- * in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
- return (-EINVAL);
- }
- dev = &rte_eth_devices[port_id];
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
-
- if (dev->data->dev_started != 0) {
- PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
- " already started\n",
- port_id);
- return (0);
- }
-
- diag = (*dev->dev_ops->dev_start)(dev);
- if (diag == 0)
- dev->data->dev_started = 1;
- else
- return diag;
-
- rte_eth_dev_config_restore(port_id);
-
- return 0;
-}
-
-void
-rte_eth_dev_stop(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- /* This function is only safe when called from the primary process
- * in a multi-process setup*/
- PROC_PRIMARY_OR_RET();
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
- return;
- }
- dev = &rte_eth_devices[port_id];
-
- FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
-
- if (dev->data->dev_started == 0) {
- PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
- " already stopped\n",
- port_id);
- return;
- }
-
- dev->data->dev_started = 0;
- (*dev->dev_ops->dev_stop)(dev);
-}
-
-int
-rte_eth_dev_set_link_up(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- /* This function is only safe when called from the primary process
- * in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -EINVAL;
- }
- dev = &rte_eth_devices[port_id];
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
- return (*dev->dev_ops->dev_set_link_up)(dev);
-}
-
-int
-rte_eth_dev_set_link_down(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- /* This function is only safe when called from the primary process
- * in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -EINVAL;
- }
- dev = &rte_eth_devices[port_id];
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
- return (*dev->dev_ops->dev_set_link_down)(dev);
-}
-
-void
-rte_eth_dev_close(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- /* This function is only safe when called from the primary process
- * in a multi-process setup*/
- PROC_PRIMARY_OR_RET();
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return;
- }
-
- dev = &rte_eth_devices[port_id];
-
- FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
- dev->data->dev_started = 0;
- (*dev->dev_ops->dev_close)(dev);
-}
-
-int
-rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
- uint16_t nb_rx_desc, unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
- struct rte_mempool *mp)
-{
- int ret;
- uint32_t mbp_buf_size;
- struct rte_eth_dev *dev;
- struct rte_pktmbuf_pool_private *mbp_priv;
- struct rte_eth_dev_info dev_info;
-
- /* This function is only safe when called from the primary process
- * in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-EINVAL);
- }
- dev = &rte_eth_devices[port_id];
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
- return (-EINVAL);
- }
-
- if (dev->data->dev_started) {
- PMD_DEBUG_TRACE(
- "port %d must be stopped to allow configuration\n", port_id);
- return -EBUSY;
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
-
- /*
- * Check the size of the mbuf data buffer.
- * This value must be provided in the private data of the memory pool.
- * First check that the memory pool has a valid private data.
- */
- rte_eth_dev_info_get(port_id, &dev_info);
- if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
- PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
- mp->name, (int) mp->private_data_size,
- (int) sizeof(struct rte_pktmbuf_pool_private));
- return (-ENOSPC);
- }
- mbp_priv = rte_mempool_get_priv(mp);
- mbp_buf_size = mbp_priv->mbuf_data_room_size;
-
- if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
- PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
- "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
- "=%d)\n",
- mp->name,
- (int)mbp_buf_size,
- (int)(RTE_PKTMBUF_HEADROOM +
- dev_info.min_rx_bufsize),
- (int)RTE_PKTMBUF_HEADROOM,
- (int)dev_info.min_rx_bufsize);
- return (-EINVAL);
- }
-
- if (rx_conf == NULL)
- rx_conf = &dev_info.default_rxconf;
-
- ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
- socket_id, rx_conf, mp);
- if (!ret) {
- if (!dev->data->min_rx_buf_size ||
- dev->data->min_rx_buf_size > mbp_buf_size)
- dev->data->min_rx_buf_size = mbp_buf_size;
- }
-
- return ret;
-}
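
This function rejects pools whose data room, minus RTE_PKTMBUF_HEADROOM, is
below the PMD's min_rx_bufsize. A DPDK 1.8-style pool that satisfies the check
for 2 KB buffers is sketched below; the element-size arithmetic follows the
classic examples, passing NULL to rte_pktmbuf_pool_init selects the default
2048 + RTE_PKTMBUF_HEADROOM data room, and the pool name and counts are
illustrative.

#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define RX_BUF_SIZE	2048
#define MBUF_SIZE	(RX_BUF_SIZE + sizeof(struct rte_mbuf) + \
			 RTE_PKTMBUF_HEADROOM)

/* Data room will be RX_BUF_SIZE + RTE_PKTMBUF_HEADROOM, which passes the
 * check above whenever the PMD's min_rx_bufsize <= RX_BUF_SIZE. */
static struct rte_mempool *
make_rx_pool(void)
{
	return rte_mempool_create("rx_pool", 8192, MBUF_SIZE, 256,
				  sizeof(struct rte_pktmbuf_pool_private),
				  rte_pktmbuf_pool_init, NULL,
				  rte_pktmbuf_init, NULL,
				  rte_socket_id(), 0);
}
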
-
-int
-rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
- uint16_t nb_tx_desc, unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf)
-{
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- /* This function is only safe when called from the primary process
- * in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
-
- if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-EINVAL);
- }
- dev = &rte_eth_devices[port_id];
- if (tx_queue_id >= dev->data->nb_tx_queues) {
- PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
- return (-EINVAL);
- }
-
- if (dev->data->dev_started) {
- PMD_DEBUG_TRACE(
- "port %d must be stopped to allow configuration\n", port_id);
- return -EBUSY;
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
-
- rte_eth_dev_info_get(port_id, &dev_info);
-
- if (tx_conf == NULL)
- tx_conf = &dev_info.default_txconf;
-
- return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
- socket_id, tx_conf);
-}
-
-void
-rte_eth_promiscuous_enable(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return;
- }
- dev = &rte_eth_devices[port_id];
-
- FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
- (*dev->dev_ops->promiscuous_enable)(dev);
- dev->data->promiscuous = 1;
-}
-
-void
-rte_eth_promiscuous_disable(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return;
- }
- dev = &rte_eth_devices[port_id];
-
- FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
- dev->data->promiscuous = 0;
- (*dev->dev_ops->promiscuous_disable)(dev);
-}
-
-int
-rte_eth_promiscuous_get(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -1;
- }
-
- dev = &rte_eth_devices[port_id];
- return dev->data->promiscuous;
-}
-
-void
-rte_eth_allmulticast_enable(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return;
- }
- dev = &rte_eth_devices[port_id];
-
- FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
- (*dev->dev_ops->allmulticast_enable)(dev);
- dev->data->all_multicast = 1;
-}
-
-void
-rte_eth_allmulticast_disable(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return;
- }
- dev = &rte_eth_devices[port_id];
-
- FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
- dev->data->all_multicast = 0;
- (*dev->dev_ops->allmulticast_disable)(dev);
-}
-
-int
-rte_eth_allmulticast_get(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -1;
- }
-
- dev = &rte_eth_devices[port_id];
- return dev->data->all_multicast;
-}
-
-static inline int
-rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-void
-rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return;
- }
- dev = &rte_eth_devices[port_id];
-
- if (dev->data->dev_conf.intr_conf.lsc != 0)
- rte_eth_dev_atomic_read_link_status(dev, eth_link);
- else {
- FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
- (*dev->dev_ops->link_update)(dev, 1);
- *eth_link = dev->data->dev_link;
- }
-}
-
-void
-rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return;
- }
- dev = &rte_eth_devices[port_id];
-
- if (dev->data->dev_conf.intr_conf.lsc != 0)
- rte_eth_dev_atomic_read_link_status(dev, eth_link);
- else {
- FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
- (*dev->dev_ops->link_update)(dev, 0);
- *eth_link = dev->data->dev_link;
- }
-}
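
The nowait variant suits polling loops such as the hedged sketch below
(rte_delay_ms comes from rte_cycles.h; timeout handling is omitted for
brevity, so this is illustrative only):

#include <stdio.h>
#include <string.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

/* Poll until the port reports link-up. */
static void
wait_for_link(uint8_t port_id)
{
	struct rte_eth_link link;

	do {
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(port_id, &link);
		if (!link.link_status)
			rte_delay_ms(100);
	} while (!link.link_status);

	printf("port %u: %u Mbps, %s-duplex\n", port_id,
	       (unsigned)link.link_speed,
	       link.link_duplex == ETH_LINK_FULL_DUPLEX ? "full" : "half");
}
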
-
-void
-rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return;
- }
- dev = &rte_eth_devices[port_id];
- memset(stats, 0, sizeof(*stats));
-
- FUNC_PTR_OR_RET(*dev->dev_ops->stats_get);
- (*dev->dev_ops->stats_get)(dev, stats);
- stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
-}
-
-void
-rte_eth_stats_reset(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return;
- }
- dev = &rte_eth_devices[port_id];
-
- FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
- (*dev->dev_ops->stats_reset)(dev);
-}
-
-/* retrieve ethdev extended statistics */
-int
-rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
- unsigned n)
-{
- struct rte_eth_stats eth_stats;
- struct rte_eth_dev *dev;
- unsigned count, i, q;
- uint64_t val;
- char *stats_ptr;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -1;
- }
- dev = &rte_eth_devices[port_id];
-
- /* implemented by the driver */
- if (dev->dev_ops->xstats_get != NULL)
- return (*dev->dev_ops->xstats_get)(dev, xstats, n);
-
- /* else, return generic statistics */
- count = RTE_NB_STATS;
- count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
- count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
- if (n < count)
- return count;
-
- /* now fill the xstats structure */
-
- count = 0;
- memset(&eth_stats, 0, sizeof(eth_stats));
- rte_eth_stats_get(port_id, &eth_stats);
-
- /* global stats */
- for (i = 0; i < RTE_NB_STATS; i++) {
- stats_ptr = (char *)&eth_stats + rte_stats_strings[i].offset;
- val = *(uint64_t *)stats_ptr;
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "%s", rte_stats_strings[i].name);
- xstats[count++].value = val;
- }
-
- /* per-rxq stats */
- for (q = 0; q < dev->data->nb_rx_queues; q++) {
- for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
- stats_ptr = (char *)&eth_stats;
- stats_ptr += rte_rxq_stats_strings[i].offset;
- stats_ptr += q * sizeof(uint64_t);
- val = *(uint64_t *)stats_ptr;
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "rx_queue_%u_%s", q,
- rte_rxq_stats_strings[i].name);
- xstats[count++].value = val;
- }
- }
-
- /* per-txq stats */
- for (q = 0; q < dev->data->nb_tx_queues; q++) {
- for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
- stats_ptr = (char *)&eth_stats;
- stats_ptr += rte_txq_stats_strings[i].offset;
- stats_ptr += q * sizeof(uint64_t);
- val = *(uint64_t *)stats_ptr;
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "tx_queue_%u_%s", q,
- rte_txq_stats_strings[i].name);
- xstats[count++].value = val;
- }
- }
-
- return count;
-}
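
Callers are expected to follow the size-query convention visible above: when
n is smaller than the required count, the required count is returned and
nothing is written. A minimal sketch of that two-call pattern:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Fetch and print all extended statistics of a port. */
static void
dump_xstats(uint8_t port_id)
{
	struct rte_eth_xstats *xs;
	int n, i;

	n = rte_eth_xstats_get(port_id, NULL, 0);  /* query required count */
	if (n <= 0)
		return;
	xs = malloc(sizeof(*xs) * n);
	if (xs == NULL)
		return;
	n = rte_eth_xstats_get(port_id, xs, n);
	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n", xs[i].name, xs[i].value);
	free(xs);
}
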
-
-/* reset ethdev extended statistics */
-void
-rte_eth_xstats_reset(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return;
- }
- dev = &rte_eth_devices[port_id];
-
- /* implemented by the driver */
- if (dev->dev_ops->xstats_reset != NULL) {
- (*dev->dev_ops->xstats_reset)(dev);
- return;
- }
-
- /* fallback to default */
- rte_eth_stats_reset(port_id);
-}
-
-static int
-set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
- uint8_t is_rx)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
- dev = &rte_eth_devices[port_id];
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
- return (*dev->dev_ops->queue_stats_mapping_set)
- (dev, queue_id, stat_idx, is_rx);
-}
-
-
-int
-rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
- uint8_t stat_idx)
-{
- return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
- STAT_QMAP_TX);
-}
-
-
-int
-rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
- uint8_t stat_idx)
-{
- return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
- STAT_QMAP_RX);
-}
-
-
-void
-rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return;
- }
- dev = &rte_eth_devices[port_id];
-
- memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
-
- FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
- (*dev->dev_ops->dev_infos_get)(dev, dev_info);
- dev_info->pci_dev = dev->pci_dev;
- if (dev->driver)
- dev_info->driver_name = dev->driver->pci_drv.name;
-}
-
-void
-rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return;
- }
- dev = &rte_eth_devices[port_id];
- ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
-}
-
-
-int
-rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
- *mtu = dev->data->mtu;
- return 0;
-}
-
-int
-rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
-{
- int ret;
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
-
- ret = (*dev->dev_ops->mtu_set)(dev, mtu);
- if (!ret)
- dev->data->mtu = mtu;
-
- return ret;
-}
-
-int
-rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
- dev = &rte_eth_devices[port_id];
- if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
- PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
- return (-ENOSYS);
- }
-
- if (vlan_id > 4095) {
- PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
- port_id, (unsigned) vlan_id);
- return (-EINVAL);
- }
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
- (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
- return (0);
-}
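
Note that this call only succeeds when the port was configured with
rxmode.hw_vlan_filter set (otherwise it returns -ENOSYS, as above). A minimal
sketch, with the VLAN ID chosen for illustration:

/* Accept VLAN 100 on a port configured with rxmode.hw_vlan_filter = 1. */
static int
allow_vlan_100(uint8_t port_id)
{
	return rte_eth_dev_vlan_filter(port_id, 100, 1 /* on */);
}
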
-
-int
-rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
- if (rx_queue_id >= dev->data->nb_rx_queues) {
-		PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
- return (-EINVAL);
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
- (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
-
- return (0);
-}
-
-int
-rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
- (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
-
- return (0);
-}
-
-int
-rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
-{
- struct rte_eth_dev *dev;
- int ret = 0;
- int mask = 0;
- int cur, org = 0;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
-
-	/* check which options were changed by the application */
- cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
- org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
- if (cur != org){
- dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
- mask |= ETH_VLAN_STRIP_MASK;
- }
-
- cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
- org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
- if (cur != org){
- dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
- mask |= ETH_VLAN_FILTER_MASK;
- }
-
- cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
- org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
- if (cur != org){
- dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
- mask |= ETH_VLAN_EXTEND_MASK;
- }
-
- /*no change*/
- if(mask == 0)
- return ret;
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
- (*dev->dev_ops->vlan_offload_set)(dev, mask);
-
- return ret;
-}
-
-int
-rte_eth_dev_get_vlan_offload(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
- int ret = 0;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
-
-	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
-		ret |= ETH_VLAN_STRIP_OFFLOAD;
-
-	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
-		ret |= ETH_VLAN_FILTER_OFFLOAD;
-
-	if (dev->data->dev_conf.rxmode.hw_vlan_extend)
-		ret |= ETH_VLAN_EXTEND_OFFLOAD;
-
- return ret;
-}
-
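A read-modify-write sketch of the offload-mask pair above, reusing the ETH_VLAN_*_OFFLOAD bits tested in rte_eth_dev_get_vlan_offload(); port_id is assumed valid.

#include <rte_ethdev.h>

/* Enable VLAN stripping, disable VLAN filtering, leave extend untouched. */
static int example_tune_vlan_offloads(uint8_t port_id)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	mask |= ETH_VLAN_STRIP_OFFLOAD;
	mask &= ~ETH_VLAN_FILTER_OFFLOAD;
	return rte_eth_dev_set_vlan_offload(port_id, mask);
}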
-int
-rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
- (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
-
- return 0;
-}
-
-int
-rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
- struct rte_fdir_filter *fdir_filter,
- uint8_t queue)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
-
- if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
- PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
- port_id, dev->data->dev_conf.fdir_conf.mode);
- return (-ENOSYS);
- }
-
- if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
- || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
- && (fdir_filter->port_src || fdir_filter->port_dst)) {
- PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type, source & destinations ports " \
- "should be null!\n");
- return (-EINVAL);
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
- return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
- queue);
-}
-
-int
-rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
- struct rte_fdir_filter *fdir_filter,
- uint8_t queue)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
-
- if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
- PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
- port_id, dev->data->dev_conf.fdir_conf.mode);
- return (-ENOSYS);
- }
-
- if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
- || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
- && (fdir_filter->port_src || fdir_filter->port_dst)) {
- PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type, source & destinations ports " \
- "should be null!\n");
- return (-EINVAL);
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
- return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
- queue);
-
-}
-
-int
-rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
- struct rte_fdir_filter *fdir_filter)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
-
- if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
- PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
- port_id, dev->data->dev_conf.fdir_conf.mode);
- return (-ENOSYS);
- }
-
- if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
- || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
- && (fdir_filter->port_src || fdir_filter->port_dst)) {
- PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type source & destinations ports " \
- "should be null!\n");
- return (-EINVAL);
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
- return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
-}
-
-int
-rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
-	if (!(dev->data->dev_conf.fdir_conf.mode)) {
- PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
- return (-ENOSYS);
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
-
- (*dev->dev_ops->fdir_infos_get)(dev, fdir);
- return (0);
-}
-
-int
-rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
- struct rte_fdir_filter *fdir_filter,
- uint16_t soft_id, uint8_t queue,
- uint8_t drop)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
-
- if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
- PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
- port_id, dev->data->dev_conf.fdir_conf.mode);
- return (-ENOSYS);
- }
-
- if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
- || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
- && (fdir_filter->port_src || fdir_filter->port_dst)) {
- PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type, source & destinations ports " \
- "should be null!\n");
- return (-EINVAL);
- }
-
- /* For now IPv6 is not supported with perfect filter */
- //if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
- // return (-ENOTSUP);
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
- return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
- soft_id, queue,
- drop);
-}
-
-int
-rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
- struct rte_fdir_filter *fdir_filter,
- uint16_t soft_id, uint8_t queue,
- uint8_t drop)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
-
- if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
- PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
- port_id, dev->data->dev_conf.fdir_conf.mode);
- return (-ENOSYS);
- }
-
- if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
- || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
- && (fdir_filter->port_src || fdir_filter->port_dst)) {
- PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type, source & destinations ports " \
- "should be null!\n");
- return (-EINVAL);
- }
-
- /* For now IPv6 is not supported with perfect filter */
- if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
- return (-ENOTSUP);
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
- return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
- soft_id, queue, drop);
-}
-
-int
-rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
- struct rte_fdir_filter *fdir_filter,
- uint16_t soft_id)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
-
- if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
- PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
- port_id, dev->data->dev_conf.fdir_conf.mode);
- return (-ENOSYS);
- }
-
- if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
- || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
- && (fdir_filter->port_src || fdir_filter->port_dst)) {
- PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
- "None l4type, source & destinations ports " \
- "should be null!\n");
- return (-EINVAL);
- }
-
- /* For now IPv6 is not supported with perfect filter */
- if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
- return (-ENOTSUP);
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
- return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
- soft_id);
-}
-
-int
-rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
-	if (!(dev->data->dev_conf.fdir_conf.mode)) {
- PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
- return (-ENOSYS);
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
- return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
-}
-
-int
-rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
- memset(fc_conf, 0, sizeof(*fc_conf));
- return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
-}
-
-int
-rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
- PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
- return (-EINVAL);
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
- return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
-}
-
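A read-modify-write sketch for the get/set pair above; only send_xon is touched because it is the one field this layer validates, and the get call zeroes fc_conf before the PMD fills it.

#include <rte_ethdev.h>

/* Turn on XON frame transmission, keeping the rest of the PMD's config. */
static int example_enable_xon(uint8_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);

	if (ret < 0)
		return ret;		/* -ENODEV or -ENOTSUP */
	fc_conf.send_xon = 1;		/* must be 0 or 1, per the check above */
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}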
-int
-rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
- PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
- return (-EINVAL);
- }
-
- dev = &rte_eth_devices[port_id];
- /* High water, low water validation are device specific */
- if (*dev->dev_ops->priority_flow_ctrl_set)
- return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
- return (-ENOTSUP);
-}
-
-static inline int
-rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
-{
- uint16_t i, num;
-
- if (!reta_conf)
- return -EINVAL;
-
- if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
- PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
- RTE_RETA_GROUP_SIZE);
- return -EINVAL;
- }
-
- num = reta_size / RTE_RETA_GROUP_SIZE;
- for (i = 0; i < num; i++) {
- if (reta_conf[i].mask)
- return 0;
- }
-
- return -EINVAL;
-}
-
-static inline int
-rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size,
- uint8_t max_rxq)
-{
- uint16_t i, idx, shift;
-
- if (!reta_conf)
- return -EINVAL;
-
- if (max_rxq == 0) {
- PMD_DEBUG_TRACE("No receive queue is available\n");
- return -EINVAL;
- }
-
- for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
- if ((reta_conf[idx].mask & (1ULL << shift)) &&
- (reta_conf[idx].reta[shift] >= max_rxq)) {
- PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
- "the maximum rxq index: %u\n", idx, shift,
- reta_conf[idx].reta[shift], max_rxq);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-int
-rte_eth_dev_rss_reta_update(uint8_t port_id,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
-{
- struct rte_eth_dev *dev;
- int ret;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- /* Check mask bits */
- ret = rte_eth_check_reta_mask(reta_conf, reta_size);
- if (ret < 0)
- return ret;
-
- dev = &rte_eth_devices[port_id];
-
- /* Check entry value */
- ret = rte_eth_check_reta_entry(reta_conf, reta_size,
- dev->data->nb_rx_queues);
- if (ret < 0)
- return ret;
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
- return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
-}
-
-int
-rte_eth_dev_rss_reta_query(uint8_t port_id,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
-{
- struct rte_eth_dev *dev;
- int ret;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- /* Check mask bits */
- ret = rte_eth_check_reta_mask(reta_conf, reta_size);
- if (ret < 0)
- return ret;
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
- return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
-}
-
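A sketch of filling the reta_conf layout the two checkers above expect: every configured entry needs its mask bit set, and every value must stay below the RX queue count. The 128-entry size is illustrative; the real size comes from the device.

#include <string.h>
#include <rte_ethdev.h>

/* Spread a 128-entry redirection table round-robin over nb_rx_queues. */
static int example_fill_reta(uint8_t port_id, uint16_t nb_rx_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[128 / RTE_RETA_GROUP_SIZE];
	uint16_t i, idx, shift;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < 128; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		reta_conf[idx].mask |= 1ULL << shift;	/* entry is valid */
		reta_conf[idx].reta[shift] = i % nb_rx_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
}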
-int
-rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
-{
- struct rte_eth_dev *dev;
- uint16_t rss_hash_protos;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
- rss_hash_protos = rss_conf->rss_hf;
- if ((rss_hash_protos != 0) &&
- ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
- PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
- rss_hash_protos);
- return (-EINVAL);
- }
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
- return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
-}
-
-int
-rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
- struct rte_eth_rss_conf *rss_conf)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
- return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
-}
-
-int
-rte_eth_dev_udp_tunnel_add(uint8_t port_id,
- struct rte_eth_udp_tunnel *udp_tunnel)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- if (udp_tunnel == NULL) {
- PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
- return -EINVAL;
- }
-
- if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
- PMD_DEBUG_TRACE("Invalid tunnel type\n");
- return -EINVAL;
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
- return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
-}
-
-int
-rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
- struct rte_eth_udp_tunnel *udp_tunnel)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
- dev = &rte_eth_devices[port_id];
-
- if (udp_tunnel == NULL) {
- PMD_DEBUG_TRACE("Invalid udp_tunnel parametr\n");
- return -EINVAL;
- }
-
- if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
- PMD_DEBUG_TRACE("Invalid tunnel type\n");
- return -EINVAL;
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
- return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
-}
-
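A registration sketch for the pair above. RTE_TUNNEL_TYPE_VXLAN and the udp_port field are assumptions about the rte_eth_udp_tunnel definition, which this file only checks against NULL and RTE_TUNNEL_TYPE_MAX; 4789 is the IANA VXLAN port.

#include <rte_ethdev.h>

/* Register or drop the well-known VXLAN UDP port on a device. */
static int example_toggle_vxlan_port(uint8_t port_id, int enable)
{
	struct rte_eth_udp_tunnel tunnel;

	tunnel.prot_type = RTE_TUNNEL_TYPE_VXLAN;	/* assumed enumerator */
	tunnel.udp_port = 4789;				/* assumed field name */
	return enable ? rte_eth_dev_udp_tunnel_add(port_id, &tunnel)
		      : rte_eth_dev_udp_tunnel_delete(port_id, &tunnel);
}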
-int
-rte_eth_led_on(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
- return ((*dev->dev_ops->dev_led_on)(dev));
-}
-
-int
-rte_eth_led_off(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
- return ((*dev->dev_ops->dev_led_off)(dev));
-}
-
-/*
- * Returns the index of addr in the MAC address array, or -1 if it is not
- * found. Pass 00:00:00:00:00:00 to find an empty slot.
- */
-static inline int
-get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
-{
- struct rte_eth_dev_info dev_info;
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- unsigned i;
-
- rte_eth_dev_info_get(port_id, &dev_info);
-
- for (i = 0; i < dev_info.max_mac_addrs; i++)
- if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
- return i;
-
- return -1;
-}
-
-static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
-
-int
-rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
- uint32_t pool)
-{
- struct rte_eth_dev *dev;
- int index;
- uint64_t pool_mask;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
-
- if (is_zero_ether_addr(addr)) {
- PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
- port_id);
- return (-EINVAL);
- }
- if (pool >= ETH_64_POOLS) {
- PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
- return (-EINVAL);
- }
-
- index = get_mac_addr_index(port_id, addr);
- if (index < 0) {
- index = get_mac_addr_index(port_id, &null_mac_addr);
- if (index < 0) {
- PMD_DEBUG_TRACE("port %d: MAC address array full\n",
- port_id);
- return (-ENOSPC);
- }
- } else {
- pool_mask = dev->data->mac_pool_sel[index];
-
-		/* Check if both MAC address and pool are already there; if so, do nothing */
- if (pool_mask & (1ULL << pool))
- return 0;
- }
-
- /* Update NIC */
- (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
-
- /* Update address in NIC data structure */
- ether_addr_copy(addr, &dev->data->mac_addrs[index]);
-
- /* Update pool bitmap in NIC data structure */
- dev->data->mac_pool_sel[index] |= (1ULL << pool);
-
- return 0;
-}
-
-int
-rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
-{
- struct rte_eth_dev *dev;
- int index;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
-
- index = get_mac_addr_index(port_id, addr);
- if (index == 0) {
- PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
- return (-EADDRINUSE);
- } else if (index < 0)
- return 0; /* Do nothing if address wasn't found */
-
- /* Update NIC */
- (*dev->dev_ops->mac_addr_remove)(dev, index);
-
- /* Update address in NIC data structure */
- ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
-
- /* reset pool bitmap */
- dev->data->mac_pool_sel[index] = 0;
-
- return 0;
-}
-
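A sketch of the add/remove pair above with a locally administered address; as the pool_mask check shows, adding the same (address, pool) pair twice is a no-op. port_id is assumed valid.

#include <rte_ethdev.h>

/* Attach a locally administered address to pool 0, then detach it. */
static int example_mac_roundtrip(uint8_t port_id)
{
	struct ether_addr extra = {{0x02, 0x00, 0x00, 0x00, 0x00, 0x01}};
	int ret = rte_eth_dev_mac_addr_add(port_id, &extra, 0);

	if (ret == 0)
		ret = rte_eth_dev_mac_addr_remove(port_id, &extra);
	return ret;
}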
-int
-rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf,
- uint16_t rx_mode, uint8_t on)
-{
- uint16_t num_vfs;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
- port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
- rte_eth_dev_info_get(port_id, &dev_info);
-
- num_vfs = dev_info.max_vfs;
-	if (vf > num_vfs) {
-		PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
-		return (-EINVAL);
-	}
-	if (rx_mode == 0) {
-		PMD_DEBUG_TRACE("set VF RX mode:mode mask cannot be zero\n");
-		return (-EINVAL);
-	}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
- return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
-}
-
-/*
- * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
- * an empty spot.
- */
-static inline int
-get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
-{
- struct rte_eth_dev_info dev_info;
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- unsigned i;
-
- rte_eth_dev_info_get(port_id, &dev_info);
- if (!dev->data->hash_mac_addrs)
- return -1;
-
- for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
- if (memcmp(addr, &dev->data->hash_mac_addrs[i],
- ETHER_ADDR_LEN) == 0)
- return i;
-
- return -1;
-}
-
-int
-rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
- uint8_t on)
-{
- int index;
- int ret;
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
- port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
- if (is_zero_ether_addr(addr)) {
- PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
- port_id);
- return (-EINVAL);
- }
-
- index = get_hash_mac_addr_index(port_id, addr);
- /* Check if it's already there, and do nothing */
- if ((index >= 0) && (on))
- return 0;
-
- if (index < 0) {
- if (!on) {
- PMD_DEBUG_TRACE("port %d: the MAC address was not"
- "set in UTA\n", port_id);
- return (-EINVAL);
- }
-
- index = get_hash_mac_addr_index(port_id, &null_mac_addr);
- if (index < 0) {
- PMD_DEBUG_TRACE("port %d: MAC address array full\n",
- port_id);
- return (-ENOSPC);
- }
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
- ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
- if (ret == 0) {
- /* Update address in NIC data structure */
- if (on)
- ether_addr_copy(addr,
- &dev->data->hash_mac_addrs[index]);
- else
- ether_addr_copy(&null_mac_addr,
- &dev->data->hash_mac_addrs[index]);
- }
-
- return ret;
-}
-
-int
-rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
- port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
- return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
-}
-
-int
-rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
-{
- uint16_t num_vfs;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
- rte_eth_dev_info_get(port_id, &dev_info);
-
- num_vfs = dev_info.max_vfs;
-	if (vf > num_vfs) {
-		PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
-		return (-EINVAL);
-	}
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
- return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
-}
-
-int
-rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
-{
- uint16_t num_vfs;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- dev = &rte_eth_devices[port_id];
- rte_eth_dev_info_get(port_id, &dev_info);
-
- num_vfs = dev_info.max_vfs;
-	if (vf > num_vfs) {
-		PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
-		return (-EINVAL);
-	}
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
- return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
-}
-
-int
-rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
- uint64_t vf_mask,uint8_t vlan_on)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
- port_id);
- return (-ENODEV);
- }
- dev = &rte_eth_devices[port_id];
-
-	if (vlan_id > ETHER_MAX_VLAN_ID) {
-		PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
-			vlan_id);
-		return (-EINVAL);
-	}
-	if (vf_mask == 0) {
-		PMD_DEBUG_TRACE("VF VLAN filter:pool_mask cannot be 0\n");
-		return (-EINVAL);
-	}
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
- return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
- vf_mask,vlan_on);
-}
-
-int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
- uint16_t tx_rate)
-{
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
- struct rte_eth_link link;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("set queue rate limit:invalid port id=%d\n",
- port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- rte_eth_dev_info_get(port_id, &dev_info);
- link = dev->data->dev_link;
-
- if (queue_idx > dev_info.max_tx_queues) {
- PMD_DEBUG_TRACE("set queue rate limit:port %d: "
- "invalid queue id=%d\n", port_id, queue_idx);
- return -EINVAL;
- }
-
- if (tx_rate > link.link_speed) {
- PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
- "bigger than link speed= %d\n",
- tx_rate, link.link_speed);
- return -EINVAL;
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
- return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
-}
-
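A one-line sketch of the queue rate limiter above; the 1000 Mb/s cap is illustrative and must not exceed the negotiated link speed.

#include <rte_ethdev.h>

/* Cap TX queue 0 at 1000 Mb/s; out-of-range queue ids and rates above
 * the link speed are rejected with -EINVAL by the checks above. */
static int example_cap_queue(uint8_t port_id)
{
	return rte_eth_set_queue_rate_limit(port_id, 0, 1000);
}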
-int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
- uint64_t q_msk)
-{
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
- struct rte_eth_link link;
-
- if (q_msk == 0)
- return 0;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("set VF rate limit:invalid port id=%d\n",
- port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- rte_eth_dev_info_get(port_id, &dev_info);
- link = dev->data->dev_link;
-
- if (vf > dev_info.max_vfs) {
- PMD_DEBUG_TRACE("set VF rate limit:port %d: "
- "invalid vf id=%d\n", port_id, vf);
- return -EINVAL;
- }
-
- if (tx_rate > link.link_speed) {
- PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
- "bigger than link speed= %d\n",
- tx_rate, link.link_speed);
- return -EINVAL;
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
- return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
-}
-
-int
-rte_eth_mirror_rule_set(uint8_t port_id,
- struct rte_eth_vmdq_mirror_conf *mirror_conf,
- uint8_t rule_id, uint8_t on)
-{
-	struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
- if (mirror_conf->rule_type_mask == 0) {
- PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
- return (-EINVAL);
- }
-
- if (mirror_conf->dst_pool >= ETH_64_POOLS) {
- PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
- "be 0-%d\n",ETH_64_POOLS - 1);
- return (-EINVAL);
- }
-
- if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
- (mirror_conf->pool_mask == 0)) {
- PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
- "be 0.\n");
- return (-EINVAL);
- }
-
-	if (rule_id >= ETH_VMDQ_NUM_MIRROR_RULE) {
-		PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
-			ETH_VMDQ_NUM_MIRROR_RULE - 1);
-		return (-EINVAL);
-	}
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
-
- return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
-}
-
-int
-rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
-{
-	struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
-	if (rule_id >= ETH_VMDQ_NUM_MIRROR_RULE) {
-		PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
-			ETH_VMDQ_NUM_MIRROR_RULE - 1);
-		return (-EINVAL);
-	}
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
-
- return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
-}
-
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-uint16_t
-rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
- struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return 0;
- }
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
- if (queue_id >= dev->data->nb_rx_queues) {
- PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
- return 0;
- }
- return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
- rx_pkts, nb_pkts);
-}
-
-uint16_t
-rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
- struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return 0;
- }
- dev = &rte_eth_devices[port_id];
-
- FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
- if (queue_id >= dev->data->nb_tx_queues) {
- PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
- return 0;
- }
- return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
- tx_pkts, nb_pkts);
-}
-
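These debug wrappers validate their arguments and then invoke the same rx_pkt_burst/tx_pkt_burst hooks as the inline fast path, so the canonical forwarding loop is unchanged; a minimal sketch, assuming rx_port, tx_port and their queue 0 are already set up.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Forward packets from one port's RX queue 0 to another's TX queue 0. */
static void example_forward(uint8_t rx_port, uint8_t tx_port)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx, nb_tx, i;

	for (;;) {
		nb_rx = rte_eth_rx_burst(rx_port, 0, pkts, 32);
		nb_tx = rte_eth_tx_burst(tx_port, 0, pkts, nb_rx);
		for (i = nb_tx; i < nb_rx; i++)
			rte_pktmbuf_free(pkts[i]); /* drop what TX refused */
	}
}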
-uint32_t
-rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return 0;
- }
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
- return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
-}
-
-int
-rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
- return (*dev->dev_ops->rx_descriptor_done)( \
- dev->data->rx_queues[queue_id], offset);
-}
-#endif
-
-int
-rte_eth_dev_callback_register(uint8_t port_id,
- enum rte_eth_event_type event,
- rte_eth_dev_cb_fn cb_fn, void *cb_arg)
-{
- struct rte_eth_dev *dev;
- struct rte_eth_dev_callback *user_cb;
-
- if (!cb_fn)
- return (-EINVAL);
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-EINVAL);
- }
-
- dev = &rte_eth_devices[port_id];
- rte_spinlock_lock(&rte_eth_dev_cb_lock);
-
- TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
- if (user_cb->cb_fn == cb_fn &&
- user_cb->cb_arg == cb_arg &&
- user_cb->event == event) {
- break;
- }
- }
-
- /* create a new callback. */
- if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
- sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
- user_cb->cb_fn = cb_fn;
- user_cb->cb_arg = cb_arg;
- user_cb->event = event;
- TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
- }
-
- rte_spinlock_unlock(&rte_eth_dev_cb_lock);
- return ((user_cb == NULL) ? -ENOMEM : 0);
-}
-
-int
-rte_eth_dev_callback_unregister(uint8_t port_id,
- enum rte_eth_event_type event,
- rte_eth_dev_cb_fn cb_fn, void *cb_arg)
-{
- int ret;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_callback *cb, *next;
-
- if (!cb_fn)
- return (-EINVAL);
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-EINVAL);
- }
-
- dev = &rte_eth_devices[port_id];
- rte_spinlock_lock(&rte_eth_dev_cb_lock);
-
- ret = 0;
- for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {
-
- next = TAILQ_NEXT(cb, next);
-
- if (cb->cb_fn != cb_fn || cb->event != event ||
- (cb->cb_arg != (void *)-1 &&
- cb->cb_arg != cb_arg))
- continue;
-
- /*
- * if this callback is not executing right now,
- * then remove it.
- */
- if (cb->active == 0) {
- TAILQ_REMOVE(&(dev->callbacks), cb, next);
- rte_free(cb);
- } else {
- ret = -EAGAIN;
- }
- }
-
- rte_spinlock_unlock(&rte_eth_dev_cb_lock);
- return (ret);
-}
-
-void
-_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
- enum rte_eth_event_type event)
-{
- struct rte_eth_dev_callback *cb_lst;
- struct rte_eth_dev_callback dev_cb;
-
- rte_spinlock_lock(&rte_eth_dev_cb_lock);
- TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
- if (cb_lst->cb_fn == NULL || cb_lst->event != event)
- continue;
- dev_cb = *cb_lst;
- cb_lst->active = 1;
- rte_spinlock_unlock(&rte_eth_dev_cb_lock);
- dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
- dev_cb.cb_arg);
- rte_spinlock_lock(&rte_eth_dev_cb_lock);
- cb_lst->active = 0;
- }
- rte_spinlock_unlock(&rte_eth_dev_cb_lock);
-}
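A registration sketch for the callback machinery above. The handler signature mirrors the invocation in _rte_eth_dev_callback_process(); RTE_ETH_EVENT_INTR_LSC (link-state change) is an assumed member of rte_eth_event_type.

#include <stdio.h>
#include <rte_ethdev.h>

static void on_port_event(uint8_t port_id, enum rte_eth_event_type event,
			  void *arg)
{
	(void)arg;
	printf("port %u: event %d\n", port_id, (int)event);
}

/* Register the handler; unregister later with the same triple. */
static int example_watch_link(uint8_t port_id)
{
	return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
					     on_port_event, NULL);
}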
-#ifdef RTE_NIC_BYPASS
-int rte_eth_dev_bypass_init(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
-	if ((dev = &rte_eth_devices[port_id]) == NULL) {
- PMD_DEBUG_TRACE("Invalid port device\n");
- return (-ENODEV);
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
- (*dev->dev_ops->bypass_init)(dev);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
-	if ((dev = &rte_eth_devices[port_id]) == NULL) {
- PMD_DEBUG_TRACE("Invalid port device\n");
- return (-ENODEV);
- }
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
- (*dev->dev_ops->bypass_state_show)(dev, state);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
-	if ((dev = &rte_eth_devices[port_id]) == NULL) {
- PMD_DEBUG_TRACE("Invalid port device\n");
- return (-ENODEV);
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
- (*dev->dev_ops->bypass_state_set)(dev, new_state);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
-	if ((dev = &rte_eth_devices[port_id]) == NULL) {
-		PMD_DEBUG_TRACE("Invalid port device\n");
-		return (-ENODEV);
-	}
-
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
-	(*dev->dev_ops->bypass_event_show)(dev, event, state);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
-	if ((dev = &rte_eth_devices[port_id]) == NULL) {
- PMD_DEBUG_TRACE("Invalid port device\n");
- return (-ENODEV);
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
- (*dev->dev_ops->bypass_event_set)(dev, event, state);
- return 0;
-}
-
-int
-rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
-	if ((dev = &rte_eth_devices[port_id]) == NULL) {
- PMD_DEBUG_TRACE("Invalid port device\n");
- return (-ENODEV);
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
- (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
-	if ((dev = &rte_eth_devices[port_id]) == NULL) {
- PMD_DEBUG_TRACE("Invalid port device\n");
- return (-ENODEV);
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
- (*dev->dev_ops->bypass_ver_show)(dev, ver);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
-	if ((dev = &rte_eth_devices[port_id]) == NULL) {
- PMD_DEBUG_TRACE("Invalid port device\n");
- return (-ENODEV);
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
- (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_wd_reset(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return (-ENODEV);
- }
-
-	if ((dev = &rte_eth_devices[port_id]) == NULL) {
- PMD_DEBUG_TRACE("Invalid port device\n");
- return (-ENODEV);
- }
-
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
- (*dev->dev_ops->bypass_wd_reset)(dev);
- return 0;
-}
-#endif
-
-int
-rte_eth_dev_add_syn_filter(uint8_t port_id,
- struct rte_syn_filter *filter, uint16_t rx_queue)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_syn_filter, -ENOTSUP);
- return (*dev->dev_ops->add_syn_filter)(dev, filter, rx_queue);
-}
-
-int
-rte_eth_dev_remove_syn_filter(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_syn_filter, -ENOTSUP);
- return (*dev->dev_ops->remove_syn_filter)(dev);
-}
-
-int
-rte_eth_dev_get_syn_filter(uint8_t port_id,
- struct rte_syn_filter *filter, uint16_t *rx_queue)
-{
- struct rte_eth_dev *dev;
-
- if (filter == NULL || rx_queue == NULL)
- return -EINVAL;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_syn_filter, -ENOTSUP);
- return (*dev->dev_ops->get_syn_filter)(dev, filter, rx_queue);
-}
-
-int
-rte_eth_dev_add_ethertype_filter(uint8_t port_id, uint16_t index,
- struct rte_ethertype_filter *filter, uint16_t rx_queue)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-	if (filter->ethertype == ETHER_TYPE_IPv4 ||
-	    filter->ethertype == ETHER_TYPE_IPv6) {
-		PMD_DEBUG_TRACE("IPv4 and IPv6 are not supported"
-			" in ethertype filter\n");
-		return -EINVAL;
-	}
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_ethertype_filter, -ENOTSUP);
- return (*dev->dev_ops->add_ethertype_filter)(dev, index,
- filter, rx_queue);
-}
-
-int
-rte_eth_dev_remove_ethertype_filter(uint8_t port_id, uint16_t index)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_ethertype_filter, -ENOTSUP);
- return (*dev->dev_ops->remove_ethertype_filter)(dev, index);
-}
-
-int
-rte_eth_dev_get_ethertype_filter(uint8_t port_id, uint16_t index,
- struct rte_ethertype_filter *filter, uint16_t *rx_queue)
-{
- struct rte_eth_dev *dev;
-
- if (filter == NULL || rx_queue == NULL)
- return -EINVAL;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_ethertype_filter, -ENOTSUP);
- return (*dev->dev_ops->get_ethertype_filter)(dev, index,
- filter, rx_queue);
-}
-
-int
-rte_eth_dev_add_2tuple_filter(uint8_t port_id, uint16_t index,
- struct rte_2tuple_filter *filter, uint16_t rx_queue)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-	if (filter->protocol != IPPROTO_TCP &&
-	    filter->tcp_flags != 0) {
-		PMD_DEBUG_TRACE("tcp_flags is 0x%x, but the protocol"
-			" is not TCP\n",
-			filter->tcp_flags);
-		return -EINVAL;
-	}
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_2tuple_filter, -ENOTSUP);
- return (*dev->dev_ops->add_2tuple_filter)(dev, index, filter, rx_queue);
-}
-
-int
-rte_eth_dev_remove_2tuple_filter(uint8_t port_id, uint16_t index)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_2tuple_filter, -ENOTSUP);
- return (*dev->dev_ops->remove_2tuple_filter)(dev, index);
-}
-
-int
-rte_eth_dev_get_2tuple_filter(uint8_t port_id, uint16_t index,
- struct rte_2tuple_filter *filter, uint16_t *rx_queue)
-{
- struct rte_eth_dev *dev;
-
- if (filter == NULL || rx_queue == NULL)
- return -EINVAL;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_2tuple_filter, -ENOTSUP);
- return (*dev->dev_ops->get_2tuple_filter)(dev, index, filter, rx_queue);
-}
-
-int
-rte_eth_dev_add_5tuple_filter(uint8_t port_id, uint16_t index,
- struct rte_5tuple_filter *filter, uint16_t rx_queue)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
-	if (filter->protocol != IPPROTO_TCP &&
-	    filter->tcp_flags != 0) {
-		PMD_DEBUG_TRACE("tcp_flags is 0x%x, but the protocol"
-			" is not TCP\n",
-			filter->tcp_flags);
-		return -EINVAL;
-	}
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_5tuple_filter, -ENOTSUP);
- return (*dev->dev_ops->add_5tuple_filter)(dev, index, filter, rx_queue);
-}
-
-int
-rte_eth_dev_remove_5tuple_filter(uint8_t port_id, uint16_t index)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_5tuple_filter, -ENOTSUP);
- return (*dev->dev_ops->remove_5tuple_filter)(dev, index);
-}
-
-int
-rte_eth_dev_get_5tuple_filter(uint8_t port_id, uint16_t index,
- struct rte_5tuple_filter *filter, uint16_t *rx_queue)
-{
- struct rte_eth_dev *dev;
-
- if (filter == NULL || rx_queue == NULL)
- return -EINVAL;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_5tuple_filter, -ENOTSUP);
- return (*dev->dev_ops->get_5tuple_filter)(dev, index, filter,
- rx_queue);
-}
-
-int
-rte_eth_dev_add_flex_filter(uint8_t port_id, uint16_t index,
- struct rte_flex_filter *filter, uint16_t rx_queue)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_flex_filter, -ENOTSUP);
- return (*dev->dev_ops->add_flex_filter)(dev, index, filter, rx_queue);
-}
-
-int
-rte_eth_dev_remove_flex_filter(uint8_t port_id, uint16_t index)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_flex_filter, -ENOTSUP);
- return (*dev->dev_ops->remove_flex_filter)(dev, index);
-}
-
-int
-rte_eth_dev_get_flex_filter(uint8_t port_id, uint16_t index,
- struct rte_flex_filter *filter, uint16_t *rx_queue)
-{
- struct rte_eth_dev *dev;
-
- if (filter == NULL || rx_queue == NULL)
- return -EINVAL;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_flex_filter, -ENOTSUP);
- return (*dev->dev_ops->get_flex_filter)(dev, index, filter,
- rx_queue);
-}
-
-int
-rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
- return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
- RTE_ETH_FILTER_NOP, NULL);
-}
-
-int
-rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg)
-{
- struct rte_eth_dev *dev;
-
- if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
-
- dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
- return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
-}
diff --git a/src/dpdk_lib18/librte_hash/Makefile b/src/dpdk_lib18/librte_hash/Makefile
deleted file mode 100755
index 95e4c09c..00000000
--- a/src/dpdk_lib18/librte_hash/Makefile
+++ /dev/null
@@ -1,53 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_hash.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_HASH) := rte_hash.c
-SRCS-$(CONFIG_RTE_LIBRTE_HASH) += rte_fbk_hash.c
-
-# install this header file
-SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include := rte_hash.h
-SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_hash_crc.h
-SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_jhash.h
-SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_fbk_hash.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_HASH) += lib/librte_eal lib/librte_malloc
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_hash/rte_fbk_hash.c b/src/dpdk_lib18/librte_hash/rte_fbk_hash.c
deleted file mode 100755
index 421e1cdd..00000000
--- a/src/dpdk_lib18/librte_hash/rte_fbk_hash.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/**
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdint.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <string.h>
-#include <errno.h>
-
-#include <sys/queue.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_eal_memconfig.h>
-#include <rte_malloc.h>
-#include <rte_common.h>
-#include <rte_per_lcore.h>
-#include <rte_errno.h>
-#include <rte_string_fns.h>
-#include <rte_cpuflags.h>
-#include <rte_log.h>
-#include <rte_spinlock.h>
-
-#include "rte_fbk_hash.h"
-
-TAILQ_HEAD(rte_fbk_hash_list, rte_tailq_entry);
-
-/**
- * Performs a lookup for an existing hash table, and returns a pointer to
- * the table if found.
- *
- * @param name
- * Name of the hash table to find
- *
- * @return
- * pointer to hash table structure or NULL on error.
- */
-struct rte_fbk_hash_table *
-rte_fbk_hash_find_existing(const char *name)
-{
- struct rte_fbk_hash_table *h = NULL;
- struct rte_tailq_entry *te;
- struct rte_fbk_hash_list *fbk_hash_list;
-
- /* check that we have an initialised tail queue */
- if ((fbk_hash_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_FBK_HASH,
- rte_fbk_hash_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
-
- rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
- TAILQ_FOREACH(te, fbk_hash_list, next) {
- h = (struct rte_fbk_hash_table *) te->data;
- if (strncmp(name, h->name, RTE_FBK_HASH_NAMESIZE) == 0)
- break;
- }
- rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
- if (te == NULL) {
- rte_errno = ENOENT;
- return NULL;
- }
- return h;
-}
-
-/**
- * Create a new hash table for use with four byte keys.
- *
- * @param params
- * Parameters used in creation of hash table.
- *
- * @return
- * Pointer to hash table structure that is used in future hash table
- * operations, or NULL on error.
- */
-struct rte_fbk_hash_table *
-rte_fbk_hash_create(const struct rte_fbk_hash_params *params)
-{
- struct rte_fbk_hash_table *ht = NULL;
- struct rte_tailq_entry *te;
- char hash_name[RTE_FBK_HASH_NAMESIZE];
- const uint32_t mem_size =
- sizeof(*ht) + (sizeof(ht->t[0]) * params->entries);
- uint32_t i;
- struct rte_fbk_hash_list *fbk_hash_list;
-
- /* check that we have an initialised tail queue */
- if ((fbk_hash_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_FBK_HASH,
- rte_fbk_hash_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
-
- /* Error checking of parameters. */
- if ((!rte_is_power_of_2(params->entries)) ||
- (!rte_is_power_of_2(params->entries_per_bucket)) ||
- (params->entries == 0) ||
- (params->entries_per_bucket == 0) ||
- (params->entries_per_bucket > params->entries) ||
- (params->entries > RTE_FBK_HASH_ENTRIES_MAX) ||
- (params->entries_per_bucket > RTE_FBK_HASH_ENTRIES_PER_BUCKET_MAX)){
- rte_errno = EINVAL;
- return NULL;
- }
-
- snprintf(hash_name, sizeof(hash_name), "FBK_%s", params->name);
-
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
-
-	/* guarantee there's no existing entry with the same name */
- TAILQ_FOREACH(te, fbk_hash_list, next) {
- ht = (struct rte_fbk_hash_table *) te->data;
- if (strncmp(params->name, ht->name, RTE_FBK_HASH_NAMESIZE) == 0)
- break;
- }
- if (te != NULL)
- goto exit;
-
- te = rte_zmalloc("FBK_HASH_TAILQ_ENTRY", sizeof(*te), 0);
- if (te == NULL) {
- RTE_LOG(ERR, HASH, "Failed to allocate tailq entry\n");
- goto exit;
- }
-
- /* Allocate memory for table. */
- ht = (struct rte_fbk_hash_table *)rte_zmalloc_socket(hash_name, mem_size,
- 0, params->socket_id);
- if (ht == NULL) {
- RTE_LOG(ERR, HASH, "Failed to allocate fbk hash table\n");
- rte_free(te);
- goto exit;
- }
-
- /* Set up hash table context. */
- snprintf(ht->name, sizeof(ht->name), "%s", params->name);
- ht->entries = params->entries;
- ht->entries_per_bucket = params->entries_per_bucket;
- ht->used_entries = 0;
- ht->bucket_mask = (params->entries / params->entries_per_bucket) - 1;
- for (ht->bucket_shift = 0, i = 1;
- (params->entries_per_bucket & i) == 0;
- ht->bucket_shift++, i <<= 1)
- ; /* empty loop body */
-
- if (params->hash_func != NULL) {
- ht->hash_func = params->hash_func;
- ht->init_val = params->init_val;
-	} else {
- ht->hash_func = RTE_FBK_HASH_FUNC_DEFAULT;
- ht->init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT;
- }
-
- te->data = (void *) ht;
-
- TAILQ_INSERT_TAIL(fbk_hash_list, te, next);
-
-exit:
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
-
- return ht;
-}
-
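A creation sketch using only the rte_fbk_hash_params fields this function reads; leaving hash_func NULL selects the RTE_FBK_HASH_FUNC_DEFAULT/RTE_FBK_HASH_INIT_VAL_DEFAULT pair, as the branch above shows. The table name and sizes are illustrative.

#include <rte_fbk_hash.h>

static struct rte_fbk_hash_table *example_make_table(void)
{
	struct rte_fbk_hash_params params = {
		.name = "flow_table",		/* stored as "FBK_flow_table" */
		.entries = 1024,		/* power of two */
		.entries_per_bucket = 4,	/* power of two, <= entries */
		.socket_id = 0,
	};

	return rte_fbk_hash_create(&params);	/* NULL: consult rte_errno */
}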
-/**
- * Free all memory used by a hash table.
- *
- * @param ht
- * Hash table to deallocate.
- */
-void
-rte_fbk_hash_free(struct rte_fbk_hash_table *ht)
-{
- struct rte_tailq_entry *te;
- struct rte_fbk_hash_list *fbk_hash_list;
-
- if (ht == NULL)
- return;
-
- /* check that we have an initialised tail queue */
- if ((fbk_hash_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_FBK_HASH,
- rte_fbk_hash_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return;
- }
-
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
-
- /* find out tailq entry */
- TAILQ_FOREACH(te, fbk_hash_list, next) {
- if (te->data == (void *) ht)
- break;
- }
-
- if (te == NULL) {
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
- return;
- }
-
- TAILQ_REMOVE(fbk_hash_list, te, next);
-
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
-
- rte_free(ht);
- rte_free(te);
-}
-
diff --git a/src/dpdk_lib18/librte_hash/rte_hash.c b/src/dpdk_lib18/librte_hash/rte_hash.c
deleted file mode 100755
index ba827d25..00000000
--- a/src/dpdk_lib18/librte_hash/rte_hash.c
+++ /dev/null
@@ -1,483 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <stdint.h>
-#include <errno.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <sys/queue.h>
-
-#include <rte_common.h>
-#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
-#include <rte_log.h>
-#include <rte_memcpy.h>
-#include <rte_prefetch.h>
-#include <rte_branch_prediction.h>
-#include <rte_memzone.h>
-#include <rte_malloc.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_eal_memconfig.h>
-#include <rte_per_lcore.h>
-#include <rte_errno.h>
-#include <rte_string_fns.h>
-#include <rte_cpuflags.h>
-#include <rte_log.h>
-#include <rte_rwlock.h>
-#include <rte_spinlock.h>
-
-#include "rte_hash.h"
-
-
-TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
-
-/* Macro to enable/disable run-time checking of function parameters */
-#if defined(RTE_LIBRTE_HASH_DEBUG)
-#define RETURN_IF_TRUE(cond, retval) do { \
- if (cond) return (retval); \
-} while (0)
-#else
-#define RETURN_IF_TRUE(cond, retval)
-#endif
-
-/* Hash function used if none is specified */
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
-#include <rte_hash_crc.h>
-#define DEFAULT_HASH_FUNC rte_hash_crc
-#else
-#include <rte_jhash.h>
-#define DEFAULT_HASH_FUNC rte_jhash
-#endif
-
-/* Signature bucket size is a multiple of this value */
-#define SIG_BUCKET_ALIGNMENT 16
-
-/* Stored key size is a multiple of this value */
-#define KEY_ALIGNMENT 16
-
-/* The high bit is always set in real signatures */
-#define NULL_SIGNATURE 0
-
-/* Returns a pointer to the first signature in specified bucket. */
-static inline hash_sig_t *
-get_sig_tbl_bucket(const struct rte_hash *h, uint32_t bucket_index)
-{
- return (hash_sig_t *)
- &(h->sig_tbl[bucket_index * h->sig_tbl_bucket_size]);
-}
-
-/* Returns a pointer to the first key in specified bucket. */
-static inline uint8_t *
-get_key_tbl_bucket(const struct rte_hash *h, uint32_t bucket_index)
-{
- return (uint8_t *) &(h->key_tbl[bucket_index * h->bucket_entries *
- h->key_tbl_key_size]);
-}
-
-/* Returns a pointer to a key at a specific position in a specified bucket. */
-static inline void *
-get_key_from_bucket(const struct rte_hash *h, uint8_t *bkt, uint32_t pos)
-{
- return (void *) &bkt[pos * h->key_tbl_key_size];
-}
-
-/* Does integer division with rounding-up of result. */
-static inline uint32_t
-div_roundup(uint32_t numerator, uint32_t denominator)
-{
- return (numerator + denominator - 1) / denominator;
-}
-
-/* Increases a size (if needed) to a multiple of alignment. */
-static inline uint32_t
-align_size(uint32_t val, uint32_t alignment)
-{
- return alignment * div_roundup(val, alignment);
-}
-
-/* Returns the index into the bucket of the first occurrence of a signature. */
-static inline int
-find_first(uint32_t sig, const uint32_t *sig_bucket, uint32_t num_sigs)
-{
- uint32_t i;
- for (i = 0; i < num_sigs; i++) {
- if (sig == sig_bucket[i])
- return i;
- }
- return -1;
-}
-
-struct rte_hash *
-rte_hash_find_existing(const char *name)
-{
- struct rte_hash *h = NULL;
- struct rte_tailq_entry *te;
- struct rte_hash_list *hash_list;
-
- /* check that we have an initialised tail queue */
- if ((hash_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_HASH, rte_hash_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
-
- rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
- TAILQ_FOREACH(te, hash_list, next) {
- h = (struct rte_hash *) te->data;
- if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
- break;
- }
- rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
-
- if (te == NULL) {
- rte_errno = ENOENT;
- return NULL;
- }
- return h;
-}
-
-struct rte_hash *
-rte_hash_create(const struct rte_hash_parameters *params)
-{
- struct rte_hash *h = NULL;
- struct rte_tailq_entry *te;
- uint32_t num_buckets, sig_bucket_size, key_size,
- hash_tbl_size, sig_tbl_size, key_tbl_size, mem_size;
- char hash_name[RTE_HASH_NAMESIZE];
- struct rte_hash_list *hash_list;
-
- /* check that we have an initialised tail queue */
- if ((hash_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_HASH, rte_hash_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
-
- /* Check for valid parameters */
- if ((params == NULL) ||
- (params->entries > RTE_HASH_ENTRIES_MAX) ||
- (params->bucket_entries > RTE_HASH_BUCKET_ENTRIES_MAX) ||
- (params->entries < params->bucket_entries) ||
- !rte_is_power_of_2(params->entries) ||
- !rte_is_power_of_2(params->bucket_entries) ||
- (params->key_len == 0) ||
- (params->key_len > RTE_HASH_KEY_LENGTH_MAX)) {
- rte_errno = EINVAL;
- RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
- return NULL;
- }
-
- snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
-
- /* Calculate hash dimensions */
- num_buckets = params->entries / params->bucket_entries;
- sig_bucket_size = align_size(params->bucket_entries *
- sizeof(hash_sig_t), SIG_BUCKET_ALIGNMENT);
- key_size = align_size(params->key_len, KEY_ALIGNMENT);
-
- hash_tbl_size = align_size(sizeof(struct rte_hash), RTE_CACHE_LINE_SIZE);
- sig_tbl_size = align_size(num_buckets * sig_bucket_size,
- RTE_CACHE_LINE_SIZE);
- key_tbl_size = align_size(num_buckets * key_size *
- params->bucket_entries, RTE_CACHE_LINE_SIZE);
-
- /* Total memory required for hash context */
- mem_size = hash_tbl_size + sig_tbl_size + key_tbl_size;
-
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
-
-	/* guarantee there is no existing entry with the same name */
- TAILQ_FOREACH(te, hash_list, next) {
- h = (struct rte_hash *) te->data;
- if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
- break;
- }
- if (te != NULL)
- goto exit;
-
- te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
- if (te == NULL) {
- RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
- goto exit;
- }
-
- h = (struct rte_hash *)rte_zmalloc_socket(hash_name, mem_size,
- RTE_CACHE_LINE_SIZE, params->socket_id);
- if (h == NULL) {
- RTE_LOG(ERR, HASH, "memory allocation failed\n");
- rte_free(te);
- goto exit;
- }
-
- /* Setup hash context */
- snprintf(h->name, sizeof(h->name), "%s", params->name);
- h->entries = params->entries;
- h->bucket_entries = params->bucket_entries;
- h->key_len = params->key_len;
- h->hash_func_init_val = params->hash_func_init_val;
- h->num_buckets = num_buckets;
- h->bucket_bitmask = h->num_buckets - 1;
-	h->sig_msb = (hash_sig_t)1 << (sizeof(hash_sig_t) * 8 - 1);
- h->sig_tbl = (uint8_t *)h + hash_tbl_size;
- h->sig_tbl_bucket_size = sig_bucket_size;
- h->key_tbl = h->sig_tbl + sig_tbl_size;
- h->key_tbl_key_size = key_size;
- h->hash_func = (params->hash_func == NULL) ?
- DEFAULT_HASH_FUNC : params->hash_func;
-
- te->data = (void *) h;
-
- TAILQ_INSERT_TAIL(hash_list, te, next);
-
-exit:
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
-
- return h;
-}
-
-void
-rte_hash_free(struct rte_hash *h)
-{
- struct rte_tailq_entry *te;
- struct rte_hash_list *hash_list;
-
- if (h == NULL)
- return;
-
- /* check that we have an initialised tail queue */
- if ((hash_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_HASH, rte_hash_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return;
- }
-
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
-
-	/* find the tailq entry */
- TAILQ_FOREACH(te, hash_list, next) {
- if (te->data == (void *) h)
- break;
- }
-
- if (te == NULL) {
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
- return;
- }
-
- TAILQ_REMOVE(hash_list, te, next);
-
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
-
- rte_free(h);
- rte_free(te);
-}
-
-static inline int32_t
-__rte_hash_add_key_with_hash(const struct rte_hash *h,
- const void *key, hash_sig_t sig)
-{
- hash_sig_t *sig_bucket;
- uint8_t *key_bucket;
- uint32_t bucket_index, i;
- int32_t pos;
-
- /* Get the hash signature and bucket index */
- sig |= h->sig_msb;
- bucket_index = sig & h->bucket_bitmask;
- sig_bucket = get_sig_tbl_bucket(h, bucket_index);
- key_bucket = get_key_tbl_bucket(h, bucket_index);
-
- /* Check if key is already present in the hash */
- for (i = 0; i < h->bucket_entries; i++) {
- if ((sig == sig_bucket[i]) &&
- likely(memcmp(key, get_key_from_bucket(h, key_bucket, i),
- h->key_len) == 0)) {
- return bucket_index * h->bucket_entries + i;
- }
- }
-
-	/* Check for a free slot within the bucket to add the new key */
- pos = find_first(NULL_SIGNATURE, sig_bucket, h->bucket_entries);
-
- if (unlikely(pos < 0))
- return -ENOSPC;
-
- /* Add the new key to the bucket */
- sig_bucket[pos] = sig;
- rte_memcpy(get_key_from_bucket(h, key_bucket, pos), key, h->key_len);
- return bucket_index * h->bucket_entries + pos;
-}
-
-int32_t
-rte_hash_add_key_with_hash(const struct rte_hash *h,
- const void *key, hash_sig_t sig)
-{
- RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
- return __rte_hash_add_key_with_hash(h, key, sig);
-}
-
-int32_t
-rte_hash_add_key(const struct rte_hash *h, const void *key)
-{
- RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
- return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key));
-}
-
-static inline int32_t
-__rte_hash_del_key_with_hash(const struct rte_hash *h,
- const void *key, hash_sig_t sig)
-{
- hash_sig_t *sig_bucket;
- uint8_t *key_bucket;
- uint32_t bucket_index, i;
-
- /* Get the hash signature and bucket index */
- sig = sig | h->sig_msb;
- bucket_index = sig & h->bucket_bitmask;
- sig_bucket = get_sig_tbl_bucket(h, bucket_index);
- key_bucket = get_key_tbl_bucket(h, bucket_index);
-
-	/* Check if the key is present in the hash */
- for (i = 0; i < h->bucket_entries; i++) {
- if ((sig == sig_bucket[i]) &&
- likely(memcmp(key, get_key_from_bucket(h, key_bucket, i),
- h->key_len) == 0)) {
- sig_bucket[i] = NULL_SIGNATURE;
- return bucket_index * h->bucket_entries + i;
- }
- }
-
- return -ENOENT;
-}
-
-int32_t
-rte_hash_del_key_with_hash(const struct rte_hash *h,
- const void *key, hash_sig_t sig)
-{
- RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
- return __rte_hash_del_key_with_hash(h, key, sig);
-}
-
-int32_t
-rte_hash_del_key(const struct rte_hash *h, const void *key)
-{
- RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
- return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
-}
-
-static inline int32_t
-__rte_hash_lookup_with_hash(const struct rte_hash *h,
- const void *key, hash_sig_t sig)
-{
- hash_sig_t *sig_bucket;
- uint8_t *key_bucket;
- uint32_t bucket_index, i;
-
- /* Get the hash signature and bucket index */
- sig |= h->sig_msb;
- bucket_index = sig & h->bucket_bitmask;
- sig_bucket = get_sig_tbl_bucket(h, bucket_index);
- key_bucket = get_key_tbl_bucket(h, bucket_index);
-
-	/* Check if the key is present in the hash */
- for (i = 0; i < h->bucket_entries; i++) {
- if ((sig == sig_bucket[i]) &&
- likely(memcmp(key, get_key_from_bucket(h, key_bucket, i),
- h->key_len) == 0)) {
- return bucket_index * h->bucket_entries + i;
- }
- }
-
- return -ENOENT;
-}
-
-int32_t
-rte_hash_lookup_with_hash(const struct rte_hash *h,
- const void *key, hash_sig_t sig)
-{
- RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
- return __rte_hash_lookup_with_hash(h, key, sig);
-}
-
-int32_t
-rte_hash_lookup(const struct rte_hash *h, const void *key)
-{
- RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
- return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key));
-}
-
-int
-rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
- uint32_t num_keys, int32_t *positions)
-{
- uint32_t i, j, bucket_index;
- hash_sig_t sigs[RTE_HASH_LOOKUP_BULK_MAX];
-
- RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
- (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
- (positions == NULL)), -EINVAL);
-
- /* Get the hash signature and bucket index */
- for (i = 0; i < num_keys; i++) {
- sigs[i] = h->hash_func(keys[i], h->key_len,
- h->hash_func_init_val) | h->sig_msb;
- bucket_index = sigs[i] & h->bucket_bitmask;
-
- /* Pre-fetch relevant buckets */
- rte_prefetch1((void *) get_sig_tbl_bucket(h, bucket_index));
- rte_prefetch1((void *) get_key_tbl_bucket(h, bucket_index));
- }
-
-	/* Check if each key is present in the hash */
- for (i = 0; i < num_keys; i++) {
- bucket_index = sigs[i] & h->bucket_bitmask;
- hash_sig_t *sig_bucket = get_sig_tbl_bucket(h, bucket_index);
- uint8_t *key_bucket = get_key_tbl_bucket(h, bucket_index);
-
- positions[i] = -ENOENT;
-
- for (j = 0; j < h->bucket_entries; j++) {
- if ((sigs[i] == sig_bucket[j]) &&
- likely(memcmp(keys[i],
- get_key_from_bucket(h, key_bucket, j),
- h->key_len) == 0)) {
- positions[i] = bucket_index *
- h->bucket_entries + j;
- break;
- }
- }
- }
-
- return 0;
-}
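
For context, a minimal usage sketch of the hash API implemented above, assuming an initialized EAL. `struct flow_key` and all parameter values are illustrative, not part of DPDK; the power-of-two constraints mirror the checks in rte_hash_create().

#include <stdio.h>
#include <stdint.h>
#include <rte_hash.h>

/* Hypothetical lookup key; any fixed-size, memcmp-comparable struct works. */
struct flow_key {
	uint32_t src_ip;
	uint32_t dst_ip;
	uint16_t src_port;
	uint16_t dst_port;
	uint8_t  proto;
} __attribute__((packed));

static void
hash_usage_sketch(void)
{
	struct rte_hash_parameters params = {
		.name = "flow_table",
		.entries = 1024,		/* must be a power of two */
		.bucket_entries = 4,		/* power of two, <= entries */
		.key_len = sizeof(struct flow_key),
		.hash_func = NULL,		/* NULL selects DEFAULT_HASH_FUNC */
		.hash_func_init_val = 0,
		.socket_id = 0,
	};
	struct rte_hash *h = rte_hash_create(&params);
	if (h == NULL)
		return;

	struct flow_key key = {
		.src_ip = 0x0a000001, .dst_ip = 0x0a000002,
		.src_port = 1234, .dst_port = 80, .proto = 6,
	};

	/* add returns a stable position, or -ENOSPC if the bucket is full */
	int32_t pos = rte_hash_add_key(h, &key);
	if (pos >= 0 && rte_hash_lookup(h, &key) == pos)
		printf("key stored at position %d\n", pos);

	rte_hash_del_key(h, &key);	/* returns the position, or -ENOENT */
	rte_hash_free(h);
}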
diff --git a/src/dpdk_lib18/librte_hash/rte_jhash.h b/src/dpdk_lib18/librte_hash/rte_jhash.h
deleted file mode 100755
index a4bf5a1b..00000000
--- a/src/dpdk_lib18/librte_hash/rte_jhash.h
+++ /dev/null
@@ -1,253 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_JHASH_H
-#define _RTE_JHASH_H
-
-/**
- * @file
- *
- * jhash functions.
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdint.h>
-
-/* jhash.h: Jenkins hash support.
- *
- * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
- *
- * http://burtleburtle.net/bob/hash/
- *
- * These are the credits from Bob's sources:
- *
- * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
- * hash(), hash2(), hash3, and mix() are externally useful functions.
- * Routines to test the hash are included if SELF_TEST is defined.
- * You can use this free for any purpose. It has no warranty.
- *
- * $FreeBSD$
- */
-
-/** @internal Internal function. NOTE: Arguments are modified. */
-#define __rte_jhash_mix(a, b, c) do { \
- a -= b; a -= c; a ^= (c>>13); \
- b -= c; b -= a; b ^= (a<<8); \
- c -= a; c -= b; c ^= (b>>13); \
- a -= b; a -= c; a ^= (c>>12); \
- b -= c; b -= a; b ^= (a<<16); \
- c -= a; c -= b; c ^= (b>>5); \
- a -= b; a -= c; a ^= (c>>3); \
- b -= c; b -= a; b ^= (a<<10); \
- c -= a; c -= b; c ^= (b>>15); \
-} while (0)
-
-/** The golden ratio: an arbitrary value. */
-#define RTE_JHASH_GOLDEN_RATIO 0x9e3779b9
-
-/**
- * The most generic version, hashes an arbitrary sequence
- * of bytes. No alignment or length assumptions are made about
- * the input key.
- *
- * @param key
- * Key to calculate hash of.
- * @param length
- * Length of key in bytes.
- * @param initval
- * Initialising value of hash.
- * @return
- * Calculated hash value.
- */
-static inline uint32_t
-rte_jhash(const void *key, uint32_t length, uint32_t initval)
-{
- uint32_t a, b, c, len;
- const uint8_t *k = (const uint8_t *)key;
- const uint32_t *k32 = (const uint32_t *)key;
-
- len = length;
- a = b = RTE_JHASH_GOLDEN_RATIO;
- c = initval;
-
- while (len >= 12) {
- a += k32[0];
- b += k32[1];
- c += k32[2];
-
-		__rte_jhash_mix(a, b, c);
-
- k += (3 * sizeof(uint32_t)), k32 += 3;
- len -= (3 * sizeof(uint32_t));
- }
-
- c += length;
- switch (len) {
- case 11: c += ((uint32_t)k[10] << 24);
- case 10: c += ((uint32_t)k[9] << 16);
- case 9 : c += ((uint32_t)k[8] << 8);
- case 8 : b += ((uint32_t)k[7] << 24);
- case 7 : b += ((uint32_t)k[6] << 16);
- case 6 : b += ((uint32_t)k[5] << 8);
- case 5 : b += k[4];
- case 4 : a += ((uint32_t)k[3] << 24);
- case 3 : a += ((uint32_t)k[2] << 16);
- case 2 : a += ((uint32_t)k[1] << 8);
- case 1 : a += k[0];
- default: break;
- };
-
-	__rte_jhash_mix(a, b, c);
-
- return c;
-}
-
-/**
- * A special optimized version that handles one or more uint32_ts.
- * The length parameter here is the number of uint32_ts in the key.
- *
- * @param k
- * Key to calculate hash of.
- * @param length
- * Length of key in units of 4 bytes.
- * @param initval
- * Initialising value of hash.
- * @return
- * Calculated hash value.
- */
-static inline uint32_t
-rte_jhash2(const uint32_t *k, uint32_t length, uint32_t initval)
-{
- uint32_t a, b, c, len;
-
- a = b = RTE_JHASH_GOLDEN_RATIO;
- c = initval;
- len = length;
-
- while (len >= 3) {
- a += k[0];
- b += k[1];
- c += k[2];
- __rte_jhash_mix(a, b, c);
- k += 3; len -= 3;
- }
-
- c += length * 4;
-
- switch (len) {
- case 2 : b += k[1];
- case 1 : a += k[0];
- default: break;
- };
-
-	__rte_jhash_mix(a, b, c);
-
- return c;
-}
-
-
-/**
- * A special ultra-optimized version that knows it is hashing exactly
- * 3 words.
- *
- * @param a
- *   First word to calculate hash of.
- * @param b
- *   Second word to calculate hash of.
- * @param c
- *   Third word to calculate hash of.
- * @param initval
- * Initialising value of hash.
- * @return
- * Calculated hash value.
- */
-static inline uint32_t
-rte_jhash_3words(uint32_t a, uint32_t b, uint32_t c, uint32_t initval)
-{
- a += RTE_JHASH_GOLDEN_RATIO;
- b += RTE_JHASH_GOLDEN_RATIO;
- c += initval;
-
- __rte_jhash_mix(a, b, c);
-
- /*
- * NOTE: In particular the "c += length; __rte_jhash_mix(a,b,c);"
- * normally done at the end is not done here.
- */
- return c;
-}
-
-/**
- * A special ultra-optimized version that knows it is hashing exactly
- * 2 words.
- *
- * @param a
- *   First word to calculate hash of.
- * @param b
- *   Second word to calculate hash of.
- * @param initval
- * Initialising value of hash.
- * @return
- * Calculated hash value.
- */
-static inline uint32_t
-rte_jhash_2words(uint32_t a, uint32_t b, uint32_t initval)
-{
- return rte_jhash_3words(a, b, 0, initval);
-}
-
-/**
- * A special ultra-optimized version that knows it is hashing exactly
- * 1 word.
- *
- * @param a
- *   Word to calculate hash of.
- * @param initval
- * Initialising value of hash.
- * @return
- * Calculated hash value.
- */
-static inline uint32_t
-rte_jhash_1word(uint32_t a, uint32_t initval)
-{
- return rte_jhash_3words(a, 0, 0, initval);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_JHASH_H */
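
A short sketch of how the three entry points above differ; the header is self-contained, so this runs without EAL initialization. The sample values are arbitrary.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <rte_jhash.h>

int
main(void)
{
	const char *msg = "example key";
	uint32_t words[3] = { 1, 2, 3 };

	/* generic version: arbitrary byte string, no alignment assumed */
	uint32_t h1 = rte_jhash(msg, (uint32_t)strlen(msg), 0);

	/* word-oriented version: length is in uint32_t units, not bytes */
	uint32_t h2 = rte_jhash2(words, 3, 0);

	/* shortcut for hashing a single word */
	uint32_t h3 = rte_jhash_1word(words[0], 0);

	printf("%08x %08x %08x\n", h1, h2, h3);
	return 0;
}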
diff --git a/src/dpdk_lib18/librte_ip_frag/Makefile b/src/dpdk_lib18/librte_ip_frag/Makefile
deleted file mode 100755
index 8c00d39c..00000000
--- a/src/dpdk_lib18/librte_ip_frag/Makefile
+++ /dev/null
@@ -1,59 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_ip_frag.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
-
-#source files
-ifeq ($(CONFIG_RTE_MBUF_REFCNT),y)
-SRCS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += rte_ipv4_fragmentation.c
-SRCS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += rte_ipv6_fragmentation.c
-else
-$(info WARNING: Fragmentation feature is disabled because it needs MBUF_REFCNT.)
-endif
-SRCS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += rte_ipv4_reassembly.c
-SRCS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += rte_ipv6_reassembly.c
-SRCS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += rte_ip_frag_common.c
-SRCS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += ip_frag_internal.c
-
-# install this header file
-SYMLINK-$(CONFIG_RTE_LIBRTE_IP_FRAG)-include += rte_ip_frag.h
-
-
-# this library depends on rte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += lib/librte_mempool lib/librte_ether
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_ip_frag/ip_frag_common.h b/src/dpdk_lib18/librte_ip_frag/ip_frag_common.h
deleted file mode 100755
index 210f409d..00000000
--- a/src/dpdk_lib18/librte_ip_frag/ip_frag_common.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _IP_FRAG_COMMON_H_
-#define _IP_FRAG_COMMON_H_
-
-#include "rte_ip_frag.h"
-
-/* logging macros. */
-#ifdef RTE_LIBRTE_IP_FRAG_DEBUG
-
-#define IP_FRAG_LOG(lvl, fmt, args...) RTE_LOG(lvl, USER1, fmt, ##args)
-
-#define IP_FRAG_ASSERT(exp) \
-if (!(exp)) { \
-	rte_panic("function %s, line %d\tassert \"" #exp "\" failed\n", \
- __func__, __LINE__); \
-}
-#else
-#define IP_FRAG_LOG(lvl, fmt, args...) do {} while(0)
-#define IP_FRAG_ASSERT(exp) do {} while (0)
-#endif /* IP_FRAG_DEBUG */
-
-#define IPV4_KEYLEN 1
-#define IPV6_KEYLEN 4
-
-/* helper macros */
-#define IP_FRAG_MBUF2DR(dr, mb) ((dr)->row[(dr)->cnt++] = (mb))
-
-#define IPv6_KEY_BYTES(key) \
- (key)[0], (key)[1], (key)[2], (key)[3]
-#define IPv6_KEY_BYTES_FMT \
- "%08" PRIx64 "%08" PRIx64 "%08" PRIx64 "%08" PRIx64
-
-/* internal function declarations */
-struct rte_mbuf * ip_frag_process(struct ip_frag_pkt *fp,
- struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb,
- uint16_t ofs, uint16_t len, uint16_t more_frags);
-
-struct ip_frag_pkt * ip_frag_find(struct rte_ip_frag_tbl *tbl,
- struct rte_ip_frag_death_row *dr,
- const struct ip_frag_key *key, uint64_t tms);
-
-struct ip_frag_pkt * ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
- const struct ip_frag_key *key, uint64_t tms,
- struct ip_frag_pkt **free, struct ip_frag_pkt **stale);
-
-/* these functions need to be declared here as ip_frag_process relies on them */
-struct rte_mbuf * ipv4_frag_reassemble(const struct ip_frag_pkt *fp);
-struct rte_mbuf * ipv6_frag_reassemble(const struct ip_frag_pkt *fp);
-
-
-
-/*
- * misc frag key functions
- */
-
-/* check if key is empty */
-static inline int
-ip_frag_key_is_empty(const struct ip_frag_key * key)
-{
- uint32_t i;
- for (i = 0; i < key->key_len; i++)
- if (key->src_dst[i] != 0)
- return 0;
- return 1;
-}
-
-/* empty the key */
-static inline void
-ip_frag_key_invalidate(struct ip_frag_key * key)
-{
- uint32_t i;
- for (i = 0; i < key->key_len; i++)
- key->src_dst[i] = 0;
-}
-
-/* compare two keys */
-static inline int
-ip_frag_key_cmp(const struct ip_frag_key * k1, const struct ip_frag_key * k2)
-{
-	uint32_t i;
-	uint64_t val;	/* 64-bit accumulator: src_dst words are 64 bits wide */
-
-	val = k1->id ^ k2->id;
-	for (i = 0; i < k1->key_len; i++)
-		val |= k1->src_dst[i] ^ k2->src_dst[i];
-	return (val != 0);
-}
-
-/*
- * misc fragment functions
- */
-
-/* put fragment on death row */
-static inline void
-ip_frag_free(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr)
-{
- uint32_t i, k;
-
- k = dr->cnt;
- for (i = 0; i != fp->last_idx; i++) {
- if (fp->frags[i].mb != NULL) {
- dr->row[k++] = fp->frags[i].mb;
- fp->frags[i].mb = NULL;
- }
- }
-
- fp->last_idx = 0;
- dr->cnt = k;
-}
-
-/* if key is empty, mark key as in use */
-static inline void
-ip_frag_inuse(struct rte_ip_frag_tbl *tbl, const struct ip_frag_pkt *fp)
-{
- if (ip_frag_key_is_empty(&fp->key)) {
- TAILQ_REMOVE(&tbl->lru, fp, lru);
- tbl->use_entries--;
- }
-}
-
-/* reset the fragment */
-static inline void
-ip_frag_reset(struct ip_frag_pkt *fp, uint64_t tms)
-{
- static const struct ip_frag zero_frag = {
- .ofs = 0,
- .len = 0,
- .mb = NULL,
- };
-
- fp->start = tms;
- fp->total_size = UINT32_MAX;
- fp->frag_size = 0;
- fp->last_idx = IP_MIN_FRAG_NUM;
- fp->frags[IP_LAST_FRAG_IDX] = zero_frag;
- fp->frags[IP_FIRST_FRAG_IDX] = zero_frag;
-}
-
-/* chain two mbufs */
-static inline void
-ip_frag_chain(struct rte_mbuf *mn, struct rte_mbuf *mp)
-{
- struct rte_mbuf *ms;
-
- /* adjust start of the last fragment data. */
- rte_pktmbuf_adj(mp, (uint16_t)(mp->l2_len + mp->l3_len));
-
- /* chain two fragments. */
- ms = rte_pktmbuf_lastseg(mn);
- ms->next = mp;
-
- /* accumulate number of segments and total length. */
- mn->nb_segs = (uint8_t)(mn->nb_segs + mp->nb_segs);
- mn->pkt_len += mp->pkt_len;
-
- /* reset pkt_len and nb_segs for chained fragment. */
- mp->pkt_len = mp->data_len;
- mp->nb_segs = 1;
-}
-
-
-#endif /* _IP_FRAG_COMMON_H_ */
diff --git a/src/dpdk_lib18/librte_ip_frag/ip_frag_internal.c b/src/dpdk_lib18/librte_ip_frag/ip_frag_internal.c
deleted file mode 100755
index a2c645bf..00000000
--- a/src/dpdk_lib18/librte_ip_frag/ip_frag_internal.c
+++ /dev/null
@@ -1,418 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stddef.h>
-
-#include <rte_jhash.h>
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
-#include <rte_hash_crc.h>
-#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
-
-#include "ip_frag_common.h"
-
-#define PRIME_VALUE 0xeaad8405
-
-#define IP_FRAG_TBL_POS(tbl, sig) \
- ((tbl)->pkt + ((sig) & (tbl)->entry_mask))
-
-#ifdef RTE_LIBRTE_IP_FRAG_TBL_STAT
-#define IP_FRAG_TBL_STAT_UPDATE(s, f, v) ((s)->f += (v))
-#else
-#define IP_FRAG_TBL_STAT_UPDATE(s, f, v) do {} while (0)
-#endif /* IP_FRAG_TBL_STAT */
-
-/* local frag table helper functions */
-static inline void
-ip_frag_tbl_del(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
- struct ip_frag_pkt *fp)
-{
- ip_frag_free(fp, dr);
- ip_frag_key_invalidate(&fp->key);
- TAILQ_REMOVE(&tbl->lru, fp, lru);
- tbl->use_entries--;
- IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, del_num, 1);
-}
-
-static inline void
-ip_frag_tbl_add(struct rte_ip_frag_tbl *tbl, struct ip_frag_pkt *fp,
- const struct ip_frag_key *key, uint64_t tms)
-{
- fp->key = key[0];
- ip_frag_reset(fp, tms);
- TAILQ_INSERT_TAIL(&tbl->lru, fp, lru);
- tbl->use_entries++;
- IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, add_num, 1);
-}
-
-static inline void
-ip_frag_tbl_reuse(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
- struct ip_frag_pkt *fp, uint64_t tms)
-{
- ip_frag_free(fp, dr);
- ip_frag_reset(fp, tms);
- TAILQ_REMOVE(&tbl->lru, fp, lru);
- TAILQ_INSERT_TAIL(&tbl->lru, fp, lru);
- IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, reuse_num, 1);
-}
-
-
-static inline void
-ipv4_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
-{
- uint32_t v;
- const uint32_t *p;
-
- p = (const uint32_t *)&key->src_dst;
-
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
- v = rte_hash_crc_4byte(p[0], PRIME_VALUE);
- v = rte_hash_crc_4byte(p[1], v);
- v = rte_hash_crc_4byte(key->id, v);
-#else
-
- v = rte_jhash_3words(p[0], p[1], key->id, PRIME_VALUE);
-#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
-
- *v1 = v;
- *v2 = (v << 7) + (v >> 14);
-}
-
-static inline void
-ipv6_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
-{
- uint32_t v;
- const uint32_t *p;
-
- p = (const uint32_t *) &key->src_dst;
-
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
- v = rte_hash_crc_4byte(p[0], PRIME_VALUE);
- v = rte_hash_crc_4byte(p[1], v);
- v = rte_hash_crc_4byte(p[2], v);
- v = rte_hash_crc_4byte(p[3], v);
- v = rte_hash_crc_4byte(p[4], v);
- v = rte_hash_crc_4byte(p[5], v);
- v = rte_hash_crc_4byte(p[6], v);
- v = rte_hash_crc_4byte(p[7], v);
- v = rte_hash_crc_4byte(key->id, v);
-#else
-
- v = rte_jhash_3words(p[0], p[1], p[2], PRIME_VALUE);
- v = rte_jhash_3words(p[3], p[4], p[5], v);
- v = rte_jhash_3words(p[6], p[7], key->id, v);
-#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
-
- *v1 = v;
- *v2 = (v << 7) + (v >> 14);
-}
-
-struct rte_mbuf *
-ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
- struct rte_mbuf *mb, uint16_t ofs, uint16_t len, uint16_t more_frags)
-{
- uint32_t idx;
-
- fp->frag_size += len;
-
- /* this is the first fragment. */
- if (ofs == 0) {
- idx = (fp->frags[IP_FIRST_FRAG_IDX].mb == NULL) ?
- IP_FIRST_FRAG_IDX : UINT32_MAX;
-
- /* this is the last fragment. */
- } else if (more_frags == 0) {
- fp->total_size = ofs + len;
- idx = (fp->frags[IP_LAST_FRAG_IDX].mb == NULL) ?
- IP_LAST_FRAG_IDX : UINT32_MAX;
-
-	/* this is an intermediate fragment. */
- } else if ((idx = fp->last_idx) <
- sizeof (fp->frags) / sizeof (fp->frags[0])) {
- fp->last_idx++;
- }
-
- /*
-	 * erroneous packet: either exceeded the max allowed number of fragments,
-	 * or a duplicate first/last fragment was encountered.
- */
- if (idx >= sizeof (fp->frags) / sizeof (fp->frags[0])) {
-
- /* report an error. */
- if (fp->key.key_len == IPV4_KEYLEN)
- IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
- "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, "
- "total_size: %u, frag_size: %u, last_idx: %u\n"
- "first fragment: ofs: %u, len: %u\n"
- "last fragment: ofs: %u, len: %u\n\n",
- __func__, __LINE__,
- fp, fp->key.src_dst[0], fp->key.id,
- fp->total_size, fp->frag_size, fp->last_idx,
- fp->frags[IP_FIRST_FRAG_IDX].ofs,
- fp->frags[IP_FIRST_FRAG_IDX].len,
- fp->frags[IP_LAST_FRAG_IDX].ofs,
- fp->frags[IP_LAST_FRAG_IDX].len);
- else
- IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
-			"ipv6_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, "
- "total_size: %u, frag_size: %u, last_idx: %u\n"
- "first fragment: ofs: %u, len: %u\n"
- "last fragment: ofs: %u, len: %u\n\n",
- __func__, __LINE__,
- fp, IPv6_KEY_BYTES(fp->key.src_dst), fp->key.id,
- fp->total_size, fp->frag_size, fp->last_idx,
- fp->frags[IP_FIRST_FRAG_IDX].ofs,
- fp->frags[IP_FIRST_FRAG_IDX].len,
- fp->frags[IP_LAST_FRAG_IDX].ofs,
- fp->frags[IP_LAST_FRAG_IDX].len);
-
- /* free all fragments, invalidate the entry. */
- ip_frag_free(fp, dr);
- ip_frag_key_invalidate(&fp->key);
- IP_FRAG_MBUF2DR(dr, mb);
-
- return (NULL);
- }
-
- fp->frags[idx].ofs = ofs;
- fp->frags[idx].len = len;
- fp->frags[idx].mb = mb;
-
- mb = NULL;
-
- /* not all fragments are collected yet. */
- if (likely (fp->frag_size < fp->total_size)) {
- return (mb);
-
- /* if we collected all fragments, then try to reassemble. */
- } else if (fp->frag_size == fp->total_size &&
- fp->frags[IP_FIRST_FRAG_IDX].mb != NULL) {
- if (fp->key.key_len == IPV4_KEYLEN)
- mb = ipv4_frag_reassemble(fp);
- else
- mb = ipv6_frag_reassemble(fp);
- }
-
-	/* erroneous set of fragments. */
- if (mb == NULL) {
-
- /* report an error. */
- if (fp->key.key_len == IPV4_KEYLEN)
- IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
- "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, "
- "total_size: %u, frag_size: %u, last_idx: %u\n"
- "first fragment: ofs: %u, len: %u\n"
- "last fragment: ofs: %u, len: %u\n\n",
- __func__, __LINE__,
- fp, fp->key.src_dst[0], fp->key.id,
- fp->total_size, fp->frag_size, fp->last_idx,
- fp->frags[IP_FIRST_FRAG_IDX].ofs,
- fp->frags[IP_FIRST_FRAG_IDX].len,
- fp->frags[IP_LAST_FRAG_IDX].ofs,
- fp->frags[IP_LAST_FRAG_IDX].len);
- else
- IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
-			"ipv6_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, "
- "total_size: %u, frag_size: %u, last_idx: %u\n"
- "first fragment: ofs: %u, len: %u\n"
- "last fragment: ofs: %u, len: %u\n\n",
- __func__, __LINE__,
- fp, IPv6_KEY_BYTES(fp->key.src_dst), fp->key.id,
- fp->total_size, fp->frag_size, fp->last_idx,
- fp->frags[IP_FIRST_FRAG_IDX].ofs,
- fp->frags[IP_FIRST_FRAG_IDX].len,
- fp->frags[IP_LAST_FRAG_IDX].ofs,
- fp->frags[IP_LAST_FRAG_IDX].len);
-
- /* free associated resources. */
- ip_frag_free(fp, dr);
- }
-
- /* we are done with that entry, invalidate it. */
- ip_frag_key_invalidate(&fp->key);
- return (mb);
-}
-
-
-/*
- * Find an entry in the table for the corresponding fragment.
- * If such an entry is not present, then allocate a new one.
- * If the entry is stale, then free and reuse it.
- */
-struct ip_frag_pkt *
-ip_frag_find(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
- const struct ip_frag_key *key, uint64_t tms)
-{
- struct ip_frag_pkt *pkt, *free, *stale, *lru;
- uint64_t max_cycles;
-
- /*
-	 * Actually the two lines below are totally redundant.
-	 * They are here just to make gcc 4.6 happy.
- */
- free = NULL;
- stale = NULL;
- max_cycles = tbl->max_cycles;
-
- IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, find_num, 1);
-
- if ((pkt = ip_frag_lookup(tbl, key, tms, &free, &stale)) == NULL) {
-
-		/* timed-out entry, free and invalidate it */
- if (stale != NULL) {
- ip_frag_tbl_del(tbl, dr, stale);
- free = stale;
-
- /*
- * we found a free entry, check if we can use it.
- * If we run out of free entries in the table, then
- * check if we have a timed out entry to delete.
- */
- } else if (free != NULL &&
- tbl->max_entries <= tbl->use_entries) {
- lru = TAILQ_FIRST(&tbl->lru);
- if (max_cycles + lru->start < tms) {
- ip_frag_tbl_del(tbl, dr, lru);
- } else {
- free = NULL;
- IP_FRAG_TBL_STAT_UPDATE(&tbl->stat,
- fail_nospace, 1);
- }
- }
-
- /* found a free entry to reuse. */
- if (free != NULL) {
- ip_frag_tbl_add(tbl, free, key, tms);
- pkt = free;
- }
-
- /*
- * we found the flow, but it is already timed out,
- * so free associated resources, reposition it in the LRU list,
- * and reuse it.
- */
- } else if (max_cycles + pkt->start < tms) {
- ip_frag_tbl_reuse(tbl, dr, pkt, tms);
- }
-
- IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, fail_total, (pkt == NULL));
-
- tbl->last = pkt;
- return (pkt);
-}
-
-struct ip_frag_pkt *
-ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
- const struct ip_frag_key *key, uint64_t tms,
- struct ip_frag_pkt **free, struct ip_frag_pkt **stale)
-{
- struct ip_frag_pkt *p1, *p2;
- struct ip_frag_pkt *empty, *old;
- uint64_t max_cycles;
- uint32_t i, assoc, sig1, sig2;
-
- empty = NULL;
- old = NULL;
-
- max_cycles = tbl->max_cycles;
- assoc = tbl->bucket_entries;
-
- if (tbl->last != NULL && ip_frag_key_cmp(key, &tbl->last->key) == 0)
- return (tbl->last);
-
- /* different hashing methods for IPv4 and IPv6 */
- if (key->key_len == IPV4_KEYLEN)
- ipv4_frag_hash(key, &sig1, &sig2);
- else
- ipv6_frag_hash(key, &sig1, &sig2);
-
- p1 = IP_FRAG_TBL_POS(tbl, sig1);
- p2 = IP_FRAG_TBL_POS(tbl, sig2);
-
- for (i = 0; i != assoc; i++) {
- if (p1->key.key_len == IPV4_KEYLEN)
- IP_FRAG_LOG(DEBUG, "%s:%d:\n"
- "tbl: %p, max_entries: %u, use_entries: %u\n"
-			"ipv4_frag_pkt line0: %p, index: %u from %u\n"
- "key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
- __func__, __LINE__,
- tbl, tbl->max_entries, tbl->use_entries,
- p1, i, assoc,
- p1[i].key.src_dst[0], p1[i].key.id, p1[i].start);
- else
- IP_FRAG_LOG(DEBUG, "%s:%d:\n"
- "tbl: %p, max_entries: %u, use_entries: %u\n"
- "ipv6_frag_pkt line0: %p, index: %u from %u\n"
- "key: <" IPv6_KEY_BYTES_FMT ", %#x>, start: %" PRIu64 "\n",
- __func__, __LINE__,
- tbl, tbl->max_entries, tbl->use_entries,
- p1, i, assoc,
- IPv6_KEY_BYTES(p1[i].key.src_dst), p1[i].key.id, p1[i].start);
-
- if (ip_frag_key_cmp(key, &p1[i].key) == 0)
- return (p1 + i);
- else if (ip_frag_key_is_empty(&p1[i].key))
- empty = (empty == NULL) ? (p1 + i) : empty;
- else if (max_cycles + p1[i].start < tms)
- old = (old == NULL) ? (p1 + i) : old;
-
- if (p2->key.key_len == IPV4_KEYLEN)
- IP_FRAG_LOG(DEBUG, "%s:%d:\n"
- "tbl: %p, max_entries: %u, use_entries: %u\n"
-			"ipv4_frag_pkt line1: %p, index: %u from %u\n"
- "key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
- __func__, __LINE__,
- tbl, tbl->max_entries, tbl->use_entries,
- p2, i, assoc,
- p2[i].key.src_dst[0], p2[i].key.id, p2[i].start);
- else
- IP_FRAG_LOG(DEBUG, "%s:%d:\n"
- "tbl: %p, max_entries: %u, use_entries: %u\n"
- "ipv6_frag_pkt line1: %p, index: %u from %u\n"
- "key: <" IPv6_KEY_BYTES_FMT ", %#x>, start: %" PRIu64 "\n",
- __func__, __LINE__,
- tbl, tbl->max_entries, tbl->use_entries,
- p2, i, assoc,
- IPv6_KEY_BYTES(p2[i].key.src_dst), p2[i].key.id, p2[i].start);
-
- if (ip_frag_key_cmp(key, &p2[i].key) == 0)
- return (p2 + i);
- else if (ip_frag_key_is_empty(&p2[i].key))
-			empty = (empty == NULL) ? (p2 + i) : empty;
- else if (max_cycles + p2[i].start < tms)
- old = (old == NULL) ? (p2 + i) : old;
- }
-
- *free = empty;
- *stale = old;
- return (NULL);
-}
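
The lookup above derives two candidate cache lines from a single hash value (sig2 = (v << 7) + (v >> 14)) and scans bucket_entries slots in each. A standalone sketch of the addressing math, using illustrative table dimensions:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* entry_mask as computed by rte_ip_frag_table_create():
	 * (nb_entries - 1) & ~(bucket_entries - 1) */
	uint32_t nb_entries = 1024;
	uint32_t bucket_entries = 4;
	uint32_t entry_mask = (nb_entries - 1) & ~(bucket_entries - 1);

	uint32_t sig1 = 0xdeadbeef;			/* first hash value */
	uint32_t sig2 = (sig1 << 7) + (sig1 >> 14);	/* derived second value */

	/* each signature selects the first slot of a bucket_entries-wide line */
	printf("bucket line 0 starts at entry %u\n", sig1 & entry_mask);
	printf("bucket line 1 starts at entry %u\n", sig2 & entry_mask);
	return 0;
}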
diff --git a/src/dpdk_lib18/librte_ip_frag/rte_ip_frag.h b/src/dpdk_lib18/librte_ip_frag/rte_ip_frag.h
deleted file mode 100755
index 3989a5a8..00000000
--- a/src/dpdk_lib18/librte_ip_frag/rte_ip_frag.h
+++ /dev/null
@@ -1,353 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_IP_FRAG_H_
-#define _RTE_IP_FRAG_H_
-
-/**
- * @file
- * RTE IP Fragmentation and Reassembly
- *
- * Implementation of IP packet fragmentation and reassembly.
- */
-
-#include <stdint.h>
-#include <stdio.h>
-
-#include <rte_malloc.h>
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_ip.h>
-#include <rte_byteorder.h>
-
-enum {
- IP_LAST_FRAG_IDX, /**< index of last fragment */
- IP_FIRST_FRAG_IDX, /**< index of first fragment */
- IP_MIN_FRAG_NUM, /**< minimum number of fragments */
- IP_MAX_FRAG_NUM = RTE_LIBRTE_IP_FRAG_MAX_FRAG,
- /**< maximum number of fragments per packet */
-};
-
-/** @internal fragmented mbuf */
-struct ip_frag {
- uint16_t ofs; /**< offset into the packet */
- uint16_t len; /**< length of fragment */
- struct rte_mbuf *mb; /**< fragment mbuf */
-};
-
-/** @internal <src addr, dst_addr, id> to uniquely identify a fragmented datagram. */
-struct ip_frag_key {
- uint64_t src_dst[4]; /**< src address, first 8 bytes used for IPv4 */
-	uint32_t id;           /**< packet ID */
- uint32_t key_len; /**< src/dst key length */
-};
-
-/*
- * @internal Fragmented packet to reassemble.
- * First two entries in the frags[] array are for the last and first fragments.
- */
-struct ip_frag_pkt {
- TAILQ_ENTRY(ip_frag_pkt) lru; /**< LRU list */
- struct ip_frag_key key; /**< fragmentation key */
- uint64_t start; /**< creation timestamp */
- uint32_t total_size; /**< expected reassembled size */
- uint32_t frag_size; /**< size of fragments received */
- uint32_t last_idx; /**< index of next entry to fill */
- struct ip_frag frags[IP_MAX_FRAG_NUM]; /**< fragments */
-} __rte_cache_aligned;
-
-#define IP_FRAG_DEATH_ROW_LEN 32 /**< death row size (in packets) */
-
-/** mbuf death row (packets to be freed) */
-struct rte_ip_frag_death_row {
- uint32_t cnt; /**< number of mbufs currently on death row */
- struct rte_mbuf *row[IP_FRAG_DEATH_ROW_LEN * (IP_MAX_FRAG_NUM + 1)];
- /**< mbufs to be freed */
-};
-
-TAILQ_HEAD(ip_pkt_list, ip_frag_pkt); /**< @internal fragments tailq */
-
-/** fragmentation table statistics */
-struct ip_frag_tbl_stat {
- uint64_t find_num; /**< total # of find/insert attempts. */
- uint64_t add_num; /**< # of add ops. */
- uint64_t del_num; /**< # of del ops. */
- uint64_t reuse_num; /**< # of reuse (del/add) ops. */
- uint64_t fail_total; /**< total # of add failures. */
- uint64_t fail_nospace; /**< # of 'no space' add failures. */
-} __rte_cache_aligned;
-
-/** fragmentation table */
-struct rte_ip_frag_tbl {
- uint64_t max_cycles; /**< ttl for table entries. */
- uint32_t entry_mask; /**< hash value mask. */
- uint32_t max_entries; /**< max entries allowed. */
- uint32_t use_entries; /**< entries in use. */
-	uint32_t bucket_entries;  /**< hash associativity. */
- uint32_t nb_entries; /**< total size of the table. */
- uint32_t nb_buckets; /**< num of associativity lines. */
- struct ip_frag_pkt *last; /**< last used entry. */
- struct ip_pkt_list lru; /**< LRU list for table entries. */
- struct ip_frag_tbl_stat stat; /**< statistics counters. */
- struct ip_frag_pkt pkt[0]; /**< hash table. */
-};
-
-/** IPv6 fragment extension header */
-struct ipv6_extension_fragment {
- uint8_t next_header; /**< Next header type */
- uint8_t reserved1; /**< Reserved */
- union {
- struct {
- uint16_t frag_offset:13; /**< Offset from the start of the packet */
- uint16_t reserved2:2; /**< Reserved */
- uint16_t more_frags:1;
- /**< 1 if more fragments left, 0 if last fragment */
- };
- uint16_t frag_data;
- /**< union of all fragmentation data */
- };
- uint32_t id; /**< Packet ID */
-} __attribute__((__packed__));
-
-
-
-/*
- * Create a new IP fragmentation table.
- *
- * @param bucket_num
- * Number of buckets in the hash table.
- * @param bucket_entries
- * Number of entries per bucket (e.g. hash associativity).
- *   Should be a power of two.
- * @param max_entries
- *   Maximum number of entries that could be stored in the table.
- *   The value should be less than or equal to bucket_num * bucket_entries.
- * @param max_cycles
- * Maximum TTL in cycles for each fragmented packet.
- * @param socket_id
- * The *socket_id* argument is the socket identifier in the case of
- * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA constraints.
- * @return
- * The pointer to the new allocated fragmentation table, on success. NULL on error.
- */
-struct rte_ip_frag_tbl * rte_ip_frag_table_create(uint32_t bucket_num,
- uint32_t bucket_entries, uint32_t max_entries,
- uint64_t max_cycles, int socket_id);
-
-/*
- * Free allocated IP fragmentation table.
- *
- * @param tbl
- * Fragmentation table to free.
- */
-static inline void
-rte_ip_frag_table_destroy(struct rte_ip_frag_tbl *tbl)
-{
- rte_free(tbl);
-}
-
-#ifdef RTE_MBUF_REFCNT
-/**
- * This function implements the fragmentation of IPv6 packets.
- *
- * @param pkt_in
- * The input packet.
- * @param pkts_out
- * Array storing the output fragments.
- * @param nb_pkts_out
- * Number of fragments.
- * @param mtu_size
- *   Size in bytes of the Maximum Transmission Unit (MTU) for the outgoing IPv6
- * datagrams. This value includes the size of the IPv6 header.
- * @param pool_direct
- * MBUF pool used for allocating direct buffers for the output fragments.
- * @param pool_indirect
- * MBUF pool used for allocating indirect buffers for the output fragments.
- * @return
- * Upon successful completion - number of output fragments placed
- * in the pkts_out array.
- * Otherwise - (-1) * errno.
- */
-int32_t
-rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
- struct rte_mbuf **pkts_out,
- uint16_t nb_pkts_out,
- uint16_t mtu_size,
- struct rte_mempool *pool_direct,
- struct rte_mempool *pool_indirect);
-#endif
-
-/*
- * This function implements reassembly of fragmented IPv6 packets.
- * Incoming mbuf should have its l2_len/l3_len fields set up correctly.
- *
- * @param tbl
- * Table where to lookup/add the fragmented packet.
- * @param dr
- * Death row to free buffers to
- * @param mb
- * Incoming mbuf with IPv6 fragment.
- * @param tms
- * Fragment arrival timestamp.
- * @param ip_hdr
- * Pointer to the IPv6 header.
- * @param frag_hdr
- * Pointer to the IPv6 fragment extension header.
- * @return
- * Pointer to mbuf for reassembled packet, or NULL if:
- *   - an error occurred.
- * - not all fragments of the packet are collected yet.
- */
-struct rte_mbuf *rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
- struct rte_ip_frag_death_row *dr,
- struct rte_mbuf *mb, uint64_t tms, struct ipv6_hdr *ip_hdr,
- struct ipv6_extension_fragment *frag_hdr);
-
-/*
- * Return a pointer to the packet's fragment header, if found.
- * It only looks at the extension header that's right after the fixed IPv6
- * header, and doesn't follow the whole chain of extension headers.
- *
- * @param hdr
- * Pointer to the IPv6 header.
- * @return
- * Pointer to the IPv6 fragment extension header, or NULL if it's not
- * present.
- */
-static inline struct ipv6_extension_fragment *
-rte_ipv6_frag_get_ipv6_fragment_header(struct ipv6_hdr *hdr)
-{
- if (hdr->proto == IPPROTO_FRAGMENT) {
- return (struct ipv6_extension_fragment *) ++hdr;
- }
- else
- return NULL;
-}
-
-#ifdef RTE_MBUF_REFCNT
-/**
- * IPv4 fragmentation.
- *
- * This function implements the fragmentation of IPv4 packets.
- *
- * @param pkt_in
- * The input packet.
- * @param pkts_out
- * Array storing the output fragments.
- * @param nb_pkts_out
- * Number of fragments.
- * @param mtu_size
- *   Size in bytes of the Maximum Transmission Unit (MTU) for the outgoing IPv4
- * datagrams. This value includes the size of the IPv4 header.
- * @param pool_direct
- * MBUF pool used for allocating direct buffers for the output fragments.
- * @param pool_indirect
- * MBUF pool used for allocating indirect buffers for the output fragments.
- * @return
- * Upon successful completion - number of output fragments placed
- * in the pkts_out array.
- * Otherwise - (-1) * errno.
- */
-int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
- struct rte_mbuf **pkts_out,
- uint16_t nb_pkts_out, uint16_t mtu_size,
- struct rte_mempool *pool_direct,
- struct rte_mempool *pool_indirect);
-#endif
-
-/*
- * This function implements reassembly of fragmented IPv4 packets.
- * Incoming mbufs should have their l2_len/l3_len fields set up correctly.
- *
- * @param tbl
- * Table where to lookup/add the fragmented packet.
- * @param dr
- * Death row to free buffers to
- * @param mb
- * Incoming mbuf with IPv4 fragment.
- * @param tms
- * Fragment arrival timestamp.
- * @param ip_hdr
- * Pointer to the IPV4 header inside the fragment.
- * @return
- *   Pointer to mbuf for reassembled packet, or NULL if:
- *   - an error occurred.
- * - not all fragments of the packet are collected yet.
- */
-struct rte_mbuf * rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
- struct rte_ip_frag_death_row *dr,
- struct rte_mbuf *mb, uint64_t tms, struct ipv4_hdr *ip_hdr);
-
-/*
- * Check if the IPv4 packet is fragmented
- *
- * @param hdr
- * IPv4 header of the packet
- * @return
- * 1 if fragmented, 0 if not fragmented
- */
-static inline int
-rte_ipv4_frag_pkt_is_fragmented(const struct ipv4_hdr * hdr) {
- uint16_t flag_offset, ip_flag, ip_ofs;
-
- flag_offset = rte_be_to_cpu_16(hdr->fragment_offset);
- ip_ofs = (uint16_t)(flag_offset & IPV4_HDR_OFFSET_MASK);
- ip_flag = (uint16_t)(flag_offset & IPV4_HDR_MF_FLAG);
-
- return ip_flag != 0 || ip_ofs != 0;
-}
-
-/*
- * Free mbufs on a given death row.
- *
- * @param dr
- * Death row to free mbufs in.
- * @param prefetch
- * How many buffers to prefetch before freeing.
- */
-void rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr,
- uint32_t prefetch);
-
-
-/*
- * Dump fragmentation table statistics to file.
- *
- * @param f
- * File to dump statistics to
- * @param tbl
- * Fragmentation table to dump statistics from
- */
-void
-rte_ip_frag_table_statistics_dump(FILE * f, const struct rte_ip_frag_tbl *tbl);
-
-#endif /* _RTE_IP_FRAG_H_ */
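
To show how the pieces above fit together, a hedged reassembly sketch for the receive path. The helper name, the Ethernet-only framing assumption, and the prefetch depth of 3 are illustrative; the API calls and the l2_len/l3_len requirement come from this header.

#include <rte_ip_frag.h>
#include <rte_ether.h>
#include <rte_cycles.h>

/* hypothetical per-lcore context; tbl comes from rte_ip_frag_table_create() */
struct reasm_ctx {
	struct rte_ip_frag_tbl *tbl;
	struct rte_ip_frag_death_row dr;
};

static struct rte_mbuf *
handle_ipv4(struct reasm_ctx *ctx, struct rte_mbuf *mb)
{
	struct ipv4_hdr *ip_hdr = (struct ipv4_hdr *)
		(rte_pktmbuf_mtod(mb, char *) + sizeof(struct ether_hdr));

	if (!rte_ipv4_frag_pkt_is_fragmented(ip_hdr))
		return mb;	/* not a fragment, pass through unchanged */

	/* reassembly requires l2_len/l3_len to be set up correctly */
	mb->l2_len = sizeof(struct ether_hdr);
	mb->l3_len = sizeof(struct ipv4_hdr);

	mb = rte_ipv4_frag_reassemble_packet(ctx->tbl, &ctx->dr, mb,
			rte_rdtsc(), ip_hdr);

	/* free mbufs queued on the death row, prefetching 3 ahead */
	rte_ip_frag_free_death_row(&ctx->dr, 3);

	/* NULL until all fragments of the datagram have arrived */
	return mb;
}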
diff --git a/src/dpdk_lib18/librte_ip_frag/rte_ip_frag_common.c b/src/dpdk_lib18/librte_ip_frag/rte_ip_frag_common.c
deleted file mode 100755
index c982d8cc..00000000
--- a/src/dpdk_lib18/librte_ip_frag/rte_ip_frag_common.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stddef.h>
-#include <stdio.h>
-
-#include <rte_memory.h>
-#include <rte_log.h>
-
-#include "ip_frag_common.h"
-
-#define IP_FRAG_HASH_FNUM 2
-
-/* free mbufs from death row */
-void
-rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr,
- uint32_t prefetch)
-{
- uint32_t i, k, n;
-
- k = RTE_MIN(prefetch, dr->cnt);
- n = dr->cnt;
-
- for (i = 0; i != k; i++)
- rte_prefetch0(dr->row[i]);
-
- for (i = 0; i != n - k; i++) {
- rte_prefetch0(dr->row[i + k]);
- rte_pktmbuf_free(dr->row[i]);
- }
-
- for (; i != n; i++)
- rte_pktmbuf_free(dr->row[i]);
-
- dr->cnt = 0;
-}
-
-/* create fragmentation table */
-struct rte_ip_frag_tbl *
-rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
- uint32_t max_entries, uint64_t max_cycles, int socket_id)
-{
- struct rte_ip_frag_tbl *tbl;
- size_t sz;
- uint64_t nb_entries;
-
- nb_entries = rte_align32pow2(bucket_num);
- nb_entries *= bucket_entries;
- nb_entries *= IP_FRAG_HASH_FNUM;
-
- /* check input parameters. */
- if (rte_is_power_of_2(bucket_entries) == 0 ||
- nb_entries > UINT32_MAX || nb_entries == 0 ||
- nb_entries < max_entries) {
- RTE_LOG(ERR, USER1, "%s: invalid input parameter\n", __func__);
- return (NULL);
- }
-
- sz = sizeof (*tbl) + nb_entries * sizeof (tbl->pkt[0]);
- if ((tbl = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
- socket_id)) == NULL) {
- RTE_LOG(ERR, USER1,
-			"%s: allocation of %zu bytes at socket %d failed\n",
- __func__, sz, socket_id);
- return (NULL);
- }
-
-	RTE_LOG(INFO, USER1, "%s: allocated %zu bytes at socket %d\n",
- __func__, sz, socket_id);
-
- tbl->max_cycles = max_cycles;
- tbl->max_entries = max_entries;
- tbl->nb_entries = (uint32_t)nb_entries;
- tbl->nb_buckets = bucket_num;
- tbl->bucket_entries = bucket_entries;
- tbl->entry_mask = (tbl->nb_entries - 1) & ~(tbl->bucket_entries - 1);
-
- TAILQ_INIT(&(tbl->lru));
- return (tbl);
-}
-
-/* dump frag table statistics to file */
-void
-rte_ip_frag_table_statistics_dump(FILE *f, const struct rte_ip_frag_tbl *tbl)
-{
- uint64_t fail_total, fail_nospace;
-
- fail_total = tbl->stat.fail_total;
- fail_nospace = tbl->stat.fail_nospace;
-
- fprintf(f, "max entries:\t%u;\n"
- "entries in use:\t%u;\n"
- "finds/inserts:\t%" PRIu64 ";\n"
- "entries added:\t%" PRIu64 ";\n"
- "entries deleted by timeout:\t%" PRIu64 ";\n"
- "entries reused by timeout:\t%" PRIu64 ";\n"
- "total add failures:\t%" PRIu64 ";\n"
- "add no-space failures:\t%" PRIu64 ";\n"
- "add hash-collisions failures:\t%" PRIu64 ";\n",
- tbl->max_entries,
- tbl->use_entries,
- tbl->stat.find_num,
- tbl->stat.add_num,
- tbl->stat.del_num,
- tbl->stat.reuse_num,
- fail_total,
- fail_nospace,
- fail_total - fail_nospace);
-}
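
A sizing sketch for rte_ip_frag_table_create() under the constraints checked above. The figures (4096 flows, 16-way buckets, 2-second timeout) are arbitrary illustrations; note nb_entries ends up as align32pow2(bucket_num) * bucket_entries * 2, which must cover max_entries.

#include <rte_ip_frag.h>
#include <rte_cycles.h>

static struct rte_ip_frag_tbl *
make_frag_table(int socket_id)
{
	uint32_t bucket_entries = 16;		/* must be a power of two */
	uint32_t bucket_num = 4096 / bucket_entries;
	uint32_t max_entries = 4096;		/* <= bucket_num * bucket_entries */
	uint64_t ttl_cycles = rte_get_tsc_hz() * 2;	/* 2 s, in TSC cycles */

	return rte_ip_frag_table_create(bucket_num, bucket_entries,
			max_entries, ttl_cycles, socket_id);
}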
diff --git a/src/dpdk_lib18/librte_ip_frag/rte_ipv4_fragmentation.c b/src/dpdk_lib18/librte_ip_frag/rte_ipv4_fragmentation.c
deleted file mode 100755
index a4ed9238..00000000
--- a/src/dpdk_lib18/librte_ip_frag/rte_ipv4_fragmentation.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stddef.h>
-#include <errno.h>
-
-#include <rte_memcpy.h>
-#include <rte_mempool.h>
-#include <rte_debug.h>
-
-#include "ip_frag_common.h"
-
-/* Fragment Offset */
-#define IPV4_HDR_DF_SHIFT 14
-#define IPV4_HDR_MF_SHIFT 13
-#define IPV4_HDR_FO_SHIFT 3
-
-#define IPV4_HDR_DF_MASK (1 << IPV4_HDR_DF_SHIFT)
-#define IPV4_HDR_MF_MASK (1 << IPV4_HDR_MF_SHIFT)
-
-#define IPV4_HDR_FO_MASK ((1 << IPV4_HDR_FO_SHIFT) - 1)
-
-static inline void __fill_ipv4hdr_frag(struct ipv4_hdr *dst,
- const struct ipv4_hdr *src, uint16_t len, uint16_t fofs,
- uint16_t dofs, uint32_t mf)
-{
- rte_memcpy(dst, src, sizeof(*dst));
- fofs = (uint16_t)(fofs + (dofs >> IPV4_HDR_FO_SHIFT));
- fofs = (uint16_t)(fofs | mf << IPV4_HDR_MF_SHIFT);
- dst->fragment_offset = rte_cpu_to_be_16(fofs);
- dst->total_length = rte_cpu_to_be_16(len);
- dst->hdr_checksum = 0;
-}
-
-static inline void __free_fragments(struct rte_mbuf *mb[], uint32_t num)
-{
- uint32_t i;
- for (i = 0; i != num; i++)
- rte_pktmbuf_free(mb[i]);
-}
-
-/**
- * IPv4 fragmentation.
- *
- * This function implements the fragmentation of IPv4 packets.
- *
- * @param pkt_in
- * The input packet.
- * @param pkts_out
- * Array storing the output fragments.
- * @param mtu_size
- * Size in bytes of the Maximum Transmission Unit (MTU) for the outgoing IPv4
- * datagrams. This value includes the size of the IPv4 header.
- * @param pool_direct
- * MBUF pool used for allocating direct buffers for the output fragments.
- * @param pool_indirect
- * MBUF pool used for allocating indirect buffers for the output fragments.
- * @return
- * Upon successful completion - number of output fragments placed
- * in the pkts_out array.
- * Otherwise - (-1) * <errno>.
- */
-int32_t
-rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
- struct rte_mbuf **pkts_out,
- uint16_t nb_pkts_out,
- uint16_t mtu_size,
- struct rte_mempool *pool_direct,
- struct rte_mempool *pool_indirect)
-{
- struct rte_mbuf *in_seg = NULL;
- struct ipv4_hdr *in_hdr;
- uint32_t out_pkt_pos, in_seg_data_pos;
- uint32_t more_in_segs;
- uint16_t fragment_offset, flag_offset, frag_size;
-
- frag_size = (uint16_t)(mtu_size - sizeof(struct ipv4_hdr));
-
- /* Fragment size should be a multiple of 8. */
- IP_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
-
- in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv4_hdr *);
- flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
-
- /* If Don't Fragment flag is set */
- if (unlikely ((flag_offset & IPV4_HDR_DF_MASK) != 0))
- return -ENOTSUP;
-
- /* Check that pkts_out is big enough to hold all fragments */
- if (unlikely(frag_size * nb_pkts_out <
- (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv4_hdr))))
- return -EINVAL;
-
- in_seg = pkt_in;
- in_seg_data_pos = sizeof(struct ipv4_hdr);
- out_pkt_pos = 0;
- fragment_offset = 0;
-
- more_in_segs = 1;
- while (likely(more_in_segs)) {
- struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
- uint32_t more_out_segs;
- struct ipv4_hdr *out_hdr;
-
- /* Allocate direct buffer */
- out_pkt = rte_pktmbuf_alloc(pool_direct);
- if (unlikely(out_pkt == NULL)) {
- __free_fragments(pkts_out, out_pkt_pos);
- return -ENOMEM;
- }
-
- /* Reserve space for the IP header that will be built later */
- out_pkt->data_len = sizeof(struct ipv4_hdr);
- out_pkt->pkt_len = sizeof(struct ipv4_hdr);
-
- out_seg_prev = out_pkt;
- more_out_segs = 1;
- while (likely(more_out_segs && more_in_segs)) {
- struct rte_mbuf *out_seg = NULL;
- uint32_t len;
-
- /* Allocate indirect buffer */
- out_seg = rte_pktmbuf_alloc(pool_indirect);
- if (unlikely(out_seg == NULL)) {
- rte_pktmbuf_free(out_pkt);
- __free_fragments(pkts_out, out_pkt_pos);
- return -ENOMEM;
- }
- out_seg_prev->next = out_seg;
- out_seg_prev = out_seg;
-
- /* Prepare indirect buffer */
- rte_pktmbuf_attach(out_seg, in_seg);
- len = mtu_size - out_pkt->pkt_len;
- if (len > (in_seg->data_len - in_seg_data_pos)) {
- len = in_seg->data_len - in_seg_data_pos;
- }
- out_seg->data_off = in_seg->data_off + in_seg_data_pos;
- out_seg->data_len = (uint16_t)len;
- out_pkt->pkt_len = (uint16_t)(len +
- out_pkt->pkt_len);
- out_pkt->nb_segs += 1;
- in_seg_data_pos += len;
-
- /* Current output packet (i.e. fragment) done ? */
- if (unlikely(out_pkt->pkt_len >= mtu_size))
- more_out_segs = 0;
-
- /* Current input segment done ? */
- if (unlikely(in_seg_data_pos == in_seg->data_len)) {
- in_seg = in_seg->next;
- in_seg_data_pos = 0;
-
- if (unlikely(in_seg == NULL))
- more_in_segs = 0;
- }
- }
-
- /* Build the IP header */
-
- out_hdr = rte_pktmbuf_mtod(out_pkt, struct ipv4_hdr *);
-
- __fill_ipv4hdr_frag(out_hdr, in_hdr,
- (uint16_t)out_pkt->pkt_len,
- flag_offset, fragment_offset, more_in_segs);
-
- fragment_offset = (uint16_t)(fragment_offset +
- out_pkt->pkt_len - sizeof(struct ipv4_hdr));
-
- out_pkt->ol_flags |= PKT_TX_IP_CKSUM;
- out_pkt->l3_len = sizeof(struct ipv4_hdr);
-
- /* Write the fragment to the output list */
- pkts_out[out_pkt_pos] = out_pkt;
- out_pkt_pos ++;
- }
-
- return out_pkt_pos;
-}
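
The fragmentation routine deleted above expects the mbuf data to start at the IPv4 header and returns either the fragment count or a negative errno. A minimal caller sketch, not from the patch; FRAGS_MAX, the pool names and the MTU are illustrative:

#include <rte_mbuf.h>
#include <rte_ip_frag.h>

#define FRAGS_MAX 8

static int
fragment_one(struct rte_mbuf *m, struct rte_mempool *direct_pool,
	     struct rte_mempool *indirect_pool)
{
	struct rte_mbuf *frags[FRAGS_MAX];
	int32_t n, i;

	n = rte_ipv4_fragment_packet(m, frags, FRAGS_MAX,
			1500 /* MTU, includes the IPv4 header */,
			direct_pool, indirect_pool);
	if (n < 0)
		return (int)n;	/* -ENOTSUP (DF set), -EINVAL or -ENOMEM */

	for (i = 0; i < n; i++) {
		/* prepend the L2 header and transmit frags[i] here */
	}
	/* the indirect fragments hold references into the input mbuf */
	rte_pktmbuf_free(m);
	return 0;
}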
diff --git a/src/dpdk_lib18/librte_ip_frag/rte_ipv4_reassembly.c b/src/dpdk_lib18/librte_ip_frag/rte_ipv4_reassembly.c
deleted file mode 100755
index 0b8ceebd..00000000
--- a/src/dpdk_lib18/librte_ip_frag/rte_ipv4_reassembly.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stddef.h>
-
-#include <rte_debug.h>
-#include <rte_tailq.h>
-
-#include "ip_frag_common.h"
-
-/*
- * Reassemble fragments into one packet.
- */
-struct rte_mbuf *
-ipv4_frag_reassemble(const struct ip_frag_pkt *fp)
-{
- struct ipv4_hdr *ip_hdr;
- struct rte_mbuf *m, *prev;
- uint32_t i, n, ofs, first_len;
-
- first_len = fp->frags[IP_FIRST_FRAG_IDX].len;
- n = fp->last_idx - 1;
-
- /* start from the last fragment. */
- m = fp->frags[IP_LAST_FRAG_IDX].mb;
- ofs = fp->frags[IP_LAST_FRAG_IDX].ofs;
-
- while (ofs != first_len) {
-
- prev = m;
-
- for (i = n; i != IP_FIRST_FRAG_IDX && ofs != first_len; i--) {
-
- /* previous fragment found. */
- if(fp->frags[i].ofs + fp->frags[i].len == ofs) {
-
- ip_frag_chain(fp->frags[i].mb, m);
-
- /* update our last fragment and offset. */
- m = fp->frags[i].mb;
- ofs = fp->frags[i].ofs;
- }
- }
-
- /* error - hole in the packet. */
- if (m == prev) {
- return (NULL);
- }
- }
-
- /* chain with the first fragment. */
- ip_frag_chain(fp->frags[IP_FIRST_FRAG_IDX].mb, m);
- m = fp->frags[IP_FIRST_FRAG_IDX].mb;
-
- /* update mbuf fields for reassembled packet. */
- m->ol_flags |= PKT_TX_IP_CKSUM;
-
- /* update ipv4 header for the reassembled packet */
- ip_hdr = (struct ipv4_hdr*)(rte_pktmbuf_mtod(m, uint8_t *) +
- m->l2_len);
-
- ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
- m->l3_len));
- ip_hdr->fragment_offset = (uint16_t)(ip_hdr->fragment_offset &
- rte_cpu_to_be_16(IPV4_HDR_DF_FLAG));
- ip_hdr->hdr_checksum = 0;
-
- return (m);
-}
-
-/*
- * Process a new mbuf containing a fragment of an IPV4 packet.
- * The incoming mbuf should have its l2_len/l3_len fields set up correctly.
- * @param tbl
- * Table where to lookup/add the fragmented packet.
- * @param mb
- * Incoming mbuf with IPV4 fragment.
- * @param tms
- * Fragment arrival timestamp.
- * @param ip_hdr
- * Pointer to the IPV4 header inside the fragment.
- * @return
- * Pointer to mbuf for reassembled packet, or NULL if:
- * - an error occurred.
- * - not all fragments of the packet are collected yet.
- */
-struct rte_mbuf *
-rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
- struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
- struct ipv4_hdr *ip_hdr)
-{
- struct ip_frag_pkt *fp;
- struct ip_frag_key key;
- const uint64_t *psd;
- uint16_t ip_len;
- uint16_t flag_offset, ip_ofs, ip_flag;
-
- flag_offset = rte_be_to_cpu_16(ip_hdr->fragment_offset);
- ip_ofs = (uint16_t)(flag_offset & IPV4_HDR_OFFSET_MASK);
- ip_flag = (uint16_t)(flag_offset & IPV4_HDR_MF_FLAG);
-
- psd = (uint64_t *)&ip_hdr->src_addr;
- /* use first 8 bytes only */
- key.src_dst[0] = psd[0];
- key.id = ip_hdr->packet_id;
- key.key_len = IPV4_KEYLEN;
-
- ip_ofs *= IPV4_HDR_OFFSET_UNITS;
- ip_len = (uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length) -
- mb->l3_len);
-
- IP_FRAG_LOG(DEBUG, "%s:%d:\n"
- "mbuf: %p, tms: %" PRIu64
- ", key: <%" PRIx64 ", %#x>, ofs: %u, len: %u, flags: %#x\n"
- "tbl: %p, max_cycles: %" PRIu64 ", entry_mask: %#x, "
- "max_entries: %u, use_entries: %u\n\n",
- __func__, __LINE__,
- mb, tms, key.src_dst[0], key.id, ip_ofs, ip_len, ip_flag,
- tbl, tbl->max_cycles, tbl->entry_mask, tbl->max_entries,
- tbl->use_entries);
-
- /* try to find/add entry into the fragment's table. */
- if ((fp = ip_frag_find(tbl, dr, &key, tms)) == NULL) {
- IP_FRAG_MBUF2DR(dr, mb);
- return (NULL);
- }
-
- IP_FRAG_LOG(DEBUG, "%s:%d:\n"
- "tbl: %p, max_entries: %u, use_entries: %u\n"
- "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, start: %" PRIu64
- ", total_size: %u, frag_size: %u, last_idx: %u\n\n",
- __func__, __LINE__,
- tbl, tbl->max_entries, tbl->use_entries,
- fp, fp->key.src_dst[0], fp->key.id, fp->start,
- fp->total_size, fp->frag_size, fp->last_idx);
-
-
- /* process the fragmented packet. */
- mb = ip_frag_process(fp, dr, mb, ip_ofs, ip_len, ip_flag);
- ip_frag_inuse(tbl, fp);
-
- IP_FRAG_LOG(DEBUG, "%s:%d:\n"
- "mbuf: %p\n"
- "tbl: %p, max_entries: %u, use_entries: %u\n"
- "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, start: %" PRIu64
- ", total_size: %u, frag_size: %u, last_idx: %u\n\n",
- __func__, __LINE__, mb,
- tbl, tbl->max_entries, tbl->use_entries,
- fp, fp->key.src_dst[0], fp->key.id, fp->start,
- fp->total_size, fp->frag_size, fp->last_idx);
-
- return (mb);
-}
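
A caller sketch for the reassembly entry point deleted above (not part of the patch): mb->l2_len/l3_len must already be set, and the death row must be drained to actually free the mbufs the table queues for deletion.

#include <rte_mbuf.h>
#include <rte_cycles.h>
#include <rte_ip_frag.h>

static struct rte_mbuf *
reassemble_one(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
	       struct rte_mbuf *mb)
{
	struct ipv4_hdr *ip_hdr = (struct ipv4_hdr *)
			(rte_pktmbuf_mtod(mb, char *) + mb->l2_len);

	if (rte_ipv4_frag_pkt_is_fragmented(ip_hdr)) {
		/* NULL means: datagram not complete yet, or an error */
		mb = rte_ipv4_frag_reassemble_packet(tbl, dr, mb,
				rte_rdtsc(), ip_hdr);
	}
	rte_ip_frag_free_death_row(dr, 0 /* no prefetch */);
	return mb;
}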
diff --git a/src/dpdk_lib18/librte_ip_frag/rte_ipv6_fragmentation.c b/src/dpdk_lib18/librte_ip_frag/rte_ipv6_fragmentation.c
deleted file mode 100755
index 4ffcc7c6..00000000
--- a/src/dpdk_lib18/librte_ip_frag/rte_ipv6_fragmentation.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stddef.h>
-#include <errno.h>
-
-#include <rte_memcpy.h>
-
-#include "ip_frag_common.h"
-
-/**
- * @file
- * RTE IPv6 Fragmentation
- *
- * Implementation of IPv6 fragmentation.
- *
- */
-
-/* Fragment Extension Header */
-#define IPV6_HDR_MF_SHIFT 0
-#define IPV6_HDR_FO_SHIFT 3
-#define IPV6_HDR_MF_MASK (1 << IPV6_HDR_MF_SHIFT)
-#define IPV6_HDR_FO_MASK ((1 << IPV6_HDR_FO_SHIFT) - 1)
-
-static inline void
-__fill_ipv6hdr_frag(struct ipv6_hdr *dst,
- const struct ipv6_hdr *src, uint16_t len, uint16_t fofs,
- uint32_t mf)
-{
- struct ipv6_extension_fragment *fh;
-
- rte_memcpy(dst, src, sizeof(*dst));
- dst->payload_len = rte_cpu_to_be_16(len);
- dst->proto = IPPROTO_FRAGMENT;
-
- fh = (struct ipv6_extension_fragment *) ++dst;
- fh->next_header = src->proto;
- fh->reserved1 = 0;
- fh->frag_offset = rte_cpu_to_be_16(fofs);
- fh->reserved2 = 0;
- fh->more_frags = rte_cpu_to_be_16(mf);
- fh->id = 0;
-}
-
-static inline void
-__free_fragments(struct rte_mbuf *mb[], uint32_t num)
-{
- uint32_t i;
- for (i = 0; i < num; i++)
- rte_pktmbuf_free(mb[i]);
-}
-
-/**
- * IPv6 fragmentation.
- *
- * This function implements the fragmentation of IPv6 packets.
- *
- * @param pkt_in
- * The input packet.
- * @param pkts_out
- * Array storing the output fragments.
- * @param mtu_size
- * Size in bytes of the Maximum Transmission Unit (MTU) for the outgoing IPv6
- * datagrams. This value includes the size of the IPv6 header.
- * @param pool_direct
- * MBUF pool used for allocating direct buffers for the output fragments.
- * @param pool_indirect
- * MBUF pool used for allocating indirect buffers for the output fragments.
- * @return
- * Upon successful completion - number of output fragments placed
- * in the pkts_out array.
- * Otherwise - (-1) * <errno>.
- */
-int32_t
-rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
- struct rte_mbuf **pkts_out,
- uint16_t nb_pkts_out,
- uint16_t mtu_size,
- struct rte_mempool *pool_direct,
- struct rte_mempool *pool_indirect)
-{
- struct rte_mbuf *in_seg = NULL;
- struct ipv6_hdr *in_hdr;
- uint32_t out_pkt_pos, in_seg_data_pos;
- uint32_t more_in_segs;
- uint16_t fragment_offset, frag_size;
-
- frag_size = (uint16_t)(mtu_size - sizeof(struct ipv6_hdr));
-
- /* Fragment size should be a multiple of 8. */
- IP_FRAG_ASSERT((frag_size & IPV6_HDR_FO_MASK) == 0);
-
- /* Check that pkts_out is big enough to hold all fragments */
- if (unlikely (frag_size * nb_pkts_out <
- (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv6_hdr))))
- return (-EINVAL);
-
- in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv6_hdr *);
-
- in_seg = pkt_in;
- in_seg_data_pos = sizeof(struct ipv6_hdr);
- out_pkt_pos = 0;
- fragment_offset = 0;
-
- more_in_segs = 1;
- while (likely(more_in_segs)) {
- struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
- uint32_t more_out_segs;
- struct ipv6_hdr *out_hdr;
-
- /* Allocate direct buffer */
- out_pkt = rte_pktmbuf_alloc(pool_direct);
- if (unlikely(out_pkt == NULL)) {
- __free_fragments(pkts_out, out_pkt_pos);
- return (-ENOMEM);
- }
-
- /* Reserve space for the IP header that will be built later */
- out_pkt->data_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
- out_pkt->pkt_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
-
- out_seg_prev = out_pkt;
- more_out_segs = 1;
- while (likely(more_out_segs && more_in_segs)) {
- struct rte_mbuf *out_seg = NULL;
- uint32_t len;
-
- /* Allocate indirect buffer */
- out_seg = rte_pktmbuf_alloc(pool_indirect);
- if (unlikely(out_seg == NULL)) {
- rte_pktmbuf_free(out_pkt);
- __free_fragments(pkts_out, out_pkt_pos);
- return (-ENOMEM);
- }
- out_seg_prev->next = out_seg;
- out_seg_prev = out_seg;
-
- /* Prepare indirect buffer */
- rte_pktmbuf_attach(out_seg, in_seg);
- len = mtu_size - out_pkt->pkt_len;
- if (len > (in_seg->data_len - in_seg_data_pos)) {
- len = in_seg->data_len - in_seg_data_pos;
- }
- out_seg->data_off = in_seg->data_off + in_seg_data_pos;
- out_seg->data_len = (uint16_t)len;
- out_pkt->pkt_len = (uint16_t)(len +
- out_pkt->pkt_len);
- out_pkt->nb_segs += 1;
- in_seg_data_pos += len;
-
- /* Current output packet (i.e. fragment) done ? */
- if (unlikely(out_pkt->pkt_len >= mtu_size)) {
- more_out_segs = 0;
- }
-
- /* Current input segment done ? */
- if (unlikely(in_seg_data_pos == in_seg->data_len)) {
- in_seg = in_seg->next;
- in_seg_data_pos = 0;
-
- if (unlikely(in_seg == NULL)) {
- more_in_segs = 0;
- }
- }
- }
-
- /* Build the IP header */
-
- out_hdr = rte_pktmbuf_mtod(out_pkt, struct ipv6_hdr *);
-
- __fill_ipv6hdr_frag(out_hdr, in_hdr,
- (uint16_t) out_pkt->pkt_len - sizeof(struct ipv6_hdr),
- fragment_offset, more_in_segs);
-
- fragment_offset = (uint16_t)(fragment_offset +
- out_pkt->pkt_len - sizeof(struct ipv6_hdr)
- - sizeof(struct ipv6_extension_fragment));
-
- /* Write the fragment to the output list */
- pkts_out[out_pkt_pos] = out_pkt;
- out_pkt_pos ++;
- }
-
- return (out_pkt_pos);
-}
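
The IPv6 path mirrors the IPv4 one, except that every fragment carries an 8-byte fragment extension header in addition to the 40-byte IPv6 header, which is why the code above reserves sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment) in each output packet. An illustrative capacity calculation under that assumption (not from the patch):

#include <stdint.h>

/* payload bytes available per IPv6 fragment at a given MTU */
static uint32_t
ipv6_frag_payload(uint32_t mtu)
{
	/* 40-byte IPv6 header + 8-byte fragment extension header;
	 * fragment offsets are expressed in 8-byte units */
	return (mtu - 40 - 8) & ~(uint32_t)7;
}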
diff --git a/src/dpdk_lib18/librte_ip_frag/rte_ipv6_reassembly.c b/src/dpdk_lib18/librte_ip_frag/rte_ipv6_reassembly.c
deleted file mode 100755
index 71cf721c..00000000
--- a/src/dpdk_lib18/librte_ip_frag/rte_ipv6_reassembly.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stddef.h>
-
-#include <rte_memcpy.h>
-
-#include "ip_frag_common.h"
-
-/**
- * @file
- * RTE IPv6 Reassembly
- *
- * Implementation of IPv6 reassembly.
- *
- */
-
-static inline void
-ip_frag_memmove(char *dst, char *src, int len)
-{
- int i;
-
- /* go backwards to make sure we don't overwrite anything important */
- for (i = len - 1; i >= 0; i--)
- dst[i] = src[i];
-}
-
-/*
- * Reassemble fragments into one packet.
- */
-struct rte_mbuf *
-ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
-{
- struct ipv6_hdr *ip_hdr;
- struct ipv6_extension_fragment *frag_hdr;
- struct rte_mbuf *m, *prev;
- uint32_t i, n, ofs, first_len;
- uint32_t last_len, move_len, payload_len;
-
- first_len = fp->frags[IP_FIRST_FRAG_IDX].len;
- n = fp->last_idx - 1;
-
- /* start from the last fragment. */
- m = fp->frags[IP_LAST_FRAG_IDX].mb;
- ofs = fp->frags[IP_LAST_FRAG_IDX].ofs;
- last_len = fp->frags[IP_LAST_FRAG_IDX].len;
-
- payload_len = ofs + last_len;
-
- while (ofs != first_len) {
-
- prev = m;
-
- for (i = n; i != IP_FIRST_FRAG_IDX && ofs != first_len; i--) {
-
- /* previous fragment found. */
- if (fp->frags[i].ofs + fp->frags[i].len == ofs) {
-
- ip_frag_chain(fp->frags[i].mb, m);
-
- /* update our last fragment and offset. */
- m = fp->frags[i].mb;
- ofs = fp->frags[i].ofs;
- }
- }
-
- /* error - hole in the packet. */
- if (m == prev) {
- return NULL;
- }
- }
-
- /* chain with the first fragment. */
- ip_frag_chain(fp->frags[IP_FIRST_FRAG_IDX].mb, m);
- m = fp->frags[IP_FIRST_FRAG_IDX].mb;
-
- /* update mbuf fields for reassembled packet. */
- m->ol_flags |= PKT_TX_IP_CKSUM;
-
- /* update ipv6 header for the reassembled datagram */
- ip_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(m, uint8_t *) +
- m->l2_len);
-
- ip_hdr->payload_len = rte_cpu_to_be_16(payload_len);
-
- /*
- * remove fragmentation header. note that per RFC2460, we need to update
- * the last non-fragmentable header with the "next header" field to contain
- * the type of the first fragmentable header, but we currently don't support
- * other headers, so we assume there are no other headers and thus update
- * the main IPv6 header instead.
- */
- move_len = m->l2_len + m->l3_len - sizeof(*frag_hdr);
- frag_hdr = (struct ipv6_extension_fragment *) (ip_hdr + 1);
- ip_hdr->proto = frag_hdr->next_header;
-
- ip_frag_memmove(rte_pktmbuf_mtod(m, char*) + sizeof(*frag_hdr),
- rte_pktmbuf_mtod(m, char*), move_len);
-
- rte_pktmbuf_adj(m, sizeof(*frag_hdr));
-
- return m;
-}
-
-/*
- * Process a new mbuf containing a fragment of an IPV6 datagram.
- * The incoming mbuf should have its l2_len/l3_len fields set up correctly.
- * @param tbl
- * Table where to lookup/add the fragmented packet.
- * @param mb
- * Incoming mbuf with IPV6 fragment.
- * @param tms
- * Fragment arrival timestamp.
- * @param ip_hdr
- * Pointer to the IPV6 header.
- * @param frag_hdr
- * Pointer to the IPV6 fragment extension header.
- * @return
- * Pointer to mbuf for reassembled packet, or NULL if:
- * - an error occurred.
- * - not all fragments of the packet are collected yet.
- */
-#define MORE_FRAGS(x) (((x) & 0x100) >> 8)
-#define FRAG_OFFSET(x) (rte_cpu_to_be_16(x) >> 3)
-struct rte_mbuf *
-rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
- struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
- struct ipv6_hdr *ip_hdr, struct ipv6_extension_fragment *frag_hdr)
-{
- struct ip_frag_pkt *fp;
- struct ip_frag_key key;
- uint16_t ip_len, ip_ofs;
-
- rte_memcpy(&key.src_dst[0], ip_hdr->src_addr, 16);
- rte_memcpy(&key.src_dst[2], ip_hdr->dst_addr, 16);
-
- key.id = frag_hdr->id;
- key.key_len = IPV6_KEYLEN;
-
- ip_ofs = FRAG_OFFSET(frag_hdr->frag_data) * 8;
-
- /*
- * as per RFC2460, payload length contains all extension headers as well.
- * since we don't support anything but frag headers, this is what we remove
- * from the payload len.
- */
- ip_len = rte_be_to_cpu_16(ip_hdr->payload_len) - sizeof(*frag_hdr);
-
- IP_FRAG_LOG(DEBUG, "%s:%d:\n"
- "mbuf: %p, tms: %" PRIu64
- ", key: <" IPv6_KEY_BYTES_FMT ", %#x>, ofs: %u, len: %u, flags: %#x\n"
- "tbl: %p, max_cycles: %" PRIu64 ", entry_mask: %#x, "
- "max_entries: %u, use_entries: %u\n\n",
- __func__, __LINE__,
- mb, tms, IPv6_KEY_BYTES(key.src_dst), key.id, ip_ofs, ip_len, frag_hdr->more_frags,
- tbl, tbl->max_cycles, tbl->entry_mask, tbl->max_entries,
- tbl->use_entries);
-
- /* try to find/add entry into the fragment's table. */
- fp = ip_frag_find(tbl, dr, &key, tms);
- if (fp == NULL) {
- IP_FRAG_MBUF2DR(dr, mb);
- return NULL;
- }
-
- IP_FRAG_LOG(DEBUG, "%s:%d:\n"
- "tbl: %p, max_entries: %u, use_entries: %u\n"
- "ipv6_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, start: %" PRIu64
- ", total_size: %u, frag_size: %u, last_idx: %u\n\n",
- __func__, __LINE__,
- tbl, tbl->max_entries, tbl->use_entries,
- fp, IPv6_KEY_BYTES(fp->key.src_dst), fp->key.id, fp->start,
- fp->total_size, fp->frag_size, fp->last_idx);
-
-
- /* process the fragmented packet. */
- mb = ip_frag_process(fp, dr, mb, ip_ofs, ip_len,
- MORE_FRAGS(frag_hdr->frag_data));
- ip_frag_inuse(tbl, fp);
-
- IP_FRAG_LOG(DEBUG, "%s:%d:\n"
- "mbuf: %p\n"
- "tbl: %p, max_entries: %u, use_entries: %u\n"
- "ipv6_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, start: %" PRIu64
- ", total_size: %u, frag_size: %u, last_idx: %u\n\n",
- __func__, __LINE__, mb,
- tbl, tbl->max_entries, tbl->use_entries,
- fp, IPv6_KEY_BYTES(fp->key.src_dst), fp->key.id, fp->start,
- fp->total_size, fp->frag_size, fp->last_idx);
-
- return mb;
-}
diff --git a/src/dpdk_lib18/librte_ivshmem/Makefile b/src/dpdk_lib18/librte_ivshmem/Makefile
deleted file mode 100755
index 536814c9..00000000
--- a/src/dpdk_lib18/librte_ivshmem/Makefile
+++ /dev/null
@@ -1,48 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_ivshmem.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_IVSHMEM) := rte_ivshmem.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_IVSHMEM)-include := rte_ivshmem.h
-
-# this lib needs mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IVSHMEM) += lib/librte_mempool
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_ivshmem/rte_ivshmem.c b/src/dpdk_lib18/librte_ivshmem/rte_ivshmem.c
deleted file mode 100755
index 7ca55edb..00000000
--- a/src/dpdk_lib18/librte_ivshmem/rte_ivshmem.c
+++ /dev/null
@@ -1,884 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <fcntl.h>
-#include <limits.h>
-#include <unistd.h>
-#include <sys/mman.h>
-#include <string.h>
-#include <stdio.h>
-
-#include <rte_eal_memconfig.h>
-#include <rte_memory.h>
-#include <rte_ivshmem.h>
-#include <rte_string_fns.h>
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_spinlock.h>
-#include <rte_common.h>
-#include <rte_malloc.h>
-
-#include "rte_ivshmem.h"
-
-#define IVSHMEM_CONFIG_FILE_FMT "/var/run/.dpdk_ivshmem_metadata_%s"
-#define IVSHMEM_QEMU_CMD_LINE_HEADER_FMT "-device ivshmem,size=%" PRIu64 "M,shm=fd%s"
-#define IVSHMEM_QEMU_CMD_FD_FMT ":%s:0x%" PRIx64 ":0x%" PRIx64
-#define IVSHMEM_QEMU_CMDLINE_BUFSIZE 1024
-#define IVSHMEM_MAX_PAGES (1 << 12)
-#define adjacent(x,y) (((x).phys_addr+(x).len)==(y).phys_addr)
-#define METADATA_SIZE_ALIGNED \
- (RTE_ALIGN_CEIL(sizeof(struct rte_ivshmem_metadata),pagesz))
-
-#define GET_PAGEMAP_ADDR(in,addr,dlm,err) \
-{ \
- char *end; \
- errno = 0; \
- addr = strtoull((in), &end, 16); \
- if (errno != 0 || *end != (dlm)) { \
- RTE_LOG(ERR, EAL, err); \
- goto error; \
- } \
- (in) = end + 1; \
-}
-
-static int pagesz;
-
-struct memseg_cache_entry {
- char filepath[PATH_MAX];
- uint64_t offset;
- uint64_t len;
-};
-
-struct ivshmem_config {
- struct rte_ivshmem_metadata * metadata;
- struct memseg_cache_entry memseg_cache[IVSHMEM_MAX_PAGES];
- /**< account for multiple files per segment case */
- struct flock lock;
- rte_spinlock_t sl;
-};
-
-static struct ivshmem_config
-ivshmem_global_config[RTE_LIBRTE_IVSHMEM_MAX_METADATA_FILES];
-
-static rte_spinlock_t global_cfg_sl;
-
-static struct ivshmem_config *
-get_config_by_name(const char * name)
-{
- struct rte_ivshmem_metadata * config;
- unsigned i;
-
- for (i = 0; i < RTE_DIM(ivshmem_global_config); i++) {
- config = ivshmem_global_config[i].metadata;
- if (config == NULL)
- return NULL;
- if (strncmp(name, config->name, IVSHMEM_NAME_LEN) == 0)
- return &ivshmem_global_config[i];
- }
-
- return NULL;
-}
-
-static int
-overlap(const struct rte_memzone * s1, const struct rte_memzone * s2)
-{
- uint64_t start1, end1, start2, end2;
-
- start1 = s1->addr_64;
- end1 = s1->addr_64 + s1->len;
- start2 = s2->addr_64;
- end2 = s2->addr_64 + s2->len;
-
- if (start1 >= start2 && start1 < end2)
- return 1;
- if (start2 >= start1 && start2 < end1)
- return 1;
-
- return 0;
-}
-
-static struct rte_memzone *
-get_memzone_by_addr(const void * addr)
-{
- struct rte_memzone * tmp, * mz;
- struct rte_mem_config * mcfg;
- int i;
-
- mcfg = rte_eal_get_configuration()->mem_config;
- mz = NULL;
-
- /* find memzone for the ring */
- for (i = 0; i < RTE_MAX_MEMZONE; i++) {
- tmp = &mcfg->memzone[i];
-
- if (tmp->addr_64 == (uint64_t) addr) {
- mz = tmp;
- break;
- }
- }
-
- return mz;
-}
-
-static int
-entry_compare(const void * a, const void * b)
-{
- const struct rte_ivshmem_metadata_entry * e1 =
- (const struct rte_ivshmem_metadata_entry*) a;
- const struct rte_ivshmem_metadata_entry * e2 =
- (const struct rte_ivshmem_metadata_entry*) b;
-
- /* move unallocated zones to the end */
- if (e1->mz.addr == NULL && e2->mz.addr == NULL)
- return 0;
- if (e1->mz.addr == 0)
- return 1;
- if (e2->mz.addr == 0)
- return -1;
-
- return e1->mz.phys_addr > e2->mz.phys_addr;
-}
-
-/* fills hugepage cache entry for a given start virt_addr */
-static int
-get_hugefile_by_virt_addr(uint64_t virt_addr, struct memseg_cache_entry * e)
-{
- uint64_t start_addr, end_addr;
- char *start,*path_end;
- char buf[PATH_MAX*2];
- FILE *f;
-
- start = NULL;
- path_end = NULL;
- start_addr = 0;
-
- memset(e->filepath, 0, sizeof(e->filepath));
-
- /* open /proc/self/maps */
- f = fopen("/proc/self/maps", "r");
- if (f == NULL) {
- RTE_LOG(ERR, EAL, "cannot open /proc/self/maps!\n");
- return -1;
- }
-
- /* parse maps */
- while (fgets(buf, sizeof(buf), f) != NULL) {
-
- /* get endptr to end of start addr */
- start = buf;
-
- GET_PAGEMAP_ADDR(start,start_addr,'-',
- "Cannot find start address in maps!\n");
-
- /* if start address is bigger than our address, skip */
- if (start_addr > virt_addr)
- continue;
-
- GET_PAGEMAP_ADDR(start,end_addr,' ',
- "Cannot find end address in maps!\n");
-
- /* if end address is less than our address, skip */
- if (end_addr <= virt_addr)
- continue;
-
- /* find where the path starts */
- start = strstr(start, "/");
-
- if (start == NULL)
- continue;
-
- /* at this point, we know that this is our map.
- * now let's find the file */
- path_end = strstr(start, "\n");
- break;
- }
-
- if (path_end == NULL) {
- RTE_LOG(ERR, EAL, "Hugefile path not found!\n");
- goto error;
- }
-
- /* calculate offset and copy the file path */
- snprintf(e->filepath, RTE_PTR_DIFF(path_end, start) + 1, "%s", start);
-
- e->offset = virt_addr - start_addr;
-
- fclose(f);
-
- return 0;
-error:
- fclose(f);
- return -1;
-}
-
-/*
- * This is a complex function. What it does is the following:
- * 1. Goes through metadata and gets list of hugepages involved
- * 2. Sorts the hugepages by size (1G first)
- * 3. Goes through metadata again and writes correct offsets
- * 4. Goes through pages and finds out their filenames, offsets etc.
- */
-static int
-build_config(struct rte_ivshmem_metadata * metadata)
-{
- struct rte_ivshmem_metadata_entry * e_local;
- struct memseg_cache_entry * ms_local;
- struct rte_memseg pages[IVSHMEM_MAX_PAGES];
- struct rte_ivshmem_metadata_entry *entry;
- struct memseg_cache_entry * c_entry, * prev_entry;
- struct ivshmem_config * config;
- unsigned i, j, mz_iter, ms_iter;
- uint64_t biggest_len;
- int biggest_idx;
-
- /* return error if we try to use an unknown config file */
- config = get_config_by_name(metadata->name);
- if (config == NULL) {
- RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", metadata->name);
- goto fail_e;
- }
-
- memset(pages, 0, sizeof(pages));
-
- e_local = malloc(sizeof(config->metadata->entry));
- if (e_local == NULL)
- goto fail_e;
- ms_local = malloc(sizeof(config->memseg_cache));
- if (ms_local == NULL)
- goto fail_ms;
-
-
- /* make local copies before doing anything */
- memcpy(e_local, config->metadata->entry, sizeof(config->metadata->entry));
- memcpy(ms_local, config->memseg_cache, sizeof(config->memseg_cache));
-
- qsort(e_local, RTE_DIM(config->metadata->entry), sizeof(struct rte_ivshmem_metadata_entry),
- entry_compare);
-
- /* first pass - collect all huge pages */
- for (mz_iter = 0; mz_iter < RTE_DIM(config->metadata->entry); mz_iter++) {
-
- entry = &e_local[mz_iter];
-
- uint64_t start_addr = RTE_ALIGN_FLOOR(entry->mz.addr_64,
- entry->mz.hugepage_sz);
- uint64_t offset = entry->mz.addr_64 - start_addr;
- uint64_t len = RTE_ALIGN_CEIL(entry->mz.len + offset,
- entry->mz.hugepage_sz);
-
- if (entry->mz.addr_64 == 0 || start_addr == 0 || len == 0)
- continue;
-
- int start_page;
-
- /* find first unused page - mz are phys_addr sorted so we don't have to
- * look out for holes */
- for (i = 0; i < RTE_DIM(pages); i++) {
-
- /* skip if we already have this page */
- if (pages[i].addr_64 == start_addr) {
- start_addr += entry->mz.hugepage_sz;
- len -= entry->mz.hugepage_sz;
- continue;
- }
- /* we found a new page */
- else if (pages[i].addr_64 == 0) {
- start_page = i;
- break;
- }
- }
- if (i == RTE_DIM(pages)) {
- RTE_LOG(ERR, EAL, "Cannot find unused page!\n");
- goto fail;
- }
-
- /* populate however many pages the memzone has */
- for (i = start_page; i < RTE_DIM(pages) && len != 0; i++) {
-
- pages[i].addr_64 = start_addr;
- pages[i].len = entry->mz.hugepage_sz;
- start_addr += entry->mz.hugepage_sz;
- len -= entry->mz.hugepage_sz;
- }
- /* if there's still length left */
- if (len != 0) {
- RTE_LOG(ERR, EAL, "Not enough space for pages!\n");
- goto fail;
- }
- }
-
- /* second pass - sort pages by size */
- for (i = 0; i < RTE_DIM(pages); i++) {
-
- if (pages[i].addr == NULL)
- break;
-
- biggest_len = 0;
- biggest_idx = -1;
-
- /*
- * browse all entries starting at 'i', and find the
- * entry with the smallest addr
- */
- for (j=i; j< RTE_DIM(pages); j++) {
- if (pages[j].addr == NULL)
- break;
- if (biggest_len == 0 ||
- pages[j].len > biggest_len) {
- biggest_len = pages[j].len;
- biggest_idx = j;
- }
- }
-
- /* should not happen */
- if (biggest_idx == -1) {
- RTE_LOG(ERR, EAL, "Error sorting by size!\n");
- goto fail;
- }
- if (i != (unsigned) biggest_idx) {
- struct rte_memseg tmp;
-
- memcpy(&tmp, &pages[biggest_idx], sizeof(struct rte_memseg));
-
- /* we don't want to break contiguousness, so instead of just
- * swapping segments, we move all the preceding segments to the
- * right and then put the old segment @ biggest_idx in place of
- * segment @ i */
- for (j = biggest_idx - 1; j >= i; j--) {
- memcpy(&pages[j+1], &pages[j], sizeof(struct rte_memseg));
- memset(&pages[j], 0, sizeof(struct rte_memseg));
- }
-
- /* put old biggest segment to its new place */
- memcpy(&pages[i], &tmp, sizeof(struct rte_memseg));
- }
- }
-
- /* third pass - write correct offsets */
- for (mz_iter = 0; mz_iter < RTE_DIM(config->metadata->entry); mz_iter++) {
-
- uint64_t offset = 0;
-
- entry = &e_local[mz_iter];
-
- if (entry->mz.addr_64 == 0)
- break;
-
- /* find page for current memzone */
- for (i = 0; i < RTE_DIM(pages); i++) {
- /* we found our page */
- if (entry->mz.addr_64 >= pages[i].addr_64 &&
- entry->mz.addr_64 < pages[i].addr_64 + pages[i].len) {
- entry->offset = (entry->mz.addr_64 - pages[i].addr_64) +
- offset;
- break;
- }
- offset += pages[i].len;
- }
- if (i == RTE_DIM(pages)) {
- RTE_LOG(ERR, EAL, "Page not found!\n");
- goto fail;
- }
- }
-
- ms_iter = 0;
- prev_entry = NULL;
-
- /* fourth pass - create proper memseg cache */
- for (i = 0; i < RTE_DIM(pages) &&
- ms_iter <= RTE_DIM(config->memseg_cache); i++) {
- if (pages[i].addr_64 == 0)
- break;
-
-
- if (ms_iter == RTE_DIM(pages)) {
- RTE_LOG(ERR, EAL, "The universe has collapsed!\n");
- goto fail;
- }
-
- c_entry = &ms_local[ms_iter];
- c_entry->len = pages[i].len;
-
- if (get_hugefile_by_virt_addr(pages[i].addr_64, c_entry) < 0)
- goto fail;
-
- /* if previous entry has the same filename and is contiguous,
- * clear current entry and increase previous entry's length
- */
- if (prev_entry != NULL &&
- strncmp(c_entry->filepath, prev_entry->filepath,
- sizeof(c_entry->filepath)) == 0 &&
- prev_entry->offset + prev_entry->len == c_entry->offset) {
- prev_entry->len += pages[i].len;
- memset(c_entry, 0, sizeof(struct memseg_cache_entry));
- }
- else {
- prev_entry = c_entry;
- ms_iter++;
- }
- }
-
- /* update current configuration with new valid data */
- memcpy(config->metadata->entry, e_local, sizeof(config->metadata->entry));
- memcpy(config->memseg_cache, ms_local, sizeof(config->memseg_cache));
-
- free(ms_local);
- free(e_local);
-
- return 0;
-fail:
- free(ms_local);
-fail_ms:
- free(e_local);
-fail_e:
- return -1;
-}
-
-static int
-add_memzone_to_metadata(const struct rte_memzone * mz,
- struct ivshmem_config * config)
-{
- struct rte_ivshmem_metadata_entry * entry;
- unsigned i;
-
- rte_spinlock_lock(&config->sl);
-
- /* find free slot in this config */
- for (i = 0; i < RTE_DIM(config->metadata->entry); i++) {
- entry = &config->metadata->entry[i];
-
- if (entry->mz.addr_64 != 0 && overlap(mz, &entry->mz)) {
- RTE_LOG(ERR, EAL, "Overlapping memzones!\n");
- goto fail;
- }
-
- /* if addr is zero, the memzone is probably free */
- if (entry->mz.addr_64 == 0) {
- RTE_LOG(DEBUG, EAL, "Adding memzone '%s' at %p to metadata %s\n",
- mz->name, mz->addr, config->metadata->name);
- memcpy(&entry->mz, mz, sizeof(struct rte_memzone));
-
- /* run config file parser */
- if (build_config(config->metadata) < 0)
- goto fail;
-
- break;
- }
- }
-
- /* if we reached the maximum, that means we have no place in config */
- if (i == RTE_DIM(config->metadata->entry)) {
- RTE_LOG(ERR, EAL, "No space left in IVSHMEM metadata %s!\n",
- config->metadata->name);
- goto fail;
- }
-
- rte_spinlock_unlock(&config->sl);
- return 0;
-fail:
- rte_spinlock_unlock(&config->sl);
- return -1;
-}
-
-static int
-add_ring_to_metadata(const struct rte_ring * r,
- struct ivshmem_config * config)
-{
- struct rte_memzone * mz;
-
- mz = get_memzone_by_addr(r);
-
- if (!mz) {
- RTE_LOG(ERR, EAL, "Cannot find memzone for ring!\n");
- return -1;
- }
-
- return add_memzone_to_metadata(mz, config);
-}
-
-static int
-add_mempool_to_metadata(const struct rte_mempool * mp,
- struct ivshmem_config * config)
-{
- struct rte_memzone * mz;
- int ret;
-
- mz = get_memzone_by_addr(mp);
- ret = 0;
-
- if (!mz) {
- RTE_LOG(ERR, EAL, "Cannot find memzone for mempool!\n");
- return -1;
- }
-
- /* mempool consists of memzone and ring */
- ret = add_memzone_to_metadata(mz, config);
- if (ret < 0)
- return -1;
-
- return add_ring_to_metadata(mp->ring, config);
-}
-
-int
-rte_ivshmem_metadata_add_ring(const struct rte_ring * r, const char * name)
-{
- struct ivshmem_config * config;
-
- if (name == NULL || r == NULL)
- return -1;
-
- config = get_config_by_name(name);
-
- if (config == NULL) {
- RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
- return -1;
- }
-
- return add_ring_to_metadata(r, config);
-}
-
-int
-rte_ivshmem_metadata_add_memzone(const struct rte_memzone * mz, const char * name)
-{
- struct ivshmem_config * config;
-
- if (name == NULL || mz == NULL)
- return -1;
-
- config = get_config_by_name(name);
-
- if (config == NULL) {
- RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
- return -1;
- }
-
- return add_memzone_to_metadata(mz, config);
-}
-
-int
-rte_ivshmem_metadata_add_mempool(const struct rte_mempool * mp, const char * name)
-{
- struct ivshmem_config * config;
-
- if (name == NULL || mp == NULL)
- return -1;
-
- config = get_config_by_name(name);
-
- if (config == NULL) {
- RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
- return -1;
- }
-
- return add_mempool_to_metadata(mp, config);
-}
-
-static inline void
-ivshmem_config_path(char *buffer, size_t bufflen, const char *name)
-{
- snprintf(buffer, bufflen, IVSHMEM_CONFIG_FILE_FMT, name);
-}
-
-
-
-static inline
-void *ivshmem_metadata_create(const char *name, size_t size,
- struct flock *lock)
-{
- int retval, fd;
- void *metadata_addr;
- char pathname[PATH_MAX];
-
- ivshmem_config_path(pathname, sizeof(pathname), name);
-
- fd = open(pathname, O_RDWR | O_CREAT, 0660);
- if (fd < 0) {
- RTE_LOG(ERR, EAL, "Cannot open '%s'\n", pathname);
- return NULL;
- }
-
- size = METADATA_SIZE_ALIGNED;
-
- retval = fcntl(fd, F_SETLK, lock);
- if (retval < 0){
- close(fd);
- RTE_LOG(ERR, EAL, "Cannot create lock on '%s'. Is another "
- "process using it?\n", pathname);
- return NULL;
- }
-
- retval = ftruncate(fd, size);
- if (retval < 0){
- close(fd);
- RTE_LOG(ERR, EAL, "Cannot resize '%s'\n", pathname);
- return NULL;
- }
-
- metadata_addr = mmap(NULL, size,
- PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-
- if (metadata_addr == MAP_FAILED){
- RTE_LOG(ERR, EAL, "Cannot mmap memory for '%s'\n", pathname);
-
- /* we don't care if we can't unlock */
- fcntl(fd, F_UNLCK, lock);
- close(fd);
-
- return NULL;
- }
-
- return metadata_addr;
-}
-
-int rte_ivshmem_metadata_create(const char *name)
-{
- struct ivshmem_config * ivshmem_config;
- unsigned index;
-
- if (pagesz == 0)
- pagesz = getpagesize();
-
- if (name == NULL)
- return -1;
-
- rte_spinlock_lock(&global_cfg_sl);
-
- for (index = 0; index < RTE_DIM(ivshmem_global_config); index++) {
- if (ivshmem_global_config[index].metadata == NULL) {
- ivshmem_config = &ivshmem_global_config[index];
- break;
- }
- }
-
- if (index == RTE_DIM(ivshmem_global_config)) {
- RTE_LOG(ERR, EAL, "Cannot create more ivshmem config files. "
- "Maximum has been reached\n");
- rte_spinlock_unlock(&global_cfg_sl);
- return -1;
- }
-
- ivshmem_config->lock.l_type = F_WRLCK;
- ivshmem_config->lock.l_whence = SEEK_SET;
-
- ivshmem_config->lock.l_start = 0;
- ivshmem_config->lock.l_len = METADATA_SIZE_ALIGNED;
-
- ivshmem_global_config[index].metadata = ((struct rte_ivshmem_metadata *)
- ivshmem_metadata_create(
- name,
- sizeof(struct rte_ivshmem_metadata),
- &ivshmem_config->lock));
-
- if (ivshmem_global_config[index].metadata == NULL) {
- rte_spinlock_unlock(&global_cfg_sl);
- return -1;
- }
-
- /* Metadata setup */
- memset(ivshmem_config->metadata, 0, sizeof(struct rte_ivshmem_metadata));
- ivshmem_config->metadata->magic_number = IVSHMEM_MAGIC;
- snprintf(ivshmem_config->metadata->name,
- sizeof(ivshmem_config->metadata->name), "%s", name);
-
- rte_spinlock_unlock(&global_cfg_sl);
-
- return 0;
-}
-
-int
-rte_ivshmem_metadata_cmdline_generate(char *buffer, unsigned size, const char *name)
-{
- const struct memseg_cache_entry * ms_cache, *entry;
- struct ivshmem_config * config;
- char cmdline[IVSHMEM_QEMU_CMDLINE_BUFSIZE], *cmdline_ptr;
- char cfg_file_path[PATH_MAX];
- unsigned remaining_len, tmplen, iter;
- uint64_t shared_mem_size, zero_size, total_size;
-
- if (buffer == NULL || name == NULL)
- return -1;
-
- config = get_config_by_name(name);
-
- if (config == NULL) {
- RTE_LOG(ERR, EAL, "Config %s not found!\n", name);
- return -1;
- }
-
- rte_spinlock_lock(&config->sl);
-
- /* prepare metadata file path */
- snprintf(cfg_file_path, sizeof(cfg_file_path), IVSHMEM_CONFIG_FILE_FMT,
- config->metadata->name);
-
- ms_cache = config->memseg_cache;
-
- cmdline_ptr = cmdline;
- remaining_len = sizeof(cmdline);
-
- shared_mem_size = 0;
- iter = 0;
-
- while ((iter < RTE_DIM(config->metadata->entry)) && (ms_cache[iter].len != 0)) {
-
- entry = &ms_cache[iter];
-
- /* Offset and sizes within the current pathname */
- tmplen = snprintf(cmdline_ptr, remaining_len, IVSHMEM_QEMU_CMD_FD_FMT,
- entry->filepath, entry->offset, entry->len);
-
- shared_mem_size += entry->len;
-
- cmdline_ptr = RTE_PTR_ADD(cmdline_ptr, tmplen);
- remaining_len -= tmplen;
-
- if (remaining_len == 0) {
- RTE_LOG(ERR, EAL, "Command line too long!\n");
- rte_spinlock_unlock(&config->sl);
- return -1;
- }
-
- iter++;
- }
-
- total_size = rte_align64pow2(shared_mem_size + METADATA_SIZE_ALIGNED);
- zero_size = total_size - shared_mem_size - METADATA_SIZE_ALIGNED;
-
- /* add /dev/zero to command-line to fill the space */
- tmplen = snprintf(cmdline_ptr, remaining_len, IVSHMEM_QEMU_CMD_FD_FMT,
- "/dev/zero",
- (uint64_t)0x0,
- zero_size);
-
- cmdline_ptr = RTE_PTR_ADD(cmdline_ptr, tmplen);
- remaining_len -= tmplen;
-
- if (remaining_len == 0) {
- RTE_LOG(ERR, EAL, "Command line too long!\n");
- rte_spinlock_unlock(&config->sl);
- return -1;
- }
-
- /* add metadata file to the end of command-line */
- tmplen = snprintf(cmdline_ptr, remaining_len, IVSHMEM_QEMU_CMD_FD_FMT,
- cfg_file_path,
- (uint64_t)0x0,
- METADATA_SIZE_ALIGNED);
-
- cmdline_ptr = RTE_PTR_ADD(cmdline_ptr, tmplen);
- remaining_len -= tmplen;
-
- if (remaining_len == 0) {
- RTE_LOG(ERR, EAL, "Command line too long!\n");
- rte_spinlock_unlock(&config->sl);
- return -1;
- }
-
- /* if current length of the command line is bigger than the buffer supplied
- * by the user, or if command-line is bigger than what IVSHMEM accepts */
- if ((sizeof(cmdline) - remaining_len) > size) {
- RTE_LOG(ERR, EAL, "Buffer is too short!\n");
- rte_spinlock_unlock(&config->sl);
- return -1;
- }
- /* complete the command-line */
- snprintf(buffer, size,
- IVSHMEM_QEMU_CMD_LINE_HEADER_FMT,
- total_size >> 20,
- cmdline);
-
- rte_spinlock_unlock(&config->sl);
-
- return 0;
-}
-
-void
-rte_ivshmem_metadata_dump(FILE *f, const char *name)
-{
- unsigned i = 0;
- struct ivshmem_config * config;
- struct rte_ivshmem_metadata_entry *entry;
-#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
- uint64_t addr;
- uint64_t end, hugepage_sz;
- struct memseg_cache_entry e;
-#endif
-
- if (name == NULL)
- return;
-
- /* return error if we try to use an unknown config file */
- config = get_config_by_name(name);
- if (config == NULL) {
- RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
- return;
- }
-
- rte_spinlock_lock(&config->sl);
-
- entry = &config->metadata->entry[0];
-
- while (entry->mz.addr != NULL && i < RTE_DIM(config->metadata->entry)) {
-
- fprintf(f, "Entry %u: name:<%-20s>, phys:0x%-15lx, len:0x%-15lx, "
- "virt:%-15p, off:0x%-15lx\n",
- i,
- entry->mz.name,
- entry->mz.phys_addr,
- entry->mz.len,
- entry->mz.addr,
- entry->offset);
- i++;
-
-#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
- fprintf(f, "\tHugepage files:\n");
-
- hugepage_sz = entry->mz.hugepage_sz;
- addr = RTE_ALIGN_FLOOR(entry->mz.addr_64, hugepage_sz);
- end = addr + RTE_ALIGN_CEIL(entry->mz.len + (entry->mz.addr_64 - addr),
- hugepage_sz);
-
- for (; addr < end; addr += hugepage_sz) {
- memset(&e, 0, sizeof(e));
-
- get_hugefile_by_virt_addr(addr, &e);
-
- fprintf(f, "\t0x%"PRIx64 "-0x%" PRIx64 " offset: 0x%" PRIx64 " %s\n",
- addr, addr + hugepage_sz, e.offset, e.filepath);
- }
-#endif
- entry++;
- }
-
- rte_spinlock_unlock(&config->sl);
-}
diff --git a/src/dpdk_lib18/librte_ivshmem/rte_ivshmem.h b/src/dpdk_lib18/librte_ivshmem/rte_ivshmem.h
deleted file mode 100755
index a5d36d6b..00000000
--- a/src/dpdk_lib18/librte_ivshmem/rte_ivshmem.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef RTE_IVSHMEM_H_
-#define RTE_IVSHMEM_H_
-
-#include <rte_memzone.h>
-#include <rte_mempool.h>
-
-/**
- * @file
- *
- * The RTE IVSHMEM interface provides functions to create metadata files
- * describing memory segments to be shared via QEMU IVSHMEM.
- */
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define IVSHMEM_MAGIC 0x0BADC0DE
-#define IVSHMEM_NAME_LEN 32
-
-/**
- * Structure that holds IVSHMEM shared metadata entry.
- */
-struct rte_ivshmem_metadata_entry {
- struct rte_memzone mz; /**< shared memzone */
- uint64_t offset; /**< offset of memzone within IVSHMEM device */
-};
-
-/**
- * Structure that holds IVSHMEM metadata.
- */
-struct rte_ivshmem_metadata {
- int magic_number; /**< magic number */
- char name[IVSHMEM_NAME_LEN]; /**< name of the metadata file */
- struct rte_ivshmem_metadata_entry entry[RTE_LIBRTE_IVSHMEM_MAX_ENTRIES];
- /**< metadata entries */
-};
-
-/**
- * Creates metadata file with a given name
- *
- * @param name
- * Name of metadata file to be created
- *
- * @return
- * - On success, zero
- * - On failure, a negative value
- */
-int rte_ivshmem_metadata_create(const char * name);
-
-/**
- * Adds memzone to a specific metadata file
- *
- * @param mz
- * Memzone to be added
- * @param md_name
- * Name of metadata file for the memzone to be added to
- *
- * @return
- * - On success, zero
- * - On failure, a negative value
- */
-int rte_ivshmem_metadata_add_memzone(const struct rte_memzone * mz,
- const char * md_name);
-
-/**
- * Adds a ring descriptor to a specific metadata file
- *
- * @param r
- * Ring descriptor to be added
- * @param md_name
- * Name of metadata file for the ring to be added to
- *
- * @return
- * - On success, zero
- * - On failure, a negative value
- */
-int rte_ivshmem_metadata_add_ring(const struct rte_ring * r,
- const char * md_name);
-
-/**
- * Adds a mempool to a specific metadata file
- *
- * @param mp
- * Mempool to be added
- * @param md_name
- * Name of metadata file for the mempool to be added to
- *
- * @return
- * - On success, zero
- * - On failure, a negative value
- */
-int rte_ivshmem_metadata_add_mempool(const struct rte_mempool * mp,
- const char * md_name);
-
-
-/**
- * Generates the QEMU command-line for IVSHMEM device for a given metadata file.
- * This function is to be called after all the objects were added.
- *
- * @param buffer
- * Buffer to be filled with the command line arguments.
- * @param size
- * Size of the buffer.
- * @param name
- * Name of metadata file to generate QEMU command-line parameters for
- *
- * @return
- * - On success, zero
- * - On failure, a negative value
- */
-int rte_ivshmem_metadata_cmdline_generate(char *buffer, unsigned size,
- const char *name);
-
-
-/**
- * Dump all metadata entries from a given metadata file to an output stream.
- *
- * @param f
- * A pointer to a file for output
- * @param name
- * Name of the metadata file to be dumped.
- */
-void rte_ivshmem_metadata_dump(FILE *f, const char *name);
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* RTE_IVSHMEM_H_ */
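
For reference, the workflow the header above describes, as a minimal sketch (not part of this patch; the metadata name, the ring and the buffer size are illustrative): create the metadata file, register objects, then generate the QEMU command line.

#include <stdio.h>
#include <rte_ring.h>
#include <rte_ivshmem.h>

static int
export_ring_to_guest(const struct rte_ring *r)
{
	char cmdline[1024];

	if (rte_ivshmem_metadata_create("guest0") < 0)
		return -1;
	if (rte_ivshmem_metadata_add_ring(r, "guest0") < 0)
		return -1;
	if (rte_ivshmem_metadata_cmdline_generate(cmdline,
			sizeof(cmdline), "guest0") < 0)
		return -1;

	printf("%s\n", cmdline);	/* append to QEMU's argument list */
	return 0;
}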
diff --git a/src/dpdk_lib18/librte_kni/Makefile b/src/dpdk_lib18/librte_kni/Makefile
deleted file mode 100755
index 52673040..00000000
--- a/src/dpdk_lib18/librte_kni/Makefile
+++ /dev/null
@@ -1,49 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_kni.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 -fno-strict-aliasing
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_KNI) := rte_kni.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_KNI)-include := rte_kni.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_KNI) += lib/librte_eal lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_KNI) += lib/librte_ether
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_kni/rte_kni.c b/src/dpdk_lib18/librte_kni/rte_kni.c
deleted file mode 100755
index fdb75094..00000000
--- a/src/dpdk_lib18/librte_kni/rte_kni.c
+++ /dev/null
@@ -1,747 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef RTE_EXEC_ENV_LINUXAPP
-#error "KNI is not supported"
-#endif
-
-#include <string.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/ioctl.h>
-
-#include <rte_spinlock.h>
-#include <rte_string_fns.h>
-#include <rte_ethdev.h>
-#include <rte_malloc.h>
-#include <rte_log.h>
-#include <rte_kni.h>
-#include <rte_memzone.h>
-#include <exec-env/rte_kni_common.h>
-#include "rte_kni_fifo.h"
-
-#define MAX_MBUF_BURST_NUM 32
-
-/* Maximum number of ring entries */
-#define KNI_FIFO_COUNT_MAX 1024
-#define KNI_FIFO_SIZE (KNI_FIFO_COUNT_MAX * sizeof(void *) + \
- sizeof(struct rte_kni_fifo))
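-
-/*
- * Editorial note: with 64-bit pointers this works out to 1024 * 8 bytes of
- * slot storage plus the fifo header, i.e. slightly over 8 KB per ring.
- */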
-
-#define KNI_REQUEST_MBUF_NUM_MAX 32
-
-#define KNI_MEM_CHECK(cond) do { if (cond) goto kni_fail; } while (0)
-
-/**
- * KNI context
- */
-struct rte_kni {
- char name[RTE_KNI_NAMESIZE]; /**< KNI interface name */
- uint16_t group_id; /**< Group ID of KNI devices */
- uint32_t slot_id; /**< KNI pool slot ID */
- struct rte_mempool *pktmbuf_pool; /**< pkt mbuf mempool */
- unsigned mbuf_size; /**< mbuf size */
-
- struct rte_kni_fifo *tx_q; /**< TX queue */
- struct rte_kni_fifo *rx_q; /**< RX queue */
- struct rte_kni_fifo *alloc_q; /**< Allocated mbufs queue */
- struct rte_kni_fifo *free_q; /**< To be freed mbufs queue */
-
- /* For request & response */
- struct rte_kni_fifo *req_q; /**< Request queue */
- struct rte_kni_fifo *resp_q; /**< Response queue */
- void * sync_addr; /**< Req/Resp Mem address */
-
- struct rte_kni_ops ops; /**< operations for request */
- uint8_t in_use : 1; /**< kni in use */
-};
-
-enum kni_ops_status {
- KNI_REQ_NO_REGISTER = 0,
- KNI_REQ_REGISTERED,
-};
-
-/**
- * KNI memzone pool slot
- */
-struct rte_kni_memzone_slot {
- uint32_t id;
- uint8_t in_use : 1; /**< slot in use */
-
- /* Memzones */
- const struct rte_memzone *m_ctx; /**< KNI ctx */
- const struct rte_memzone *m_tx_q; /**< TX queue */
- const struct rte_memzone *m_rx_q; /**< RX queue */
- const struct rte_memzone *m_alloc_q; /**< Allocated mbufs queue */
- const struct rte_memzone *m_free_q; /**< To be freed mbufs queue */
- const struct rte_memzone *m_req_q; /**< Request queue */
- const struct rte_memzone *m_resp_q; /**< Response queue */
- const struct rte_memzone *m_sync_addr;
-
- /* Free linked list */
- struct rte_kni_memzone_slot *next; /**< Next slot link.list */
-};
-
-/**
- * KNI memzone pool
- */
-struct rte_kni_memzone_pool {
- uint8_t initialized : 1; /**< Global KNI pool init flag */
-
- uint32_t max_ifaces; /**< Max. num of KNI ifaces */
- struct rte_kni_memzone_slot *slots; /**< Pool slots */
- rte_spinlock_t mutex; /**< alloc/release mutex */
-
- /* Free memzone slots linked-list */
- struct rte_kni_memzone_slot *free; /**< First empty slot */
- struct rte_kni_memzone_slot *free_tail; /**< Last empty slot */
-};
-
-
-static void kni_free_mbufs(struct rte_kni *kni);
-static void kni_allocate_mbufs(struct rte_kni *kni);
-
-static volatile int kni_fd = -1;
-static struct rte_kni_memzone_pool kni_memzone_pool = {
- .initialized = 0,
-};
-
-static const struct rte_memzone *
-kni_memzone_reserve(const char *name, size_t len, int socket_id,
- unsigned flags)
-{
- const struct rte_memzone *mz = rte_memzone_lookup(name);
-
- if (mz == NULL)
- mz = rte_memzone_reserve(name, len, socket_id, flags);
-
- return mz;
-}
-
-/* Pool mgmt */
-static struct rte_kni_memzone_slot*
-kni_memzone_pool_alloc(void)
-{
- struct rte_kni_memzone_slot *slot;
-
- rte_spinlock_lock(&kni_memzone_pool.mutex);
-
- if (!kni_memzone_pool.free) {
- rte_spinlock_unlock(&kni_memzone_pool.mutex);
- return NULL;
- }
-
- slot = kni_memzone_pool.free;
- kni_memzone_pool.free = slot->next;
- slot->in_use = 1;
-
- if (!kni_memzone_pool.free)
- kni_memzone_pool.free_tail = NULL;
-
- rte_spinlock_unlock(&kni_memzone_pool.mutex);
-
- return slot;
-}
-
-static void
-kni_memzone_pool_release(struct rte_kni_memzone_slot *slot)
-{
- rte_spinlock_lock(&kni_memzone_pool.mutex);
-
- if (kni_memzone_pool.free)
- kni_memzone_pool.free_tail->next = slot;
- else
- kni_memzone_pool.free = slot;
-
- kni_memzone_pool.free_tail = slot;
- slot->next = NULL;
- slot->in_use = 0;
-
- rte_spinlock_unlock(&kni_memzone_pool.mutex);
-}
-
-
-/* Shall be called before any allocation happens */
-void
-rte_kni_init(unsigned int max_kni_ifaces)
-{
- uint32_t i;
- struct rte_kni_memzone_slot *it;
- const struct rte_memzone *mz;
-#define OBJNAMSIZ 32
- char obj_name[OBJNAMSIZ];
- char mz_name[RTE_MEMZONE_NAMESIZE];
-
- if (max_kni_ifaces == 0) {
- RTE_LOG(ERR, KNI, "Invalid number of max_kni_ifaces %u\n",
- max_kni_ifaces);
- rte_panic("Unable to initialize KNI\n");
- }
-
- /* Check FD and open */
- if (kni_fd < 0) {
- kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
- if (kni_fd < 0)
- rte_panic("Can not open /dev/%s\n", KNI_DEVICE);
- }
-
- /* Allocate slot objects */
- kni_memzone_pool.slots = (struct rte_kni_memzone_slot *)
- rte_malloc(NULL,
- sizeof(struct rte_kni_memzone_slot) *
- max_kni_ifaces,
- 0);
- KNI_MEM_CHECK(kni_memzone_pool.slots == NULL);
-
- /* Initialize general pool variables */
- kni_memzone_pool.initialized = 1;
- kni_memzone_pool.max_ifaces = max_kni_ifaces;
- kni_memzone_pool.free = &kni_memzone_pool.slots[0];
- rte_spinlock_init(&kni_memzone_pool.mutex);
-
- /* Pre-allocate all memzones of all the slots; panic on error */
- for (i = 0; i < max_kni_ifaces; i++) {
-
- /* Recover current slot */
- it = &kni_memzone_pool.slots[i];
- it->id = i;
-
- /* Allocate KNI context */
- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%d", i);
- mz = kni_memzone_reserve(mz_name, sizeof(struct rte_kni),
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_ctx = mz;
-
- /* TX RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_tx_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_tx_q = mz;
-
- /* RX RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_rx_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_rx_q = mz;
-
- /* ALLOC RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_alloc_q = mz;
-
- /* FREE RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_free_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_free_q = mz;
-
- /* Request RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_req_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_req_q = mz;
-
- /* Response RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_resp_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_resp_q = mz;
-
- /* Req/Resp sync mem area */
- snprintf(obj_name, OBJNAMSIZ, "kni_sync_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_sync_addr = mz;
-
- if ((i+1) == max_kni_ifaces) {
- it->next = NULL;
- kni_memzone_pool.free_tail = it;
- } else
- it->next = &kni_memzone_pool.slots[i+1];
- }
-
- return;
-
-kni_fail:
- rte_panic("Unable to allocate memory for max_kni_ifaces:%u. Increase the amount of hugepage memory\n",
- max_kni_ifaces);
-}
-
-/* It is deprecated and just for backward compatibility */
-struct rte_kni *
-rte_kni_create(uint8_t port_id,
- unsigned mbuf_size,
- struct rte_mempool *pktmbuf_pool,
- struct rte_kni_ops *ops)
-{
- struct rte_kni_conf conf;
- struct rte_eth_dev_info info;
-
- memset(&info, 0, sizeof(info));
- memset(&conf, 0, sizeof(conf));
- rte_eth_dev_info_get(port_id, &info);
-
- snprintf(conf.name, sizeof(conf.name), "vEth%u", port_id);
- conf.addr = info.pci_dev->addr;
- conf.id = info.pci_dev->id;
- conf.group_id = (uint16_t)port_id;
- conf.mbuf_size = mbuf_size;
-
- /* Save the port id for request handling */
- ops->port_id = port_id;
-
- return rte_kni_alloc(pktmbuf_pool, &conf, ops);
-}
-
-struct rte_kni *
-rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
- const struct rte_kni_conf *conf,
- struct rte_kni_ops *ops)
-{
- int ret;
- struct rte_kni_device_info dev_info;
- struct rte_kni *ctx;
- char intf_name[RTE_KNI_NAMESIZE];
- char mz_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
- struct rte_kni_memzone_slot *slot = NULL;
-
- if (!pktmbuf_pool || !conf || !conf->name[0])
- return NULL;
-
- /* Check if KNI subsystem has been initialized */
- if (kni_memzone_pool.initialized != 1) {
- RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
- return NULL;
- }
-
- /* Get an available slot from the pool */
- slot = kni_memzone_pool_alloc();
- if (!slot) {
- RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces (current %u) or release unused ones.\n",
- kni_memzone_pool.max_ifaces);
- return NULL;
- }
-
- /* Recover ctx */
- ctx = slot->m_ctx->addr;
- snprintf(intf_name, RTE_KNI_NAMESIZE, "%s", conf->name);
-
- if (ctx->in_use) {
- RTE_LOG(ERR, KNI, "KNI %s is in use\n", ctx->name);
- return NULL;
- }
- memset(ctx, 0, sizeof(struct rte_kni));
- if (ops)
- memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));
-
- memset(&dev_info, 0, sizeof(dev_info));
- dev_info.bus = conf->addr.bus;
- dev_info.devid = conf->addr.devid;
- dev_info.function = conf->addr.function;
- dev_info.vendor_id = conf->id.vendor_id;
- dev_info.device_id = conf->id.device_id;
- dev_info.core_id = conf->core_id;
- dev_info.force_bind = conf->force_bind;
- dev_info.group_id = conf->group_id;
- dev_info.mbuf_size = conf->mbuf_size;
-
- snprintf(ctx->name, RTE_KNI_NAMESIZE, "%s", intf_name);
- snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", intf_name);
-
- RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
- dev_info.bus, dev_info.devid, dev_info.function,
- dev_info.vendor_id, dev_info.device_id);
- /* TX RING */
- mz = slot->m_tx_q;
- ctx->tx_q = mz->addr;
- kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
- dev_info.tx_phys = mz->phys_addr;
-
- /* RX RING */
- mz = slot->m_rx_q;
- ctx->rx_q = mz->addr;
- kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
- dev_info.rx_phys = mz->phys_addr;
-
- /* ALLOC RING */
- mz = slot->m_alloc_q;
- ctx->alloc_q = mz->addr;
- kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
- dev_info.alloc_phys = mz->phys_addr;
-
- /* FREE RING */
- mz = slot->m_free_q;
- ctx->free_q = mz->addr;
- kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
- dev_info.free_phys = mz->phys_addr;
-
- /* Request RING */
- mz = slot->m_req_q;
- ctx->req_q = mz->addr;
- kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
- dev_info.req_phys = mz->phys_addr;
-
- /* Response RING */
- mz = slot->m_resp_q;
- ctx->resp_q = mz->addr;
- kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
- dev_info.resp_phys = mz->phys_addr;
-
- /* Req/Resp sync mem area */
- mz = slot->m_sync_addr;
- ctx->sync_addr = mz->addr;
- dev_info.sync_va = mz->addr;
- dev_info.sync_phys = mz->phys_addr;
-
-
- /* MBUF mempool */
- snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_OBJ_NAME,
- pktmbuf_pool->name);
- mz = rte_memzone_lookup(mz_name);
- KNI_MEM_CHECK(mz == NULL);
- dev_info.mbuf_va = mz->addr;
- dev_info.mbuf_phys = mz->phys_addr;
- ctx->pktmbuf_pool = pktmbuf_pool;
- ctx->group_id = conf->group_id;
- ctx->slot_id = slot->id;
- ctx->mbuf_size = conf->mbuf_size;
-
- ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
- KNI_MEM_CHECK(ret < 0);
-
- ctx->in_use = 1;
-
- return ctx;
-
-kni_fail:
- if (slot)
- kni_memzone_pool_release(&kni_memzone_pool.slots[slot->id]);
-
- return NULL;
-}
-
-static void
-kni_free_fifo(struct rte_kni_fifo *fifo)
-{
- int ret;
- struct rte_mbuf *pkt;
-
- do {
- ret = kni_fifo_get(fifo, (void **)&pkt, 1);
- if (ret)
- rte_pktmbuf_free(pkt);
- } while (ret);
-}
-
-int
-rte_kni_release(struct rte_kni *kni)
-{
- struct rte_kni_device_info dev_info;
- uint32_t slot_id;
-
- if (!kni || !kni->in_use)
- return -1;
-
- snprintf(dev_info.name, sizeof(dev_info.name), "%s", kni->name);
- if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) {
- RTE_LOG(ERR, KNI, "Fail to release kni device\n");
- return -1;
- }
-
- /* mbufs in all fifos should be released, except request/response */
- kni_free_fifo(kni->tx_q);
- kni_free_fifo(kni->rx_q);
- kni_free_fifo(kni->alloc_q);
- kni_free_fifo(kni->free_q);
-
- slot_id = kni->slot_id;
-
- /* Memset the KNI struct */
- memset(kni, 0, sizeof(struct rte_kni));
-
- /* Release memzone */
- if (slot_id >= kni_memzone_pool.max_ifaces) {
- rte_panic("KNI pool: corrupted slot ID: %d, max: %d\n",
- slot_id, kni_memzone_pool.max_ifaces);
- }
- kni_memzone_pool_release(&kni_memzone_pool.slots[slot_id]);
-
- return 0;
-}
-
-int
-rte_kni_handle_request(struct rte_kni *kni)
-{
- unsigned ret;
- struct rte_kni_request *req;
-
- if (kni == NULL)
- return -1;
-
- /* Get request mbuf */
- ret = kni_fifo_get(kni->req_q, (void **)&req, 1);
- if (ret != 1)
- return 0; /* It is OK if no request mbuf is available */
-
- if (req != kni->sync_addr) {
- rte_panic("Wrong req pointer %p\n", req);
- }
-
- /* Analyze the request and call the relevant actions for it */
- switch (req->req_id) {
- case RTE_KNI_REQ_CHANGE_MTU: /* Change MTU */
- if (kni->ops.change_mtu)
- req->result = kni->ops.change_mtu(kni->ops.port_id,
- req->new_mtu);
- break;
- case RTE_KNI_REQ_CFG_NETWORK_IF: /* Set network interface up/down */
- if (kni->ops.config_network_if)
- req->result = kni->ops.config_network_if(\
- kni->ops.port_id, req->if_up);
- break;
- default:
- RTE_LOG(ERR, KNI, "Unknown request id %u\n", req->req_id);
- req->result = -EINVAL;
- break;
- }
-
- /* Construct response mbuf and put it back to resp_q */
- ret = kni_fifo_put(kni->resp_q, (void **)&req, 1);
- if (ret != 1) {
- RTE_LOG(ERR, KNI, "Failed to put the mbuf back to resp_q\n");
- return -1; /* It is an error if the mbuf cannot be put back */
- }
-
- return 0;
-}
-
-unsigned
-rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
-{
- unsigned ret = kni_fifo_put(kni->rx_q, (void **)mbufs, num);
-
- /* Get mbufs from free_q and then free them */
- kni_free_mbufs(kni);
-
- return ret;
-}
-
-unsigned
-rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
-{
- unsigned ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num);
-
- /* Allocate mbufs and then put them into alloc_q */
- kni_allocate_mbufs(kni);
-
- return ret;
-}
-
-static void
-kni_free_mbufs(struct rte_kni *kni)
-{
- int i, ret;
- struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
-
- ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
- if (likely(ret > 0)) {
- for (i = 0; i < ret; i++)
- rte_pktmbuf_free(pkts[i]);
- }
-}
-
-static void
-kni_allocate_mbufs(struct rte_kni *kni)
-{
- int i, ret;
- struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
-
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
- offsetof(struct rte_kni_mbuf, pool));
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_addr) !=
- offsetof(struct rte_kni_mbuf, buf_addr));
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, next) !=
- offsetof(struct rte_kni_mbuf, next));
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
- offsetof(struct rte_kni_mbuf, data_off));
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
- offsetof(struct rte_kni_mbuf, data_len));
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
- offsetof(struct rte_kni_mbuf, pkt_len));
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
- offsetof(struct rte_kni_mbuf, ol_flags));
-
- /* Check if pktmbuf pool has been configured */
- if (kni->pktmbuf_pool == NULL) {
- RTE_LOG(ERR, KNI, "No valid mempool for allocating mbufs\n");
- return;
- }
-
- for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
- pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
- if (unlikely(pkts[i] == NULL)) {
- /* Out of memory */
- RTE_LOG(ERR, KNI, "Out of memory\n");
- break;
- }
- }
-
- /* No pkt mbuf allocated */
- if (i <= 0)
- return;
-
- ret = kni_fifo_put(kni->alloc_q, (void **)pkts, i);
-
- /* Free any mbufs that were not put into alloc_q */
- if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
- int j;
-
- for (j = ret; j < i; j++)
- rte_pktmbuf_free(pkts[j]);
- }
-}
-
-/* It is deprecated and just for backward compatibility */
-uint8_t
-rte_kni_get_port_id(struct rte_kni *kni)
-{
- if (!kni)
- return ~0x0;
-
- return kni->ops.port_id;
-}
-
-struct rte_kni *
-rte_kni_get(const char *name)
-{
- uint32_t i;
- struct rte_kni_memzone_slot *it;
- struct rte_kni *kni;
-
- /* Note: could be improved perf-wise if necessary */
- for (i = 0; i < kni_memzone_pool.max_ifaces; i++) {
- it = &kni_memzone_pool.slots[i];
- if (it->in_use == 0)
- continue;
- kni = it->m_ctx->addr;
- if (strncmp(kni->name, name, RTE_KNI_NAMESIZE) == 0)
- return kni;
- }
-
- return NULL;
-}
-
-/*
- * It is deprecated and just for backward compatibility.
- */
-struct rte_kni *
-rte_kni_info_get(uint8_t port_id)
-{
- char name[RTE_MEMZONE_NAMESIZE];
-
- if (port_id >= RTE_MAX_ETHPORTS)
- return NULL;
-
- snprintf(name, RTE_MEMZONE_NAMESIZE, "vEth%u", port_id);
-
- return rte_kni_get(name);
-}
-
-static enum kni_ops_status
-kni_check_request_register(struct rte_kni_ops *ops)
-{
- /* Check if KNI request ops have been registered */
- if (NULL == ops)
- return KNI_REQ_NO_REGISTER;
-
- if ((NULL == ops->change_mtu) && (NULL == ops->config_network_if))
- return KNI_REQ_NO_REGISTER;
-
- return KNI_REQ_REGISTERED;
-}
-
-int
-rte_kni_register_handlers(struct rte_kni *kni,struct rte_kni_ops *ops)
-{
- enum kni_ops_status req_status;
-
- if (NULL == ops) {
- RTE_LOG(ERR, KNI, "Invalid KNI request operation.\n");
- return -1;
- }
-
- if (NULL == kni) {
- RTE_LOG(ERR, KNI, "Invalid kni info.\n");
- return -1;
- }
-
- req_status = kni_check_request_register(&kni->ops);
- if (KNI_REQ_REGISTERED == req_status) {
- RTE_LOG(ERR, KNI, "The KNI request operation has already been registered.\n");
- return -1;
- }
-
- memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
- return 0;
-}
-
-int
-rte_kni_unregister_handlers(struct rte_kni *kni)
-{
- if (NULL == kni) {
- RTE_LOG(ERR, KNI, "Invalid kni info.\n");
- return -1;
- }
-
- kni->ops.change_mtu = NULL;
- kni->ops.config_network_if = NULL;
- return 0;
-}
-
-void
-rte_kni_close(void)
-{
- if (kni_fd < 0)
- return;
-
- close(kni_fd);
- kni_fd = -1;
-}
diff --git a/src/dpdk_lib18/librte_kni/rte_kni.h b/src/dpdk_lib18/librte_kni/rte_kni.h
deleted file mode 100755
index 815b8e2b..00000000
--- a/src/dpdk_lib18/librte_kni/rte_kni.h
+++ /dev/null
@@ -1,306 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_KNI_H_
-#define _RTE_KNI_H_
-
-/**
- * @file
- * RTE KNI
- *
- * The KNI library provides the ability to create and destroy kernel NIC
- * interfaces that may be used by the RTE application to receive/transmit
- * packets from/to Linux kernel net interfaces.
- *
- * This library provides two APIs to burst receive packets from KNI interfaces,
- * and burst transmit packets to KNI interfaces.
- */
-
-#include <rte_pci.h>
-#include <rte_mbuf.h>
-
-#include <exec-env/rte_kni_common.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct rte_kni;
-
-/**
- * Structure which has the function pointers for KNI interface.
- */
-struct rte_kni_ops {
- uint8_t port_id; /* Port ID */
-
- /* Pointer to function of changing MTU */
- int (*change_mtu)(uint8_t port_id, unsigned new_mtu);
-
- /* Pointer to function of configuring network interface */
- int (*config_network_if)(uint8_t port_id, uint8_t if_up);
-};
-
-/**
- * Structure for configuring KNI device.
- */
-struct rte_kni_conf {
- /*
- * KNI name, which will be used in the relevant network device.
- * Keep the name as short as possible, as it will be part of
- * the memzone name.
- */
- char name[RTE_KNI_NAMESIZE];
- uint32_t core_id; /* Core ID to bind kernel thread on */
- uint16_t group_id; /* Group ID */
- unsigned mbuf_size; /* mbuf size */
- struct rte_pci_addr addr;
- struct rte_pci_id id;
-
- uint8_t force_bind : 1; /* Flag to bind kernel thread */
-};
-
-/**
- * Initialize and preallocate KNI subsystem
- *
- * This function is to be executed on the MASTER lcore only, after EAL
- * initialization and before any KNI interface is attempted to be
- * allocated
- *
- * @param max_kni_ifaces
- * The maximum number of KNI interfaces that can coexist concurrently
- */
-extern void rte_kni_init(unsigned int max_kni_ifaces);
-
-
-/**
- * Allocate a KNI interface according to the port id, mbuf size, mbuf pool,
- * configurations and callbacks for kernel requests. The KNI interface created
- * in kernel space is the net interface that a traditional Linux application
- * talks to.
- *
- * rte_kni_alloc() shall not be called before rte_kni_init() has been
- * called. rte_kni_alloc() is thread safe.
- *
- * @param pktmbuf_pool
- * The mempool for allocating mbufs for packets.
- * @param conf
- * The pointer to the configurations of the KNI device.
- * @param ops
- * The pointer to the callbacks for the KNI kernel requests.
- *
- * @return
- * - The pointer to the context of a KNI interface.
- * - NULL indicates an error.
- */
-extern struct rte_kni *rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
- const struct rte_kni_conf *conf,
- struct rte_kni_ops *ops);
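-
-/*
- * Editorial sketch (not part of the original header): a minimal allocation
- * sequence, assuming a pre-created pktmbuf_pool and made-up values:
- *
- *   struct rte_kni_conf conf;
- *   struct rte_kni *kni;
- *
- *   rte_kni_init(4);                  <- once, on the MASTER lcore
- *   memset(&conf, 0, sizeof(conf));
- *   snprintf(conf.name, sizeof(conf.name), "vEth0");
- *   conf.mbuf_size = 2048;
- *   kni = rte_kni_alloc(pktmbuf_pool, &conf, NULL);
- *   if (kni == NULL)
- *       rte_exit(EXIT_FAILURE, "KNI allocation failed\n");
- */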
-
-/**
- * Create a KNI device for a specific port.
- *
- * Note: It is deprecated and just for backward compatibility.
- *
- * @param port_id
- * Port ID.
- * @param mbuf_size
- * mbuf size.
- * @param pktmbuf_pool
- * The mempool for allocating mbufs for packets.
- * @param ops
- * The pointer to the callbacks for the KNI kernel requests.
- *
- * @return
- * - The pointer to the context of a KNI interface.
- * - NULL indicates an error.
- */
-extern struct rte_kni *rte_kni_create(uint8_t port_id,
- unsigned mbuf_size,
- struct rte_mempool *pktmbuf_pool,
- struct rte_kni_ops *ops) \
- __attribute__ ((deprecated));
-
-/**
- * Release the KNI interface according to the context. It will also release
- * the paired KNI interface in kernel space. All processing on the specific
- * KNI context needs to be stopped before calling this interface.
- *
- * rte_kni_release is thread safe.
- *
- * @param kni
- * The pointer to the context of an existent KNI interface.
- *
- * @return
- * - 0 indicates success.
- * - negative value indicates failure.
- */
-extern int rte_kni_release(struct rte_kni *kni);
-
-/**
- * Handle the request mbufs sent from kernel space: analyze each request,
- * call the specific action for it, and finally construct the response mbuf
- * and put it back to the resp_q.
- *
- * @param kni
- * The pointer to the context of an existent KNI interface.
- *
- * @return
- * - 0 indicates success.
- * - negative value indicates failure.
- */
-extern int rte_kni_handle_request(struct rte_kni *kni);
-
-/**
- * Retrieve a burst of packets from a KNI interface. The retrieved packets are
- * stored in rte_mbuf structures whose pointers are supplied in the array of
- * mbufs, and the maximum number is indicated by num. It handles the freeing of
- * the mbufs in the free queue of KNI interface.
- *
- * @param kni
- * The KNI interface context.
- * @param mbufs
- * The array to store the pointers of mbufs.
- * @param num
- * The maximum number per burst.
- *
- * @return
- * The actual number of packets retrieved.
- */
-extern unsigned rte_kni_rx_burst(struct rte_kni *kni,
- struct rte_mbuf **mbufs, unsigned num);
-
-/**
- * Send a burst of packets to a KNI interface. The packets to be sent out are
- * stored in rte_mbuf structures whose pointers are supplied in the array of
- * mbufs, and the maximum number is indicated by num. It handles allocating
- * the mbufs for KNI interface alloc queue.
- *
- * @param kni
- * The KNI interface context.
- * @param mbufs
- * The array to store the pointers of mbufs.
- * @param num
- * The maximum number per burst.
- *
- * @return
- * The actual number of packets sent.
- */
-extern unsigned rte_kni_tx_burst(struct rte_kni *kni,
- struct rte_mbuf **mbufs, unsigned num);
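-
-/*
- * Editorial sketch (not part of the original header): a minimal forwarding
- * loop between one ethdev port and one KNI interface; port_id, kni and
- * BURST_SZ are assumed to be set up by the application, and freeing of
- * unsent mbufs is omitted for brevity:
- *
- *   struct rte_mbuf *pkts[BURST_SZ];
- *   unsigned nb;
- *
- *   for (;;) {
- *       nb = rte_eth_rx_burst(port_id, 0, pkts, BURST_SZ);
- *       if (nb > 0)
- *           rte_kni_tx_burst(kni, pkts, nb);
- *       nb = rte_kni_rx_burst(kni, pkts, BURST_SZ);
- *       if (nb > 0)
- *           rte_eth_tx_burst(port_id, 0, pkts, nb);
- *       rte_kni_handle_request(kni);
- *   }
- */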
-
-/**
- * Get the port id from KNI interface.
- *
- * Note: It is deprecated and just for backward compatibility.
- *
- * @param kni
- * The KNI interface context.
- *
- * @return
- * On success: The port id.
- * On failure: ~0x0
- */
-extern uint8_t rte_kni_get_port_id(struct rte_kni *kni) \
- __attribute__ ((deprecated));
-
-/**
- * Get the KNI context by its name.
- *
- * @param name
- * pointer to the KNI device name.
- *
- * @return
- * On success: Pointer to KNI interface.
- * On failure: NULL.
- */
-extern struct rte_kni *rte_kni_get(const char *name);
-
-/**
- * Get the KNI context of the specific port.
- *
- * Note: It is deprecated and just for backward compatibility.
- *
- * @param port_id
- * the port id.
- *
- * @return
- * On success: Pointer to KNI interface.
- * On failure: NULL
- */
-extern struct rte_kni *rte_kni_info_get(uint8_t port_id) \
- __attribute__ ((deprecated));
-
-/**
- * Register KNI request handling for a specified port. It can
- * be called by the master process or a slave process.
- *
- * @param kni
- * pointer to struct rte_kni.
- * @param ops
- * pointer to struct rte_kni_ops.
- *
- * @return
- * On success: 0
- * On failure: -1
- */
-extern int rte_kni_register_handlers(struct rte_kni *kni,
- struct rte_kni_ops *ops);
-
-/**
- * Unregister KNI request handling for a specified port.
- *
- * @param kni
- * pointer to struct rte_kni.
- *
- * @return
- * On success: 0
- * On failure: -1
- */
-extern int rte_kni_unregister_handlers(struct rte_kni *kni);
-
-/**
- * Close the KNI device.
- *
- * @param void
- *
- * @return
- * void
- */
-extern void rte_kni_close(void);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_KNI_H_ */
-
diff --git a/src/dpdk_lib18/librte_kni/rte_kni_fifo.h b/src/dpdk_lib18/librte_kni/rte_kni_fifo.h
deleted file mode 100755
index 8cb85873..00000000
--- a/src/dpdk_lib18/librte_kni/rte_kni_fifo.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-
-/**
- * Initializes the kni fifo structure
- */
-static void
-kni_fifo_init(struct rte_kni_fifo *fifo, unsigned size)
-{
- /* Ensure size is power of 2 */
- if (size & (size - 1))
- rte_panic("KNI fifo size must be power of 2\n");
-
- fifo->write = 0;
- fifo->read = 0;
- fifo->len = size;
- fifo->elem_size = sizeof(void *);
-}
-
-/**
- * Adds num elements into the fifo. Returns the number actually written.
- */
-static inline unsigned
-kni_fifo_put(struct rte_kni_fifo *fifo, void **data, unsigned num)
-{
- unsigned i = 0;
- unsigned fifo_write = fifo->write;
- unsigned fifo_read = fifo->read;
- unsigned new_write = fifo_write;
-
- for (i = 0; i < num; i++) {
- new_write = (new_write + 1) & (fifo->len - 1);
-
- if (new_write == fifo_read)
- break;
- fifo->buffer[fifo_write] = data[i];
- fifo_write = new_write;
- }
- fifo->write = fifo_write;
- return i;
-}
-
-/**
- * Gets up to num elements from the fifo. Returns the number actually read.
- */
-static inline unsigned
-kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
-{
- unsigned i = 0;
- unsigned new_read = fifo->read;
- unsigned fifo_write = fifo->write;
- for (i = 0; i < num; i++) {
- if (new_read == fifo_write)
- break;
-
- data[i] = fifo->buffer[new_read];
- new_read = (new_read + 1) & (fifo->len - 1);
- }
- fifo->read = new_read;
- return i;
-}
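-
-/*
- * Worked example (editorial): with len = 8, read = 2 and write = 6,
- * kni_fifo_put can store three more elements, at indices 6, 7 and 0
- * (new_write wraps as (7 + 1) & 7 == 0); the next increment would make
- * new_write equal read, so the put stops there and the fifo never holds
- * more than len - 1 entries.
- */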
diff --git a/src/dpdk_lib18/librte_kvargs/Makefile b/src/dpdk_lib18/librte_kvargs/Makefile
deleted file mode 100755
index b09359a5..00000000
--- a/src/dpdk_lib18/librte_kvargs/Makefile
+++ /dev/null
@@ -1,51 +0,0 @@
-# BSD LICENSE
-#
-# Copyright 2014 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# - Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# - Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-#
-# - Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-# OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_kvargs.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_KVARGS) := rte_kvargs.c
-
-# install includes
-INCS := rte_kvargs.h
-SYMLINK-$(CONFIG_RTE_LIBRTE_KVARGS)-include := $(INCS)
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_KVARGS) += lib/librte_eal
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_lpm/Makefile b/src/dpdk_lib18/librte_lpm/Makefile
deleted file mode 100755
index fa94163f..00000000
--- a/src/dpdk_lib18/librte_lpm/Makefile
+++ /dev/null
@@ -1,49 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_lpm.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_LPM) := rte_lpm.c rte_lpm6.c
-
-# install this header file
-SYMLINK-$(CONFIG_RTE_LIBRTE_LPM)-include := rte_lpm.h rte_lpm6.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_LPM) += lib/librte_eal lib/librte_malloc
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_lpm/rte_lpm.c b/src/dpdk_lib18/librte_lpm/rte_lpm.c
deleted file mode 100755
index 983e04b1..00000000
--- a/src/dpdk_lib18/librte_lpm/rte_lpm.c
+++ /dev/null
@@ -1,1017 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <stdint.h>
-#include <errno.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <errno.h>
-#include <sys/queue.h>
-
-#include <rte_log.h>
-#include <rte_branch_prediction.h>
-#include <rte_common.h>
-#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
-#include <rte_malloc.h>
-#include <rte_memzone.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_eal_memconfig.h>
-#include <rte_per_lcore.h>
-#include <rte_string_fns.h>
-#include <rte_errno.h>
-#include <rte_rwlock.h>
-#include <rte_spinlock.h>
-
-#include "rte_lpm.h"
-
-TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
-
-#define MAX_DEPTH_TBL24 24
-
-enum valid_flag {
- INVALID = 0,
- VALID
-};
-
-/* Macro to enable/disable run-time checks. */
-#if defined(RTE_LIBRTE_LPM_DEBUG)
-#include <rte_debug.h>
-#define VERIFY_DEPTH(depth) do { \
- if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH)) \
- rte_panic("LPM: Invalid depth (%u) at line %d", \
- (unsigned)(depth), __LINE__); \
-} while (0)
-#else
-#define VERIFY_DEPTH(depth)
-#endif
-
-/*
- * Converts a given depth value to its corresponding mask value.
- *
- * depth (IN) : range = 1 - 32
- * mask (OUT) : 32bit mask
- */
-static uint32_t __attribute__((pure))
-depth_to_mask(uint8_t depth)
-{
- VERIFY_DEPTH(depth);
-
- /* To calculate a mask start with a 1 on the left hand side and right
- * shift while populating the left hand side with 1's
- */
- return (int)0x80000000 >> (depth - 1);
-}
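-
-/*
- * Worked example (editorial): for depth = 24 the arithmetic shift
- * (int)0x80000000 >> 23 sign-extends to 0xFFFFFF00, i.e. a /24 netmask;
- * for depth = 32 it yields 0xFFFFFFFF.
- */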
-
-/*
- * Converts given depth value to its corresponding range value.
- */
-static inline uint32_t __attribute__((pure))
-depth_to_range(uint8_t depth)
-{
- VERIFY_DEPTH(depth);
-
- /*
- * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
- */
- if (depth <= MAX_DEPTH_TBL24)
- return 1 << (MAX_DEPTH_TBL24 - depth);
-
- /* Else if depth is greater than 24 */
- return (1 << (RTE_LPM_MAX_DEPTH - depth));
-}
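-
-/*
- * Worked example (editorial): depth = 16 covers 1 << (24 - 16) = 256 tbl24
- * entries, while depth = 28 covers 1 << (32 - 28) = 16 tbl8 entries.
- */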
-
-/*
- * Find an existing lpm table and return a pointer to it.
- */
-struct rte_lpm *
-rte_lpm_find_existing(const char *name)
-{
- struct rte_lpm *l = NULL;
- struct rte_tailq_entry *te;
- struct rte_lpm_list *lpm_list;
-
- /* check that we have an initialised tail queue */
- if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
- rte_lpm_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
-
- rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
- TAILQ_FOREACH(te, lpm_list, next) {
- l = (struct rte_lpm *) te->data;
- if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
- break;
- }
- rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
-
- if (te == NULL) {
- rte_errno = ENOENT;
- return NULL;
- }
-
- return l;
-}
-
-/*
- * Allocates memory for LPM object
- */
-struct rte_lpm *
-rte_lpm_create(const char *name, int socket_id, int max_rules,
- __rte_unused int flags)
-{
- char mem_name[RTE_LPM_NAMESIZE];
- struct rte_lpm *lpm = NULL;
- struct rte_tailq_entry *te;
- uint32_t mem_size;
- struct rte_lpm_list *lpm_list;
-
- /* check that we have an initialised tail queue */
- if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
- rte_lpm_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
-
- RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
- RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);
-
- /* Check user arguments. */
- if ((name == NULL) || (socket_id < -1) || (max_rules == 0)){
- rte_errno = EINVAL;
- return NULL;
- }
-
- snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
-
- /* Determine the amount of memory to allocate. */
- mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
-
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
-
- /* guarantee there is no existing entry with the same name */
- TAILQ_FOREACH(te, lpm_list, next) {
- lpm = (struct rte_lpm *) te->data;
- if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
- break;
- }
- if (te != NULL)
- goto exit;
-
- /* allocate tailq entry */
- te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
- if (te == NULL) {
- RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
- goto exit;
- }
-
- /* Allocate memory to store the LPM data structures. */
- lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
- RTE_CACHE_LINE_SIZE, socket_id);
- if (lpm == NULL) {
- RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
- rte_free(te);
- goto exit;
- }
-
- /* Save user arguments. */
- lpm->max_rules = max_rules;
- snprintf(lpm->name, sizeof(lpm->name), "%s", name);
-
- te->data = (void *) lpm;
-
- TAILQ_INSERT_TAIL(lpm_list, te, next);
-
-exit:
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
-
- return lpm;
-}
-
-/*
- * Deallocates memory for given LPM table.
- */
-void
-rte_lpm_free(struct rte_lpm *lpm)
-{
- struct rte_lpm_list *lpm_list;
- struct rte_tailq_entry *te;
-
- /* Check user arguments. */
- if (lpm == NULL)
- return;
-
- /* check that we have an initialised tail queue */
- if ((lpm_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return;
- }
-
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
-
- /* find our tailq entry */
- TAILQ_FOREACH(te, lpm_list, next) {
- if (te->data == (void *) lpm)
- break;
- }
- if (te == NULL) {
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
- return;
- }
-
- TAILQ_REMOVE(lpm_list, te, next);
-
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
-
- rte_free(lpm);
- rte_free(te);
-}
-
-/*
- * Adds a rule to the rule table.
- *
- * NOTE: The rule table is split into 32 groups. Each group contains rules that
- * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
- * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
- * to refer to depth 1 because even though the depth range is 1 - 32, depths
- * are stored in the rule table from 0 - 31.
- * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
- */
-static inline int32_t
-rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
- uint8_t next_hop)
-{
- uint32_t rule_gindex, rule_index, last_rule;
- int i;
-
- VERIFY_DEPTH(depth);
-
- /* Scan through rule group to see if rule already exists. */
- if (lpm->rule_info[depth - 1].used_rules > 0) {
-
- /* rule_gindex stands for rule group index. */
- rule_gindex = lpm->rule_info[depth - 1].first_rule;
- /* Initialise rule_index to point to start of rule group. */
- rule_index = rule_gindex;
- /* Last rule = Last used rule in this rule group. */
- last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
-
- for (; rule_index < last_rule; rule_index++) {
-
- /* If rule already exists update its next_hop and return. */
- if (lpm->rules_tbl[rule_index].ip == ip_masked) {
- lpm->rules_tbl[rule_index].next_hop = next_hop;
-
- return rule_index;
- }
- }
- } else {
- /* Calculate the position in which the rule will be stored. */
- rule_index = 0;
-
- for (i = depth - 1; i > 0; i--) {
- if (lpm->rule_info[i - 1].used_rules > 0) {
- rule_index = lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules;
- break;
- }
- }
- if (rule_index == lpm->max_rules)
- return -ENOSPC;
-
- lpm->rule_info[depth - 1].first_rule = rule_index;
- }
-
- /* Make room for the new rule in the array. */
- for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
- if (lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
- return -ENOSPC;
-
- if (lpm->rule_info[i - 1].used_rules > 0) {
- lpm->rules_tbl[lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules]
- = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
- lpm->rule_info[i - 1].first_rule++;
- }
- }
-
- /* Add the new rule. */
- lpm->rules_tbl[rule_index].ip = ip_masked;
- lpm->rules_tbl[rule_index].next_hop = next_hop;
-
- /* Increment the used rules counter for this rule group. */
- lpm->rule_info[depth - 1].used_rules++;
-
- return rule_index;
-}
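-
-/*
- * Worked example (editorial): with two /8 rules followed by three /16 rules,
- * rule_info[7] is { first_rule = 0, used_rules = 2 } and rule_info[15] is
- * { first_rule = 2, used_rules = 3 }. Adding a third /8 rule copies the
- * first /16 rule to index 5, bumps rule_info[15].first_rule to 3, writes
- * the new rule at index 2 and increments rule_info[7].used_rules.
- */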
-
-/*
- * Delete a rule from the rule table.
- * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
- */
-static inline void
-rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
-{
- int i;
-
- VERIFY_DEPTH(depth);
-
- lpm->rules_tbl[rule_index] = lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
- + lpm->rule_info[depth - 1].used_rules - 1];
-
- for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
- if (lpm->rule_info[i].used_rules > 0) {
- lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
- lpm->rules_tbl[lpm->rule_info[i].first_rule + lpm->rule_info[i].used_rules - 1];
- lpm->rule_info[i].first_rule--;
- }
- }
-
- lpm->rule_info[depth - 1].used_rules--;
-}
-
-/*
- * Finds a rule in rule table.
- * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
- */
-static inline int32_t
-rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
-{
- uint32_t rule_gindex, last_rule, rule_index;
-
- VERIFY_DEPTH(depth);
-
- rule_gindex = lpm->rule_info[depth - 1].first_rule;
- last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
-
- /* Scan used rules at given depth to find rule. */
- for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
- /* If rule is found return the rule index. */
- if (lpm->rules_tbl[rule_index].ip == ip_masked)
- return (rule_index);
- }
-
- /* If rule is not found return -E_RTE_NO_TAILQ. */
- return -E_RTE_NO_TAILQ;
-}
-
-/*
- * Find, clean and allocate a tbl8.
- */
-static inline int32_t
-tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
-{
- uint32_t tbl8_gindex; /* tbl8 group index. */
- struct rte_lpm_tbl8_entry *tbl8_entry;
-
- /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
- for (tbl8_gindex = 0; tbl8_gindex < RTE_LPM_TBL8_NUM_GROUPS;
- tbl8_gindex++) {
- tbl8_entry = &tbl8[tbl8_gindex *
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
- /* If a free tbl8 group is found clean it and set as VALID. */
- if (!tbl8_entry->valid_group) {
- memset(&tbl8_entry[0], 0,
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
- sizeof(tbl8_entry[0]));
-
- tbl8_entry->valid_group = VALID;
-
- /* Return group index for allocated tbl8 group. */
- return tbl8_gindex;
- }
- }
-
- /* If there are no tbl8 groups free then return error. */
- return -ENOSPC;
-}
-
-static inline void
-tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
-{
- /* Set tbl8 group invalid */
- tbl8[tbl8_group_start].valid_group = INVALID;
-}
-
-static inline int32_t
-add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
- uint8_t next_hop)
-{
- uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
-
- /* Calculate the index into Table24. */
- tbl24_index = ip >> 8;
- tbl24_range = depth_to_range(depth);
-
- for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
- /*
- * For invalid OR valid and non-extended tbl 24 entries set
- * entry.
- */
- if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 &&
- lpm->tbl24[i].depth <= depth)) {
-
- struct rte_lpm_tbl24_entry new_tbl24_entry = {
- { .next_hop = next_hop, },
- .valid = VALID,
- .ext_entry = 0,
- .depth = depth,
- };
-
- /* Setting tbl24 entry in one go to avoid race
- * conditions */
- lpm->tbl24[i] = new_tbl24_entry;
-
- continue;
- }
-
- /* If tbl24 entry is valid and extended calculate the index
- * into tbl8. */
- tbl8_index = lpm->tbl24[i].tbl8_gindex *
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-
- for (j = tbl8_index; j < tbl8_group_end; j++) {
- if (!lpm->tbl8[j].valid ||
- lpm->tbl8[j].depth <= depth) {
- struct rte_lpm_tbl8_entry new_tbl8_entry = {
- .valid = VALID,
- .valid_group = VALID,
- .depth = depth,
- .next_hop = next_hop,
- };
-
- /*
- * Setting tbl8 entry in one go to avoid race
- * conditions
- */
- lpm->tbl8[j] = new_tbl8_entry;
-
- continue;
- }
- }
- }
-
- return 0;
-}
-
-static inline int32_t
-add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
- uint8_t next_hop)
-{
- uint32_t tbl24_index;
- int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
- tbl8_range, i;
-
- tbl24_index = (ip_masked >> 8);
- tbl8_range = depth_to_range(depth);
-
- if (!lpm->tbl24[tbl24_index].valid) {
- /* Search for a free tbl8 group. */
- tbl8_group_index = tbl8_alloc(lpm->tbl8);
-
- /* Check tbl8 allocation was successful. */
- if (tbl8_group_index < 0) {
- return tbl8_group_index;
- }
-
- /* Find index into tbl8 and range. */
- tbl8_index = (tbl8_group_index *
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
- (ip_masked & 0xFF);
-
- /* Set tbl8 entry. */
- for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
- lpm->tbl8[i].depth = depth;
- lpm->tbl8[i].next_hop = next_hop;
- lpm->tbl8[i].valid = VALID;
- }
-
- /*
- * Update tbl24 entry to point to new tbl8 entry. Note: The
- * ext_flag and tbl8_index need to be updated simultaneously,
- * so assign whole structure in one go
- */
-
- struct rte_lpm_tbl24_entry new_tbl24_entry = {
- { .tbl8_gindex = (uint8_t)tbl8_group_index, },
- .valid = VALID,
- .ext_entry = 1,
- .depth = 0,
- };
-
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
-
- }
- /* If the entry is valid but not extended, calculate the index into tbl8. */
- else if (lpm->tbl24[tbl24_index].ext_entry == 0) {
- /* Search for free tbl8 group. */
- tbl8_group_index = tbl8_alloc(lpm->tbl8);
-
- if (tbl8_group_index < 0) {
- return tbl8_group_index;
- }
-
- tbl8_group_start = tbl8_group_index *
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- tbl8_group_end = tbl8_group_start +
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-
- /* Populate new tbl8 with tbl24 value. */
- for (i = tbl8_group_start; i < tbl8_group_end; i++) {
- lpm->tbl8[i].valid = VALID;
- lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
- lpm->tbl8[i].next_hop =
- lpm->tbl24[tbl24_index].next_hop;
- }
-
- tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
-
- /* Insert new rule into the tbl8 entry. */
- for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
- if (!lpm->tbl8[i].valid ||
- lpm->tbl8[i].depth <= depth) {
- lpm->tbl8[i].valid = VALID;
- lpm->tbl8[i].depth = depth;
- lpm->tbl8[i].next_hop = next_hop;
-
- continue;
- }
- }
-
- /*
- * Update tbl24 entry to point to new tbl8 entry. Note: The
- * ext_flag and tbl8_index need to be updated simultaneously,
- * so assign whole structure in one go.
- */
-
- struct rte_lpm_tbl24_entry new_tbl24_entry = {
- { .tbl8_gindex = (uint8_t)tbl8_group_index, },
- .valid = VALID,
- .ext_entry = 1,
- .depth = 0,
- };
-
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
-
- }
- else {
- /*
- * If the entry is valid and extended, calculate the index into
- * tbl8.
- */
- tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
- tbl8_group_start = tbl8_group_index *
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
-
- for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
-
- if (!lpm->tbl8[i].valid ||
- lpm->tbl8[i].depth <= depth) {
- struct rte_lpm_tbl8_entry new_tbl8_entry = {
- .valid = VALID,
- .depth = depth,
- .next_hop = next_hop,
- .valid_group = lpm->tbl8[i].valid_group,
- };
-
- /*
- * Setting tbl8 entry in one go to avoid race
- * condition
- */
- lpm->tbl8[i] = new_tbl8_entry;
-
- continue;
- }
- }
- }
-
- return 0;
-}
-
-/*
- * Add a route
- */
-int
-rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
- uint8_t next_hop)
-{
- int32_t rule_index, status = 0;
- uint32_t ip_masked;
-
- /* Check user arguments. */
- if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
- return -EINVAL;
-
- ip_masked = ip & depth_to_mask(depth);
-
- /* Add the rule to the rule table. */
- rule_index = rule_add(lpm, ip_masked, depth, next_hop);
-
- /* If there is no space available for the new rule, return an error. */
- if (rule_index < 0) {
- return rule_index;
- }
-
- if (depth <= MAX_DEPTH_TBL24) {
- status = add_depth_small(lpm, ip_masked, depth, next_hop);
- }
- else { /* If depth > MAX_DEPTH_TBL24 */
- status = add_depth_big(lpm, ip_masked, depth, next_hop);
-
- /*
- * If add fails due to exhaustion of tbl8 extensions delete
- * rule that was added to rule table.
- */
- if (status < 0) {
- rule_delete(lpm, rule_index, depth);
-
- return status;
- }
- }
-
- return 0;
-}
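-
-/*
- * Editorial sketch (not part of the original source): a minimal use of the
- * API above, assuming EAL is initialised and using made-up values (IPv4()
- * is the helper macro from rte_ip.h):
- *
- *   struct rte_lpm *lpm = rte_lpm_create("example", SOCKET_ID_ANY, 256, 0);
- *
- *   if (lpm == NULL)
- *       rte_exit(EXIT_FAILURE, "LPM creation failed\n");
- *   if (rte_lpm_add(lpm, IPv4(10, 0, 0, 0), 8, 1) < 0)
- *       rte_exit(EXIT_FAILURE, "cannot add route\n");
- */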
-
-/*
- * Look for a rule in the high-level rules table
- */
-int
-rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
-uint8_t *next_hop)
-{
- uint32_t ip_masked;
- int32_t rule_index;
-
- /* Check user arguments. */
- if ((lpm == NULL) ||
- (next_hop == NULL) ||
- (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
- return -EINVAL;
-
- /* Look for the rule using rule_find. */
- ip_masked = ip & depth_to_mask(depth);
- rule_index = rule_find(lpm, ip_masked, depth);
-
- if (rule_index >= 0) {
- *next_hop = lpm->rules_tbl[rule_index].next_hop;
- return 1;
- }
-
- /* If rule is not found return 0. */
- return 0;
-}
-
-static inline int32_t
-find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth)
-{
- int32_t rule_index;
- uint32_t ip_masked;
- uint8_t prev_depth;
-
- for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
- ip_masked = ip & depth_to_mask(prev_depth);
-
- rule_index = rule_find(lpm, ip_masked, prev_depth);
-
- if (rule_index >= 0) {
- *sub_rule_depth = prev_depth;
- return rule_index;
- }
- }
-
- return -1;
-}
-
-static inline int32_t
-delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
- uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
-{
- uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
-
- /* Calculate the range and index into Table24. */
- tbl24_range = depth_to_range(depth);
- tbl24_index = (ip_masked >> 8);
-
- /*
- * First check sub_rule_index: -1 indicates that no replacement rule
- * exists, while a non-negative value is the index of the replacement
- * rule.
- */
- if (sub_rule_index < 0) {
- /*
- * If no replacement rule exists then invalidate entries
- * associated with this rule.
- */
- for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
-
- if (lpm->tbl24[i].ext_entry == 0 &&
- lpm->tbl24[i].depth <= depth ) {
- lpm->tbl24[i].valid = INVALID;
- }
- else {
- /*
- * If TBL24 entry is extended, then there has
- * to be a rule with depth >= 25 in the
- * associated TBL8 group.
- */
-
- tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
- tbl8_index = tbl8_group_index *
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-
- for (j = tbl8_index; j < (tbl8_index +
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
-
- if (lpm->tbl8[j].depth <= depth)
- lpm->tbl8[j].valid = INVALID;
- }
- }
- }
- }
- else {
- /*
- * If a replacement rule exists then modify entries
- * associated with this rule.
- */
-
- struct rte_lpm_tbl24_entry new_tbl24_entry = {
- {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
- .valid = VALID,
- .ext_entry = 0,
- .depth = sub_rule_depth,
- };
-
- struct rte_lpm_tbl8_entry new_tbl8_entry = {
- .valid = VALID,
- .depth = sub_rule_depth,
- .next_hop = lpm->rules_tbl
- [sub_rule_index].next_hop,
- };
-
- for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
-
- if (lpm->tbl24[i].ext_entry == 0 &&
-				lpm->tbl24[i].depth <= depth) {
- lpm->tbl24[i] = new_tbl24_entry;
- }
- else {
- /*
- * If TBL24 entry is extended, then there has
- * to be a rule with depth >= 25 in the
- * associated TBL8 group.
- */
-
- tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
- tbl8_index = tbl8_group_index *
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-
- for (j = tbl8_index; j < (tbl8_index +
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
-
- if (lpm->tbl8[j].depth <= depth)
- lpm->tbl8[j] = new_tbl8_entry;
- }
- }
- }
- }
-
- return 0;
-}
-
-/*
- * Checks if a tbl8 group can be recycled.
- *
- * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
- * Return of -EINVAL means tbl8 is empty and thus can be recycled.
- * Return of a value >= 0 means tbl8 is in use but all its entries hold the
- * same value, and thus it can be recycled.
- */
-static inline int32_t
-tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
-{
- uint32_t tbl8_group_end, i;
- tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-
- /*
-	 * Check the first entry of the given tbl8. If it is invalid we know
-	 * this tbl8 does not contain any rule with a depth <= MAX_DEPTH_TBL24
-	 * (as such a rule would affect all entries in the tbl8) and thus this
-	 * table can not be recycled.
- */
- if (tbl8[tbl8_group_start].valid) {
- /*
- * If first entry is valid check if the depth is less than 24
- * and if so check the rest of the entries to verify that they
- * are all of this depth.
- */
- if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
- for (i = (tbl8_group_start + 1); i < tbl8_group_end;
- i++) {
-
- if (tbl8[i].depth !=
- tbl8[tbl8_group_start].depth) {
-
- return -EEXIST;
- }
- }
-		/* If all entries are the same return the tbl8 index */
- return tbl8_group_start;
- }
-
- return -EEXIST;
- }
- /*
- * If the first entry is invalid check if the rest of the entries in
- * the tbl8 are invalid.
- */
- for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
- if (tbl8[i].valid)
- return -EEXIST;
- }
- /* If no valid entries are found then return -EINVAL. */
- return -EINVAL;
-}
-
-static inline int32_t
-delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
- uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
-{
- uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
- tbl8_range, i;
- int32_t tbl8_recycle_index;
-
- /*
- * Calculate the index into tbl24 and range. Note: All depths larger
- * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
- */
- tbl24_index = ip_masked >> 8;
-
- /* Calculate the index into tbl8 and range. */
- tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
- tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
- tbl8_range = depth_to_range(depth);
-
- if (sub_rule_index < 0) {
- /*
- * Loop through the range of entries on tbl8 for which the
- * rule_to_delete must be removed or modified.
- */
- for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
- if (lpm->tbl8[i].depth <= depth)
- lpm->tbl8[i].valid = INVALID;
- }
- }
- else {
- /* Set new tbl8 entry. */
- struct rte_lpm_tbl8_entry new_tbl8_entry = {
- .valid = VALID,
- .depth = sub_rule_depth,
- .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
- .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
- };
-
- /*
- * Loop through the range of entries on tbl8 for which the
- * rule_to_delete must be modified.
- */
- for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
- if (lpm->tbl8[i].depth <= depth)
- lpm->tbl8[i] = new_tbl8_entry;
- }
- }
-
- /*
- * Check if there are any valid entries in this tbl8 group. If all
- * tbl8 entries are invalid we can free the tbl8 and invalidate the
- * associated tbl24 entry.
- */
-
- tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);
-
- if (tbl8_recycle_index == -EINVAL){
- /* Set tbl24 before freeing tbl8 to avoid race condition. */
- lpm->tbl24[tbl24_index].valid = 0;
- tbl8_free(lpm->tbl8, tbl8_group_start);
- }
- else if (tbl8_recycle_index > -1) {
- /* Update tbl24 entry. */
- struct rte_lpm_tbl24_entry new_tbl24_entry = {
- { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
- .valid = VALID,
- .ext_entry = 0,
- .depth = lpm->tbl8[tbl8_recycle_index].depth,
- };
-
- /* Set tbl24 before freeing tbl8 to avoid race condition. */
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
- tbl8_free(lpm->tbl8, tbl8_group_start);
- }
-
- return 0;
-}
-
-/*
- * Deletes a rule
- */
-int
-rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
-{
- int32_t rule_to_delete_index, sub_rule_index;
- uint32_t ip_masked;
- uint8_t sub_rule_depth;
- /*
-	 * Check input arguments. Note: ip is an unsigned 32-bit integer, so
-	 * its value range need not be checked.
- */
- if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
- return -EINVAL;
- }
-
- ip_masked = ip & depth_to_mask(depth);
-
- /*
- * Find the index of the input rule, that needs to be deleted, in the
- * rule table.
- */
- rule_to_delete_index = rule_find(lpm, ip_masked, depth);
-
- /*
- * Check if rule_to_delete_index was found. If no rule was found the
- * function rule_find returns -E_RTE_NO_TAILQ.
- */
- if (rule_to_delete_index < 0)
- return -E_RTE_NO_TAILQ;
-
- /* Delete the rule from the rule table. */
- rule_delete(lpm, rule_to_delete_index, depth);
-
- /*
- * Find rule to replace the rule_to_delete. If there is no rule to
- * replace the rule_to_delete we return -1 and invalidate the table
- * entries associated with this rule.
- */
- sub_rule_depth = 0;
- sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);
-
- /*
- * If the input depth value is less than 25 use function
- * delete_depth_small otherwise use delete_depth_big.
- */
- if (depth <= MAX_DEPTH_TBL24) {
- return delete_depth_small(lpm, ip_masked, depth,
- sub_rule_index, sub_rule_depth);
- }
- else { /* If depth > MAX_DEPTH_TBL24 */
- return delete_depth_big(lpm, ip_masked, depth, sub_rule_index, sub_rule_depth);
- }
-}
-
-/*
- * Delete all rules from the LPM table.
- */
-void
-rte_lpm_delete_all(struct rte_lpm *lpm)
-{
- /* Zero rule information. */
- memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
-
- /* Zero tbl24. */
- memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
-
- /* Zero tbl8. */
- memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
-
-	/* Delete all rules from the rules table. */
- memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
-}
-
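For reference, a minimal sketch of the delete-with-fallback behaviour implemented
above (delete_depth_small() re-points table entries at the next shorter covering
prefix found by find_previous_rule()). This assumes the DPDK 1.8 API being removed
here; IPv4() is the byte-packing helper from rte_ip.h, and error handling is elided:

    #include <rte_lpm.h>
    #include <rte_ip.h>

    static void lpm_delete_fallback_demo(void)
    {
        uint8_t hop = 0;
        struct rte_lpm *lpm = rte_lpm_create("demo", SOCKET_ID_ANY, 64, 0);

        if (lpm == NULL)
            return;

        rte_lpm_add(lpm, IPv4(10, 0, 0, 0), 16, 1);   /* covering /16 -> hop 1 */
        rte_lpm_add(lpm, IPv4(10, 0, 1, 0), 24, 2);   /* specific /24 -> hop 2 */

        rte_lpm_lookup(lpm, IPv4(10, 0, 1, 9), &hop); /* hop == 2, the /24 wins */

        /* Deleting the /24 re-points its range at the covering /16. */
        rte_lpm_delete(lpm, IPv4(10, 0, 1, 0), 24);
        rte_lpm_lookup(lpm, IPv4(10, 0, 1, 9), &hop); /* hop == 1 now */

        rte_lpm_free(lpm);
    }
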
diff --git a/src/dpdk_lib18/librte_lpm/rte_lpm.h b/src/dpdk_lib18/librte_lpm/rte_lpm.h
deleted file mode 100755
index 62d7736e..00000000
--- a/src/dpdk_lib18/librte_lpm/rte_lpm.h
+++ /dev/null
@@ -1,472 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_LPM_H_
-#define _RTE_LPM_H_
-
-/**
- * @file
- * RTE Longest Prefix Match (LPM)
- */
-
-#include <errno.h>
-#include <sys/queue.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <rte_branch_prediction.h>
-#include <rte_memory.h>
-#include <rte_common.h>
-#include <rte_common_vect.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** Max number of characters in LPM name. */
-#define RTE_LPM_NAMESIZE 32
-
-/** @deprecated Possible location to allocate memory. This was for the last
- * parameter of rte_lpm_create(), but is now redundant. The LPM table is always
- * allocated in memory using librte_malloc which uses a memzone. */
-#define RTE_LPM_HEAP 0
-
-/** @deprecated Possible location to allocate memory. This was for the last
- * parameter of rte_lpm_create(), but is now redundant. The LPM table is always
- * allocated in memory using librte_malloc which uses a memzone. */
-#define RTE_LPM_MEMZONE 1
-
-/** Maximum depth value possible for IPv4 LPM. */
-#define RTE_LPM_MAX_DEPTH 32
-
-/** @internal Total number of tbl24 entries. */
-#define RTE_LPM_TBL24_NUM_ENTRIES (1 << 24)
-
-/** @internal Number of entries in a tbl8 group. */
-#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256
-
-/** @internal Total number of tbl8 groups in the tbl8. */
-#define RTE_LPM_TBL8_NUM_GROUPS 256
-
-/** @internal Total number of tbl8 entries. */
-#define RTE_LPM_TBL8_NUM_ENTRIES (RTE_LPM_TBL8_NUM_GROUPS * \
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES)
-
-/** @internal Macro to enable/disable run-time checks. */
-#if defined(RTE_LIBRTE_LPM_DEBUG)
-#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
- if (cond) return (retval); \
-} while (0)
-#else
-#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
-#endif
-
-/** @internal bitmask with valid and ext_entry/valid_group fields set */
-#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x0300
-
-/** Bitmask used to indicate successful lookup */
-#define RTE_LPM_LOOKUP_SUCCESS 0x0100
-
-/** @internal Tbl24 entry structure. */
-struct rte_lpm_tbl24_entry {
-	/* Stores next hop or group index (i.e. gindex) into tbl8. */
- union {
- uint8_t next_hop;
- uint8_t tbl8_gindex;
- };
- /* Using single uint8_t to store 3 values. */
- uint8_t valid :1; /**< Validation flag. */
- uint8_t ext_entry :1; /**< External entry. */
- uint8_t depth :6; /**< Rule depth. */
-};
-
-/** @internal Tbl8 entry structure. */
-struct rte_lpm_tbl8_entry {
- uint8_t next_hop; /**< next hop. */
- /* Using single uint8_t to store 3 values. */
- uint8_t valid :1; /**< Validation flag. */
- uint8_t valid_group :1; /**< Group validation flag. */
- uint8_t depth :6; /**< Rule depth. */
-};
-
-/** @internal Rule structure. */
-struct rte_lpm_rule {
- uint32_t ip; /**< Rule IP address. */
- uint8_t next_hop; /**< Rule next hop. */
-};
-
-/** @internal Contains metadata about the rules table. */
-struct rte_lpm_rule_info {
- uint32_t used_rules; /**< Used rules so far. */
- uint32_t first_rule; /**< Indexes the first rule of a given depth. */
-};
-
-/** @internal LPM structure. */
-struct rte_lpm {
- /* LPM metadata. */
- char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */
- int mem_location; /**< @deprecated @see RTE_LPM_HEAP and RTE_LPM_MEMZONE. */
- uint32_t max_rules; /**< Max. balanced rules per lpm. */
- struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
-
- /* LPM Tables. */
- struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \
- __rte_cache_aligned; /**< LPM tbl24 table. */
- struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \
- __rte_cache_aligned; /**< LPM tbl8 table. */
- struct rte_lpm_rule rules_tbl[0] \
- __rte_cache_aligned; /**< LPM rules. */
-};
-
-/**
- * Create an LPM object.
- *
- * @param name
- * LPM object name
- * @param socket_id
- * NUMA socket ID for LPM table memory allocation
- * @param max_rules
- * Maximum number of LPM rules that can be added
- * @param flags
- * This parameter is currently unused
- * @return
- * Handle to LPM object on success, NULL otherwise with rte_errno set
- *   to an appropriate value. Possible rte_errno values include:
- * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
- * - E_RTE_SECONDARY - function was called from a secondary process instance
- *    - E_RTE_NO_TAILQ - no tailq list could be obtained for the lpm object list
- * - EINVAL - invalid parameter passed to function
- * - ENOSPC - the maximum number of memzones has already been allocated
- * - EEXIST - a memzone with the same name already exists
- * - ENOMEM - no appropriate memory area found in which to create memzone
- */
-struct rte_lpm *
-rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
-
-/**
- * Find an existing LPM object and return a pointer to it.
- *
- * @param name
- * Name of the lpm object as passed to rte_lpm_create()
- * @return
- * Pointer to lpm object or NULL if object not found with rte_errno
- * set appropriately. Possible rte_errno values include:
- * - ENOENT - required entry not available to return.
- */
-struct rte_lpm *
-rte_lpm_find_existing(const char *name);
-
-/**
- * Free an LPM object.
- *
- * @param lpm
- * LPM object handle
- * @return
- * None
- */
-void
-rte_lpm_free(struct rte_lpm *lpm);
-
-/**
- * Add a rule to the LPM table.
- *
- * @param lpm
- * LPM object handle
- * @param ip
- * IP of the rule to be added to the LPM table
- * @param depth
- * Depth of the rule to be added to the LPM table
- * @param next_hop
- * Next hop of the rule to be added to the LPM table
- * @return
- * 0 on success, negative value otherwise
- */
-int
-rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
-
-/**
- * Check if a rule is present in the LPM table,
- * and provide its next hop if it is.
- *
- * @param lpm
- * LPM object handle
- * @param ip
- * IP of the rule to be searched
- * @param depth
- *   Depth of the rule to be searched
- * @param next_hop
- * Next hop of the rule (valid only if it is found)
- * @return
- * 1 if the rule exists, 0 if it does not, a negative value on failure
- */
-int
-rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
-uint8_t *next_hop);
-
-/**
- * Delete a rule from the LPM table.
- *
- * @param lpm
- * LPM object handle
- * @param ip
- * IP of the rule to be deleted from the LPM table
- * @param depth
- * Depth of the rule to be deleted from the LPM table
- * @return
- * 0 on success, negative value otherwise
- */
-int
-rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
-
-/**
- * Delete all rules from the LPM table.
- *
- * @param lpm
- * LPM object handle
- */
-void
-rte_lpm_delete_all(struct rte_lpm *lpm);
-
-/**
- * Lookup an IP into the LPM table.
- *
- * @param lpm
- * LPM object handle
- * @param ip
- * IP to be looked up in the LPM table
- * @param next_hop
- * Next hop of the most specific rule found for IP (valid on lookup hit only)
- * @return
- * -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
- */
-static inline int
-rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
-{
- unsigned tbl24_index = (ip >> 8);
- uint16_t tbl_entry;
-
- /* DEBUG: Check user input arguments. */
- RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
-
- /* Copy tbl24 entry */
- tbl_entry = *(const uint16_t *)&lpm->tbl24[tbl24_index];
-
- /* Copy tbl8 entry (only if needed) */
- if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
- RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
-
- unsigned tbl8_index = (uint8_t)ip +
- ((uint8_t)tbl_entry * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
-
- tbl_entry = *(const uint16_t *)&lpm->tbl8[tbl8_index];
- }
-
- *next_hop = (uint8_t)tbl_entry;
- return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
-}
-
-/**
- * Lookup multiple IP addresses in an LPM table. This may be implemented as a
- * macro, so the address of the function should not be used.
- *
- * @param lpm
- * LPM object handle
- * @param ips
- * Array of IPs to be looked up in the LPM table
- * @param next_hops
- * Next hop of the most specific rule found for IP (valid on lookup hit only).
- * This is an array of two byte values. The most significant byte in each
- * value says whether the lookup was successful (bitmask
- * RTE_LPM_LOOKUP_SUCCESS is set). The least significant byte is the
- * actual next hop.
- * @param n
- * Number of elements in ips (and next_hops) array to lookup. This should be a
- * compile time constant, and divisible by 8 for best performance.
- * @return
- * -EINVAL for incorrect arguments, otherwise 0
- */
-#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
- rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
-
-static inline int
-rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
- uint16_t * next_hops, const unsigned n)
-{
- unsigned i;
- unsigned tbl24_indexes[n];
-
- /* DEBUG: Check user input arguments. */
- RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
- (next_hops == NULL)), -EINVAL);
-
- for (i = 0; i < n; i++) {
- tbl24_indexes[i] = ips[i] >> 8;
- }
-
- for (i = 0; i < n; i++) {
- /* Simply copy tbl24 entry to output */
- next_hops[i] = *(const uint16_t *)&lpm->tbl24[tbl24_indexes[i]];
-
- /* Overwrite output with tbl8 entry if needed */
- if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
- RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
-
- unsigned tbl8_index = (uint8_t)ips[i] +
- ((uint8_t)next_hops[i] *
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
-
- next_hops[i] = *(const uint16_t *)&lpm->tbl8[tbl8_index];
- }
- }
- return 0;
-}
-
-/* Mask four results. */
-#define RTE_LPM_MASKX4_RES UINT64_C(0x00ff00ff00ff00ff)
-
-/**
- * Lookup four IP addresses in an LPM table.
- *
- * @param lpm
- * LPM object handle
- * @param ip
- * Four IPs to be looked up in the LPM table
- * @param hop
- * Next hop of the most specific rule found for IP (valid on lookup hit only).
- *   This is a 4-element array of two-byte values.
- *   If the lookup was successful for the given IP, then the least significant
- *   byte of the corresponding element is the actual next hop and the most
- *   significant byte is zero.
- *   If the lookup for the given IP failed, then the corresponding element
- *   contains the default value, see the description of the next parameter.
- * @param defv
- * Default value to populate into corresponding element of hop[] array,
- * if lookup would fail.
- */
-static inline void
-rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
- uint16_t defv)
-{
- __m128i i24;
- rte_xmm_t i8;
- uint16_t tbl[4];
- uint64_t idx, pt;
-
- const __m128i mask8 =
- _mm_set_epi32(UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX);
-
- /*
- * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 4 LPM entries
- * as one 64-bit value (0x0300030003000300).
- */
- const uint64_t mask_xv =
- ((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
- (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 16 |
- (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32 |
- (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 48);
-
- /*
- * RTE_LPM_LOOKUP_SUCCESS for 4 LPM entries
- * as one 64-bit value (0x0100010001000100).
- */
- const uint64_t mask_v =
- ((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
- (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 16 |
- (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32 |
- (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 48);
-
- /* get 4 indexes for tbl24[]. */
- i24 = _mm_srli_epi32(ip, CHAR_BIT);
-
- /* extract values from tbl24[] */
- idx = _mm_cvtsi128_si64(i24);
- i24 = _mm_srli_si128(i24, sizeof(uint64_t));
-
- tbl[0] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
- tbl[1] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
-
- idx = _mm_cvtsi128_si64(i24);
-
- tbl[2] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
- tbl[3] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
-
- /* get 4 indexes for tbl8[]. */
- i8.m = _mm_and_si128(ip, mask8);
-
- pt = (uint64_t)tbl[0] |
- (uint64_t)tbl[1] << 16 |
- (uint64_t)tbl[2] << 32 |
- (uint64_t)tbl[3] << 48;
-
- /* search successfully finished for all 4 IP addresses. */
- if (likely((pt & mask_xv) == mask_v)) {
- uintptr_t ph = (uintptr_t)hop;
- *(uint64_t *)ph = pt & RTE_LPM_MASKX4_RES;
- return;
- }
-
- if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
- RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
- i8.u32[0] = i8.u32[0] +
- (uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- tbl[0] = *(const uint16_t *)&lpm->tbl8[i8.u32[0]];
- }
- if (unlikely((pt >> 16 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
- RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
- i8.u32[1] = i8.u32[1] +
- (uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- tbl[1] = *(const uint16_t *)&lpm->tbl8[i8.u32[1]];
- }
- if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
- RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
- i8.u32[2] = i8.u32[2] +
- (uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- tbl[2] = *(const uint16_t *)&lpm->tbl8[i8.u32[2]];
- }
- if (unlikely((pt >> 48 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
- RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
- i8.u32[3] = i8.u32[3] +
- (uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- tbl[3] = *(const uint16_t *)&lpm->tbl8[i8.u32[3]];
- }
-
- hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[0] : defv;
- hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[1] : defv;
- hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[2] : defv;
- hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[3] : defv;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_LPM_H_ */
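A hedged usage sketch of the bulk-lookup convention documented above: the high
byte of each result carries the RTE_LPM_LOOKUP_SUCCESS flag, the low byte the
next hop. The forward() and handle_miss() helpers are hypothetical placeholders:

    #include <rte_lpm.h>

    static void forward(unsigned i, uint8_t hop) { (void)i; (void)hop; } /* hypothetical */
    static void handle_miss(unsigned i)          { (void)i; }            /* hypothetical */

    static void bulk_lookup_demo(const struct rte_lpm *lpm, const uint32_t ips[8])
    {
        uint16_t hops[8];
        unsigned i;

        rte_lpm_lookup_bulk(lpm, ips, hops, 8); /* n divisible by 8, as advised */

        for (i = 0; i < 8; i++) {
            if (hops[i] & RTE_LPM_LOOKUP_SUCCESS)
                forward(i, (uint8_t)hops[i]); /* low byte holds the next hop */
            else
                handle_miss(i);               /* no matching prefix */
        }
    }
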
diff --git a/src/dpdk_lib18/librte_lpm/rte_lpm6.c b/src/dpdk_lib18/librte_lpm/rte_lpm6.c
deleted file mode 100755
index 42e6d800..00000000
--- a/src/dpdk_lib18/librte_lpm/rte_lpm6.c
+++ /dev/null
@@ -1,892 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <string.h>
-#include <stdint.h>
-#include <errno.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <errno.h>
-#include <sys/queue.h>
-
-#include <rte_log.h>
-#include <rte_branch_prediction.h>
-#include <rte_common.h>
-#include <rte_memory.h>
-#include <rte_malloc.h>
-#include <rte_memzone.h>
-#include <rte_memcpy.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_eal_memconfig.h>
-#include <rte_per_lcore.h>
-#include <rte_string_fns.h>
-#include <rte_errno.h>
-#include <rte_rwlock.h>
-#include <rte_spinlock.h>
-
-#include "rte_lpm6.h"
-
-#define RTE_LPM6_TBL24_NUM_ENTRIES (1 << 24)
-#define RTE_LPM6_TBL8_GROUP_NUM_ENTRIES 256
-#define RTE_LPM6_TBL8_MAX_NUM_GROUPS (1 << 21)
-
-#define RTE_LPM6_VALID_EXT_ENTRY_BITMASK 0xA0000000
-#define RTE_LPM6_LOOKUP_SUCCESS 0x20000000
-#define RTE_LPM6_TBL8_BITMASK 0x001FFFFF
-
-#define ADD_FIRST_BYTE 3
-#define LOOKUP_FIRST_BYTE 4
-#define BYTE_SIZE 8
-#define BYTES2_SIZE 16
-
-#define lpm6_tbl8_gindex next_hop
-
-/** Flags for setting an entry as valid/invalid. */
-enum valid_flag {
- INVALID = 0,
- VALID
-};
-
-TAILQ_HEAD(rte_lpm6_list, rte_tailq_entry);
-
-/** Tbl entry structure. It is the same for both tbl24 and tbl8 */
-struct rte_lpm6_tbl_entry {
- uint32_t next_hop: 21; /**< Next hop / next table to be checked. */
- uint32_t depth :8; /**< Rule depth. */
-
- /* Flags. */
- uint32_t valid :1; /**< Validation flag. */
- uint32_t valid_group :1; /**< Group validation flag. */
- uint32_t ext_entry :1; /**< External entry. */
-};
-
-/** Rules tbl entry structure. */
-struct rte_lpm6_rule {
- uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */
- uint8_t next_hop; /**< Rule next hop. */
- uint8_t depth; /**< Rule depth. */
-};
-
-/** LPM6 structure. */
-struct rte_lpm6 {
- /* LPM metadata. */
- char name[RTE_LPM6_NAMESIZE]; /**< Name of the lpm. */
- uint32_t max_rules; /**< Max number of rules. */
- uint32_t used_rules; /**< Used rules so far. */
- uint32_t number_tbl8s; /**< Number of tbl8s to allocate. */
- uint32_t next_tbl8; /**< Next tbl8 to be used. */
-
- /* LPM Tables. */
- struct rte_lpm6_rule *rules_tbl; /**< LPM rules. */
- struct rte_lpm6_tbl_entry tbl24[RTE_LPM6_TBL24_NUM_ENTRIES]
- __rte_cache_aligned; /**< LPM tbl24 table. */
- struct rte_lpm6_tbl_entry tbl8[0]
- __rte_cache_aligned; /**< LPM tbl8 table. */
-};
-
-/*
- * Takes an array of uint8_t (IPv6 address) and masks it using the depth:
- * the most significant 'depth' bits are left untouched and the remaining
- * bits are set to 0.
- */
-static inline void
-mask_ip(uint8_t *ip, uint8_t depth)
-{
- int16_t part_depth, mask;
- int i;
-
- part_depth = depth;
-
- for (i = 0; i < RTE_LPM6_IPV6_ADDR_SIZE; i++) {
- if (part_depth < BYTE_SIZE && part_depth >= 0) {
- mask = (uint16_t)(~(UINT8_MAX >> part_depth));
- ip[i] = (uint8_t)(ip[i] & mask);
- } else if (part_depth < 0) {
- ip[i] = 0;
- }
- part_depth -= BYTE_SIZE;
- }
-}
-
-/*
- * Allocates memory for LPM object
- */
-struct rte_lpm6 *
-rte_lpm6_create(const char *name, int socket_id,
- const struct rte_lpm6_config *config)
-{
- char mem_name[RTE_LPM6_NAMESIZE];
- struct rte_lpm6 *lpm = NULL;
- struct rte_tailq_entry *te;
- uint64_t mem_size, rules_size;
- struct rte_lpm6_list *lpm_list;
-
- /* Check that we have an initialised tail queue */
- if ((lpm_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM6, rte_lpm6_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
-
- RTE_BUILD_BUG_ON(sizeof(struct rte_lpm6_tbl_entry) != sizeof(uint32_t));
-
- /* Check user arguments. */
- if ((name == NULL) || (socket_id < -1) || (config == NULL) ||
- (config->max_rules == 0) ||
- config->number_tbl8s > RTE_LPM6_TBL8_MAX_NUM_GROUPS) {
- rte_errno = EINVAL;
- return NULL;
- }
-
- snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
-
- /* Determine the amount of memory to allocate. */
- mem_size = sizeof(*lpm) + (sizeof(lpm->tbl8[0]) *
- RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
- rules_size = sizeof(struct rte_lpm6_rule) * config->max_rules;
-
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
-
-	/* Guarantee there's no existing entry with the same name */
- TAILQ_FOREACH(te, lpm_list, next) {
- lpm = (struct rte_lpm6 *) te->data;
- if (strncmp(name, lpm->name, RTE_LPM6_NAMESIZE) == 0)
- break;
- }
- if (te != NULL)
- goto exit;
-
- /* allocate tailq entry */
- te = rte_zmalloc("LPM6_TAILQ_ENTRY", sizeof(*te), 0);
- if (te == NULL) {
- RTE_LOG(ERR, LPM, "Failed to allocate tailq entry!\n");
- goto exit;
- }
-
- /* Allocate memory to store the LPM data structures. */
- lpm = (struct rte_lpm6 *)rte_zmalloc_socket(mem_name, (size_t)mem_size,
- RTE_CACHE_LINE_SIZE, socket_id);
-
- if (lpm == NULL) {
- RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
- rte_free(te);
- goto exit;
- }
-
- lpm->rules_tbl = (struct rte_lpm6_rule *)rte_zmalloc_socket(NULL,
- (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
-
- if (lpm->rules_tbl == NULL) {
- RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
- rte_free(lpm);
- rte_free(te);
- goto exit;
- }
-
- /* Save user arguments. */
- lpm->max_rules = config->max_rules;
- lpm->number_tbl8s = config->number_tbl8s;
- snprintf(lpm->name, sizeof(lpm->name), "%s", name);
-
- te->data = (void *) lpm;
-
- TAILQ_INSERT_TAIL(lpm_list, te, next);
-
-exit:
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
-
- return lpm;
-}
-
-/*
- * Find an existing lpm table and return a pointer to it.
- */
-struct rte_lpm6 *
-rte_lpm6_find_existing(const char *name)
-{
- struct rte_lpm6 *l = NULL;
- struct rte_tailq_entry *te;
- struct rte_lpm6_list *lpm_list;
-
- /* Check that we have an initialised tail queue */
- if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM6,
- rte_lpm6_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
-
- rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
- TAILQ_FOREACH(te, lpm_list, next) {
- l = (struct rte_lpm6 *) te->data;
- if (strncmp(name, l->name, RTE_LPM6_NAMESIZE) == 0)
- break;
- }
- rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
-
- if (te == NULL) {
- rte_errno = ENOENT;
- return NULL;
- }
-
- return l;
-}
-
-/*
- * Deallocates memory for given LPM table.
- */
-void
-rte_lpm6_free(struct rte_lpm6 *lpm)
-{
- struct rte_lpm6_list *lpm_list;
- struct rte_tailq_entry *te;
-
- /* Check user arguments. */
- if (lpm == NULL)
- return;
-
- /* check that we have an initialised tail queue */
- if ((lpm_list =
-	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM6, rte_lpm6_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return;
- }
-
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
-
- /* find our tailq entry */
- TAILQ_FOREACH(te, lpm_list, next) {
- if (te->data == (void *) lpm)
- break;
- }
- if (te == NULL) {
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
- return;
- }
-
- TAILQ_REMOVE(lpm_list, te, next);
-
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
-
- rte_free(lpm);
- rte_free(te);
-}
-
-/*
- * Checks if a rule already exists in the rules table and updates
- * the next hop if so. Otherwise it adds a new rule if enough space is available.
- */
-static inline int32_t
-rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t next_hop, uint8_t depth)
-{
- uint32_t rule_index;
-
- /* Scan through rule list to see if rule already exists. */
- for (rule_index = 0; rule_index < lpm->used_rules; rule_index++) {
-
- /* If rule already exists update its next_hop and return. */
- if ((memcmp (lpm->rules_tbl[rule_index].ip, ip,
- RTE_LPM6_IPV6_ADDR_SIZE) == 0) &&
- lpm->rules_tbl[rule_index].depth == depth) {
- lpm->rules_tbl[rule_index].next_hop = next_hop;
-
- return rule_index;
- }
- }
-
- /*
-	 * If the rule does not exist, check if there is space to add a new
-	 * rule to this rule group. If there is no space, return an error.
- */
- if (lpm->used_rules == lpm->max_rules) {
- return -ENOSPC;
- }
-
- /* If there is space for the new rule add it. */
- rte_memcpy(lpm->rules_tbl[rule_index].ip, ip, RTE_LPM6_IPV6_ADDR_SIZE);
- lpm->rules_tbl[rule_index].next_hop = next_hop;
- lpm->rules_tbl[rule_index].depth = depth;
-
- /* Increment the used rules counter for this rule group. */
- lpm->used_rules++;
-
- return rule_index;
-}
-
-/*
- * Function that expands a rule across the data structure when a less-generic
- * one has been added before. It assures that every possible combination of bits
- * in the IP address returns a match.
- */
-static void
-expand_rule(struct rte_lpm6 *lpm, uint32_t tbl8_gindex, uint8_t depth,
- uint8_t next_hop)
-{
- uint32_t tbl8_group_end, tbl8_gindex_next, j;
-
- tbl8_group_end = tbl8_gindex + RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
-
- struct rte_lpm6_tbl_entry new_tbl8_entry = {
- .valid = VALID,
- .valid_group = VALID,
- .depth = depth,
- .next_hop = next_hop,
- .ext_entry = 0,
- };
-
- for (j = tbl8_gindex; j < tbl8_group_end; j++) {
- if (!lpm->tbl8[j].valid || (lpm->tbl8[j].ext_entry == 0
- && lpm->tbl8[j].depth <= depth)) {
-
- lpm->tbl8[j] = new_tbl8_entry;
-
- } else if (lpm->tbl8[j].ext_entry == 1) {
-
- tbl8_gindex_next = lpm->tbl8[j].lpm6_tbl8_gindex
- * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
- expand_rule(lpm, tbl8_gindex_next, depth, next_hop);
- }
- }
-}
-
-/*
- * Partially adds a new route to the data structure (tbl24+tbl8s).
- * It returns 0 on success, a negative number on failure, or 1 if
- * the process needs to be continued by calling the function again.
- */
-static inline int
-add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
- struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip, uint8_t bytes,
- uint8_t first_byte, uint8_t depth, uint8_t next_hop)
-{
- uint32_t tbl_index, tbl_range, tbl8_group_start, tbl8_group_end, i;
- int32_t tbl8_gindex;
- int8_t bitshift;
- uint8_t bits_covered;
-
- /*
- * Calculate index to the table based on the number and position
- * of the bytes being inspected in this step.
- */
- tbl_index = 0;
- for (i = first_byte; i < (uint32_t)(first_byte + bytes); i++) {
- bitshift = (int8_t)((bytes - i)*BYTE_SIZE);
-
- if (bitshift < 0) bitshift = 0;
- tbl_index = tbl_index | ip[i-1] << bitshift;
- }
-
- /* Number of bits covered in this step */
- bits_covered = (uint8_t)((bytes+first_byte-1)*BYTE_SIZE);
-
- /*
-	 * If depth is not greater than this number (i.e. this is the last step),
- * expand the rule across the relevant positions in the table.
- */
- if (depth <= bits_covered) {
- tbl_range = 1 << (bits_covered - depth);
-
- for (i = tbl_index; i < (tbl_index + tbl_range); i++) {
- if (!tbl[i].valid || (tbl[i].ext_entry == 0 &&
- tbl[i].depth <= depth)) {
-
- struct rte_lpm6_tbl_entry new_tbl_entry = {
- .next_hop = next_hop,
- .depth = depth,
- .valid = VALID,
- .valid_group = VALID,
- .ext_entry = 0,
- };
-
- tbl[i] = new_tbl_entry;
-
- } else if (tbl[i].ext_entry == 1) {
-
- /*
- * If tbl entry is valid and extended calculate the index
- * into next tbl8 and expand the rule across the data structure.
- */
- tbl8_gindex = tbl[i].lpm6_tbl8_gindex *
- RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
- expand_rule(lpm, tbl8_gindex, depth, next_hop);
- }
- }
-
- return 0;
- }
- /*
- * If this is not the last step just fill one position
- * and calculate the index to the next table.
- */
- else {
- /* If it's invalid a new tbl8 is needed */
- if (!tbl[tbl_index].valid) {
- if (lpm->next_tbl8 < lpm->number_tbl8s)
- tbl8_gindex = (lpm->next_tbl8)++;
- else
- return -ENOSPC;
-
- struct rte_lpm6_tbl_entry new_tbl_entry = {
- .lpm6_tbl8_gindex = tbl8_gindex,
- .depth = 0,
- .valid = VALID,
- .valid_group = VALID,
- .ext_entry = 1,
- };
-
- tbl[tbl_index] = new_tbl_entry;
- }
- /*
-		 * If it's valid but not extended, the rule that was stored
- * here needs to be moved to the next table.
- */
- else if (tbl[tbl_index].ext_entry == 0) {
- /* Search for free tbl8 group. */
- if (lpm->next_tbl8 < lpm->number_tbl8s)
- tbl8_gindex = (lpm->next_tbl8)++;
- else
- return -ENOSPC;
-
- tbl8_group_start = tbl8_gindex *
- RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
- tbl8_group_end = tbl8_group_start +
- RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
-
- /* Populate new tbl8 with tbl value. */
- for (i = tbl8_group_start; i < tbl8_group_end; i++) {
- lpm->tbl8[i].valid = VALID;
- lpm->tbl8[i].depth = tbl[tbl_index].depth;
- lpm->tbl8[i].next_hop = tbl[tbl_index].next_hop;
- lpm->tbl8[i].ext_entry = 0;
- }
-
- /*
-			 * Update tbl entry to point to the new tbl8 entry. Note: the
-			 * ext_entry flag and lpm6_tbl8_gindex need to be updated
-			 * simultaneously, so assign the whole structure in one go.
- */
- struct rte_lpm6_tbl_entry new_tbl_entry = {
- .lpm6_tbl8_gindex = tbl8_gindex,
- .depth = 0,
- .valid = VALID,
- .valid_group = VALID,
- .ext_entry = 1,
- };
-
- tbl[tbl_index] = new_tbl_entry;
- }
-
- *tbl_next = &(lpm->tbl8[tbl[tbl_index].lpm6_tbl8_gindex *
- RTE_LPM6_TBL8_GROUP_NUM_ENTRIES]);
- }
-
- return 1;
-}
-
-/*
- * Add a route
- */
-int
-rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
- uint8_t next_hop)
-{
- struct rte_lpm6_tbl_entry *tbl;
- struct rte_lpm6_tbl_entry *tbl_next;
- int32_t rule_index;
- int status;
- uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
- int i;
-
- /* Check user arguments. */
- if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
- return -EINVAL;
-
- /* Copy the IP and mask it to avoid modifying user's input data. */
- memcpy(masked_ip, ip, RTE_LPM6_IPV6_ADDR_SIZE);
- mask_ip(masked_ip, depth);
-
- /* Add the rule to the rule table. */
- rule_index = rule_add(lpm, masked_ip, next_hop, depth);
-
-	/* If there is no space available for the new rule, return an error. */
- if (rule_index < 0) {
- return rule_index;
- }
-
- /* Inspect the first three bytes through tbl24 on the first step. */
- tbl = lpm->tbl24;
- status = add_step (lpm, tbl, &tbl_next, masked_ip, ADD_FIRST_BYTE, 1,
- depth, next_hop);
- if (status < 0) {
- rte_lpm6_delete(lpm, masked_ip, depth);
-
- return status;
- }
-
- /*
- * Inspect one by one the rest of the bytes until
- * the process is completed.
- */
- for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && status == 1; i++) {
- tbl = tbl_next;
- status = add_step (lpm, tbl, &tbl_next, masked_ip, 1, (uint8_t)(i+1),
- depth, next_hop);
- if (status < 0) {
- rte_lpm6_delete(lpm, masked_ip, depth);
-
- return status;
- }
- }
-
- return status;
-}
-
-/*
- * Takes a pointer to a table entry and inspect one level.
- * The function returns 0 on lookup success, -ENOENT if no match was found,
- * or 1 if the process needs to be continued by calling the function again.
- */
-static inline int
-lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
- const struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip,
- uint8_t first_byte, uint8_t *next_hop)
-{
- uint32_t tbl8_index, tbl_entry;
-
- /* Take the integer value from the pointer. */
- tbl_entry = *(const uint32_t *)tbl;
-
- /* If it is valid and extended we calculate the new pointer to return. */
- if ((tbl_entry & RTE_LPM6_VALID_EXT_ENTRY_BITMASK) ==
- RTE_LPM6_VALID_EXT_ENTRY_BITMASK) {
-
- tbl8_index = ip[first_byte-1] +
- ((tbl_entry & RTE_LPM6_TBL8_BITMASK) *
- RTE_LPM6_TBL8_GROUP_NUM_ENTRIES);
-
- *tbl_next = &lpm->tbl8[tbl8_index];
-
- return 1;
- } else {
- /* If not extended then we can have a match. */
- *next_hop = (uint8_t)tbl_entry;
- return (tbl_entry & RTE_LPM6_LOOKUP_SUCCESS) ? 0 : -ENOENT;
- }
-}
-
-/*
- * Looks up an IP
- */
-int
-rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop)
-{
- const struct rte_lpm6_tbl_entry *tbl;
- const struct rte_lpm6_tbl_entry *tbl_next;
- int status;
- uint8_t first_byte;
- uint32_t tbl24_index;
-
- /* DEBUG: Check user input arguments. */
- if ((lpm == NULL) || (ip == NULL) || (next_hop == NULL)) {
- return -EINVAL;
- }
-
- first_byte = LOOKUP_FIRST_BYTE;
- tbl24_index = (ip[0] << BYTES2_SIZE) | (ip[1] << BYTE_SIZE) | ip[2];
-
- /* Calculate pointer to the first entry to be inspected */
- tbl = &lpm->tbl24[tbl24_index];
-
- do {
- /* Continue inspecting following levels until success or failure */
- status = lookup_step(lpm, tbl, &tbl_next, ip, first_byte++, next_hop);
- tbl = tbl_next;
- } while (status == 1);
-
- return status;
-}
-
-/*
- * Looks up a group of IP addresses
- */
-int
-rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
- uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
- int16_t * next_hops, unsigned n)
-{
- unsigned i;
- const struct rte_lpm6_tbl_entry *tbl;
- const struct rte_lpm6_tbl_entry *tbl_next;
- uint32_t tbl24_index;
- uint8_t first_byte, next_hop;
- int status;
-
- /* DEBUG: Check user input arguments. */
- if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL)) {
- return -EINVAL;
- }
-
- for (i = 0; i < n; i++) {
- first_byte = LOOKUP_FIRST_BYTE;
- tbl24_index = (ips[i][0] << BYTES2_SIZE) |
- (ips[i][1] << BYTE_SIZE) | ips[i][2];
-
- /* Calculate pointer to the first entry to be inspected */
- tbl = &lpm->tbl24[tbl24_index];
-
- do {
- /* Continue inspecting following levels until success or failure */
- status = lookup_step(lpm, tbl, &tbl_next, ips[i], first_byte++,
- &next_hop);
- tbl = tbl_next;
- } while (status == 1);
-
- if (status < 0)
- next_hops[i] = -1;
- else
- next_hops[i] = next_hop;
- }
-
- return 0;
-}
-
-/*
- * Finds a rule in rule table.
- * NOTE: Valid range for depth parameter is 1 .. 128 inclusive.
- */
-static inline int32_t
-rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
-{
- uint32_t rule_index;
-
- /* Scan used rules at given depth to find rule. */
- for (rule_index = 0; rule_index < lpm->used_rules; rule_index++) {
- /* If rule is found return the rule index. */
- if ((memcmp (lpm->rules_tbl[rule_index].ip, ip,
- RTE_LPM6_IPV6_ADDR_SIZE) == 0) &&
- lpm->rules_tbl[rule_index].depth == depth) {
-
- return rule_index;
- }
- }
-
- /* If rule is not found return -ENOENT. */
- return -ENOENT;
-}
-
-/*
- * Look for a rule in the high-level rules table
- */
-int
-rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
-uint8_t *next_hop)
-{
- uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
- int32_t rule_index;
-
- /* Check user arguments. */
- if ((lpm == NULL) || next_hop == NULL || ip == NULL ||
- (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
- return -EINVAL;
-
- /* Copy the IP and mask it to avoid modifying user's input data. */
- memcpy(ip_masked, ip, RTE_LPM6_IPV6_ADDR_SIZE);
- mask_ip(ip_masked, depth);
-
- /* Look for the rule using rule_find. */
- rule_index = rule_find(lpm, ip_masked, depth);
-
- if (rule_index >= 0) {
- *next_hop = lpm->rules_tbl[rule_index].next_hop;
- return 1;
- }
-
- /* If rule is not found return 0. */
- return 0;
-}
-
-/*
- * Delete a rule from the rule table.
- * NOTE: Valid range for depth parameter is 1 .. 128 inclusive.
- */
-static inline void
-rule_delete(struct rte_lpm6 *lpm, int32_t rule_index)
-{
- /*
- * Overwrite redundant rule with last rule in group and decrement rule
- * counter.
- */
- lpm->rules_tbl[rule_index] = lpm->rules_tbl[lpm->used_rules-1];
- lpm->used_rules--;
-}
-
-/*
- * Deletes a rule
- */
-int
-rte_lpm6_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
-{
- int32_t rule_to_delete_index;
- uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
- unsigned i;
-
- /*
- * Check input arguments.
- */
- if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH)) {
- return -EINVAL;
- }
-
- /* Copy the IP and mask it to avoid modifying user's input data. */
- memcpy(ip_masked, ip, RTE_LPM6_IPV6_ADDR_SIZE);
- mask_ip(ip_masked, depth);
-
- /*
- * Find the index of the input rule, that needs to be deleted, in the
- * rule table.
- */
- rule_to_delete_index = rule_find(lpm, ip_masked, depth);
-
- /*
- * Check if rule_to_delete_index was found. If no rule was found the
- * function rule_find returns -ENOENT.
- */
- if (rule_to_delete_index < 0)
- return rule_to_delete_index;
-
- /* Delete the rule from the rule table. */
- rule_delete(lpm, rule_to_delete_index);
-
- /*
-	 * Set all the table entries to 0 (i.e. delete every rule
-	 * from the data structure).
- */
- lpm->next_tbl8 = 0;
- memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
- memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
- * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
-
- /*
- * Add every rule again (except for the one that was removed from
- * the rules table).
- */
- for (i = 0; i < lpm->used_rules; i++) {
- rte_lpm6_add(lpm, lpm->rules_tbl[i].ip, lpm->rules_tbl[i].depth,
- lpm->rules_tbl[i].next_hop);
- }
-
- return 0;
-}
-
-/*
- * Deletes a group of rules
- */
-int
-rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
- uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths, unsigned n)
-{
- int32_t rule_to_delete_index;
- uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
- unsigned i;
-
- /*
- * Check input arguments.
- */
- if ((lpm == NULL) || (ips == NULL) || (depths == NULL)) {
- return -EINVAL;
- }
-
- for (i = 0; i < n; i++) {
- /* Copy the IP and mask it to avoid modifying user's input data. */
- memcpy(ip_masked, ips[i], RTE_LPM6_IPV6_ADDR_SIZE);
- mask_ip(ip_masked, depths[i]);
-
- /*
- * Find the index of the input rule, that needs to be deleted, in the
- * rule table.
- */
- rule_to_delete_index = rule_find(lpm, ip_masked, depths[i]);
-
- /*
- * Check if rule_to_delete_index was found. If no rule was found the
- * function rule_find returns -ENOENT.
- */
- if (rule_to_delete_index < 0)
- continue;
-
- /* Delete the rule from the rule table. */
- rule_delete(lpm, rule_to_delete_index);
- }
-
- /*
-	 * Set all the table entries to 0 (i.e. delete every rule
-	 * from the data structure).
- */
- lpm->next_tbl8 = 0;
- memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
- memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
- * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
-
- /*
- * Add every rule again (except for the ones that were removed from
- * the rules table).
- */
- for (i = 0; i < lpm->used_rules; i++) {
- rte_lpm6_add(lpm, lpm->rules_tbl[i].ip, lpm->rules_tbl[i].depth,
- lpm->rules_tbl[i].next_hop);
- }
-
- return 0;
-}
-
-/*
- * Delete all rules from the LPM table.
- */
-void
-rte_lpm6_delete_all(struct rte_lpm6 *lpm)
-{
- /* Zero used rules counter. */
- lpm->used_rules = 0;
-
- /* Zero next tbl8 index. */
- lpm->next_tbl8 = 0;
-
- /* Zero tbl24. */
- memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
-
- /* Zero tbl8. */
- memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0]) *
- RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
-
-	/* Delete all rules from the rules table. */
- memset(lpm->rules_tbl, 0, sizeof(struct rte_lpm6_rule) * lpm->max_rules);
-}
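One practical consequence of the implementation above: every call to
rte_lpm6_delete() wipes tbl24/tbl8 and re-adds all remaining rules, so removing
k rules one at a time triggers k full rebuilds, while the bulk variant pays for
a single rebuild. A sketch under that assumption (prefix values are illustrative):

    #include <rte_lpm6.h>

    static void lpm6_bulk_delete_demo(struct rte_lpm6 *lpm)
    {
        /* 2001:db8::/32 and 2001:db8:1::/48, zero-padded to 16 bytes. */
        uint8_t ips[2][RTE_LPM6_IPV6_ADDR_SIZE] = {
            { 0x20, 0x01, 0x0d, 0xb8 },
            { 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01 },
        };
        uint8_t depths[2] = { 32, 48 };

        /* One tbl24/tbl8 rebuild instead of two. */
        rte_lpm6_delete_bulk_func(lpm, ips, depths, 2);
    }
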
diff --git a/src/dpdk_lib18/librte_lpm/rte_lpm6.h b/src/dpdk_lib18/librte_lpm/rte_lpm6.h
deleted file mode 100755
index 4db810f9..00000000
--- a/src/dpdk_lib18/librte_lpm/rte_lpm6.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _RTE_LPM6_H_
-#define _RTE_LPM6_H_
-
-/**
- * @file
- * RTE Longest Prefix Match for IPv6 (LPM6)
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-#define RTE_LPM6_MAX_DEPTH 128
-#define RTE_LPM6_IPV6_ADDR_SIZE 16
-/** Max number of characters in LPM name. */
-#define RTE_LPM6_NAMESIZE 32
-
-/** LPM structure. */
-struct rte_lpm6;
-
-/** LPM configuration structure. */
-struct rte_lpm6_config {
- uint32_t max_rules; /**< Max number of rules. */
- uint32_t number_tbl8s; /**< Number of tbl8s to allocate. */
- int flags; /**< This field is currently unused. */
-};
-
-/**
- * Create an LPM object.
- *
- * @param name
- * LPM object name
- * @param socket_id
- * NUMA socket ID for LPM table memory allocation
- * @param config
- * Structure containing the configuration
- * @return
- * Handle to LPM object on success, NULL otherwise with rte_errno set
- *   to an appropriate value. Possible rte_errno values include:
- * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
- * - E_RTE_SECONDARY - function was called from a secondary process instance
- *    - E_RTE_NO_TAILQ - no tailq list could be obtained for the lpm object list
- * - EINVAL - invalid parameter passed to function
- * - ENOSPC - the maximum number of memzones has already been allocated
- * - EEXIST - a memzone with the same name already exists
- * - ENOMEM - no appropriate memory area found in which to create memzone
- */
-struct rte_lpm6 *
-rte_lpm6_create(const char *name, int socket_id,
- const struct rte_lpm6_config *config);
-
-/**
- * Find an existing LPM object and return a pointer to it.
- *
- * @param name
- * Name of the lpm object as passed to rte_lpm6_create()
- * @return
- * Pointer to lpm object or NULL if object not found with rte_errno
- * set appropriately. Possible rte_errno values include:
- * - ENOENT - required entry not available to return.
- */
-struct rte_lpm6 *
-rte_lpm6_find_existing(const char *name);
-
-/**
- * Free an LPM object.
- *
- * @param lpm
- * LPM object handle
- * @return
- * None
- */
-void
-rte_lpm6_free(struct rte_lpm6 *lpm);
-
-/**
- * Add a rule to the LPM table.
- *
- * @param lpm
- * LPM object handle
- * @param ip
- * IP of the rule to be added to the LPM table
- * @param depth
- * Depth of the rule to be added to the LPM table
- * @param next_hop
- * Next hop of the rule to be added to the LPM table
- * @return
- * 0 on success, negative value otherwise
- */
-int
-rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
- uint8_t next_hop);
-
-/**
- * Check if a rule is present in the LPM table,
- * and provide its next hop if it is.
- *
- * @param lpm
- * LPM object handle
- * @param ip
- * IP of the rule to be searched
- * @param depth
- *   Depth of the rule to be searched
- * @param next_hop
- * Next hop of the rule (valid only if it is found)
- * @return
- * 1 if the rule exists, 0 if it does not, a negative value on failure
- */
-int
-rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
-uint8_t *next_hop);
-
-/**
- * Delete a rule from the LPM table.
- *
- * @param lpm
- * LPM object handle
- * @param ip
- * IP of the rule to be deleted from the LPM table
- * @param depth
- * Depth of the rule to be deleted from the LPM table
- * @return
- * 0 on success, negative value otherwise
- */
-int
-rte_lpm6_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth);
-
-/**
- * Delete a group of rules from the LPM table.
- *
- * @param lpm
- * LPM object handle
- * @param ips
- * Array of IPs to be deleted from the LPM table
- * @param depths
- * Array of depths of the rules to be deleted from the LPM table
- * @param n
- * Number of rules to be deleted from the LPM table
- * @return
- * 0 on success, negative value otherwise.
- */
-int
-rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
- uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths, unsigned n);
-
-/**
- * Delete all rules from the LPM table.
- *
- * @param lpm
- * LPM object handle
- */
-void
-rte_lpm6_delete_all(struct rte_lpm6 *lpm);
-
-/**
- * Lookup an IP into the LPM table.
- *
- * @param lpm
- * LPM object handle
- * @param ip
- * IP to be looked up in the LPM table
- * @param next_hop
- * Next hop of the most specific rule found for IP (valid on lookup hit only)
- * @return
- * -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
- */
-int
-rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop);
-
-/**
- * Lookup multiple IP addresses in an LPM table.
- *
- * @param lpm
- * LPM object handle
- * @param ips
- * Array of IPs to be looked up in the LPM table
- * @param next_hops
- * Next hop of the most specific rule found for IP (valid on lookup hit only).
- *   This is an array of two-byte values. On success the next hop is stored
- *   in the corresponding position; otherwise the position is set to -1.
- * @param n
- * Number of elements in ips (and next_hops) array to lookup.
- * @return
- * -EINVAL for incorrect arguments, otherwise 0
- */
-int
-rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
- uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
- int16_t * next_hops, unsigned n);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
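A minimal end-to-end sketch of this IPv6 API, assuming DPDK 1.8 (values are
illustrative; SOCKET_ID_ANY comes from rte_memory.h, and error handling is
elided):

    #include <rte_lpm6.h>
    #include <rte_memory.h>

    static void lpm6_demo(void)
    {
        uint8_t hop = 0;
        /* 2001:db8::/32, zero-padded to 16 bytes. */
        uint8_t pfx[RTE_LPM6_IPV6_ADDR_SIZE] = { 0x20, 0x01, 0x0d, 0xb8 };
        /* 2001:db8::1 */
        uint8_t addr[RTE_LPM6_IPV6_ADDR_SIZE] = { 0x20, 0x01, 0x0d, 0xb8,
                                                  0, 0, 0, 0, 0, 0, 0, 0,
                                                  0, 0, 0, 1 };
        struct rte_lpm6_config cfg = {
            .max_rules = 1024,
            .number_tbl8s = 1 << 16,
            .flags = 0,
        };
        struct rte_lpm6 *lpm = rte_lpm6_create("demo6", SOCKET_ID_ANY, &cfg);

        if (lpm == NULL)
            return;

        rte_lpm6_add(lpm, pfx, 32, 7);

        if (rte_lpm6_lookup(lpm, addr, &hop) == 0) {
            /* hop == 7 */
        }

        rte_lpm6_free(lpm);
    }
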
diff --git a/src/dpdk_lib18/librte_malloc/Makefile b/src/dpdk_lib18/librte_malloc/Makefile
deleted file mode 100755
index ba87e34b..00000000
--- a/src/dpdk_lib18/librte_malloc/Makefile
+++ /dev/null
@@ -1,48 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_malloc.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_MALLOC) := rte_malloc.c malloc_elem.c malloc_heap.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_MALLOC)-include := rte_malloc.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MALLOC) += lib/librte_eal
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_mbuf/Makefile b/src/dpdk_lib18/librte_mbuf/Makefile
deleted file mode 100755
index 9b45ba43..00000000
--- a/src/dpdk_lib18/librte_mbuf/Makefile
+++ /dev/null
@@ -1,48 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_mbuf.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-# all sources are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_MBUF) := rte_mbuf.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF)-include := rte_mbuf.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF) += lib/librte_eal lib/librte_mempool
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_mempool/Makefile b/src/dpdk_lib18/librte_mempool/Makefile
deleted file mode 100755
index 9939e100..00000000
--- a/src/dpdk_lib18/librte_mempool/Makefile
+++ /dev/null
@@ -1,51 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_mempool.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-# all sources are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool.c
-ifeq ($(CONFIG_RTE_LIBRTE_XEN_DOM0),y)
-SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_dom0_mempool.c
-endif
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
-
-# this lib needs eal, rte_ring and rte_malloc
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += lib/librte_eal lib/librte_ring
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += lib/librte_malloc
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_mempool/rte_dom0_mempool.c b/src/dpdk_lib18/librte_mempool/rte_dom0_mempool.c
deleted file mode 100755
index 9ec68fb3..00000000
--- a/src/dpdk_lib18/librte_mempool/rte_dom0_mempool.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <stdarg.h>
-#include <inttypes.h>
-#include <errno.h>
-#include <sys/queue.h>
-
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_atomic.h>
-#include <rte_launch.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_eal_memconfig.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_branch_prediction.h>
-#include <rte_ring.h>
-#include <rte_errno.h>
-#include <rte_string_fns.h>
-#include <rte_spinlock.h>
-
-#include "rte_mempool.h"
-
-static void
-get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num,
- uint32_t pg_sz, uint32_t memseg_id)
-{
- uint32_t i;
- uint64_t virt_addr, mfn_id;
- struct rte_mem_config *mcfg;
- uint32_t page_size = getpagesize();
-
- /* get pointer to global configuration */
- mcfg = rte_eal_get_configuration()->mem_config;
- virt_addr = (uintptr_t) mcfg->memseg[memseg_id].addr;
-
- for (i = 0; i != pg_num; i++) {
- mfn_id = ((uintptr_t)va + i * pg_sz - virt_addr) / RTE_PGSIZE_2M;
- pa[i] = mcfg->memseg[memseg_id].mfn[mfn_id] * page_size;
- }
-}
-
-/* create the mempool for supporting Dom0 */
-struct rte_mempool *
-rte_dom0_mempool_create(const char *name, unsigned elt_num, unsigned elt_size,
- unsigned cache_size, unsigned private_data_size,
- rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags)
-{
- struct rte_mempool *mp = NULL;
- phys_addr_t *pa;
- char *va;
- size_t sz;
- uint32_t pg_num, pg_shift, pg_sz, total_size;
- const struct rte_memzone *mz;
- char mz_name[RTE_MEMZONE_NAMESIZE];
- int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
-
- pg_sz = RTE_PGSIZE_2M;
-
- pg_shift = rte_bsf32(pg_sz);
- total_size = rte_mempool_calc_obj_size(elt_size, flags, NULL);
-
- /* calc max memory size and max number of pages needed. */
- sz = rte_mempool_xmem_size(elt_num, total_size, pg_shift) +
- RTE_PGSIZE_2M;
- pg_num = sz >> pg_shift;
-
- /* allocate a table to hold the physical mappings. */
- pa = calloc(pg_num, sizeof (*pa));
- if (pa == NULL)
- return mp;
-
- snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_OBJ_NAME, name);
- mz = rte_memzone_reserve(mz_name, sz, socket_id, mz_flags);
- if (mz == NULL) {
- free(pa);
- return mp;
- }
-
- va = (char *)RTE_ALIGN_CEIL((uintptr_t)mz->addr, RTE_PGSIZE_2M);
- /* extract physical mappings of the allocated memory. */
- get_phys_map(va, pa, pg_num, pg_sz, mz->memseg_id);
-
- mp = rte_mempool_xmem_create(name, elt_num, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags, va, pa, pg_num, pg_shift);
-
- free(pa);
-
- return (mp);
-}
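
A sketch of how the constructor above would typically be called, assuming a
tree built with CONFIG_RTE_LIBRTE_XEN_DOM0; the pool name and sizing are
illustrative assumptions, not values from this tree:

    #include <stdio.h>
    #include <rte_mempool.h>

    /* Create a Dom0-aware pool: 4096 elements of 2 KB each, 32-deep
     * per-lcore cache, on socket 0. rte_dom0_mempool_create() reserves the
     * memzone itself and resolves Xen machine frame numbers into the
     * physical-address table it hands to rte_mempool_xmem_create(). */
    static struct rte_mempool *
    make_dom0_pool(void)
    {
            struct rte_mempool *mp;

            mp = rte_dom0_mempool_create("dom0_pool", 4096, 2048, 32, 0,
                            NULL, NULL, NULL, NULL, 0, 0);
            if (mp == NULL)
                    printf("cannot create Dom0 mempool\n");
            return mp;
    }
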
diff --git a/src/dpdk_lib18/librte_meter/Makefile b/src/dpdk_lib18/librte_meter/Makefile
deleted file mode 100755
index b25c0cc4..00000000
--- a/src/dpdk_lib18/librte_meter/Makefile
+++ /dev/null
@@ -1,53 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_meter.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-#
-# all sources are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_METER) := rte_meter.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_METER)-include := rte_meter.h
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_METER) += lib/librte_eal
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_meter/rte_meter.c b/src/dpdk_lib18/librte_meter/rte_meter.c
deleted file mode 100755
index 5e2dadb8..00000000
--- a/src/dpdk_lib18/librte_meter/rte_meter.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <inttypes.h>
-#include <stdio.h>
-#include <math.h>
-
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_cycles.h>
-
-#include "rte_meter.h"
-
-#ifndef RTE_METER_TB_PERIOD_MIN
-#define RTE_METER_TB_PERIOD_MIN 100
-#endif
-
-static void
-rte_meter_get_tb_params(uint64_t hz, uint64_t rate, uint64_t *tb_period, uint64_t *tb_bytes_per_period)
-{
- double period = ((double) hz) / ((double) rate);
-
- if (period >= RTE_METER_TB_PERIOD_MIN) {
- *tb_bytes_per_period = 1;
- *tb_period = (uint64_t) period;
- } else {
- *tb_bytes_per_period = (uint64_t) ceil(RTE_METER_TB_PERIOD_MIN / period);
- *tb_period = (hz * (*tb_bytes_per_period)) / rate;
- }
-}
-
-int
-rte_meter_srtcm_config(struct rte_meter_srtcm *m, struct rte_meter_srtcm_params *params)
-{
- uint64_t hz;
-
- /* Check input parameters */
- if ((m == NULL) || (params == NULL)) {
- return -1;
- }
-
- if ((params->cir == 0) || ((params->cbs == 0) && (params->ebs == 0))) {
- return -2;
- }
-
- /* Initialize srTCM run-time structure */
- hz = rte_get_tsc_hz();
- m->time = rte_get_tsc_cycles();
- m->tc = m->cbs = params->cbs;
- m->te = m->ebs = params->ebs;
- rte_meter_get_tb_params(hz, params->cir, &m->cir_period, &m->cir_bytes_per_period);
-
- RTE_LOG(INFO, METER, "Low level srTCM config: \n"
- "\tCIR period = %" PRIu64 ", CIR bytes per period = %" PRIu64 "\n",
- m->cir_period, m->cir_bytes_per_period);
-
- return 0;
-}
-
-int
-rte_meter_trtcm_config(struct rte_meter_trtcm *m, struct rte_meter_trtcm_params *params)
-{
- uint64_t hz;
-
- /* Check input parameters */
- if ((m == NULL) || (params == NULL)) {
- return -1;
- }
-
- if ((params->cir == 0) || (params->pir == 0) || (params->pir < params->cir) ||
- (params->cbs == 0) || (params->pbs == 0)) {
- return -2;
- }
-
- /* Initialize trTCM run-time structure */
- hz = rte_get_tsc_hz();
- m->time_tc = m->time_tp = rte_get_tsc_cycles();
- m->tc = m->cbs = params->cbs;
- m->tp = m->pbs = params->pbs;
- rte_meter_get_tb_params(hz, params->cir, &m->cir_period, &m->cir_bytes_per_period);
- rte_meter_get_tb_params(hz, params->pir, &m->pir_period, &m->pir_bytes_per_period);
-
- RTE_LOG(INFO, METER, "Low level trTCM config: \n"
- "\tCIR period = %" PRIu64 ", CIR bytes per period = %" PRIu64 "\n"
- "\tPIR period = %" PRIu64 ", PIR bytes per period = %" PRIu64 "\n",
- m->cir_period, m->cir_bytes_per_period,
- m->pir_period, m->pir_bytes_per_period);
-
- return 0;
-}
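
The token-bucket derivation in rte_meter_get_tb_params() above is easiest to
follow with concrete numbers. A standalone sketch (the 2 GHz TSC and the
1 Gbps CIR are illustrative assumptions): the raw period of 16 cycles per
byte falls below RTE_METER_TB_PERIOD_MIN (100), so the bucket is credited
7 bytes every 112 cycles instead of 1 byte every 16 cycles:

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t hz = 2000000000ULL;  /* assumed 2 GHz TSC  */
            uint64_t cir = 125000000ULL;  /* 1 Gbps, in bytes/s */

            double period = (double)hz / (double)cir;         /* 16.0 */
            uint64_t bytes = (uint64_t)ceil(100.0 / period);  /* 7    */
            uint64_t tb_period = (hz * bytes) / cir;          /* 112  */

            /* 7 bytes per 112 cycles at 2e9 cycles/s == 125e6 bytes/s */
            assert(bytes == 7 && tb_period == 112);
            return 0;
    }
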
diff --git a/src/dpdk_lib18/librte_meter/rte_meter.h b/src/dpdk_lib18/librte_meter/rte_meter.h
deleted file mode 100755
index 92728a5b..00000000
--- a/src/dpdk_lib18/librte_meter/rte_meter.h
+++ /dev/null
@@ -1,387 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __INCLUDE_RTE_METER_H__
-#define __INCLUDE_RTE_METER_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * @file
- * RTE Traffic Metering
- *
- * Traffic metering algorithms:
- * 1. Single Rate Three Color Marker (srTCM): defined by IETF RFC 2697
- * 2. Two Rate Three Color Marker (trTCM): defined by IETF RFC 2698
- *
- ***/
-
-#include <stdint.h>
-
-/*
- * Application Programmer's Interface (API)
- *
- ***/
-
-/** Packet Color Set */
-enum rte_meter_color {
- e_RTE_METER_GREEN = 0, /**< Green */
- e_RTE_METER_YELLOW, /**< Yellow */
- e_RTE_METER_RED, /**< Red */
- e_RTE_METER_COLORS /**< Number of available colors */
-};
-
-/** srTCM parameters per metered traffic flow. The CIR, CBS and EBS parameters only
-count bytes of IP packets and do not include link specific headers. At least one of
-the CBS or EBS parameters has to be greater than zero. */
-struct rte_meter_srtcm_params {
- uint64_t cir; /**< Committed Information Rate (CIR). Measured in bytes per second. */
- uint64_t cbs; /**< Committed Burst Size (CBS). Measured in bytes. */
- uint64_t ebs; /**< Excess Burst Size (EBS). Measured in bytes. */
-};
-
-/** trTCM parameters per metered traffic flow. The CIR, PIR, CBS and PBS parameters
-only count bytes of IP packets and do not include link specific headers. PIR has to
-be greater than or equal to CIR. Both CBS and PBS have to be greater than zero. */
-struct rte_meter_trtcm_params {
- uint64_t cir; /**< Committed Information Rate (CIR). Measured in bytes per second. */
- uint64_t pir; /**< Peak Information Rate (PIR). Measured in bytes per second. */
- uint64_t cbs; /**< Committed Burst Size (CBS). Measured in bytes. */
- uint64_t pbs; /**< Peak Burst Size (PBS). Measured in bytes. */
-};
-
-/** Internal data structure storing the srTCM run-time context per metered traffic flow. */
-struct rte_meter_srtcm;
-
-/** Internal data structure storing the trTCM run-time context per metered traffic flow. */
-struct rte_meter_trtcm;
-
-/**
- * srTCM configuration per metered traffic flow
- *
- * @param m
- * Pointer to pre-allocated srTCM data structure
- * @param params
- * User parameters per srTCM metered traffic flow
- * @return
- * 0 upon success, error code otherwise
- */
-int
-rte_meter_srtcm_config(struct rte_meter_srtcm *m,
- struct rte_meter_srtcm_params *params);
-
-/**
- * trTCM configuration per metered traffic flow
- *
- * @param m
- * Pointer to pre-allocated trTCM data structure
- * @param params
- * User parameters per trTCM metered traffic flow
- * @return
- * 0 upon success, error code otherwise
- */
-int
-rte_meter_trtcm_config(struct rte_meter_trtcm *m,
- struct rte_meter_trtcm_params *params);
-
-/**
- * srTCM color blind traffic metering
- *
- * @param m
- * Handle to srTCM instance
- * @param time
- * Current CPU time stamp (measured in CPU cycles)
- * @param pkt_len
- * Length of the current IP packet (measured in bytes)
- * @return
- * Color assigned to the current IP packet
- */
-static inline enum rte_meter_color
-rte_meter_srtcm_color_blind_check(struct rte_meter_srtcm *m,
- uint64_t time,
- uint32_t pkt_len);
-
-/**
- * srTCM color aware traffic metering
- *
- * @param m
- * Handle to srTCM instance
- * @param time
- * Current CPU time stamp (measured in CPU cycles)
- * @param pkt_len
- * Length of the current IP packet (measured in bytes)
- * @param pkt_color
- * Input color of the current IP packet
- * @return
- * Color assigned to the current IP packet
- */
-static inline enum rte_meter_color
-rte_meter_srtcm_color_aware_check(struct rte_meter_srtcm *m,
- uint64_t time,
- uint32_t pkt_len,
- enum rte_meter_color pkt_color);
-
-/**
- * trTCM color blind traffic metering
- *
- * @param m
- * Handle to trTCM instance
- * @param time
- * Current CPU time stamp (measured in CPU cycles)
- * @param pkt_len
- * Length of the current IP packet (measured in bytes)
- * @return
- * Color assigned to the current IP packet
- */
-static inline enum rte_meter_color
-rte_meter_trtcm_color_blind_check(struct rte_meter_trtcm *m,
- uint64_t time,
- uint32_t pkt_len);
-
-/**
- * trTCM color aware traffic metering
- *
- * @param m
- * Handle to trTCM instance
- * @param time
- * Current CPU time stamp (measured in CPU cycles)
- * @param pkt_len
- * Length of the current IP packet (measured in bytes)
- * @param pkt_color
- * Input color of the current IP packet
- * @return
- * Color assigned to the current IP packet
- */
-static inline enum rte_meter_color
-rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m,
- uint64_t time,
- uint32_t pkt_len,
- enum rte_meter_color pkt_color);
-
-/*
- * Inline implementation of run-time methods
- *
- ***/
-
-/* Internal data structure storing the srTCM run-time context per metered traffic flow. */
-struct rte_meter_srtcm {
- uint64_t time; /* Time of latest update of C and E token buckets */
- uint64_t tc; /* Number of bytes currently available in the committed (C) token bucket */
- uint64_t te; /* Number of bytes currently available in the excess (E) token bucket */
- uint64_t cbs; /* Upper limit for C token bucket */
- uint64_t ebs; /* Upper limit for E token bucket */
- uint64_t cir_period; /* Number of CPU cycles for one update of C and E token buckets */
- uint64_t cir_bytes_per_period; /* Number of bytes to add to C and E token buckets on each update */
-};
-
-/* Internal data structure storing the trTCM run-time context per metered traffic flow. */
-struct rte_meter_trtcm {
- uint64_t time_tc; /* Time of latest update of C token bucket */
- uint64_t time_tp; /* Time of latest update of P token bucket */
- uint64_t tc; /* Number of bytes currently available in the committed (C) token bucket */
- uint64_t tp; /* Number of bytes currently available in the peak (P) token bucket */
- uint64_t cbs; /* Upper limit for C token bucket */
- uint64_t pbs; /* Upper limit for P token bucket */
- uint64_t cir_period; /* Number of CPU cycles for one update of C token bucket */
- uint64_t cir_bytes_per_period; /* Number of bytes to add to C token bucket on each update */
- uint64_t pir_period; /* Number of CPU cycles for one update of P token bucket */
- uint64_t pir_bytes_per_period; /* Number of bytes to add to P token bucket on each update */
-};
-
-static inline enum rte_meter_color
-rte_meter_srtcm_color_blind_check(struct rte_meter_srtcm *m,
- uint64_t time,
- uint32_t pkt_len)
-{
- uint64_t time_diff, n_periods, tc, te;
-
- /* Bucket update */
- time_diff = time - m->time;
- n_periods = time_diff / m->cir_period;
- m->time += n_periods * m->cir_period;
-
- tc = m->tc + n_periods * m->cir_bytes_per_period;
- if (tc > m->cbs)
- tc = m->cbs;
-
- te = m->te + n_periods * m->cir_bytes_per_period;
- if (te > m->ebs)
- te = m->ebs;
-
- /* Color logic */
- if (tc >= pkt_len) {
- m->tc = tc - pkt_len;
- m->te = te;
- return e_RTE_METER_GREEN;
- }
-
- if (te >= pkt_len) {
- m->tc = tc;
- m->te = te - pkt_len;
- return e_RTE_METER_YELLOW;
- }
-
- m->tc = tc;
- m->te = te;
- return e_RTE_METER_RED;
-}
-
-static inline enum rte_meter_color
-rte_meter_srtcm_color_aware_check(struct rte_meter_srtcm *m,
- uint64_t time,
- uint32_t pkt_len,
- enum rte_meter_color pkt_color)
-{
- uint64_t time_diff, n_periods, tc, te;
-
- /* Bucket update */
- time_diff = time - m->time;
- n_periods = time_diff / m->cir_period;
- m->time += n_periods * m->cir_period;
-
- tc = m->tc + n_periods * m->cir_bytes_per_period;
- if (tc > m->cbs)
- tc = m->cbs;
-
- te = m->te + n_periods * m->cir_bytes_per_period;
- if (te > m->ebs)
- te = m->ebs;
-
- /* Color logic */
- if ((pkt_color == e_RTE_METER_GREEN) && (tc >= pkt_len)) {
- m->tc = tc - pkt_len;
- m->te = te;
- return e_RTE_METER_GREEN;
- }
-
- if ((pkt_color != e_RTE_METER_RED) && (te >= pkt_len)) {
- m->tc = tc;
- m->te = te - pkt_len;
- return e_RTE_METER_YELLOW;
- }
-
- m->tc = tc;
- m->te = te;
- return e_RTE_METER_RED;
-}
-
-static inline enum rte_meter_color
-rte_meter_trtcm_color_blind_check(struct rte_meter_trtcm *m,
- uint64_t time,
- uint32_t pkt_len)
-{
- uint64_t time_diff_tc, time_diff_tp, n_periods_tc, n_periods_tp, tc, tp;
-
- /* Bucket update */
- time_diff_tc = time - m->time_tc;
- time_diff_tp = time - m->time_tp;
- n_periods_tc = time_diff_tc / m->cir_period;
- n_periods_tp = time_diff_tp / m->pir_period;
- m->time_tc += n_periods_tc * m->cir_period;
- m->time_tp += n_periods_tp * m->pir_period;
-
- tc = m->tc + n_periods_tc * m->cir_bytes_per_period;
- if (tc > m->cbs)
- tc = m->cbs;
-
- tp = m->tp + n_periods_tp * m->pir_bytes_per_period;
- if (tp > m->pbs)
- tp = m->pbs;
-
- /* Color logic */
- if (tp < pkt_len) {
- m->tc = tc;
- m->tp = tp;
- return e_RTE_METER_RED;
- }
-
- if (tc < pkt_len) {
- m->tc = tc;
- m->tp = tp - pkt_len;
- return e_RTE_METER_YELLOW;
- }
-
- m->tc = tc - pkt_len;
- m->tp = tp - pkt_len;
- return e_RTE_METER_GREEN;
-}
-
-static inline enum rte_meter_color
-rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m,
- uint64_t time,
- uint32_t pkt_len,
- enum rte_meter_color pkt_color)
-{
- uint64_t time_diff_tc, time_diff_tp, n_periods_tc, n_periods_tp, tc, tp;
-
- /* Bucket update */
- time_diff_tc = time - m->time_tc;
- time_diff_tp = time - m->time_tp;
- n_periods_tc = time_diff_tc / m->cir_period;
- n_periods_tp = time_diff_tp / m->pir_period;
- m->time_tc += n_periods_tc * m->cir_period;
- m->time_tp += n_periods_tp * m->pir_period;
-
- tc = m->tc + n_periods_tc * m->cir_bytes_per_period;
- if (tc > m->cbs)
- tc = m->cbs;
-
- tp = m->tp + n_periods_tp * m->pir_bytes_per_period;
- if (tp > m->pbs)
- tp = m->pbs;
-
- /* Color logic */
- if ((pkt_color == e_RTE_METER_RED) || (tp < pkt_len)) {
- m->tc = tc;
- m->tp = tp;
- return e_RTE_METER_RED;
- }
-
- if ((pkt_color == e_RTE_METER_YELLOW) || (tc < pkt_len)) {
- m->tc = tc;
- m->tp = tp - pkt_len;
- return e_RTE_METER_YELLOW;
- }
-
- m->tc = tc - pkt_len;
- m->tp = tp - pkt_len;
- return e_RTE_METER_GREEN;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __INCLUDE_RTE_METER_H__ */
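
A minimal color-blind use of the API above, assuming a 1 MB/s CIR with 2 KB
committed and excess bursts (all values chosen purely for illustration):

    #include <rte_cycles.h>
    #include "rte_meter.h"

    static struct rte_meter_srtcm flow_meter;

    static int
    flow_meter_init(void)
    {
            struct rte_meter_srtcm_params p = {
                    .cir = 1000000, /* bytes per second */
                    .cbs = 2048,    /* bytes            */
                    .ebs = 2048,    /* bytes            */
            };
            return rte_meter_srtcm_config(&flow_meter, &p);
    }

    /* Returns nonzero if the packet should be dropped (colored RED). */
    static int
    flow_meter_drop(uint32_t ip_pkt_len)
    {
            enum rte_meter_color c = rte_meter_srtcm_color_blind_check(
                            &flow_meter, rte_get_tsc_cycles(), ip_pkt_len);
            return c == e_RTE_METER_RED;
    }
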
diff --git a/src/dpdk_lib18/librte_net/Makefile b/src/dpdk_lib18/librte_net/Makefile
deleted file mode 100755
index ad2e482d..00000000
--- a/src/dpdk_lib18/librte_net/Makefile
+++ /dev/null
@@ -1,40 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_NET)-include := rte_ip.h rte_tcp.h rte_udp.h rte_sctp.h rte_icmp.h rte_arp.h
-
-
-include $(RTE_SDK)/mk/rte.install.mk
diff --git a/src/dpdk_lib18/librte_pipeline/Makefile b/src/dpdk_lib18/librte_pipeline/Makefile
deleted file mode 100755
index cf8fde8a..00000000
--- a/src/dpdk_lib18/librte_pipeline/Makefile
+++ /dev/null
@@ -1,54 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_pipeline.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-#
-# all sources are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := rte_pipeline.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_PIPELINE)-include += rte_pipeline.h
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) := lib/librte_table
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib/librte_port
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_af_packet/Makefile b/src/dpdk_lib18/librte_pmd_af_packet/Makefile
deleted file mode 100755
index 6955e5c2..00000000
--- a/src/dpdk_lib18/librte_pmd_af_packet/Makefile
+++ /dev/null
@@ -1,60 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2014 John W. Linville <linville@redhat.com>
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# Copyright(c) 2014 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_pmd_af_packet.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-#
-# all sources are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += rte_eth_af_packet.c
-
-#
-# Export include files
-#
-SYMLINK-y-include += rte_eth_af_packet.h
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_malloc
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_kvargs
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_bond/Makefile b/src/dpdk_lib18/librte_pmd_bond/Makefile
deleted file mode 100755
index cdff1262..00000000
--- a/src/dpdk_lib18/librte_pmd_bond/Makefile
+++ /dev/null
@@ -1,67 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_pmd_bond.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-#
-# all sources are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_api.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_pmd.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_args.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_8023ad.c
-
-ifeq ($(CONFIG_RTE_MBUF_REFCNT),n)
-$(info WARNING: Link Bonding Broadcast mode is disabled because it needs MBUF_REFCNT.)
-endif
-
-#
-# Export include files
-#
-SYMLINK-y-include += rte_eth_bond.h
-SYMLINK-y-include += rte_eth_bond_8023ad.h
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_malloc
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_kvargs
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad.c b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad.c
deleted file mode 100755
index f1cf81a6..00000000
--- a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad.c
+++ /dev/null
@@ -1,1216 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stddef.h>
-#include <string.h>
-#include <stdbool.h>
-
-#include <rte_alarm.h>
-#include <rte_malloc.h>
-#include <rte_errno.h>
-#include <rte_cycles.h>
-
-#include "rte_eth_bond_private.h"
-
-#ifdef RTE_LIBRTE_BOND_DEBUG_8023AD
-#define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \
- bond_dbg_get_time_diff_ms(), slave_id, \
- __func__, ##__VA_ARGS__)
-
-static uint64_t start_time;
-
-static unsigned
-bond_dbg_get_time_diff_ms(void)
-{
- uint64_t now;
-
- now = rte_rdtsc();
- if (start_time == 0)
- start_time = now;
-
- return ((now - start_time) * 1000) / rte_get_tsc_hz();
-}
-
-static void
-bond_print_lacp(struct lacpdu *l)
-{
- char a_address[18];
- char p_address[18];
- char a_state[256] = { 0 };
- char p_state[256] = { 0 };
-
- static const char * const state_labels[] = {
- "ACT", "TIMEOUT", "AGG", "SYNC", "COL", "DIST", "DEF", "EXP"
- };
-
- int a_len = 0;
- int p_len = 0;
- uint8_t i;
- uint8_t *addr;
-
- addr = l->actor.port_params.system.addr_bytes;
- snprintf(a_address, sizeof(a_address), "%02X:%02X:%02X:%02X:%02X:%02X",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
-
- addr = l->partner.port_params.system.addr_bytes;
- snprintf(p_address, sizeof(p_address), "%02X:%02X:%02X:%02X:%02X:%02X",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
-
- for (i = 0; i < 8; i++) {
- if ((l->actor.state >> i) & 1) {
- a_len += snprintf(&a_state[a_len], RTE_DIM(a_state) - a_len, "%s ",
- state_labels[i]);
- }
-
- if ((l->partner.state >> i) & 1) {
- p_len += snprintf(&p_state[p_len], RTE_DIM(p_state) - p_len, "%s ",
- state_labels[i]);
- }
- }
-
- if (a_len && a_state[a_len-1] == ' ')
- a_state[a_len-1] = '\0';
-
- if (p_len && p_state[p_len-1] == ' ')
- p_state[p_len-1] = '\0';
-
- RTE_LOG(DEBUG, PMD, "LACP: {\n"\
- " subtype= %02X\n"\
- " ver_num=%02X\n"\
- " actor={ tlv=%02X, len=%02X\n"\
- " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
- " state={ %s }\n"\
- " }\n"\
- " partner={ tlv=%02X, len=%02X\n"\
- " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
- " state={ %s }\n"\
- " }\n"\
- " collector={info=%02X, length=%02X, max_delay=%04X\n, " \
- "type_term=%02X, terminator_length = %02X}\n",\
- l->subtype,\
- l->version_number,\
- l->actor.tlv_type_info,\
- l->actor.info_length,\
- l->actor.port_params.system_priority,\
- a_address,\
- l->actor.port_params.key,\
- l->actor.port_params.port_priority,\
- l->actor.port_params.port_number,\
- a_state,\
- l->partner.tlv_type_info,\
- l->partner.info_length,\
- l->partner.port_params.system_priority,\
- p_address,\
- l->partner.port_params.key,\
- l->partner.port_params.port_priority,\
- l->partner.port_params.port_number,\
- p_state,\
- l->tlv_type_collector_info,\
- l->collector_info_length,\
- l->collector_max_delay,\
- l->tlv_type_terminator,\
- l->terminator_length);
-
-}
-#define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu)
-#else
-#define BOND_PRINT_LACP(lacpdu) do { } while (0)
-#define MODE4_DEBUG(fmt, ...) do { } while (0)
-#endif
-
-static const struct ether_addr lacp_mac_addr = {
- .addr_bytes = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 }
-};
-
-struct port mode_8023ad_ports[RTE_MAX_ETHPORTS];
-
-static void
-timer_cancel(uint64_t *timer)
-{
- *timer = 0;
-}
-
-static void
-timer_set(uint64_t *timer, uint64_t timeout)
-{
- *timer = rte_rdtsc() + timeout;
-}
-
-/* Forces given timer to be in expired state. */
-static void
-timer_force_expired(uint64_t *timer)
-{
- *timer = rte_rdtsc();
-}
-
-static bool
-timer_is_stopped(uint64_t *timer)
-{
- return *timer == 0;
-}
-
-static bool
-timer_is_expired(uint64_t *timer)
-{
- return *timer < rte_rdtsc();
-}
-
-/* Timer is in the running state if it is neither stopped nor expired */
-static bool
-timer_is_running(uint64_t *timer)
-{
- return !timer_is_stopped(timer) && !timer_is_expired(timer);
-}
-
-static void
-set_warning_flags(struct port *port, uint16_t flags)
-{
- int retval;
- uint16_t old;
- uint16_t new_flag = 0;
-
- do {
- old = port->warnings_to_show;
- new_flag = old | flags;
- retval = rte_atomic16_cmpset(&port->warnings_to_show, old, new_flag);
- } while (unlikely(retval == 0));
-}
-
-static void
-show_warnings(uint8_t slave_id)
-{
- struct port *port = &mode_8023ad_ports[slave_id];
- uint8_t warnings;
-
- do {
- warnings = port->warnings_to_show;
- } while (rte_atomic16_cmpset(&port->warnings_to_show, warnings, 0) == 0);
-
- if (!warnings)
- return;
-
- if (!timer_is_expired(&port->warning_timer))
- return;
-
-
- timer_set(&port->warning_timer, BOND_8023AD_WARNINGS_PERIOD_MS *
- rte_get_tsc_hz() / 1000);
-
- if (warnings & WRN_RX_QUEUE_FULL) {
- RTE_LOG(DEBUG, PMD,
- "Slave %u: failed to enqueue LACP packet into RX ring.\n"
- "Receive and transmit functions must be invoked on bonded\n"
- "interface at least 10 times per second or LACP will not\n"
- "work correctly\n", slave_id);
- }
-
- if (warnings & WRN_TX_QUEUE_FULL) {
- RTE_LOG(DEBUG, PMD,
- "Slave %u: failed to enqueue LACP packet into TX ring.\n"
- "Receive and transmit functions must be invoked on bonded\n"
- "interface at least 10 times per second or LACP will not\n"
- "work correctly\n", slave_id);
- }
-
- if (warnings & WRN_RX_MARKER_TO_FAST)
- RTE_LOG(INFO, PMD, "Slave %u: marker to early - ignoring.\n", slave_id);
-
- if (warnings & WRN_UNKNOWN_SLOW_TYPE) {
- RTE_LOG(INFO, PMD,
- "Slave %u: ignoring unknown slow protocol frame type", slave_id);
- }
-
- if (warnings & WRN_UNKNOWN_MARKER_TYPE)
- RTE_LOG(INFO, PMD, "Slave %u: ignoring unknown marker type", slave_id);
-
- if (warnings & WRN_NOT_LACP_CAPABLE)
- MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id);
-}
-
-static void
-record_default(struct port *port)
-{
- /* Record default parameters for the partner. Partner admin parameters
- * are not implemented, so set them to an arbitrary default (last known)
- * and mark in the actor state that the partner is defaulted. */
- port->partner_state = STATE_LACP_ACTIVE;
- ACTOR_STATE_SET(port, DEFAULTED);
-}
-
-/** Function handles the rx state machine.
- *
- * This function implements the Receive State Machine from section 5.4.12 of
- * the 802.1AX documentation. It should be called periodically.
- *
- * @param internals Bonding device private data.
- * @param slave_id Slave port on which the LACPDU was received.
- * @param lacp LACPDU received, or NULL if none arrived.
- */
-static void
-rx_machine(struct bond_dev_private *internals, uint8_t slave_id,
- struct lacpdu *lacp)
-{
- struct port *agg, *port = &mode_8023ad_ports[slave_id];
- uint64_t timeout;
-
- if (SM_FLAG(port, BEGIN)) {
- /* (Re)initialize the port state */
- MODE4_DEBUG("-> INITIALIZE\n");
- SM_FLAG_CLR(port, MOVED);
- port->selected = UNSELECTED;
-
- record_default(port);
-
- ACTOR_STATE_CLR(port, EXPIRED);
- timer_cancel(&port->current_while_timer);
-
- /* DISABLED: On initialization partner is out of sync */
- PARTNER_STATE_CLR(port, SYNCHRONIZATION);
-
- /* Apply the LACP DISABLED actions if LACP is not enabled on this port */
- if (!SM_FLAG(port, LACP_ENABLED))
- PARTNER_STATE_CLR(port, AGGREGATION);
- else
- PARTNER_STATE_SET(port, AGGREGATION);
- }
-
- if (!SM_FLAG(port, LACP_ENABLED)) {
- /* Update parameters only if state changed */
- if (!timer_is_stopped(&port->current_while_timer)) {
- port->selected = UNSELECTED;
- record_default(port);
- PARTNER_STATE_CLR(port, AGGREGATION);
- ACTOR_STATE_CLR(port, EXPIRED);
- timer_cancel(&port->current_while_timer);
- }
- return;
- }
-
- if (lacp) {
- MODE4_DEBUG("LACP -> CURRENT\n");
- BOND_PRINT_LACP(lacp);
- /* Update the selected flag. If partner parameters are defaulted, assume
- * they match. If not defaulted, compare the LACPDU actor params with
- * this port's recorded partner params. */
- if (!ACTOR_STATE(port, DEFAULTED) &&
- (ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION)
- || memcmp(&port->partner, &lacp->actor.port_params,
- sizeof(port->partner)) != 0)) {
- MODE4_DEBUG("selected <- UNSELECTED\n");
- port->selected = UNSELECTED;
- }
-
- /* Record this PDU actor params as partner params */
- memcpy(&port->partner, &lacp->actor.port_params,
- sizeof(struct port_params));
- port->partner_state = lacp->actor.state;
-
- /* Partner parameters are not defaulted any more */
- ACTOR_STATE_CLR(port, DEFAULTED);
-
- /* If LACP partner params match this port actor params */
- agg = &mode_8023ad_ports[port->aggregator_port_id];
- bool match = port->actor.system_priority ==
- lacp->partner.port_params.system_priority &&
- is_same_ether_addr(&agg->actor.system,
- &lacp->partner.port_params.system) &&
- port->actor.port_priority ==
- lacp->partner.port_params.port_priority &&
- port->actor.port_number ==
- lacp->partner.port_params.port_number;
-
- /* Update NTT if the partner's information is outdated (XORed and
- * masked bits are set) */
- uint8_t state_mask = STATE_LACP_ACTIVE | STATE_LACP_SHORT_TIMEOUT |
- STATE_SYNCHRONIZATION | STATE_AGGREGATION;
-
- if (((port->actor_state ^ lacp->partner.state) & state_mask) ||
- match == false) {
- SM_FLAG_SET(port, NTT);
- }
-
- /* Decide the partner SYNCHRONIZATION bit from the match result */
- if (match == true && ACTOR_STATE(port, AGGREGATION) ==
- PARTNER_STATE(port, AGGREGATION))
- PARTNER_STATE_SET(port, SYNCHRONIZATION);
- else if (!PARTNER_STATE(port, AGGREGATION) && ACTOR_STATE(port,
- AGGREGATION))
- PARTNER_STATE_SET(port, SYNCHRONIZATION);
- else
- PARTNER_STATE_CLR(port, SYNCHRONIZATION);
-
- if (ACTOR_STATE(port, LACP_SHORT_TIMEOUT))
- timeout = internals->mode4.short_timeout;
- else
- timeout = internals->mode4.long_timeout;
-
- timer_set(&port->current_while_timer, timeout);
- ACTOR_STATE_CLR(port, EXPIRED);
- return; /* No state change */
- }
-
- /* If the CURRENT state timer is not running (stopped or expired),
- * transition to the EXPIRED state from DISABLED or CURRENT */
- if (!timer_is_running(&port->current_while_timer)) {
- ACTOR_STATE_SET(port, EXPIRED);
- PARTNER_STATE_CLR(port, SYNCHRONIZATION);
- PARTNER_STATE_SET(port, LACP_SHORT_TIMEOUT);
- timer_set(&port->current_while_timer, internals->mode4.short_timeout);
- }
-}
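-
-/* In short: rx_machine moves a port between DEFAULTED (no partner known,
- * defaults recorded), CURRENT (a fresh LACPDU arrived within
- * current_while_timer) and EXPIRED (the timer ran out, so the partner is
- * forced to a short timeout until the next PDU arrives). */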
-
-/**
- * Function handles the periodic tx state machine.
- *
- * Function implements the Periodic Transmission state machine from section
- * 5.4.13 of the 802.1AX documentation. It should be called periodically.
- *
- * @param internals Bonding device private data.
- * @param slave_id Slave port to handle.
- */
-static void
-periodic_machine(struct bond_dev_private *internals, uint8_t slave_id)
-{
- struct port *port = &mode_8023ad_ports[slave_id];
- /* Check whether either side is LACP active */
- uint64_t timeout;
- uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) ||
- PARTNER_STATE(port, LACP_ACTIVE);
-
- uint8_t is_partner_fast, was_partner_fast;
- /* No periodic transmission on BEGIN, when LACP is disabled, or when both sides are passive */
- if (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) {
- timer_cancel(&port->periodic_timer);
- timer_force_expired(&port->tx_machine_timer);
- SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
-
- MODE4_DEBUG("-> NO_PERIODIC ( %s%s%s)\n",
- SM_FLAG(port, BEGIN) ? "begind " : "",
- SM_FLAG(port, LACP_ENABLED) ? "" : "LACP disabled ",
- active ? "LACP active " : "LACP pasive ");
- return;
- }
-
- is_partner_fast = PARTNER_STATE(port, LACP_SHORT_TIMEOUT);
- was_partner_fast = SM_FLAG(port, PARTNER_SHORT_TIMEOUT);
-
- /* If the periodic timer is stopped, transition from NO PERIODIC to
- * FAST/SLOW below. Otherwise check whether the timer expired or the
- * partner's settings changed. */
- if (!timer_is_stopped(&port->periodic_timer)) {
- if (timer_is_expired(&port->periodic_timer)) {
- SM_FLAG_SET(port, NTT);
- } else if (is_partner_fast != was_partner_fast) {
- /* The partner's timeout was slow and is now fast -> send LACP.
- * In the other case (was fast, now slow) just switch the
- * timeout to slow without forcing a LACP send (because the
- * standard says so) */
- if (!is_partner_fast)
- SM_FLAG_SET(port, NTT);
- } else
- return; /* Nothing changed */
- }
-
- /* Handle state transition to FAST/SLOW LACP timeout */
- if (is_partner_fast) {
- timeout = internals->mode4.fast_periodic_timeout;
- SM_FLAG_SET(port, PARTNER_SHORT_TIMEOUT);
- } else {
- timeout = internals->mode4.slow_periodic_timeout;
- SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
- }
-
- timer_set(&port->periodic_timer, timeout);
-}
-
-/**
- * Function handles the mux state machine.
- *
- * Function implements the Mux Machine from section 5.4.15 of the 802.1AX
- * documentation. It should be called periodically.
- *
- * @param internals Bonding device private data.
- * @param slave_id Slave port to handle.
- */
-static void
-mux_machine(struct bond_dev_private *internals, uint8_t slave_id)
-{
- struct port *port = &mode_8023ad_ports[slave_id];
-
- /* Mask of the mux-related actor state bits */
- const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
- STATE_COLLECTING;
-
- /* Enter the DETACHED state on the BEGIN condition, or from any other
- * state if the port was unselected */
- if (SM_FLAG(port, BEGIN) ||
- port->selected == UNSELECTED || (port->selected == STANDBY &&
- (port->actor_state & state_mask) != 0)) {
- /* detach mux from aggregator */
- port->actor_state &= ~state_mask;
- /* Set NTT on the BEGIN condition, or on a transition from any other
- * state, which is indicated by wait_while_timer having been started */
- if (SM_FLAG(port, BEGIN) ||
- !timer_is_stopped(&port->wait_while_timer)) {
- SM_FLAG_SET(port, NTT);
- MODE4_DEBUG("-> DETACHED\n");
- }
- timer_cancel(&port->wait_while_timer);
- }
-
- if (timer_is_stopped(&port->wait_while_timer)) {
- if (port->selected == SELECTED || port->selected == STANDBY) {
- timer_set(&port->wait_while_timer,
- internals->mode4.aggregate_wait_timeout);
-
- MODE4_DEBUG("DETACHED -> WAITING\n");
- }
- /* Waiting state entered */
- return;
- }
-
- /* Transition to the next state once the wait_while timer has expired */
- if (!timer_is_expired(&port->wait_while_timer))
- return;
-
- if ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) &&
- !PARTNER_STATE(port, SYNCHRONIZATION)) {
- /* If in the COLLECTING or DISTRIBUTING state and the partner becomes
- * out of sync, transition to the ATTACHED state. */
- ACTOR_STATE_CLR(port, DISTRIBUTING);
- ACTOR_STATE_CLR(port, COLLECTING);
- /* Clear actor sync to trigger the ATTACHED transition in the condition below */
- ACTOR_STATE_CLR(port, SYNCHRONIZATION);
- MODE4_DEBUG("Out of sync -> ATTACHED\n");
- }
-
- if (!ACTOR_STATE(port, SYNCHRONIZATION)) {
- /* attach mux to aggregator */
- RTE_VERIFY((port->actor_state & (STATE_COLLECTING |
- STATE_DISTRIBUTING)) == 0);
-
- ACTOR_STATE_SET(port, SYNCHRONIZATION);
- SM_FLAG_SET(port, NTT);
- MODE4_DEBUG("ATTACHED Entered\n");
- } else if (!ACTOR_STATE(port, COLLECTING)) {
- /* Start collecting if in sync */
- if (PARTNER_STATE(port, SYNCHRONIZATION)) {
- MODE4_DEBUG("ATTACHED -> COLLECTING\n");
- ACTOR_STATE_SET(port, COLLECTING);
- SM_FLAG_SET(port, NTT);
- }
- } else if (ACTOR_STATE(port, COLLECTING)) {
- /* Check if partner is in COLLECTING state. If so this port can
- * distribute frames to it */
- if (!ACTOR_STATE(port, DISTRIBUTING)) {
- if (PARTNER_STATE(port, COLLECTING)) {
- /* Enable DISTRIBUTING if partner is collecting */
- ACTOR_STATE_SET(port, DISTRIBUTING);
- SM_FLAG_SET(port, NTT);
- MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n");
- RTE_LOG(INFO, PMD,
- "Bond %u: slave id %u distributing started.\n",
- internals->port_id, slave_id);
- }
- } else {
- if (!PARTNER_STATE(port, COLLECTING)) {
- /* Disable DISTRIBUTING (enter COLLECTING state) if partner
- * is not collecting */
- ACTOR_STATE_CLR(port, DISTRIBUTING);
- SM_FLAG_SET(port, NTT);
- MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n");
- RTE_LOG(INFO, PMD,
- "Bond %u: slave id %u distributing stopped.\n",
- internals->port_id, slave_id);
- }
- }
- }
-}
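-
-/* The resulting mux progression is:
- *
- *   DETACHED -> WAITING -> ATTACHED -> COLLECTING -> DISTRIBUTING
- *
- * with the reverse edges taken whenever the port becomes unselected or the
- * partner drops out of SYNCHRONIZATION. */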
-
-/**
- * Function handles the transmit state machine.
- *
- * Function implements the Transmit Machine from section 5.4.16 of the
- * 802.1AX documentation.
- *
- * @param internals Bonding device private data.
- * @param slave_id Slave port to handle.
- */
-static void
-tx_machine(struct bond_dev_private *internals, uint8_t slave_id)
-{
- struct port *agg, *port = &mode_8023ad_ports[slave_id];
-
- struct rte_mbuf *lacp_pkt = NULL;
- struct lacpdu_header *hdr;
- struct lacpdu *lacpdu;
-
- /* If the periodic timer is not running, the periodic machine is in NO
- * PERIODIC, and according to the 802.1AX standard the tx machine must not
- * transmit any frames and must set NTT to false. */
- if (timer_is_stopped(&port->periodic_timer))
- SM_FLAG_CLR(port, NTT);
-
- if (!SM_FLAG(port, NTT))
- return;
-
- if (!timer_is_expired(&port->tx_machine_timer))
- return;
-
- lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool);
- if (lacp_pkt == NULL) {
- RTE_LOG(ERR, PMD, "Failed to allocate LACP packet from pool\n");
- return;
- }
-
- lacp_pkt->data_len = sizeof(*hdr);
- lacp_pkt->pkt_len = sizeof(*hdr);
-
- hdr = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
-
- /* Source and destination MAC */
- ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.d_addr);
- rte_eth_macaddr_get(slave_id, &hdr->eth_hdr.s_addr);
- hdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW);
-
- lacpdu = &hdr->lacpdu;
- memset(lacpdu, 0, sizeof(*lacpdu));
-
- /* Initialize LACP part */
- lacpdu->subtype = SLOW_SUBTYPE_LACP;
- lacpdu->version_number = 1;
-
- /* ACTOR */
- lacpdu->actor.tlv_type_info = TLV_TYPE_ACTOR_INFORMATION;
- lacpdu->actor.info_length = sizeof(struct lacpdu_actor_partner_params);
- memcpy(&hdr->lacpdu.actor.port_params, &port->actor,
- sizeof(port->actor));
- agg = &mode_8023ad_ports[port->aggregator_port_id];
- ether_addr_copy(&agg->actor.system, &hdr->lacpdu.actor.port_params.system);
- lacpdu->actor.state = port->actor_state;
-
- /* PARTNER */
- lacpdu->partner.tlv_type_info = TLV_TYPE_PARTNER_INFORMATION;
- lacpdu->partner.info_length = sizeof(struct lacpdu_actor_partner_params);
- memcpy(&lacpdu->partner.port_params, &port->partner,
- sizeof(struct port_params));
- lacpdu->partner.state = port->partner_state;
-
- /* Other fields */
- lacpdu->tlv_type_collector_info = TLV_TYPE_COLLECTOR_INFORMATION;
- lacpdu->collector_info_length = 0x10;
- lacpdu->collector_max_delay = 0;
-
- lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION;
- lacpdu->terminator_length = 0;
-
- if (rte_ring_enqueue(port->tx_ring, lacp_pkt) == -ENOBUFS) {
- /* If the TX ring is full, drop the packet and free the mbuf.
- * Retransmission will happen on the next call. */
- rte_pktmbuf_free(lacp_pkt);
- set_warning_flags(port, WRN_TX_QUEUE_FULL);
- return;
- }
-
- MODE4_DEBUG("sending LACP frame\n");
- BOND_PRINT_LACP(lacpdu);
-
- timer_set(&port->tx_machine_timer, internals->mode4.tx_period_timeout);
- SM_FLAG_CLR(port, NTT);
-}
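-
-/* The frame queued above is a complete slow-protocols frame: an Ethernet
- * header with destination 01:80:C2:00:00:02 and ethertype ETHER_TYPE_SLOW,
- * followed by the LACPDU (actor TLV, partner TLV, collector TLV and
- * terminator) as laid out in struct lacpdu_header. */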
-
-/**
- * Function assigns port to aggregator.
- *
- * @param internals Pointer to the bond_dev_private structure.
- * @param slave_id Port to assign.
- */
-static void
-selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
-{
- struct port *agg, *port;
- uint8_t slaves_count, new_agg_id, i;
- uint8_t *slaves;
-
- slaves = internals->active_slaves;
- slaves_count = internals->active_slave_count;
- port = &mode_8023ad_ports[slave_id];
-
- /* Search for aggregator suitable for this port */
- for (i = 0; i < slaves_count; ++i) {
- agg = &mode_8023ad_ports[slaves[i]];
- /* Skip ports that are not aggregators */
- if (agg->aggregator_port_id != slaves[i])
- continue;
-
- /* Actor's system ID is not checked since all slave devices have the
- * same ID (MAC address). */
- if ((agg->actor.key == port->actor.key &&
- agg->partner.system_priority == port->partner.system_priority &&
- is_same_ether_addr(&agg->partner.system, &port->partner.system) == 1
- && (agg->partner.key == port->partner.key)) &&
- is_zero_ether_addr(&port->partner.system) != 1 &&
- (agg->actor.key &
- rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) {
-
- break;
- }
- }
-
- /* By default, the port uses itself as aggregator */
- if (i == slaves_count)
- new_agg_id = slave_id;
- else
- new_agg_id = slaves[i];
-
- if (new_agg_id != port->aggregator_port_id) {
- port->aggregator_port_id = new_agg_id;
-
- MODE4_DEBUG("-> SELECTED: ID=%3u\n"
- "\t%s aggregator ID=%3u\n",
- port->aggregator_port_id,
- port->aggregator_port_id == slave_id ?
- "aggregator not found, using default" : "aggregator found",
- port->aggregator_port_id);
- }
-
- port->selected = SELECTED;
-}
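
For readability, the aggregator-matching condition in the loop above can be read as a predicate. A minimal sketch (not part of the patch), assuming the same helpers and struct port layout used in this file:

    /* Sketch only: equivalent of the if() condition in selection_logic(). */
    static int can_share_aggregator(const struct port *agg, const struct port *port)
    {
        return agg->actor.key == port->actor.key &&
               agg->partner.system_priority == port->partner.system_priority &&
               is_same_ether_addr(&agg->partner.system, &port->partner.system) &&
               agg->partner.key == port->partner.key &&
               !is_zero_ether_addr(&port->partner.system) &&  /* partner known */
               (agg->actor.key &
                rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0; /* full duplex */
    }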
-
-/* Function maps DPDK speed to bonding speed stored in key field */
-static uint16_t
-link_speed_key(uint16_t speed) {
- uint16_t key_speed;
-
- switch (speed) {
- case ETH_LINK_SPEED_AUTONEG:
- key_speed = 0x00;
- break;
- case ETH_LINK_SPEED_10:
- key_speed = BOND_LINK_SPEED_KEY_10M;
- break;
- case ETH_LINK_SPEED_100:
- key_speed = BOND_LINK_SPEED_KEY_100M;
- break;
- case ETH_LINK_SPEED_1000:
- key_speed = BOND_LINK_SPEED_KEY_1000M;
- break;
- case ETH_LINK_SPEED_10G:
- key_speed = BOND_LINK_SPEED_KEY_10G;
- break;
- case ETH_LINK_SPEED_20G:
- key_speed = BOND_LINK_SPEED_KEY_20G;
- break;
- case ETH_LINK_SPEED_40G:
- key_speed = BOND_LINK_SPEED_KEY_40G;
- break;
- default:
- /* Unknown speed */
- key_speed = 0xFFFF;
- }
-
- return key_speed;
-}
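
The returned speed key occupies the upper bits of the actor key: the caller (bond_mode_8023ad_periodic_cb below) shifts it left by one and puts the duplex flag in bit 0. A minimal sketch of that derivation, assuming the constants from rte_eth_bond_8023ad_private.h:

    /* Sketch: how the actor key is derived from link state. */
    uint16_t key = link_speed_key(link_info.link_speed) << 1;
    if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
        key |= BOND_LINK_FULL_DUPLEX_KEY;  /* bit 0 = full-duplex capable */
    key = rte_cpu_to_be_16(key);           /* stored big-endian in port->actor.key */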
-
-static void
-bond_mode_8023ad_periodic_cb(void *arg)
-{
- struct rte_eth_dev *bond_dev = arg;
- struct bond_dev_private *internals = bond_dev->data->dev_private;
- struct port *port;
- struct rte_eth_link link_info;
- struct ether_addr slave_addr;
-
- void *pkt = NULL;
- uint16_t i, slave_id;
-
-
- /* Update link status on each port */
- for (i = 0; i < internals->active_slave_count; i++) {
- uint16_t key;
-
- slave_id = internals->active_slaves[i];
- rte_eth_link_get(slave_id, &link_info);
- rte_eth_macaddr_get(slave_id, &slave_addr);
-
- if (link_info.link_status != 0) {
- key = link_speed_key(link_info.link_speed) << 1;
- if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
- key |= BOND_LINK_FULL_DUPLEX_KEY;
- } else
- key = 0;
-
- port = &mode_8023ad_ports[slave_id];
-
- key = rte_cpu_to_be_16(key);
- if (key != port->actor.key) {
- if (!(key & rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)))
- set_warning_flags(port, WRN_NOT_LACP_CAPABLE);
-
- port->actor.key = key;
- SM_FLAG_SET(port, NTT);
- }
-
- if (!is_same_ether_addr(&port->actor.system, &slave_addr)) {
- ether_addr_copy(&slave_addr, &port->actor.system);
- if (port->aggregator_port_id == slave_id)
- SM_FLAG_SET(port, NTT);
- }
- }
-
- for (i = 0; i < internals->active_slave_count; i++) {
- slave_id = internals->active_slaves[i];
- port = &mode_8023ad_ports[slave_id];
-
- if ((port->actor.key &
- rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) {
-
- SM_FLAG_SET(port, BEGIN);
-
- /* LACP is disabled on half duplex or when the link is down */
- if (SM_FLAG(port, LACP_ENABLED)) {
- /* If port was enabled set it to BEGIN state */
- SM_FLAG_CLR(port, LACP_ENABLED);
- ACTOR_STATE_CLR(port, DISTRIBUTING);
- ACTOR_STATE_CLR(port, COLLECTING);
- }
-
- /* Skip this port processing */
- continue;
- }
-
- SM_FLAG_SET(port, LACP_ENABLED);
-
- /* Find LACP packets destined to this port. Do not check the subtype;
- * that was already done by the function that queued the packet. */
- if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
- struct rte_mbuf *lacp_pkt = pkt;
- struct lacpdu_header *lacp;
-
- lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
- RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
-
- /* This is LACP frame so pass it to rx_machine */
- rx_machine(internals, slave_id, &lacp->lacpdu);
- rte_pktmbuf_free(lacp_pkt);
- } else
- rx_machine(internals, slave_id, NULL);
-
- periodic_machine(internals, slave_id);
- mux_machine(internals, slave_id);
- tx_machine(internals, slave_id);
- selection_logic(internals, slave_id);
-
- SM_FLAG_CLR(port, BEGIN);
- show_warnings(slave_id);
- }
-
- rte_eal_alarm_set(internals->mode4.update_timeout_us,
- bond_mode_8023ad_periodic_cb, arg);
-}
-
-void
-bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
-{
- struct bond_dev_private *internals = bond_dev->data->dev_private;
-
- struct port *port = &mode_8023ad_ports[slave_id];
- struct port_params initial = {
- .system = { { 0 } },
- .system_priority = rte_cpu_to_be_16(0xFFFF),
- .key = rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY),
- .port_priority = rte_cpu_to_be_16(0x00FF),
- .port_number = 0,
- };
-
- char mem_name[RTE_ETH_NAME_MAX_LEN];
- uint8_t socket_id;
- unsigned element_size;
-
- /* Given slave must not be in the active list */
- RTE_VERIFY(find_slave_by_id(internals->active_slaves,
- internals->active_slave_count, slave_id) == internals->active_slave_count);
-
- memcpy(&port->actor, &initial, sizeof(struct port_params));
- /* The standard requires that the port ID be greater than 0.
- * Add 1 to get the corresponding port_number. */
- port->actor.port_number = rte_cpu_to_be_16((uint16_t)slave_id + 1);
-
- memcpy(&port->partner, &initial, sizeof(struct port_params));
-
- /* default states */
- port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED;
- port->partner_state = STATE_LACP_ACTIVE;
- port->sm_flags = SM_FLAGS_BEGIN;
-
- /* use this port as aggregator */
- port->aggregator_port_id = slave_id;
- rte_eth_promiscuous_enable(slave_id);
-
- timer_cancel(&port->warning_timer);
-
- if (port->mbuf_pool != NULL)
- return;
-
- RTE_VERIFY(port->rx_ring == NULL);
- RTE_VERIFY(port->tx_ring == NULL);
- socket_id = rte_eth_devices[slave_id].pci_dev->numa_node;
-
- element_size = sizeof(struct slow_protocol_frame) + sizeof(struct rte_mbuf)
- + RTE_PKTMBUF_HEADROOM;
-
- /* How big should the memory pool be? If the driver does not free packets
- * quickly enough, tx_machine will hit ENOMEM. For now allocate
- * (512 * max number of queued TX packets per slave) - 1 mbufs and hope
- * that is enough. */
- snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id);
- port->mbuf_pool = rte_mempool_create(mem_name,
- BOND_MODE_8023AX_SLAVE_TX_PKTS * 512 - 1,
- element_size,
- RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ? 32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
- sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init,
- NULL, rte_pktmbuf_init, NULL, socket_id, MEMPOOL_F_NO_SPREAD);
-
- /* Any memory allocation failure during initialization is critical because
- * the resources cannot be freed, so reinitialization is impossible. */
- if (port->mbuf_pool == NULL) {
- rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
- slave_id, mem_name, rte_strerror(rte_errno));
- }
-
- snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_rx", slave_id);
- port->rx_ring = rte_ring_create(mem_name,
- rte_align32pow2(BOND_MODE_8023AX_SLAVE_RX_PKTS), socket_id, 0);
-
- if (port->rx_ring == NULL) {
- rte_panic("Slave %u: Failed to create rx ring '%s': %s\n", slave_id,
- mem_name, rte_strerror(rte_errno));
- }
-
- /* TX ring is at least one pkt longer to make room for marker packet. */
- snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_tx", slave_id);
- port->tx_ring = rte_ring_create(mem_name,
- rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1), socket_id, 0);
-
- if (port->tx_ring == NULL) {
- rte_panic("Slave %u: Failed to create tx ring '%s': %s\n", slave_id,
- mem_name, rte_strerror(rte_errno));
- }
-}
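
rte_ring_create() requires a power-of-two slot count, which is why both ring sizes above are passed through rte_align32pow2(). A worked illustration (the constant's value of 8 is an assumption):

    /* If BOND_MODE_8023AX_SLAVE_TX_PKTS were 8, the extra marker slot makes
     * the request 9, and rte_align32pow2(9) rounds it up to 16 ring slots. */
    uint32_t tx_slots = rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1);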
-
-int
-bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev,
- uint8_t slave_id)
-{
- struct bond_dev_private *internals = bond_dev->data->dev_private;
- void *pkt = NULL;
- struct port *port;
- uint8_t i;
-
- /* Given slave must be in the active list */
- RTE_VERIFY(find_slave_by_id(internals->active_slaves,
- internals->active_slave_count, slave_id) < internals->active_slave_count);
-
- /* Exclude slave from transmit policy. If this slave is an aggregator,
- * mark all aggregated slaves unselected to force the selection logic
- * to pick a suitable aggregator for this port. */
- for (i = 0; i < internals->active_slave_count; i++) {
- port = &mode_8023ad_ports[internals->active_slaves[i]];
- if (port->aggregator_port_id != slave_id)
- continue;
-
- port->selected = UNSELECTED;
-
- /* Use default aggregator */
- port->aggregator_port_id = internals->active_slaves[i];
- }
-
- port = &mode_8023ad_ports[slave_id];
- port->selected = UNSELECTED;
- port->actor_state &= ~(STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
- STATE_COLLECTING);
-
- while (rte_ring_dequeue(port->rx_ring, &pkt) == 0)
- rte_pktmbuf_free((struct rte_mbuf *)pkt);
-
- while (rte_ring_dequeue(port->tx_ring, &pkt) == 0)
- rte_pktmbuf_free((struct rte_mbuf *)pkt);
- return 0;
-}
-
-void
-bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
-{
- struct bond_dev_private *internals = bond_dev->data->dev_private;
- struct ether_addr slave_addr;
- struct port *slave, *agg_slave;
- uint8_t slave_id, i, j;
-
- bond_mode_8023ad_stop(bond_dev);
-
- for (i = 0; i < internals->active_slave_count; i++) {
- slave_id = internals->active_slaves[i];
- slave = &mode_8023ad_ports[slave_id];
- rte_eth_macaddr_get(slave_id, &slave_addr);
-
- if (is_same_ether_addr(&slave_addr, &slave->actor.system))
- continue;
-
- ether_addr_copy(&slave_addr, &slave->actor.system);
- /* Do nothing if this port is not an aggregator. Otherwise set the
- * NTT flag on every port that uses this aggregator. */
- if (slave->aggregator_port_id != slave_id)
- continue;
-
- for (j = 0; j < internals->active_slave_count; j++) {
- agg_slave = &mode_8023ad_ports[internals->active_slaves[j]];
- if (agg_slave->aggregator_port_id == slave_id)
- SM_FLAG_SET(agg_slave, NTT);
- }
- }
-
- if (bond_dev->data->dev_started)
- bond_mode_8023ad_start(bond_dev);
-}
-
-void
-bond_mode_8023ad_conf_get(struct rte_eth_dev *dev,
- struct rte_eth_bond_8023ad_conf *conf)
-{
- struct bond_dev_private *internals = dev->data->dev_private;
- struct mode8023ad_private *mode4 = &internals->mode4;
- uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
-
- conf->fast_periodic_ms = mode4->fast_periodic_timeout / ms_ticks;
- conf->slow_periodic_ms = mode4->slow_periodic_timeout / ms_ticks;
- conf->short_timeout_ms = mode4->short_timeout / ms_ticks;
- conf->long_timeout_ms = mode4->long_timeout / ms_ticks;
- conf->aggregate_wait_timeout_ms = mode4->aggregate_wait_timeout / ms_ticks;
- conf->tx_period_ms = mode4->tx_period_timeout / ms_ticks;
- conf->update_timeout_ms = mode4->update_timeout_us / 1000;
-}
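
All mode-4 timeouts are stored internally in TSC ticks; conf_get above and setup below convert between milliseconds and ticks with the same factor. A minimal sketch of the conversion (timeout_ms/timeout_ticks are placeholder names):

    uint64_t ms_ticks = rte_get_tsc_hz() / 1000;  /* TSC ticks per millisecond */
    uint64_t ticks    = timeout_ms * ms_ticks;    /* ms -> ticks (setup) */
    uint64_t ms       = timeout_ticks / ms_ticks; /* ticks -> ms (conf_get) */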
-
-void
-bond_mode_8023ad_setup(struct rte_eth_dev *dev,
- struct rte_eth_bond_8023ad_conf *conf)
-{
- struct rte_eth_bond_8023ad_conf def_conf;
- struct bond_dev_private *internals = dev->data->dev_private;
- struct mode8023ad_private *mode4 = &internals->mode4;
- uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
-
- if (conf == NULL) {
- conf = &def_conf;
- conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS;
- conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS;
- conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS;
- conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS;
- conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS;
- conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS;
- conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS;
- conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS;
- }
-
- mode4->fast_periodic_timeout = conf->fast_periodic_ms * ms_ticks;
- mode4->slow_periodic_timeout = conf->slow_periodic_ms * ms_ticks;
- mode4->short_timeout = conf->short_timeout_ms * ms_ticks;
- mode4->long_timeout = conf->long_timeout_ms * ms_ticks;
- mode4->aggregate_wait_timeout = conf->aggregate_wait_timeout_ms * ms_ticks;
- mode4->tx_period_timeout = conf->tx_period_ms * ms_ticks;
- mode4->rx_marker_timeout = conf->rx_marker_period_ms * ms_ticks;
- mode4->update_timeout_us = conf->update_timeout_ms * 1000;
-}
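
A hypothetical usage sketch of the public wrappers defined later in this file (rte_eth_bond_8023ad_conf_get/rte_eth_bond_8023ad_setup); bond_port_id and the 500 ms period are assumptions for illustration:

    struct rte_eth_bond_8023ad_conf conf;

    rte_eth_bond_8023ad_conf_get(bond_port_id, &conf); /* start from current values */
    conf.tx_period_ms = 500;                           /* example tweak */
    if (rte_eth_bond_8023ad_setup(bond_port_id, &conf) != 0)
        printf("invalid mode 4 configuration\n");      /* failed sanity checks */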
-
-int
-bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
-{
- struct bond_dev_private *internals = bond_dev->data->dev_private;
- uint16_t i;
-
- for (i = 0; i < internals->active_slave_count; i++)
- bond_mode_8023ad_activate_slave(bond_dev, i);
-
- return 0;
-}
-
-int
-bond_mode_8023ad_start(struct rte_eth_dev *bond_dev)
-{
- return rte_eal_alarm_set(BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000,
- &bond_mode_8023ad_periodic_cb, bond_dev);
-}
-
-void
-bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev)
-{
- rte_eal_alarm_cancel(&bond_mode_8023ad_periodic_cb, bond_dev);
-}
-
-void
-bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
- uint8_t slave_id, struct rte_mbuf *pkt)
-{
- struct mode8023ad_private *mode4 = &internals->mode4;
- struct port *port = &mode_8023ad_ports[slave_id];
- struct marker_header *m_hdr;
- uint64_t marker_timer, old_marker_timer;
- int retval;
- uint8_t wrn, subtype;
- /* If the packet is a marker, send the response now by reusing the given
- * packet and updating only the source MAC; the destination MAC is
- * multicast, so leave it. Other frames are handled later by the state
- * machines. */
- subtype = rte_pktmbuf_mtod(pkt,
- struct slow_protocol_frame *)->slow_protocol.subtype;
-
- if (subtype == SLOW_SUBTYPE_MARKER) {
- m_hdr = rte_pktmbuf_mtod(pkt, struct marker_header *);
-
- if (likely(m_hdr->marker.tlv_type_marker != MARKER_TLV_TYPE_INFO)) {
- wrn = WRN_UNKNOWN_MARKER_TYPE;
- goto free_out;
- }
-
- /* Set up the marker timer. Do it in a loop to handle concurrent access. */
- do {
- old_marker_timer = port->rx_marker_timer;
- if (!timer_is_expired(&old_marker_timer)) {
- wrn = WRN_RX_MARKER_TO_FAST;
- goto free_out;
- }
-
- timer_set(&marker_timer, mode4->rx_marker_timeout);
- retval = rte_atomic64_cmpset(&port->rx_marker_timer,
- old_marker_timer, marker_timer);
- } while (unlikely(retval == 0));
-
- m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP;
- rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr);
-
- if (unlikely(rte_ring_enqueue(port->tx_ring, pkt) == -ENOBUFS)) {
- /* reset timer */
- port->rx_marker_timer = 0;
- wrn = WRN_TX_QUEUE_FULL;
- goto free_out;
- }
- } else if (likely(subtype == SLOW_SUBTYPE_LACP)) {
- if (unlikely(rte_ring_enqueue(port->rx_ring, pkt) == -ENOBUFS)) {
- /* If the RX ring is full, free the LACPDU message and drop the packet */
- wrn = WRN_RX_QUEUE_FULL;
- goto free_out;
- }
- } else {
- wrn = WRN_UNKNOWN_SLOW_TYPE;
- goto free_out;
- }
-
- return;
-
-free_out:
- set_warning_flags(port, wrn);
- rte_pktmbuf_free(pkt);
-}
-
-int
-rte_eth_bond_8023ad_conf_get(uint8_t port_id,
- struct rte_eth_bond_8023ad_conf *conf)
-{
- struct rte_eth_dev *bond_dev;
-
- if (valid_bonded_port_id(port_id) != 0)
- return -EINVAL;
-
- if (conf == NULL)
- return -EINVAL;
-
- bond_dev = &rte_eth_devices[port_id];
- bond_mode_8023ad_conf_get(bond_dev, conf);
- return 0;
-}
-
-int
-rte_eth_bond_8023ad_setup(uint8_t port_id,
- struct rte_eth_bond_8023ad_conf *conf)
-{
- struct rte_eth_dev *bond_dev;
-
- if (valid_bonded_port_id(port_id) != 0)
- return -EINVAL;
-
- if (conf != NULL) {
- /* Basic sanity check */
- if (conf->slow_periodic_ms == 0 ||
- conf->fast_periodic_ms >= conf->slow_periodic_ms ||
- conf->long_timeout_ms == 0 ||
- conf->short_timeout_ms >= conf->long_timeout_ms ||
- conf->aggregate_wait_timeout_ms == 0 ||
- conf->tx_period_ms == 0 ||
- conf->rx_marker_period_ms == 0 ||
- conf->update_timeout_ms == 0) {
- RTE_LOG(ERR, PMD, "given mode 4 configuration is invalid\n");
- return -EINVAL;
- }
- }
-
- bond_dev = &rte_eth_devices[port_id];
- bond_mode_8023ad_setup(bond_dev, conf);
-
- return 0;
-}
-
-int
-rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
- struct rte_eth_bond_8023ad_slave_info *info)
-{
- struct rte_eth_dev *bond_dev;
- struct bond_dev_private *internals;
- struct port *port;
-
- if (info == NULL || valid_bonded_port_id(port_id) != 0 ||
- rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
- return -EINVAL;
-
- bond_dev = &rte_eth_devices[port_id];
-
- internals = bond_dev->data->dev_private;
- if (find_slave_by_id(internals->active_slaves,
- internals->active_slave_count, slave_id) ==
- internals->active_slave_count)
- return -EINVAL;
-
- port = &mode_8023ad_ports[slave_id];
- info->selected = port->selected;
-
- info->actor_state = port->actor_state;
- rte_memcpy(&info->actor, &port->actor, sizeof(port->actor));
-
- info->partner_state = port->partner_state;
- rte_memcpy(&info->partner, &port->partner, sizeof(port->partner));
-
- info->agg_port_id = port->aggregator_port_id;
- return 0;
-}
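
A hypothetical usage of this query API (port numbers and the print format are assumptions):

    struct rte_eth_bond_8023ad_slave_info info;

    if (rte_eth_bond_8023ad_slave_info(bond_port_id, slave_id, &info) == 0)
        printf("slave %u: selected=%d aggregator=%u actor_state=0x%02x\n",
               slave_id, info.selected, info.agg_port_id, info.actor_state);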
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_api.c b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_api.c
deleted file mode 100755
index c2a99a3b..00000000
--- a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_api.c
+++ /dev/null
@@ -1,822 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-
-#include <rte_mbuf.h>
-#include <rte_malloc.h>
-#include <rte_ethdev.h>
-#include <rte_tcp.h>
-
-#include "rte_eth_bond.h"
-#include "rte_eth_bond_private.h"
-#include "rte_eth_bond_8023ad_private.h"
-
-#define DEFAULT_POLLING_INTERVAL_10_MS (10)
-
-int
-valid_bonded_ethdev(struct rte_eth_dev *eth_dev)
-{
- size_t len;
-
- /* Check valid pointer */
- if (eth_dev->driver->pci_drv.name == NULL || driver_name == NULL)
- return -1;
-
- /* Check string lengths are equal */
- len = strlen(driver_name);
- if (strlen(eth_dev->driver->pci_drv.name) != len)
- return -1;
-
- /* Compare strings */
- return strncmp(eth_dev->driver->pci_drv.name, driver_name, len);
-}
-
-int
-valid_port_id(uint8_t port_id)
-{
- /* Verify that port id is valid */
- int ethdev_count = rte_eth_dev_count();
- if (port_id >= ethdev_count) {
- RTE_BOND_LOG(ERR, "Port Id %d is greater than rte_eth_dev_count %d",
- port_id, ethdev_count);
- return -1;
- }
-
- return 0;
-}
-
-int
-valid_bonded_port_id(uint8_t port_id)
-{
- /* Verify that the port id is valid */
- if (valid_port_id(port_id))
- return -1;
-
- /* Verify that bonded_port_id refers to a bonded port */
- if (valid_bonded_ethdev(&rte_eth_devices[port_id])) {
- RTE_BOND_LOG(ERR, "Specified port Id %d is not a bonded eth_dev device",
- port_id);
- return -1;
- }
-
- return 0;
-}
-
-int
-valid_slave_port_id(uint8_t port_id)
-{
- /* Verify that the port id is valid */
- if (valid_port_id(port_id))
- return -1;
-
- /* Verify that port_id refers to a non-bonded port */
- if (!valid_bonded_ethdev(&rte_eth_devices[port_id]))
- return -1;
-
- return 0;
-}
-
-void
-activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
-{
- struct bond_dev_private *internals = eth_dev->data->dev_private;
-
- if (internals->mode == BONDING_MODE_8023AD)
- bond_mode_8023ad_activate_slave(eth_dev, port_id);
-
- RTE_VERIFY(internals->active_slave_count <
- (RTE_DIM(internals->active_slaves) - 1));
-
- internals->active_slaves[internals->active_slave_count] = port_id;
- internals->active_slave_count++;
-}
-
-void
-deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
-{
- uint8_t slave_pos;
- struct bond_dev_private *internals = eth_dev->data->dev_private;
- uint8_t active_count = internals->active_slave_count;
-
- if (internals->mode == BONDING_MODE_8023AD) {
- bond_mode_8023ad_stop(eth_dev);
- bond_mode_8023ad_deactivate_slave(eth_dev, port_id);
- }
-
- slave_pos = find_slave_by_id(internals->active_slaves, active_count,
- port_id);
-
- /* If the slave was not at the end of the list, shift the remaining
- * active slaves up the active array */
- if (slave_pos < active_count) {
- active_count--;
- memmove(internals->active_slaves + slave_pos,
- internals->active_slaves + slave_pos + 1,
- (active_count - slave_pos) *
- sizeof(internals->active_slaves[0]));
- }
-
- RTE_VERIFY(active_count < RTE_DIM(internals->active_slaves));
- internals->active_slave_count = active_count;
-
- if (eth_dev->data->dev_started && internals->mode == BONDING_MODE_8023AD)
- bond_mode_8023ad_start(eth_dev);
-}
-
-uint8_t
-number_of_sockets(void)
-{
- int sockets = 0;
- int i;
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
-
- for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
- if (sockets < ms[i].socket_id)
- sockets = ms[i].socket_id;
- }
-
- /* Number of sockets = maximum socket_id + 1 */
- return ++sockets;
-}
-
-const char *driver_name = "Link Bonding PMD";
-
-int
-rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
-{
- struct rte_pci_device *pci_dev = NULL;
- struct bond_dev_private *internals = NULL;
- struct rte_eth_dev *eth_dev = NULL;
- struct eth_driver *eth_drv = NULL;
- struct rte_pci_driver *pci_drv = NULL;
- struct rte_pci_id *pci_id_table = NULL;
- /* now do all data allocation - for eth_dev structure, dummy pci driver
- * and internal (private) data
- */
-
- if (name == NULL) {
- RTE_BOND_LOG(ERR, "Invalid name specified");
- goto err;
- }
-
- if (socket_id >= number_of_sockets()) {
- RTE_BOND_LOG(ERR,
- "Invalid socket id specified to create bonded device on.");
- goto err;
- }
-
- pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
- if (pci_dev == NULL) {
- RTE_BOND_LOG(ERR, "Unable to malloc pci dev on socket");
- goto err;
- }
-
- eth_drv = rte_zmalloc_socket(name, sizeof(*eth_drv), 0, socket_id);
- if (eth_drv == NULL) {
- RTE_BOND_LOG(ERR, "Unable to malloc eth_drv on socket");
- goto err;
- }
-
- pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
- if (pci_drv == NULL) {
- RTE_BOND_LOG(ERR, "Unable to malloc pci_drv on socket");
- goto err;
- }
- pci_id_table = rte_zmalloc_socket(name, sizeof(*pci_id_table), 0, socket_id);
- if (pci_id_table == NULL) {
- RTE_BOND_LOG(ERR, "Unable to malloc pci_id_table on socket");
- goto err;
- }
-
- pci_drv->id_table = pci_id_table;
-
- pci_drv->id_table->device_id = PCI_ANY_ID;
- pci_drv->id_table->subsystem_device_id = PCI_ANY_ID;
- pci_drv->id_table->vendor_id = PCI_ANY_ID;
- pci_drv->id_table->subsystem_vendor_id = PCI_ANY_ID;
-
- pci_drv->drv_flags = RTE_PCI_DRV_INTR_LSC;
-
- internals = rte_zmalloc_socket(name, sizeof(*internals), 0, socket_id);
- if (internals == NULL) {
- RTE_BOND_LOG(ERR, "Unable to malloc internals on socket");
- goto err;
- }
-
- /* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(name);
- if (eth_dev == NULL) {
- RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
- goto err;
- }
-
- pci_dev->numa_node = socket_id;
- pci_drv->name = driver_name;
-
- eth_drv->pci_drv = (struct rte_pci_driver)(*pci_drv);
- eth_dev->driver = eth_drv;
-
- eth_dev->data->dev_private = internals;
- eth_dev->data->nb_rx_queues = (uint16_t)1;
- eth_dev->data->nb_tx_queues = (uint16_t)1;
-
- TAILQ_INIT(&(eth_dev->callbacks));
-
- eth_dev->data->dev_link.link_status = 0;
-
- eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
- socket_id);
-
- eth_dev->data->dev_started = 0;
- eth_dev->data->promiscuous = 0;
- eth_dev->data->scattered_rx = 0;
- eth_dev->data->all_multicast = 0;
-
- eth_dev->dev_ops = &default_dev_ops;
- eth_dev->pci_dev = pci_dev;
-
- rte_spinlock_init(&internals->lock);
-
- internals->port_id = eth_dev->data->port_id;
- internals->mode = BONDING_MODE_INVALID;
- internals->current_primary_port = 0;
- internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
- internals->user_defined_mac = 0;
- internals->link_props_set = 0;
-
- internals->link_status_polling_enabled = 0;
-
- internals->link_status_polling_interval_ms = DEFAULT_POLLING_INTERVAL_10_MS;
- internals->link_down_delay_ms = 0;
- internals->link_up_delay_ms = 0;
-
- internals->slave_count = 0;
- internals->active_slave_count = 0;
- internals->rx_offload_capa = 0;
- internals->tx_offload_capa = 0;
-
- memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
- memset(internals->slaves, 0, sizeof(internals->slaves));
-
- /* Set mode 4 default configuration */
- bond_mode_8023ad_setup(eth_dev, NULL);
- if (bond_ethdev_mode_set(eth_dev, mode)) {
- RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode too %d",
- eth_dev->data->port_id, mode);
- goto err;
- }
-
- return eth_dev->data->port_id;
-
-err:
- if (pci_dev)
- rte_free(pci_dev);
- if (pci_drv)
- rte_free(pci_drv);
- if (pci_id_table)
- rte_free(pci_id_table);
- if (eth_drv)
- rte_free(eth_drv);
- if (internals)
- rte_free(internals);
- return -1;
-}
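
A hypothetical creation sequence using this API together with rte_eth_bond_slave_add() below (the device name and port numbers are assumptions):

    int bond_port = rte_eth_bond_create("bond0", BONDING_MODE_8023AD, 0 /* socket */);

    if (bond_port >= 0) {
        rte_eth_bond_slave_add(bond_port, 0); /* physical port 0 */
        rte_eth_bond_slave_add(bond_port, 1); /* physical port 1 */
    }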
-
-static int
-__eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
-{
- struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
- struct bond_dev_private *internals;
- struct bond_dev_private *temp_internals;
- struct rte_eth_link link_props;
- struct rte_eth_dev_info dev_info;
-
- int i, j;
-
- if (valid_slave_port_id(slave_port_id) != 0)
- return -1;
-
- bonded_eth_dev = &rte_eth_devices[bonded_port_id];
- internals = bonded_eth_dev->data->dev_private;
-
- /* Verify that new slave device is not already a slave of another
- * bonded device */
- for (i = rte_eth_dev_count()-1; i >= 0; i--) {
- if (valid_bonded_ethdev(&rte_eth_devices[i]) == 0) {
- temp_internals = rte_eth_devices[i].data->dev_private;
-
- for (j = 0; j < temp_internals->slave_count; j++) {
- /* Device already a slave of a bonded device */
- if (temp_internals->slaves[j].port_id == slave_port_id) {
- RTE_BOND_LOG(ERR, "Slave port %d is already a slave",
- slave_port_id);
- return -1;
- }
- }
- }
- }
-
- slave_eth_dev = &rte_eth_devices[slave_port_id];
-
- /* Add slave details to bonded device */
- slave_add(internals, slave_eth_dev);
-
- memset(&dev_info, 0, sizeof(dev_info));
- rte_eth_dev_info_get(slave_port_id, &dev_info);
-
- if (internals->slave_count < 1) {
- /* if MAC is not user defined then use the MAC of the first slave added
- * to the bonded device */
- if (!internals->user_defined_mac)
- mac_address_set(bonded_eth_dev, slave_eth_dev->data->mac_addrs);
-
- /* Inherit eth dev link properties from first slave */
- link_properties_set(bonded_eth_dev,
- &(slave_eth_dev->data->dev_link));
-
- /* Make primary slave */
- internals->primary_port = slave_port_id;
-
- /* Take the first dev's offload capabilities */
- internals->rx_offload_capa = dev_info.rx_offload_capa;
- internals->tx_offload_capa = dev_info.tx_offload_capa;
-
- } else {
- /* Check that the slave link properties are supported; if props are set,
- * all slaves must be the same */
- if (internals->link_props_set) {
- if (link_properties_valid(&(bonded_eth_dev->data->dev_link),
- &(slave_eth_dev->data->dev_link))) {
- RTE_BOND_LOG(ERR,
- "Slave port %d link speed/duplex not supported",
- slave_port_id);
- return -1;
- }
- } else {
- link_properties_set(bonded_eth_dev,
- &(slave_eth_dev->data->dev_link));
- }
- internals->rx_offload_capa &= dev_info.rx_offload_capa;
- internals->tx_offload_capa &= dev_info.tx_offload_capa;
- }
-
- internals->slave_count++;
-
- /* Update all slave devices' MACs */
- mac_address_slaves_update(bonded_eth_dev);
-
- if (bonded_eth_dev->data->dev_started) {
- if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) {
- RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d",
- slave_port_id);
- return -1;
- }
- }
-
- /* Register link status change callback with the bonded device pointer as
- * argument */
- rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
- bond_ethdev_lsc_event_callback, &bonded_eth_dev->data->port_id);
-
- /* If bonded device is started then we can add the slave to our active
- * slave array */
- if (bonded_eth_dev->data->dev_started) {
- rte_eth_link_get_nowait(slave_port_id, &link_props);
-
- if (link_props.link_status == 1)
- activate_slave(bonded_eth_dev, slave_port_id);
- }
- return 0;
-
-}
-
-int
-rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id)
-{
- struct rte_eth_dev *bonded_eth_dev;
- struct bond_dev_private *internals;
-
- int retval;
-
- /* Verify that the port ids are valid bonded and slave ports */
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- bonded_eth_dev = &rte_eth_devices[bonded_port_id];
- internals = bonded_eth_dev->data->dev_private;
-
- rte_spinlock_lock(&internals->lock);
-
- retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id);
-
- rte_spinlock_unlock(&internals->lock);
-
- return retval;
-}
-
-static int
-__eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
-{
- struct rte_eth_dev *bonded_eth_dev;
- struct bond_dev_private *internals;
-
- int i, slave_idx;
-
- if (valid_slave_port_id(slave_port_id) != 0)
- return -1;
-
- bonded_eth_dev = &rte_eth_devices[bonded_port_id];
- internals = bonded_eth_dev->data->dev_private;
-
- /* first remove from active slave list */
- slave_idx = find_slave_by_id(internals->active_slaves,
- internals->active_slave_count, slave_port_id);
-
- if (slave_idx < internals->active_slave_count)
- deactivate_slave(bonded_eth_dev, slave_port_id);
-
- slave_idx = -1;
- /* now find in slave list */
- for (i = 0; i < internals->slave_count; i++)
- if (internals->slaves[i].port_id == slave_port_id) {
- slave_idx = i;
- break;
- }
-
- if (slave_idx < 0) {
- RTE_BOND_LOG(ERR, "Couldn't find slave in port list, slave count %d",
- internals->slave_count);
- return -1;
- }
-
- /* Unregister link status change callback with the bonded device pointer
- * as argument */
- rte_eth_dev_callback_unregister(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
- bond_ethdev_lsc_event_callback,
- &rte_eth_devices[bonded_port_id].data->port_id);
-
- /* Restore original MAC address of slave device */
- mac_address_set(&rte_eth_devices[slave_port_id],
- &(internals->slaves[slave_idx].persisted_mac_addr));
-
- slave_remove(internals, &rte_eth_devices[slave_port_id]);
-
- /* the first slave in the active list becomes the primary by default;
- * otherwise use the first device in the list */
- if (internals->current_primary_port == slave_port_id) {
- if (internals->active_slave_count > 0)
- internals->current_primary_port = internals->active_slaves[0];
- else if (internals->slave_count > 0)
- internals->current_primary_port = internals->slaves[0].port_id;
- else
- internals->primary_port = 0;
- }
-
- if (internals->active_slave_count < 1) {
- /* reset device link properties as no slaves are active */
- link_properties_reset(&rte_eth_devices[bonded_port_id]);
-
- /* if no slaves are attached to the bonded device any longer and the MAC
- * is not user defined, then clear the MAC of the bonded device as it will
- * be reset when a new slave is added */
- if (internals->slave_count < 1 && !internals->user_defined_mac)
- memset(rte_eth_devices[bonded_port_id].data->mac_addrs, 0,
- sizeof(*(rte_eth_devices[bonded_port_id].data->mac_addrs)));
- }
- if (internals->slave_count == 0) {
- internals->rx_offload_capa = 0;
- internals->tx_offload_capa = 0;
- }
- return 0;
-}
-
-int
-rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id)
-{
- struct rte_eth_dev *bonded_eth_dev;
- struct bond_dev_private *internals;
- int retval;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- bonded_eth_dev = &rte_eth_devices[bonded_port_id];
- internals = bonded_eth_dev->data->dev_private;
-
- rte_spinlock_lock(&internals->lock);
-
- retval = __eth_bond_slave_remove_lock_free(bonded_port_id, slave_port_id);
-
- rte_spinlock_unlock(&internals->lock);
-
- return retval;
-}
-
-int
-rte_eth_bond_mode_set(uint8_t bonded_port_id, uint8_t mode)
-{
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- return bond_ethdev_mode_set(&rte_eth_devices[bonded_port_id], mode);
-}
-
-int
-rte_eth_bond_mode_get(uint8_t bonded_port_id)
-{
- struct bond_dev_private *internals;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
- return internals->mode;
-}
-
-int
-rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id)
-{
- struct bond_dev_private *internals;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- if (valid_slave_port_id(slave_port_id) != 0)
- return -1;
-
- internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
- internals->user_defined_primary_port = 1;
- internals->primary_port = slave_port_id;
-
- bond_ethdev_primary_set(internals, slave_port_id);
-
- return 0;
-}
-
-int
-rte_eth_bond_primary_get(uint8_t bonded_port_id)
-{
- struct bond_dev_private *internals;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
- if (internals->slave_count < 1)
- return -1;
-
- return internals->current_primary_port;
-}
-
-int
-rte_eth_bond_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], uint8_t len)
-{
- struct bond_dev_private *internals;
- uint8_t i;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- if (slaves == NULL)
- return -1;
-
- internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
- if (internals->slave_count > len)
- return -1;
-
- for (i = 0; i < internals->slave_count; i++)
- slaves[i] = internals->slaves[i].port_id;
-
- return internals->slave_count;
-}
-
-int
-rte_eth_bond_active_slaves_get(uint8_t bonded_port_id, uint8_t slaves[],
- uint8_t len)
-{
- struct bond_dev_private *internals;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- if (slaves == NULL)
- return -1;
-
- internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
- if (internals->active_slave_count > len)
- return -1;
-
- memcpy(slaves, internals->active_slaves, internals->active_slave_count);
-
- return internals->active_slave_count;
-}
-
-int
-rte_eth_bond_mac_address_set(uint8_t bonded_port_id,
- struct ether_addr *mac_addr)
-{
- struct rte_eth_dev *bonded_eth_dev;
- struct bond_dev_private *internals;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- bonded_eth_dev = &rte_eth_devices[bonded_port_id];
- internals = bonded_eth_dev->data->dev_private;
-
- /* Set MAC Address of Bonded Device */
- if (mac_address_set(bonded_eth_dev, mac_addr))
- return -1;
-
- internals->user_defined_mac = 1;
-
- /* Update all slave devices' MACs */
- if (internals->slave_count > 0)
- return mac_address_slaves_update(bonded_eth_dev);
-
- return 0;
-}
-
-int
-rte_eth_bond_mac_address_reset(uint8_t bonded_port_id)
-{
- struct rte_eth_dev *bonded_eth_dev;
- struct bond_dev_private *internals;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- bonded_eth_dev = &rte_eth_devices[bonded_port_id];
- internals = bonded_eth_dev->data->dev_private;
-
- internals->user_defined_mac = 0;
-
- if (internals->slave_count > 0) {
- /* Set MAC Address of Bonded Device */
- if (mac_address_set(bonded_eth_dev,
- &internals->slaves[internals->primary_port].persisted_mac_addr)
- != 0) {
- RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device");
- return -1;
- }
- /* Update all slave devices MAC addresses */
- return mac_address_slaves_update(bonded_eth_dev);
- }
- /* No need to update anything as no slaves present */
- return 0;
-}
-
-int
-rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy)
-{
- struct bond_dev_private *internals;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
- switch (policy) {
- case BALANCE_XMIT_POLICY_LAYER2:
- case BALANCE_XMIT_POLICY_LAYER23:
- case BALANCE_XMIT_POLICY_LAYER34:
- internals->balance_xmit_policy = policy;
- break;
-
- default:
- return -1;
- }
- return 0;
-}
-
-int
-rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id)
-{
- struct bond_dev_private *internals;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
- return internals->balance_xmit_policy;
-}
-
-int
-rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id, uint32_t internal_ms)
-{
- struct bond_dev_private *internals;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- internals = rte_eth_devices[bonded_port_id].data->dev_private;
- internals->link_status_polling_interval_ms = internal_ms;
-
- return 0;
-}
-
-int
-rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id)
-{
- struct bond_dev_private *internals;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
- return internals->link_status_polling_interval_ms;
-}
-
-int
-rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
-
-{
- struct bond_dev_private *internals;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- internals = rte_eth_devices[bonded_port_id].data->dev_private;
- internals->link_down_delay_ms = delay_ms;
-
- return 0;
-}
-
-int
-rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id)
-{
- struct bond_dev_private *internals;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
- return internals->link_down_delay_ms;
-}
-
-int
-rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
-
-{
- struct bond_dev_private *internals;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- internals = rte_eth_devices[bonded_port_id].data->dev_private;
- internals->link_up_delay_ms = delay_ms;
-
- return 0;
-}
-
-int
-rte_eth_bond_link_up_prop_delay_get(uint8_t bonded_port_id)
-{
- struct bond_dev_private *internals;
-
- if (valid_bonded_port_id(bonded_port_id) != 0)
- return -1;
-
- internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
- return internals->link_up_delay_ms;
-}
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_args.c b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_args.c
deleted file mode 100755
index ca4de38f..00000000
--- a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_args.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <rte_devargs.h>
-#include <rte_kvargs.h>
-
-#include <cmdline_parse.h>
-#include <cmdline_parse_etheraddr.h>
-
-#include "rte_eth_bond.h"
-#include "rte_eth_bond_private.h"
-
-const char *pmd_bond_init_valid_arguments[] = {
- PMD_BOND_SLAVE_PORT_KVARG,
- PMD_BOND_PRIMARY_SLAVE_KVARG,
- PMD_BOND_MODE_KVARG,
- PMD_BOND_XMIT_POLICY_KVARG,
- PMD_BOND_SOCKET_ID_KVARG,
- PMD_BOND_MAC_ADDR_KVARG,
-
- NULL
-};
-
-static inline int
-find_port_id_by_pci_addr(const struct rte_pci_addr *pci_addr)
-{
- struct rte_pci_addr *eth_pci_addr;
- unsigned i;
-
- for (i = 0; i < rte_eth_dev_count(); i++) {
-
- if (rte_eth_devices[i].pci_dev == NULL)
- continue;
-
- eth_pci_addr = &(rte_eth_devices[i].pci_dev->addr);
-
- if (pci_addr->bus == eth_pci_addr->bus &&
- pci_addr->devid == eth_pci_addr->devid &&
- pci_addr->domain == eth_pci_addr->domain &&
- pci_addr->function == eth_pci_addr->function)
- return i;
- }
- return -1;
-}
-
-static inline int
-find_port_id_by_dev_name(const char *name)
-{
- unsigned i;
-
- for (i = 0; i < rte_eth_dev_count(); i++) {
- if (rte_eth_devices[i].data == NULL)
- continue;
-
- if (strcmp(rte_eth_devices[i].data->name, name) == 0)
- return i;
- }
- return -1;
-}
-
-/**
- * Parses a port identifier string to a port id: first as a PCI address,
- * then as a device name, and finally as a plain port id.
- */
-static inline int
-parse_port_id(const char *port_str)
-{
- struct rte_pci_addr dev_addr;
- int port_id;
-
- /* try parsing as pci address, physical devices */
- if (eal_parse_pci_DomBDF(port_str, &dev_addr) == 0) {
- port_id = find_port_id_by_pci_addr(&dev_addr);
- if (port_id < 0)
- return -1;
- } else {
- /* try parsing as device name, virtual devices */
- port_id = find_port_id_by_dev_name(port_str);
- if (port_id < 0) {
- char *end;
- errno = 0;
-
- /* try parsing as port id */
- port_id = strtol(port_str, &end, 10);
- if (*end != 0 || errno != 0)
- return -1;
- }
- }
-
- if (port_id < 0 || port_id > RTE_MAX_ETHPORTS) {
- RTE_BOND_LOG(ERR, "Slave port specified (%s) outside expected range",
- port_str);
- return -1;
- }
- return port_id;
-}
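
The three accepted identifier forms, as a sketch (all values are hypothetical):

    int a = parse_port_id("0000:02:00.0"); /* PCI address: domain:bus:devid.function */
    int b = parse_port_id("eth_ring0");    /* virtual device name */
    int c = parse_port_id("3");            /* plain numeric port id */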
-
-int
-bond_ethdev_parse_slave_port_kvarg(const char *key __rte_unused,
- const char *value, void *extra_args)
-{
- struct bond_ethdev_slave_ports *slave_ports;
-
- if (value == NULL || extra_args == NULL)
- return -1;
-
- slave_ports = extra_args;
-
- if (strcmp(key, PMD_BOND_SLAVE_PORT_KVARG) == 0) {
- int port_id = parse_port_id(value);
- if (port_id < 0) {
- RTE_BOND_LOG(ERR, "Invalid slave port value (%s) specified", value);
- return -1;
- } else
- slave_ports->slaves[slave_ports->slave_count++] =
- (uint8_t)port_id;
- }
- return 0;
-}
-
-int
-bond_ethdev_parse_slave_mode_kvarg(const char *key __rte_unused,
- const char *value, void *extra_args)
-{
- uint8_t *mode;
- char *endptr;
-
- if (value == NULL || extra_args == NULL)
- return -1;
-
- mode = extra_args;
-
- errno = 0;
- *mode = strtol(value, &endptr, 10);
- if (*endptr != 0 || errno != 0)
- return -1;
-
- /* validate mode value */
- switch (*mode) {
- case BONDING_MODE_ROUND_ROBIN:
- case BONDING_MODE_ACTIVE_BACKUP:
- case BONDING_MODE_BALANCE:
-#ifdef RTE_MBUF_REFCNT
- case BONDING_MODE_BROADCAST:
-#endif
- case BONDING_MODE_8023AD:
- case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
- return 0;
- default:
- RTE_BOND_LOG(ERR, "Invalid slave mode value (%s) specified", value);
- return -1;
- }
-}
-
-int
-bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused,
- const char *value, void *extra_args)
-{
- int socket_id;
- char *endptr;
-
- if (value == NULL || extra_args == NULL)
- return -1;
-
- errno = 0;
- socket_id = (uint8_t)strtol(value, &endptr, 10);
- if (*endptr != 0 || errno != 0)
- return -1;
-
- /* validate socket id value */
- if (socket_id >= 0 && socket_id < number_of_sockets()) {
- *(uint8_t *)extra_args = (uint8_t)socket_id;
- return 0;
- }
- return -1;
-}
-
-int
-bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused,
- const char *value, void *extra_args)
-{
- int primary_slave_port_id;
-
- if (value == NULL || extra_args == NULL)
- return -1;
-
- primary_slave_port_id = parse_port_id(value);
- if (primary_slave_port_id < 0)
- return -1;
-
- *(uint8_t *)extra_args = (uint8_t)primary_slave_port_id;
-
- return 0;
-}
-
-int
-bond_ethdev_parse_balance_xmit_policy_kvarg(const char *key __rte_unused,
- const char *value, void *extra_args)
-{
- uint8_t *xmit_policy;
-
- if (value == NULL || extra_args == NULL)
- return -1;
-
- xmit_policy = extra_args;
-
- if (strcmp(PMD_BOND_XMIT_POLICY_LAYER2_KVARG, value) == 0)
- *xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
- else if (strcmp(PMD_BOND_XMIT_POLICY_LAYER23_KVARG, value) == 0)
- *xmit_policy = BALANCE_XMIT_POLICY_LAYER23;
- else if (strcmp(PMD_BOND_XMIT_POLICY_LAYER34_KVARG, value) == 0)
- *xmit_policy = BALANCE_XMIT_POLICY_LAYER34;
- else
- return -1;
-
- return 0;
-}
-
-int
-bond_ethdev_parse_bond_mac_addr_kvarg(const char *key __rte_unused,
- const char *value, void *extra_args)
-{
- if (value == NULL || extra_args == NULL)
- return -1;
-
- /* Parse MAC */
- return cmdline_parse_etheraddr(NULL, value, extra_args,
- sizeof(struct ether_addr));
-}
-
-int
-bond_ethdev_parse_time_ms_kvarg(const char *key __rte_unused,
- const char *value, void *extra_args)
-{
- uint32_t time_ms;
- char *endptr;
-
- if (value == NULL || extra_args == NULL)
- return -1;
-
- errno = 0;
- time_ms = (uint32_t)strtol(value, &endptr, 10);
- if (*endptr != 0 || errno != 0)
- return -1;
-
- *(uint32_t *)extra_args = time_ms;
-
- return 0;
-}
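
These handlers are designed to be driven by rte_kvargs when the bonded PMD is created from --vdev arguments. A minimal sketch, assuming a params string such as "mode=4,slave=0000:02:00.0":

    struct rte_kvargs *kvlist =
        rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
    uint8_t mode;

    if (kvlist != NULL &&
        rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
                           &bond_ethdev_parse_slave_mode_kvarg, &mode) == 0) {
        /* mode now holds a validated bonding mode */
    }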
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_pmd.c b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_pmd.c
deleted file mode 100755
index bb4a5379..00000000
--- a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_pmd.c
+++ /dev/null
@@ -1,1881 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <stdlib.h>
-#include <rte_mbuf.h>
-#include <rte_malloc.h>
-#include <rte_ethdev.h>
-#include <rte_tcp.h>
-#include <rte_udp.h>
-#include <rte_ip.h>
-#include <rte_devargs.h>
-#include <rte_kvargs.h>
-#include <rte_dev.h>
-#include <rte_alarm.h>
-#include <rte_cycles.h>
-
-#include "rte_eth_bond.h"
-#include "rte_eth_bond_private.h"
-#include "rte_eth_bond_8023ad_private.h"
-
-#define REORDER_PERIOD_MS 10
-/* Table for statistics in mode 5 TLB */
-static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
-
-static uint16_t
-bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
-{
- struct bond_dev_private *internals;
-
- uint16_t num_rx_slave = 0;
- uint16_t num_rx_total = 0;
-
- int i;
-
- /* Cast to the structure containing the bonded device's port id and queue id */
- struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
-
- internals = bd_rx_q->dev_private;
-
-
- for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
- /* Offset of pointer to *bufs increases as packets are received
- * from other slaves */
- num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
- bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
- if (num_rx_slave) {
- num_rx_total += num_rx_slave;
- nb_pkts -= num_rx_slave;
- }
- }
-
- return num_rx_total;
-}
-
-static uint16_t
-bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_pkts)
-{
- struct bond_dev_private *internals;
-
- /* Cast to the structure containing the bonded device's port id and queue id */
- struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
-
- internals = bd_rx_q->dev_private;
-
- return rte_eth_rx_burst(internals->current_primary_port,
- bd_rx_q->queue_id, bufs, nb_pkts);
-}
-
-static uint16_t
-bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_pkts)
-{
- /* Cast to the structure containing the bonded device's port id and queue id */
- struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
- struct bond_dev_private *internals = bd_rx_q->dev_private;
- struct ether_addr bond_mac;
-
- struct ether_hdr *hdr;
-
- const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
- uint16_t num_rx_total = 0; /* Total number of received packets */
- uint8_t slaves[RTE_MAX_ETHPORTS];
- uint8_t slave_count;
-
- uint8_t collecting; /* current slave collecting status */
- const uint8_t promisc = internals->promiscuous_en;
- uint8_t i, j, k;
-
- rte_eth_macaddr_get(internals->port_id, &bond_mac);
- /* Copy the slave list to protect against slave up/down changes during
- * rx bursting */
- slave_count = internals->active_slave_count;
- memcpy(slaves, internals->active_slaves,
- sizeof(internals->active_slaves[0]) * slave_count);
-
- for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
- j = num_rx_total;
- collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);
-
- /* Read packets from this slave */
- num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
- &bufs[num_rx_total], nb_pkts - num_rx_total);
-
- for (k = j; k < 2 && k < num_rx_total; k++)
- rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));
-
- /* Handle slow protocol packets. */
- while (j < num_rx_total) {
- if (j + 3 < num_rx_total)
- rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
-
- hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
- /* Remove the packet from the array if it is a slow packet, if the slave
- * is not in collecting state, or if the bonding interface is not in
- * promiscuous mode and the packet address does not match. */
- if (unlikely(hdr->ether_type == ether_type_slow_be ||
- !collecting || (!promisc &&
- !is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
-
- if (hdr->ether_type == ether_type_slow_be) {
- bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
- bufs[j]);
- } else
- rte_pktmbuf_free(bufs[j]);
-
- /* Packet is managed by mode 4 or dropped, shift the array */
- num_rx_total--;
- if (j < num_rx_total) {
- memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
- (num_rx_total - j));
- }
- } else
- j++;
- }
- }
-
- return num_rx_total;
-}
-
-static uint16_t
-bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_pkts)
-{
- struct bond_dev_private *internals;
- struct bond_tx_queue *bd_tx_q;
-
- struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
- uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
-
- uint8_t num_of_slaves;
- uint8_t slaves[RTE_MAX_ETHPORTS];
-
- uint16_t num_tx_total = 0, num_tx_slave;
-
- static int slave_idx = 0;
- int i, cslave_idx = 0, tx_fail_total = 0;
-
- bd_tx_q = (struct bond_tx_queue *)queue;
- internals = bd_tx_q->dev_private;
-
- /* Copy slave list to protect against slave up/down changes during tx
- * bursting */
- num_of_slaves = internals->active_slave_count;
- memcpy(slaves, internals->active_slaves,
- sizeof(internals->active_slaves[0]) * num_of_slaves);
-
- if (num_of_slaves < 1)
- return num_tx_total;
-
- /* Populate each slave's mbuf array with the packets to be sent on it */
- for (i = 0; i < nb_pkts; i++) {
- cslave_idx = (slave_idx + i) % num_of_slaves;
- slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
- }
-
- /* increment current slave index so the next call to tx burst starts on the
- * next slave */
- slave_idx = ++cslave_idx;
-
- /* Send packet burst on each slave device */
- for (i = 0; i < num_of_slaves; i++) {
- if (slave_nb_pkts[i] > 0) {
- num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
- slave_bufs[i], slave_nb_pkts[i]);
-
- /* if the tx burst partially fails, move the unsent packets to the end of bufs */
- if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
- int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;
-
- tx_fail_total += tx_fail_slave;
-
- memcpy(&bufs[nb_pkts - tx_fail_total],
- &slave_bufs[i][num_tx_slave],
- tx_fail_slave * sizeof(bufs[0]));
- }
- num_tx_total += num_tx_slave;
- }
- }
-
- return num_tx_total;
-}
-
-static uint16_t
-bond_ethdev_tx_burst_active_backup(void *queue,
- struct rte_mbuf **bufs, uint16_t nb_pkts)
-{
- struct bond_dev_private *internals;
- struct bond_tx_queue *bd_tx_q;
-
- bd_tx_q = (struct bond_tx_queue *)queue;
- internals = bd_tx_q->dev_private;
-
- if (internals->active_slave_count < 1)
- return 0;
-
- return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
- bufs, nb_pkts);
-}
-
-static inline uint16_t
-ether_hash(struct ether_hdr *eth_hdr)
-{
- uint16_t *word_src_addr = (uint16_t *)eth_hdr->s_addr.addr_bytes;
- uint16_t *word_dst_addr = (uint16_t *)eth_hdr->d_addr.addr_bytes;
-
- return (word_src_addr[0] ^ word_dst_addr[0]) ^
- (word_src_addr[1] ^ word_dst_addr[1]) ^
- (word_src_addr[2] ^ word_dst_addr[2]);
-}
-
-static inline uint32_t
-ipv4_hash(struct ipv4_hdr *ipv4_hdr)
-{
- return (ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr);
-}
-
-static inline uint32_t
-ipv6_hash(struct ipv6_hdr *ipv6_hdr)
-{
- uint32_t *word_src_addr = (uint32_t *)&(ipv6_hdr->src_addr[0]);
- uint32_t *word_dst_addr = (uint32_t *)&(ipv6_hdr->dst_addr[0]);
-
- return (word_src_addr[0] ^ word_dst_addr[0]) ^
- (word_src_addr[1] ^ word_dst_addr[1]) ^
- (word_src_addr[2] ^ word_dst_addr[2]) ^
- (word_src_addr[3] ^ word_dst_addr[3]);
-}
-
-static uint32_t
-udp_hash(struct udp_hdr *hdr)
-{
- return hdr->src_port ^ hdr->dst_port;
-}
-
-static inline uint16_t
-xmit_slave_hash(const struct rte_mbuf *buf, uint8_t slave_count, uint8_t policy)
-{
- struct ether_hdr *eth_hdr;
- struct udp_hdr *udp_hdr;
- size_t eth_offset = 0;
- uint32_t hash = 0;
-
- if (slave_count == 1)
- return 0;
-
- switch (policy) {
- case BALANCE_XMIT_POLICY_LAYER2:
- eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
-
- hash = ether_hash(eth_hdr);
- hash ^= hash >> 8;
- return hash % slave_count;
-
- case BALANCE_XMIT_POLICY_LAYER23:
- eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
-
- if (buf->ol_flags & PKT_RX_VLAN_PKT)
- eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
- else
- eth_offset = sizeof(struct ether_hdr);
-
- if (buf->ol_flags & PKT_RX_IPV4_HDR) {
- struct ipv4_hdr *ipv4_hdr;
- ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(buf,
- unsigned char *) + eth_offset);
-
- hash = ether_hash(eth_hdr) ^ ipv4_hash(ipv4_hdr);
-
- } else {
- struct ipv6_hdr *ipv6_hdr;
-
- ipv6_hdr = (struct ipv6_hdr *)(rte_pktmbuf_mtod(buf,
- unsigned char *) + eth_offset);
-
- hash = ether_hash(eth_hdr) ^ ipv6_hash(ipv6_hdr);
- }
- break;
-
- case BALANCE_XMIT_POLICY_LAYER34:
- if (buf->ol_flags & PKT_RX_VLAN_PKT)
- eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
- else
- eth_offset = sizeof(struct ether_hdr);
-
- if (buf->ol_flags & PKT_RX_IPV4_HDR) {
- struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
- (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset);
-
- if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
- udp_hdr = (struct udp_hdr *)
- (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset +
- sizeof(struct ipv4_hdr));
- hash = ipv4_hash(ipv4_hdr) ^ udp_hash(udp_hdr);
- } else {
- hash = ipv4_hash(ipv4_hdr);
- }
- } else {
- struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
- (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset);
-
- if (ipv6_hdr->proto == IPPROTO_UDP) {
- udp_hdr = (struct udp_hdr *)
- (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset +
- sizeof(struct ipv6_hdr));
- hash = ipv6_hash(ipv6_hdr) ^ udp_hash(udp_hdr);
- } else {
- hash = ipv6_hash(ipv6_hdr);
- }
- }
- break;
- }
-
- hash ^= hash >> 16;
- hash ^= hash >> 8;
-
- return hash % slave_count;
-}
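
Taken together, these helpers reduce a packet's headers to a small hash and fold it onto the slave count, so every packet of a given flow lands on the same slave and per-flow ordering is preserved. A minimal standalone sketch of the layer-2 policy, mirroring ether_hash() and the folding in xmit_slave_hash() (the sample addresses are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Layer-2 policy in isolation: XOR the three 16-bit words of the
     * source and destination MACs, fold in the upper byte, then map
     * onto the slave count (assumed > 0). */
    static uint16_t l2_slave(const uint16_t src[3], const uint16_t dst[3],
            uint8_t slave_count)
    {
        uint16_t hash = (src[0] ^ dst[0]) ^ (src[1] ^ dst[1]) ^
                (src[2] ^ dst[2]);
        hash ^= hash >> 8;
        return hash % slave_count;
    }

    int main(void)
    {
        /* hypothetical MACs, viewed as three 16-bit words each */
        const uint16_t src[3] = { 0x0001, 0x0203, 0x0405 };
        const uint16_t dst[3] = { 0x1011, 0x1213, 0x1415 };

        printf("slave index: %u\n", (unsigned)l2_slave(src, dst, 4));
        return 0;
    }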
-
-struct bwg_slave {
- uint64_t bwg_left_int;
- uint64_t bwg_left_remainder;
- uint8_t slave;
-};
-
-static int
-bandwidth_cmp(const void *a, const void *b)
-{
- const struct bwg_slave *bwg_a = a;
- const struct bwg_slave *bwg_b = b;
- int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
- int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
- (int64_t)bwg_a->bwg_left_remainder;
- if (diff > 0)
- return 1;
- else if (diff < 0)
- return -1;
- else if (diff2 > 0)
- return 1;
- else if (diff2 < 0)
- return -1;
- else
- return 0;
-}
-
-static void
-bandwidth_left(int port_id, uint64_t load, uint8_t update_idx,
- struct bwg_slave *bwg_slave)
-{
- struct rte_eth_link link_status;
-
- rte_eth_link_get(port_id, &link_status);
- uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
- if (link_bwg == 0)
- return;
- link_bwg = (link_bwg * (update_idx+1) * REORDER_PERIOD_MS);
- bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
- bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
-}
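
The unit handling in bandwidth_left() is easy to misread: link_speed is reported in Mbps, so the first step converts it to bytes per second before scaling by the measurement window. A worked fragment, assuming a 10G slave:

    /* rte_eth_link.link_speed for a 10G slave is 10000 (Mbps) */
    uint64_t link_bwg = 10000ULL * 1000000ULL / 8; /* 1250000000 bytes/s */
    /* then scaled by the window: (update_idx + 1) * REORDER_PERIOD_MS */

The quotient/remainder pair it produces is only ever compared by bandwidth_cmp(), so its absolute scale does not matter; it merely has to order the slaves consistently by remaining bandwidth.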
-
-static void
-bond_ethdev_update_tlb_slave_cb(void *arg)
-{
- struct bond_dev_private *internals = arg;
- struct rte_eth_stats slave_stats;
- struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
- uint8_t slave_count;
- uint64_t tx_bytes;
-
- uint8_t update_stats = 0;
- uint8_t i, slave_id;
-
- internals->slave_update_idx++;
-
-
- if (internals->slave_update_idx >= REORDER_PERIOD_MS)
- update_stats = 1;
-
- for (i = 0; i < internals->active_slave_count; i++) {
- slave_id = internals->active_slaves[i];
- rte_eth_stats_get(slave_id, &slave_stats);
- tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
- bandwidth_left(slave_id, tx_bytes,
- internals->slave_update_idx, &bwg_array[i]);
- bwg_array[i].slave = slave_id;
-
- if (update_stats)
- tlb_last_obytets[slave_id] = slave_stats.obytes;
- }
-
- if (update_stats == 1)
- internals->slave_update_idx = 0;
-
- slave_count = i;
- qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
- for (i = 0; i < slave_count; i++)
- internals->active_slaves[i] = bwg_array[i].slave;
-
- rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
- (struct bond_dev_private *)internals);
-}
-
-static uint16_t
-bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
-{
- struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
- struct bond_dev_private *internals = bd_tx_q->dev_private;
-
- struct rte_eth_dev *primary_port =
- &rte_eth_devices[internals->primary_port];
- uint16_t num_tx_total = 0;
- uint8_t i, j;
-
- uint8_t num_of_slaves = internals->active_slave_count;
- uint8_t slaves[RTE_MAX_ETHPORTS];
-
- struct ether_hdr *ether_hdr;
- struct ether_addr primary_slave_addr;
- struct ether_addr active_slave_addr;
-
- if (num_of_slaves < 1)
- return num_tx_total;
-
- memcpy(slaves, internals->active_slaves,
- sizeof(internals->active_slaves[0]) * num_of_slaves);
-
-
- ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
-
- if (nb_pkts > 3) {
- for (i = 0; i < 3; i++)
- rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
- }
-
- for (i = 0; i < num_of_slaves; i++) {
- ether_addr_copy(&internals->slaves[slaves[i]].persisted_mac_addr,
- &active_slave_addr);
-
- for (j = num_tx_total; j < nb_pkts; j++) {
- if (j + 3 < nb_pkts)
- rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
-
- ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
- if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
- ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
- }
-
- num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
- bufs + num_tx_total, nb_pkts - num_tx_total);
-
- if (num_tx_total == nb_pkts)
- break;
- }
-
- return num_tx_total;
-}
-
-static uint16_t
-bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_pkts)
-{
- struct bond_dev_private *internals;
- struct bond_tx_queue *bd_tx_q;
-
- uint8_t num_of_slaves;
- uint8_t slaves[RTE_MAX_ETHPORTS];
-
- uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;
-
- int i, op_slave_id;
-
- struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
- uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
-
- bd_tx_q = (struct bond_tx_queue *)queue;
- internals = bd_tx_q->dev_private;
-
- /* Copy slave list to protect against slave up/down changes during tx
- * bursting */
- num_of_slaves = internals->active_slave_count;
- memcpy(slaves, internals->active_slaves,
- sizeof(internals->active_slaves[0]) * num_of_slaves);
-
- if (num_of_slaves < 1)
- return num_tx_total;
-
-	/* Populate each slave's mbuf array with the packets to be sent on it */
- for (i = 0; i < nb_pkts; i++) {
- /* Select output slave using hash based on xmit policy */
- op_slave_id = xmit_slave_hash(bufs[i], num_of_slaves,
- internals->balance_xmit_policy);
-
- /* Populate slave mbuf arrays with mbufs for that slave */
- slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
- }
-
- /* Send packet burst on each slave device */
- for (i = 0; i < num_of_slaves; i++) {
- if (slave_nb_pkts[i] > 0) {
- num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
- slave_bufs[i], slave_nb_pkts[i]);
-
- /* if tx burst fails move packets to end of bufs */
- if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
- int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;
-
- tx_fail_total += slave_tx_fail_count;
- memcpy(&bufs[nb_pkts - tx_fail_total],
- &slave_bufs[i][num_tx_slave],
- slave_tx_fail_count * sizeof(bufs[0]));
- }
-
- num_tx_total += num_tx_slave;
- }
- }
-
- return num_tx_total;
-}
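
Note the failure contract here: unsent packets are compacted to the tail of bufs, and since every packet is either transmitted or moved, the failed ones start exactly at the returned count. A caller-side sketch of that contract (port and queue ids are placeholders; a real application would bound the retry loop):

    #include <rte_ethdev.h>

    /* drain a burst through the bonded port, retrying the compacted tail */
    static void tx_all(uint8_t bond_port_id, struct rte_mbuf **bufs,
            uint16_t nb_pkts)
    {
        uint16_t sent = rte_eth_tx_burst(bond_port_id, 0, bufs, nb_pkts);

        /* bufs[sent..nb_pkts-1] now hold the packets that failed */
        while (sent < nb_pkts)
            sent += rte_eth_tx_burst(bond_port_id, 0, &bufs[sent],
                    nb_pkts - sent);
    }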
-
-static uint16_t
-bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_pkts)
-{
- struct bond_dev_private *internals;
- struct bond_tx_queue *bd_tx_q;
-
- uint8_t num_of_slaves;
- uint8_t slaves[RTE_MAX_ETHPORTS];
-	/* positions in the slaves array, not port IDs */
- uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
- uint8_t distributing_count;
-
- uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
- uint16_t i, j, op_slave_idx;
- const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;
-
-	/* Allocate room for the additional (slow) packets used by 802.3ad mode. */
- struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
- void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };
-
-	/* Total number of packets in slave_bufs */
- uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
- /* Slow packets placed in each slave */
- uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
-
- bd_tx_q = (struct bond_tx_queue *)queue;
- internals = bd_tx_q->dev_private;
-
- /* Copy slave list to protect against slave up/down changes during tx
- * bursting */
- num_of_slaves = internals->active_slave_count;
- if (num_of_slaves < 1)
- return num_tx_total;
-
- memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);
-
- distributing_count = 0;
- for (i = 0; i < num_of_slaves; i++) {
- struct port *port = &mode_8023ad_ports[slaves[i]];
-
- slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
- slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
- slave_nb_pkts[i] = slave_slow_nb_pkts[i];
-
- for (j = 0; j < slave_slow_nb_pkts[i]; j++)
- slave_bufs[i][j] = slow_pkts[j];
-
- if (ACTOR_STATE(port, DISTRIBUTING))
- distributing_offsets[distributing_count++] = i;
- }
-
- if (likely(distributing_count > 0)) {
-		/* Populate each slave's mbuf array with the packets to be sent on it */
- for (i = 0; i < nb_pkts; i++) {
- /* Select output slave using hash based on xmit policy */
- op_slave_idx = xmit_slave_hash(bufs[i], distributing_count,
- internals->balance_xmit_policy);
-
- /* Populate slave mbuf arrays with mbufs for that slave. Use only
- * slaves that are currently distributing. */
- uint8_t slave_offset = distributing_offsets[op_slave_idx];
- slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
- slave_nb_pkts[slave_offset]++;
- }
- }
-
- /* Send packet burst on each slave device */
- for (i = 0; i < num_of_slaves; i++) {
- if (slave_nb_pkts[i] == 0)
- continue;
-
- num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
- slave_bufs[i], slave_nb_pkts[i]);
-
-		/* If the burst fell short, free any untransmitted slow-protocol (LACP) packets */
- for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
- rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);
-
- num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
- num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;
-
- /* If tx burst fails move packets to end of bufs */
- if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
- uint16_t j = nb_pkts - num_tx_fail_total;
- for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
- bufs[j] = slave_bufs[i][num_tx_slave];
- }
- }
-
- return num_tx_total;
-}
-
-#ifdef RTE_MBUF_REFCNT
-static uint16_t
-bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_pkts)
-{
- struct bond_dev_private *internals;
- struct bond_tx_queue *bd_tx_q;
-
- uint8_t tx_failed_flag = 0, num_of_slaves;
- uint8_t slaves[RTE_MAX_ETHPORTS];
-
- uint16_t max_nb_of_tx_pkts = 0;
-
- int slave_tx_total[RTE_MAX_ETHPORTS];
- int i, most_successful_tx_slave = -1;
-
- bd_tx_q = (struct bond_tx_queue *)queue;
- internals = bd_tx_q->dev_private;
-
- /* Copy slave list to protect against slave up/down changes during tx
- * bursting */
- num_of_slaves = internals->active_slave_count;
- memcpy(slaves, internals->active_slaves,
- sizeof(internals->active_slaves[0]) * num_of_slaves);
-
- if (num_of_slaves < 1)
- return 0;
-
- /* Increment reference count on mbufs */
- for (i = 0; i < nb_pkts; i++)
- rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);
-
- /* Transmit burst on each active slave */
- for (i = 0; i < num_of_slaves; i++) {
- slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
- bufs, nb_pkts);
-
- if (unlikely(slave_tx_total[i] < nb_pkts))
- tx_failed_flag = 1;
-
- /* record the value and slave index for the slave which transmits the
- * maximum number of packets */
- if (slave_tx_total[i] > max_nb_of_tx_pkts) {
- max_nb_of_tx_pkts = slave_tx_total[i];
- most_successful_tx_slave = i;
- }
- }
-
- /* if slaves fail to transmit packets from burst, the calling application
- * is not expected to know about multiple references to packets so we must
- * handle failures of all packets except those of the most successful slave
- */
- if (unlikely(tx_failed_flag))
- for (i = 0; i < num_of_slaves; i++)
- if (i != most_successful_tx_slave)
- while (slave_tx_total[i] < nb_pkts)
- rte_pktmbuf_free(bufs[slave_tx_total[i]++]);
-
- return max_nb_of_tx_pkts;
-}
-#endif
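
The refcount arithmetic is the subtle part of broadcast mode: each mbuf is handed to num_of_slaves transmit queues, so its reference count is raised by num_of_slaves - 1 up front; every successful slave transmit releases one reference, and the cleanup loop releases those of the failed copies. The same idiom in isolation, duplicating one mbuf to N ports without copying its data (names are placeholders):

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    static void broadcast_one(struct rte_mbuf *m, const uint8_t *ports,
            uint8_t n_ports)
    {
        uint8_t p;

        rte_mbuf_refcnt_update(m, n_ports - 1);  /* refcnt: 1 -> n_ports */
        for (p = 0; p < n_ports; p++) {
            if (rte_eth_tx_burst(ports[p], 0, &m, 1) == 0)
                rte_pktmbuf_free(m);  /* release the unsent reference */
        }
    }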
-
-void
-link_properties_set(struct rte_eth_dev *bonded_eth_dev,
- struct rte_eth_link *slave_dev_link)
-{
- struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
- struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
-
- if (slave_dev_link->link_status &&
- bonded_eth_dev->data->dev_started) {
- bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
- bonded_dev_link->link_speed = slave_dev_link->link_speed;
-
- internals->link_props_set = 1;
- }
-}
-
-void
-link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
-{
- struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
-
- memset(&(bonded_eth_dev->data->dev_link), 0,
- sizeof(bonded_eth_dev->data->dev_link));
-
- internals->link_props_set = 0;
-}
-
-int
-link_properties_valid(struct rte_eth_link *bonded_dev_link,
- struct rte_eth_link *slave_dev_link)
-{
- if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
- bonded_dev_link->link_speed != slave_dev_link->link_speed)
- return -1;
-
- return 0;
-}
-
-int
-mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
-{
- struct ether_addr *mac_addr;
-
- if (eth_dev == NULL) {
- RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
- return -1;
- }
-
- if (dst_mac_addr == NULL) {
- RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
- return -1;
- }
-
- mac_addr = eth_dev->data->mac_addrs;
-
- ether_addr_copy(mac_addr, dst_mac_addr);
- return 0;
-}
-
-int
-mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
-{
- struct ether_addr *mac_addr;
-
- if (eth_dev == NULL) {
- RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
- return -1;
- }
-
- if (new_mac_addr == NULL) {
- RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
- return -1;
- }
-
- mac_addr = eth_dev->data->mac_addrs;
-
-	/* If the new MAC differs from the current MAC then update */
- if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
- memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
-
- return 0;
-}
-
-int
-mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
-{
- struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
- int i;
-
- /* Update slave devices MAC addresses */
- if (internals->slave_count < 1)
- return -1;
-
- switch (internals->mode) {
- case BONDING_MODE_ROUND_ROBIN:
- case BONDING_MODE_BALANCE:
-#ifdef RTE_MBUF_REFCNT
- case BONDING_MODE_BROADCAST:
-#endif
- for (i = 0; i < internals->slave_count; i++) {
- if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
- bonded_eth_dev->data->mac_addrs)) {
- RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
- internals->slaves[i].port_id);
- return -1;
- }
- }
- break;
- case BONDING_MODE_8023AD:
- bond_mode_8023ad_mac_address_update(bonded_eth_dev);
- break;
- case BONDING_MODE_ACTIVE_BACKUP:
- case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
- default:
- for (i = 0; i < internals->slave_count; i++) {
- if (internals->slaves[i].port_id ==
- internals->current_primary_port) {
- if (mac_address_set(&rte_eth_devices[internals->primary_port],
- bonded_eth_dev->data->mac_addrs)) {
- RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
- internals->current_primary_port);
- return -1;
- }
- } else {
- if (mac_address_set(
- &rte_eth_devices[internals->slaves[i].port_id],
- &internals->slaves[i].persisted_mac_addr)) {
- RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
- internals->slaves[i].port_id);
- return -1;
- }
- }
- }
- }
-
- return 0;
-}
-
-int
-bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
-{
- struct bond_dev_private *internals;
-
- internals = eth_dev->data->dev_private;
-
- switch (mode) {
- case BONDING_MODE_ROUND_ROBIN:
- eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
- eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
- break;
- case BONDING_MODE_ACTIVE_BACKUP:
- eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
- eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
- break;
- case BONDING_MODE_BALANCE:
- eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
- eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
- break;
-#ifdef RTE_MBUF_REFCNT
- case BONDING_MODE_BROADCAST:
- eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
- eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
- break;
-#endif
- case BONDING_MODE_8023AD:
- if (bond_mode_8023ad_enable(eth_dev) != 0)
- return -1;
-
- eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
- eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
- RTE_BOND_LOG(WARNING,
- "Using mode 4, it is necessary to do TX burst and RX burst "
- "at least every 100ms.");
- break;
- case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
- eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
- eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
- break;
- default:
- return -1;
- }
-
- internals->mode = mode;
-
- return 0;
-}
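
bond_ethdev_mode_set() is normally reached through the public bonding API rather than called directly. A hedged usage sketch (mode constants as used in the switch above; declarations in rte_eth_bond.h):

    #include <rte_eth_bond.h>
    #include <rte_lcore.h>

    /* create a bonded device in balance mode on the caller's socket, then
     * switch it to active-backup; both calls end up in bond_ethdev_mode_set() */
    static int make_bond(void)
    {
        int bond_port = rte_eth_bond_create("eth_bond0", BONDING_MODE_BALANCE,
                rte_socket_id());

        if (bond_port < 0)
            return bond_port;
        return rte_eth_bond_mode_set(bond_port, BONDING_MODE_ACTIVE_BACKUP);
    }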
-
-int
-slave_configure(struct rte_eth_dev *bonded_eth_dev,
- struct rte_eth_dev *slave_eth_dev)
-{
- struct bond_rx_queue *bd_rx_q;
- struct bond_tx_queue *bd_tx_q;
-
- int errval, q_id;
-
- /* Stop slave */
- rte_eth_dev_stop(slave_eth_dev->data->port_id);
-
- /* Enable interrupts on slave device if supported */
- if (slave_eth_dev->driver->pci_drv.drv_flags & RTE_PCI_DRV_INTR_LSC)
- slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
-
- /* Configure device */
- errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
- bonded_eth_dev->data->nb_rx_queues,
- bonded_eth_dev->data->nb_tx_queues,
- &(slave_eth_dev->data->dev_conf));
- if (errval != 0) {
- RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u , err (%d)",
- slave_eth_dev->data->port_id, errval);
- return errval;
- }
-
- /* Setup Rx Queues */
- for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
- bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
-
- errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
- bd_rx_q->nb_rx_desc,
- rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
- &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
- if (errval != 0) {
- RTE_BOND_LOG(ERR,
- "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
- slave_eth_dev->data->port_id, q_id, errval);
- return errval;
- }
- }
-
- /* Setup Tx Queues */
- for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
- bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
-
- errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
- bd_tx_q->nb_tx_desc,
- rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
- &bd_tx_q->tx_conf);
- if (errval != 0) {
- RTE_BOND_LOG(ERR,
- "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
- slave_eth_dev->data->port_id, q_id, errval);
- return errval;
- }
- }
-
- /* Start device */
- errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
- if (errval != 0) {
- RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
- slave_eth_dev->data->port_id, errval);
- return -1;
- }
-
- return 0;
-}
-
-void
-slave_remove(struct bond_dev_private *internals,
- struct rte_eth_dev *slave_eth_dev)
-{
- uint8_t i;
-
- for (i = 0; i < internals->slave_count; i++)
- if (internals->slaves[i].port_id ==
- slave_eth_dev->data->port_id)
- break;
-
- if (i < (internals->slave_count - 1))
- memmove(&internals->slaves[i], &internals->slaves[i + 1],
- sizeof(internals->slaves[0]) *
- (internals->slave_count - i - 1));
-
- internals->slave_count--;
-}
-
-static void
-bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
-
-void
-slave_add(struct bond_dev_private *internals,
- struct rte_eth_dev *slave_eth_dev)
-{
- struct bond_slave_details *slave_details =
- &internals->slaves[internals->slave_count];
-
- slave_details->port_id = slave_eth_dev->data->port_id;
- slave_details->last_link_status = 0;
-
-	/* If the slave device doesn't support interrupts then we need to enable
-	 * polling to monitor link status */
- if (!(slave_eth_dev->pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
- slave_details->link_status_poll_enabled = 1;
-
- if (!internals->link_status_polling_enabled) {
- internals->link_status_polling_enabled = 1;
-
- rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
- bond_ethdev_slave_link_status_change_monitor,
- (void *)&rte_eth_devices[internals->port_id]);
- }
- }
-
- slave_details->link_status_wait_to_complete = 0;
-	/* record the slave's current MAC address so it can be restored later */
- memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
- sizeof(struct ether_addr));
-}
-
-void
-bond_ethdev_primary_set(struct bond_dev_private *internals,
- uint8_t slave_port_id)
-{
- int i;
-
- if (internals->active_slave_count < 1)
- internals->current_primary_port = slave_port_id;
- else
- /* Search bonded device slave ports for new proposed primary port */
- for (i = 0; i < internals->active_slave_count; i++) {
- if (internals->active_slaves[i] == slave_port_id)
- internals->current_primary_port = slave_port_id;
- }
-}
-
-static void
-bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
-
-static int
-bond_ethdev_start(struct rte_eth_dev *eth_dev)
-{
- struct bond_dev_private *internals;
- int i;
-
- /* slave eth dev will be started by bonded device */
- if (valid_bonded_ethdev(eth_dev)) {
- RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
- eth_dev->data->port_id);
- return -1;
- }
-
- eth_dev->data->dev_link.link_status = 0;
- eth_dev->data->dev_started = 1;
-
- internals = eth_dev->data->dev_private;
-
- if (internals->slave_count == 0) {
- RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
- return -1;
- }
-
- if (internals->user_defined_mac == 0) {
- struct ether_addr *new_mac_addr = NULL;
-
- for (i = 0; i < internals->slave_count; i++)
- if (internals->slaves[i].port_id == internals->primary_port)
- new_mac_addr = &internals->slaves[i].persisted_mac_addr;
-
- if (new_mac_addr == NULL)
- return -1;
-
- if (mac_address_set(eth_dev, new_mac_addr) != 0) {
- RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
- eth_dev->data->port_id);
- return -1;
- }
- }
-
-	/* Update all slave devices' MAC addresses */
- if (mac_address_slaves_update(eth_dev) != 0)
- return -1;
-
-	/* If the bonded device is configured in promiscuous mode then re-apply it */
- if (internals->promiscuous_en)
- bond_ethdev_promiscuous_enable(eth_dev);
-
- /* Reconfigure each slave device if starting bonded device */
- for (i = 0; i < internals->slave_count; i++) {
- if (slave_configure(eth_dev,
- &(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
- RTE_BOND_LOG(ERR,
- "bonded port (%d) failed to reconfigure slave device (%d)",
- eth_dev->data->port_id, internals->slaves[i].port_id);
- return -1;
- }
- }
-
- if (internals->user_defined_primary_port)
- bond_ethdev_primary_set(internals, internals->primary_port);
-
- if (internals->mode == BONDING_MODE_8023AD)
- bond_mode_8023ad_start(eth_dev);
-
- if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING)
- bond_ethdev_update_tlb_slave_cb(internals);
-
- return 0;
-}
-
-static void
-bond_ethdev_stop(struct rte_eth_dev *eth_dev)
-{
- struct bond_dev_private *internals = eth_dev->data->dev_private;
- uint8_t i;
-
- if (internals->mode == BONDING_MODE_8023AD) {
- struct port *port;
- void *pkt = NULL;
-
- bond_mode_8023ad_stop(eth_dev);
-
- /* Discard all messages to/from mode 4 state machines */
- for (i = 0; i < internals->slave_count; i++) {
- port = &mode_8023ad_ports[internals->slaves[i].port_id];
-
- RTE_VERIFY(port->rx_ring != NULL);
- while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
- rte_pktmbuf_free(pkt);
-
- RTE_VERIFY(port->tx_ring != NULL);
- while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
- rte_pktmbuf_free(pkt);
- }
- }
-
- if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING) {
- rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
- }
-
- internals->active_slave_count = 0;
- internals->link_status_polling_enabled = 0;
-
- eth_dev->data->dev_link.link_status = 0;
- eth_dev->data->dev_started = 0;
-}
-
-static void
-bond_ethdev_close(struct rte_eth_dev *dev __rte_unused)
-{
-}
-
-/* forward declaration */
-static int bond_ethdev_configure(struct rte_eth_dev *dev);
-
-static void
-bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
-{
- struct bond_dev_private *internals = dev->data->dev_private;
-
- dev_info->driver_name = driver_name;
- dev_info->max_mac_addrs = 1;
-
- dev_info->max_rx_pktlen = (uint32_t)2048;
-
- dev_info->max_rx_queues = (uint16_t)128;
- dev_info->max_tx_queues = (uint16_t)512;
-
- dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = dev->pci_dev;
-
- dev_info->rx_offload_capa = internals->rx_offload_capa;
- dev_info->tx_offload_capa = internals->tx_offload_capa;
-}
-
-static int
-bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
- uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
-{
- struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
- rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
- 0, dev->pci_dev->numa_node);
- if (bd_rx_q == NULL)
- return -1;
-
- bd_rx_q->queue_id = rx_queue_id;
- bd_rx_q->dev_private = dev->data->dev_private;
-
- bd_rx_q->nb_rx_desc = nb_rx_desc;
-
- memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
- bd_rx_q->mb_pool = mb_pool;
-
- dev->data->rx_queues[rx_queue_id] = bd_rx_q;
-
- return 0;
-}
-
-static int
-bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
- uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
- const struct rte_eth_txconf *tx_conf)
-{
- struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
- rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
- 0, dev->pci_dev->numa_node);
-
- if (bd_tx_q == NULL)
- return -1;
-
- bd_tx_q->queue_id = tx_queue_id;
- bd_tx_q->dev_private = dev->data->dev_private;
-
- bd_tx_q->nb_tx_desc = nb_tx_desc;
- memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
-
- dev->data->tx_queues[tx_queue_id] = bd_tx_q;
-
- return 0;
-}
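
Note that these rx/tx setup handlers only record the caller's configuration in the bond's private queue structures; the hardware queues are programmed when slave_configure() above replays the stored settings onto each slave. From the application's point of view, queue setup on the bonded port therefore looks like any other port (a sketch; descriptor counts and the mempool are placeholders, and passing NULL for the queue config assumes the ethdev layer substitutes its defaults before invoking the PMD ops):

    #include <rte_ethdev.h>
    #include <rte_lcore.h>

    static int setup_bond_queues(uint8_t bond_port, struct rte_mempool *pool)
    {
        int ret = rte_eth_rx_queue_setup(bond_port, 0, 128, rte_socket_id(),
                NULL, pool);

        if (ret != 0)
            return ret;
        return rte_eth_tx_queue_setup(bond_port, 0, 512, rte_socket_id(),
                NULL);
    }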
-
-static void
-bond_ethdev_rx_queue_release(void *queue)
-{
- if (queue == NULL)
- return;
-
- rte_free(queue);
-}
-
-static void
-bond_ethdev_tx_queue_release(void *queue)
-{
- if (queue == NULL)
- return;
-
- rte_free(queue);
-}
-
-static void
-bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
-{
- struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
- struct bond_dev_private *internals;
-
-	/* The default for polling_slave_found is true, as we don't want to
-	 * disable the polling thread if we cannot get the lock */
- int i, polling_slave_found = 1;
-
- if (cb_arg == NULL)
- return;
-
- bonded_ethdev = (struct rte_eth_dev *)cb_arg;
- internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
-
- if (!bonded_ethdev->data->dev_started ||
- !internals->link_status_polling_enabled)
- return;
-
-	/* If the device is currently being configured then don't check the
-	 * slaves' link status; wait until the next period */
- if (rte_spinlock_trylock(&internals->lock)) {
- if (internals->slave_count > 0)
- polling_slave_found = 0;
-
- for (i = 0; i < internals->slave_count; i++) {
- if (!internals->slaves[i].link_status_poll_enabled)
- continue;
-
- slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
- polling_slave_found = 1;
-
- /* Update slave link status */
- (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
- internals->slaves[i].link_status_wait_to_complete);
-
- /* if link status has changed since last checked then call lsc
- * event callback */
- if (slave_ethdev->data->dev_link.link_status !=
- internals->slaves[i].last_link_status) {
- internals->slaves[i].last_link_status =
- slave_ethdev->data->dev_link.link_status;
-
- bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
- RTE_ETH_EVENT_INTR_LSC,
- &bonded_ethdev->data->port_id);
- }
- }
- rte_spinlock_unlock(&internals->lock);
- }
-
- if (polling_slave_found)
- /* Set alarm to continue monitoring link status of slave ethdev's */
- rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
- bond_ethdev_slave_link_status_change_monitor, cb_arg);
-}
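
The monitor keeps itself alive by re-arming an EAL alarm at the end of each pass, and stops simply by not re-arming. The self-rearming idiom in isolation (callback name and period are illustrative):

    #include <rte_alarm.h>

    #define POLL_PERIOD_US (100 * 1000) /* 100 ms, illustrative */

    static void poll_cb(void *arg)
    {
        /* ... one round of polling on arg ... */
        rte_eal_alarm_set(POLL_PERIOD_US, poll_cb, arg); /* re-arm */
    }

Cancellation from the control path works as bond_ethdev_stop() shows for the TLB callback: rte_eal_alarm_cancel() with the same callback/argument pair.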
-
-static int
-bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
- int wait_to_complete)
-{
- struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
-
- if (!bonded_eth_dev->data->dev_started ||
- internals->active_slave_count == 0) {
- bonded_eth_dev->data->dev_link.link_status = 0;
- return 0;
- } else {
- struct rte_eth_dev *slave_eth_dev;
- int i, link_up = 0;
-
- for (i = 0; i < internals->active_slave_count; i++) {
- slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];
-
- (*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
- wait_to_complete);
- if (slave_eth_dev->data->dev_link.link_status == 1) {
- link_up = 1;
- break;
- }
- }
-
- bonded_eth_dev->data->dev_link.link_status = link_up;
- }
-
- return 0;
-}
-
-static void
-bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
-{
- struct bond_dev_private *internals = dev->data->dev_private;
- struct rte_eth_stats slave_stats;
-
- int i;
-
- /* clear bonded stats before populating from slaves */
- memset(stats, 0, sizeof(*stats));
-
- for (i = 0; i < internals->slave_count; i++) {
- rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
-
- stats->ipackets += slave_stats.ipackets;
- stats->opackets += slave_stats.opackets;
- stats->ibytes += slave_stats.ibytes;
- stats->obytes += slave_stats.obytes;
- stats->ierrors += slave_stats.ierrors;
- stats->oerrors += slave_stats.oerrors;
- stats->imcasts += slave_stats.imcasts;
- stats->rx_nombuf += slave_stats.rx_nombuf;
- stats->fdirmatch += slave_stats.fdirmatch;
- stats->fdirmiss += slave_stats.fdirmiss;
- stats->tx_pause_xon += slave_stats.tx_pause_xon;
- stats->rx_pause_xon += slave_stats.rx_pause_xon;
- stats->tx_pause_xoff += slave_stats.tx_pause_xoff;
- stats->rx_pause_xoff += slave_stats.rx_pause_xoff;
- }
-}
-
-static void
-bond_ethdev_stats_reset(struct rte_eth_dev *dev)
-{
- struct bond_dev_private *internals = dev->data->dev_private;
- int i;
-
- for (i = 0; i < internals->slave_count; i++)
- rte_eth_stats_reset(internals->slaves[i].port_id);
-}
-
-static void
-bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
-{
- struct bond_dev_private *internals = eth_dev->data->dev_private;
- int i;
-
- internals->promiscuous_en = 1;
-
- switch (internals->mode) {
- /* Promiscuous mode is propagated to all slaves */
- case BONDING_MODE_ROUND_ROBIN:
- case BONDING_MODE_BALANCE:
-#ifdef RTE_MBUF_REFCNT
- case BONDING_MODE_BROADCAST:
-#endif
- for (i = 0; i < internals->slave_count; i++)
- rte_eth_promiscuous_enable(internals->slaves[i].port_id);
- break;
-	/* In mode 4, promiscuous mode is managed when a slave is added/removed */
- case BONDING_MODE_8023AD:
- break;
- /* Promiscuous mode is propagated only to primary slave */
- case BONDING_MODE_ACTIVE_BACKUP:
- case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
- default:
- rte_eth_promiscuous_enable(internals->current_primary_port);
- }
-}
-
-static void
-bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
-{
- struct bond_dev_private *internals = dev->data->dev_private;
- int i;
-
- internals->promiscuous_en = 0;
-
- switch (internals->mode) {
- /* Promiscuous mode is propagated to all slaves */
- case BONDING_MODE_ROUND_ROBIN:
- case BONDING_MODE_BALANCE:
-#ifdef RTE_MBUF_REFCNT
- case BONDING_MODE_BROADCAST:
-#endif
- for (i = 0; i < internals->slave_count; i++)
- rte_eth_promiscuous_disable(internals->slaves[i].port_id);
- break;
-	/* In mode 4, promiscuous mode is managed when a slave is added/removed */
- case BONDING_MODE_8023AD:
- break;
- /* Promiscuous mode is propagated only to primary slave */
- case BONDING_MODE_ACTIVE_BACKUP:
- case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
- default:
- rte_eth_promiscuous_disable(internals->current_primary_port);
- }
-}
-
-static void
-bond_ethdev_delayed_lsc_propagation(void *arg)
-{
- if (arg == NULL)
- return;
-
- _rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
- RTE_ETH_EVENT_INTR_LSC);
-}
-
-void
-bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
- void *param)
-{
- struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
- struct bond_dev_private *internals;
- struct rte_eth_link link;
-
- int i, valid_slave = 0;
- uint8_t active_pos;
- uint8_t lsc_flag = 0;
-
- if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
- return;
-
- bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
- slave_eth_dev = &rte_eth_devices[port_id];
-
- if (valid_bonded_ethdev(bonded_eth_dev))
- return;
-
- internals = bonded_eth_dev->data->dev_private;
-
- /* If the device isn't started don't handle interrupts */
- if (!bonded_eth_dev->data->dev_started)
- return;
-
- /* verify that port_id is a valid slave of bonded port */
- for (i = 0; i < internals->slave_count; i++) {
- if (internals->slaves[i].port_id == port_id) {
- valid_slave = 1;
- break;
- }
- }
-
- if (!valid_slave)
- return;
-
- /* Search for port in active port list */
- active_pos = find_slave_by_id(internals->active_slaves,
- internals->active_slave_count, port_id);
-
- rte_eth_link_get_nowait(port_id, &link);
- if (link.link_status) {
- if (active_pos < internals->active_slave_count)
- return;
-
- /* if no active slave ports then set this port to be primary port */
- if (internals->active_slave_count < 1) {
- /* If first active slave, then change link status */
- bonded_eth_dev->data->dev_link.link_status = 1;
- internals->current_primary_port = port_id;
- lsc_flag = 1;
-
- mac_address_slaves_update(bonded_eth_dev);
-
- /* Inherit eth dev link properties from first active slave */
- link_properties_set(bonded_eth_dev,
- &(slave_eth_dev->data->dev_link));
- }
-
- activate_slave(bonded_eth_dev, port_id);
-
- /* If user has defined the primary port then default to using it */
- if (internals->user_defined_primary_port &&
- internals->primary_port == port_id)
- bond_ethdev_primary_set(internals, port_id);
- } else {
- if (active_pos == internals->active_slave_count)
- return;
-
- /* Remove from active slave list */
- deactivate_slave(bonded_eth_dev, port_id);
-
- /* No active slaves, change link status to down and reset other
- * link properties */
- if (internals->active_slave_count < 1) {
- lsc_flag = 1;
- bonded_eth_dev->data->dev_link.link_status = 0;
-
- link_properties_reset(bonded_eth_dev);
- }
-
-		/* Update primary id: take the first active slave from the list, or
-		 * fall back to the configured primary port if none is available */
- if (port_id == internals->current_primary_port) {
- if (internals->active_slave_count > 0)
- bond_ethdev_primary_set(internals,
- internals->active_slaves[0]);
- else
- internals->current_primary_port = internals->primary_port;
- }
- }
-
- if (lsc_flag) {
- /* Cancel any possible outstanding interrupts if delays are enabled */
- if (internals->link_up_delay_ms > 0 ||
- internals->link_down_delay_ms > 0)
- rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
- bonded_eth_dev);
-
- if (bonded_eth_dev->data->dev_link.link_status) {
- if (internals->link_up_delay_ms > 0)
- rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
- bond_ethdev_delayed_lsc_propagation,
- (void *)bonded_eth_dev);
- else
- _rte_eth_dev_callback_process(bonded_eth_dev,
- RTE_ETH_EVENT_INTR_LSC);
-
- } else {
- if (internals->link_down_delay_ms > 0)
- rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
- bond_ethdev_delayed_lsc_propagation,
- (void *)bonded_eth_dev);
- else
- _rte_eth_dev_callback_process(bonded_eth_dev,
- RTE_ETH_EVENT_INTR_LSC);
- }
- }
-}
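
This handler has the rte_eth_dev_cb_fn signature, so it is attached per slave through the standard ethdev callback API, with param carrying the bonded port id back into the handler; the registration itself happens in the slave-add path, outside this diff. A hedged sketch of that wiring (the port variables are placeholders):

    /* listen for link-state-change interrupts on a slave port */
    static uint8_t bonded_port_id;

    static void wire_slave(uint8_t slave_port_id)
    {
        rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
                bond_ethdev_lsc_event_callback, &bonded_port_id);
    }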
-
-struct eth_dev_ops default_dev_ops = {
- .dev_start = bond_ethdev_start,
- .dev_stop = bond_ethdev_stop,
- .dev_close = bond_ethdev_close,
- .dev_configure = bond_ethdev_configure,
- .dev_infos_get = bond_ethdev_info,
- .rx_queue_setup = bond_ethdev_rx_queue_setup,
- .tx_queue_setup = bond_ethdev_tx_queue_setup,
- .rx_queue_release = bond_ethdev_rx_queue_release,
- .tx_queue_release = bond_ethdev_tx_queue_release,
- .link_update = bond_ethdev_link_update,
- .stats_get = bond_ethdev_stats_get,
- .stats_reset = bond_ethdev_stats_reset,
- .promiscuous_enable = bond_ethdev_promiscuous_enable,
- .promiscuous_disable = bond_ethdev_promiscuous_disable
-};
-
-static int
-bond_init(const char *name, const char *params)
-{
- struct bond_dev_private *internals;
- struct rte_kvargs *kvlist;
- uint8_t bonding_mode, socket_id;
- int arg_count, port_id;
-
- RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);
-
- kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
- if (kvlist == NULL)
- return -1;
-
- /* Parse link bonding mode */
- if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
- if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
- &bond_ethdev_parse_slave_mode_kvarg, &bonding_mode) != 0) {
- RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n", name);
- return -1;
- }
- } else {
- RTE_LOG(ERR, EAL,
-			"Mode must be specified exactly once for bonded device %s\n",
- name);
- return -1;
- }
-
- /* Parse socket id to create bonding device on */
- arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
- if (arg_count == 1) {
- if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
- &bond_ethdev_parse_socket_id_kvarg, &socket_id) != 0) {
- RTE_LOG(ERR, EAL,
- "Invalid socket Id specified for bonded device %s\n",
- name);
- return -1;
- }
- } else if (arg_count > 1) {
- RTE_LOG(ERR, EAL,
- "Socket Id can be specified only once for bonded device %s\n",
- name);
- return -1;
- } else {
- socket_id = rte_socket_id();
- }
-
- /* Create link bonding eth device */
- port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
- if (port_id < 0) {
- RTE_LOG(ERR, EAL,
-			"Failed to create bonded device %s in mode %u on socket %u.\n",
- name, bonding_mode, socket_id);
- return -1;
- }
- internals = rte_eth_devices[port_id].data->dev_private;
- internals->kvlist = kvlist;
-
- RTE_LOG(INFO, EAL,
- "Create bonded device %s on port %d in mode %u on socket %u.\n",
- name, port_id, bonding_mode, socket_id);
- return 0;
-}
-
-/* This part resolves the slave port ids after all the other pdevs and vdevs
- * have been allocated */
-static int
-bond_ethdev_configure(struct rte_eth_dev *dev)
-{
- char *name = dev->data->name;
- struct bond_dev_private *internals = dev->data->dev_private;
- struct rte_kvargs *kvlist = internals->kvlist;
- int arg_count, port_id = dev - rte_eth_devices;
-
- /*
- * if no kvlist, it means that this bonded device has been created
- * through the bonding api.
- */
- if (!kvlist)
- return 0;
-
- /* Parse MAC address for bonded device */
- arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
- if (arg_count == 1) {
- struct ether_addr bond_mac;
-
- if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
- &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
- RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
- name);
- return -1;
- }
-
- /* Set MAC address */
- if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set mac address on bonded device %s\n",
- name);
- return -1;
- }
- } else if (arg_count > 1) {
- RTE_LOG(ERR, EAL,
- "MAC address can be specified only once for bonded device %s\n",
- name);
- return -1;
- }
-
- /* Parse/set balance mode transmit policy */
- arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
- if (arg_count == 1) {
- uint8_t xmit_policy;
-
- if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
- &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
- 0) {
- RTE_LOG(INFO, EAL,
- "Invalid xmit policy specified for bonded device %s\n",
- name);
- return -1;
- }
-
-		/* Set balance mode transmit policy */
- if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set balance xmit policy on bonded device %s\n",
- name);
- return -1;
- }
- } else if (arg_count > 1) {
- RTE_LOG(ERR, EAL,
- "Transmit policy can be specified only once for bonded device"
- " %s\n", name);
- return -1;
- }
-
- /* Parse/add slave ports to bonded device */
- if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
- struct bond_ethdev_slave_ports slave_ports;
- unsigned i;
-
- memset(&slave_ports, 0, sizeof(slave_ports));
-
- if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
- &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to parse slave ports for bonded device %s\n",
- name);
- return -1;
- }
-
- for (i = 0; i < slave_ports.slave_count; i++) {
- if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to add port %d as slave to bonded device %s\n",
- slave_ports.slaves[i], name);
- }
- }
-
- } else {
- RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
- return -1;
- }
-
- /* Parse/set primary slave port id*/
- arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
- if (arg_count == 1) {
- uint8_t primary_slave_port_id;
-
- if (rte_kvargs_process(kvlist,
- PMD_BOND_PRIMARY_SLAVE_KVARG,
- &bond_ethdev_parse_primary_slave_port_id_kvarg,
- &primary_slave_port_id) < 0) {
- RTE_LOG(INFO, EAL,
- "Invalid primary slave port id specified for bonded device"
- " %s\n", name);
- return -1;
- }
-
-		/* Set primary slave port id */
- if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
- != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set primary slave port %d on bonded device %s\n",
- primary_slave_port_id, name);
- return -1;
- }
- } else if (arg_count > 1) {
- RTE_LOG(INFO, EAL,
- "Primary slave can be specified only once for bonded device"
- " %s\n", name);
- return -1;
- }
-
- /* Parse link status monitor polling interval */
- arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
- if (arg_count == 1) {
- uint32_t lsc_poll_interval_ms;
-
- if (rte_kvargs_process(kvlist,
- PMD_BOND_LSC_POLL_PERIOD_KVARG,
- &bond_ethdev_parse_time_ms_kvarg,
- &lsc_poll_interval_ms) < 0) {
- RTE_LOG(INFO, EAL,
- "Invalid lsc polling interval value specified for bonded"
- " device %s\n", name);
- return -1;
- }
-
- if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
- != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set lsc monitor polling interval (%u ms) on"
- " bonded device %s\n", lsc_poll_interval_ms, name);
- return -1;
- }
- } else if (arg_count > 1) {
- RTE_LOG(INFO, EAL,
- "LSC polling interval can be specified only once for bonded"
- " device %s\n", name);
- return -1;
- }
-
- /* Parse link up interrupt propagation delay */
- arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
- if (arg_count == 1) {
- uint32_t link_up_delay_ms;
-
- if (rte_kvargs_process(kvlist,
- PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
- &bond_ethdev_parse_time_ms_kvarg,
- &link_up_delay_ms) < 0) {
- RTE_LOG(INFO, EAL,
- "Invalid link up propagation delay value specified for"
- " bonded device %s\n", name);
- return -1;
- }
-
-		/* Set link up propagation delay */
- if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
- != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set link up propagation delay (%u ms) on bonded"
- " device %s\n", link_up_delay_ms, name);
- return -1;
- }
- } else if (arg_count > 1) {
- RTE_LOG(INFO, EAL,
- "Link up propagation delay can be specified only once for"
- " bonded device %s\n", name);
- return -1;
- }
-
- /* Parse link down interrupt propagation delay */
- arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
- if (arg_count == 1) {
- uint32_t link_down_delay_ms;
-
- if (rte_kvargs_process(kvlist,
- PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
- &bond_ethdev_parse_time_ms_kvarg,
- &link_down_delay_ms) < 0) {
- RTE_LOG(INFO, EAL,
- "Invalid link down propagation delay value specified for"
- " bonded device %s\n", name);
- return -1;
- }
-
-		/* Set link down propagation delay */
- if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
- != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set link down propagation delay (%u ms) on"
- " bonded device %s\n", link_down_delay_ms, name);
- return -1;
- }
- } else if (arg_count > 1) {
- RTE_LOG(INFO, EAL,
- "Link down propagation delay can be specified only once for"
- " bonded device %s\n", name);
- return -1;
- }
-
- return 0;
-}
-
-static struct rte_driver bond_drv = {
- .name = "eth_bond",
- .type = PMD_VDEV,
- .init = bond_init,
-};
-
-PMD_REGISTER_DRIVER(bond_drv);
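
bond_init() and bond_ethdev_configure() together implement the --vdev entry point, so a bonded port can be assembled entirely from EAL arguments, e.g. --vdev 'eth_bond0,mode=2,slave=...,xmit_policy=l34' (key strings assumed from the kvargs parsed above). Each kvarg also has a programmatic twin; a sketch using only calls that appear in this file (the slave port ids are placeholders):

    #include <stdint.h>
    #include <rte_eth_bond.h>
    #include <rte_lcore.h>

    static int make_balance_bond(uint8_t slave0, uint8_t slave1)
    {
        int port = rte_eth_bond_create("eth_bond0", BONDING_MODE_BALANCE,
                rte_socket_id());

        if (port < 0)
            return port;
        rte_eth_bond_slave_add(port, slave0);
        rte_eth_bond_slave_add(port, slave1);
        return rte_eth_bond_xmit_policy_set(port, BALANCE_XMIT_POLICY_LAYER34);
    }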
diff --git a/src/dpdk_lib18/librte_pmd_e1000/Makefile b/src/dpdk_lib18/librte_pmd_e1000/Makefile
deleted file mode 100755
index 14bc4a24..00000000
--- a/src/dpdk_lib18/librte_pmd_e1000/Makefile
+++ /dev/null
@@ -1,95 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_pmd_e1000.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-ifeq ($(CC), icc)
-#
-# CFLAGS for icc
-#
-CFLAGS_BASE_DRIVER = -wd177 -wd181 -wd188 -wd869 -wd2259
-else
-#
-# CFLAGS for gcc
-#
-CFLAGS_BASE_DRIVER = -Wno-uninitialized -Wno-unused-parameter
-CFLAGS_BASE_DRIVER += -Wno-unused-variable
-endif
-
-#
-# Add extra flags for base driver files (also known as shared code)
-# to disable warnings in them
-#
-BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(RTE_SDK)/lib/librte_pmd_e1000/e1000/*.c)))
-$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
-
-VPATH += $(RTE_SDK)/lib/librte_pmd_e1000/e1000
-
-#
-# all source are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_80003es2lan.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82540.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82541.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82542.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82543.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82571.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82575.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_i210.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_api.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_ich8lan.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mac.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_manage.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mbx.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_nvm.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_osdep.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_phy.c
-SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_vf.c
-SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_ethdev.c
-SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_rxtx.c
-SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_pf.c
-SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_ethdev.c
-SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_rxtx.c
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_net lib/librte_malloc
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_e1000/igb_ethdev.c b/src/dpdk_lib18/librte_pmd_e1000/igb_ethdev.c
deleted file mode 100755
index 0cebf985..00000000
--- a/src/dpdk_lib18/librte_pmd_e1000/igb_ethdev.c
+++ /dev/null
@@ -1,3164 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <sys/queue.h>
-#include <stdio.h>
-#include <errno.h>
-#include <stdint.h>
-#include <stdarg.h>
-
-#include <rte_common.h>
-#include <rte_interrupts.h>
-#include <rte_byteorder.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_pci.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_atomic.h>
-#include <rte_malloc.h>
-#include <rte_dev.h>
-
-#include "e1000_logs.h"
-#include "e1000/e1000_api.h"
-#include "e1000_ethdev.h"
-
-/*
- * Default values for port configuration
- */
-#define IGB_DEFAULT_RX_FREE_THRESH 32
-#define IGB_DEFAULT_RX_PTHRESH 8
-#define IGB_DEFAULT_RX_HTHRESH 8
-#define IGB_DEFAULT_RX_WTHRESH 0
-
-#define IGB_DEFAULT_TX_PTHRESH 32
-#define IGB_DEFAULT_TX_HTHRESH 0
-#define IGB_DEFAULT_TX_WTHRESH 0
-
-/* Bit shift and mask */
-#define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
-#define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
-#define IGB_8_BIT_WIDTH CHAR_BIT
-#define IGB_8_BIT_MASK UINT8_MAX
-
-static int eth_igb_configure(struct rte_eth_dev *dev);
-static int eth_igb_start(struct rte_eth_dev *dev);
-static void eth_igb_stop(struct rte_eth_dev *dev);
-static void eth_igb_close(struct rte_eth_dev *dev);
-static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
-static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
-static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
-static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
-static int eth_igb_link_update(struct rte_eth_dev *dev,
- int wait_to_complete);
-static void eth_igb_stats_get(struct rte_eth_dev *dev,
- struct rte_eth_stats *rte_stats);
-static void eth_igb_stats_reset(struct rte_eth_dev *dev);
-static void eth_igb_infos_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
-static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
-static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
- struct rte_eth_fc_conf *fc_conf);
-static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
- struct rte_eth_fc_conf *fc_conf);
-static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
-static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
-static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
-static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
-static int igb_hardware_init(struct e1000_hw *hw);
-static void igb_hw_control_acquire(struct e1000_hw *hw);
-static void igb_hw_control_release(struct e1000_hw *hw);
-static void igb_init_manageability(struct e1000_hw *hw);
-static void igb_release_manageability(struct e1000_hw *hw);
-
-static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-
-static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
- uint16_t vlan_id, int on);
-static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
-static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-
-static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
-static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
-static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
-static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
-static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
-static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);
-
-static int eth_igb_led_on(struct rte_eth_dev *dev);
-static int eth_igb_led_off(struct rte_eth_dev *dev);
-
-static void igb_intr_disable(struct e1000_hw *hw);
-static int igb_get_rx_buffer_size(struct e1000_hw *hw);
-static void eth_igb_rar_set(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
- uint32_t index, uint32_t pool);
-static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
-
-static void igbvf_intr_disable(struct e1000_hw *hw);
-static int igbvf_dev_configure(struct rte_eth_dev *dev);
-static int igbvf_dev_start(struct rte_eth_dev *dev);
-static void igbvf_dev_stop(struct rte_eth_dev *dev);
-static void igbvf_dev_close(struct rte_eth_dev *dev);
-static int eth_igbvf_link_update(struct e1000_hw *hw);
-static void eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats);
-static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
-static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
- uint16_t vlan_id, int on);
-static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
-static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
-static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size);
-static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size);
-static int eth_igb_add_syn_filter(struct rte_eth_dev *dev,
- struct rte_syn_filter *filter, uint16_t rx_queue);
-static int eth_igb_remove_syn_filter(struct rte_eth_dev *dev);
-static int eth_igb_get_syn_filter(struct rte_eth_dev *dev,
- struct rte_syn_filter *filter, uint16_t *rx_queue);
-static int eth_igb_add_ethertype_filter(struct rte_eth_dev *dev,
- uint16_t index,
- struct rte_ethertype_filter *filter, uint16_t rx_queue);
-static int eth_igb_remove_ethertype_filter(struct rte_eth_dev *dev,
- uint16_t index);
-static int eth_igb_get_ethertype_filter(struct rte_eth_dev *dev,
- uint16_t index,
- struct rte_ethertype_filter *filter, uint16_t *rx_queue);
-static int eth_igb_add_2tuple_filter(struct rte_eth_dev *dev,
- uint16_t index,
- struct rte_2tuple_filter *filter, uint16_t rx_queue);
-static int eth_igb_remove_2tuple_filter(struct rte_eth_dev *dev,
- uint16_t index);
-static int eth_igb_get_2tuple_filter(struct rte_eth_dev *dev,
- uint16_t index,
- struct rte_2tuple_filter *filter, uint16_t *rx_queue);
-static int eth_igb_add_flex_filter(struct rte_eth_dev *dev,
- uint16_t index,
- struct rte_flex_filter *filter, uint16_t rx_queue);
-static int eth_igb_remove_flex_filter(struct rte_eth_dev *dev,
- uint16_t index);
-static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
- uint16_t index,
- struct rte_flex_filter *filter, uint16_t *rx_queue);
-static int eth_igb_add_5tuple_filter(struct rte_eth_dev *dev,
- uint16_t index,
- struct rte_5tuple_filter *filter, uint16_t rx_queue);
-static int eth_igb_remove_5tuple_filter(struct rte_eth_dev *dev,
- uint16_t index);
-static int eth_igb_get_5tuple_filter(struct rte_eth_dev *dev,
- uint16_t index,
- struct rte_5tuple_filter *filter, uint16_t *rx_queue);
-
-/*
- * Define VF stats macro for registers that are not "clear on read"
- */
-#define UPDATE_VF_STAT(reg, last, cur) \
-{ \
- u32 latest = E1000_READ_REG(hw, reg); \
- cur += latest - last; \
- last = latest; \
-}
-
-
-#define IGB_FC_PAUSE_TIME 0x0680
-#define IGB_LINK_UPDATE_CHECK_TIMEOUT  10  /* 1s (10 * 100ms) */
-#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
-
-
-#define IGBVF_PMD_NAME "rte_igbvf_pmd" /* PMD name */
-
-static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
-
-/*
- * The set of PCI devices this driver supports
- */
-static struct rte_pci_id pci_id_igb_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{.device_id = 0},
-};
-
-/*
- * The set of PCI devices this driver supports (for 82576 & I350 VF)
- */
-static struct rte_pci_id pci_id_igbvf_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{.device_id = 0},
-};
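
Both ID tables are generated with an X-macro pattern: rte_pci_dev_ids.h
expands the RTE_PCI_DEV_ID_DECL_IGB / RTE_PCI_DEV_ID_DECL_IGBVF macro once
per supported device, so the device list is maintained in a single header.
A self-contained sketch of the pattern, with a made-up two-entry list
standing in for the real header (the IDs are arbitrary examples):

    #include <stdint.h>
    #include <stdio.h>

    struct pci_id { uint16_t vendor_id, device_id; };

    /* Stand-in for rte_pci_dev_ids.h: one DECL invocation per device. */
    #define MY_DEV_LIST \
        MY_DEV_DECL(0x8086, 0x10C9) \
        MY_DEV_DECL(0x8086, 0x1521)

    #define MY_DEV_DECL(vend, dev) { (vend), (dev) },
    static const struct pci_id my_id_map[] = {
        MY_DEV_LIST
        { 0, 0 }, /* sentinel, like {.device_id = 0} above */
    };
    #undef MY_DEV_DECL

    int main(void)
    {
        const struct pci_id *p;

        for (p = my_id_map; p->device_id != 0; p++)
            printf("%04x:%04x\n", p->vendor_id, p->device_id);
        return 0;
    }
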
-
-static struct eth_dev_ops eth_igb_ops = {
- .dev_configure = eth_igb_configure,
- .dev_start = eth_igb_start,
- .dev_stop = eth_igb_stop,
- .dev_close = eth_igb_close,
- .promiscuous_enable = eth_igb_promiscuous_enable,
- .promiscuous_disable = eth_igb_promiscuous_disable,
- .allmulticast_enable = eth_igb_allmulticast_enable,
- .allmulticast_disable = eth_igb_allmulticast_disable,
- .link_update = eth_igb_link_update,
- .stats_get = eth_igb_stats_get,
- .stats_reset = eth_igb_stats_reset,
- .dev_infos_get = eth_igb_infos_get,
- .mtu_set = eth_igb_mtu_set,
- .vlan_filter_set = eth_igb_vlan_filter_set,
- .vlan_tpid_set = eth_igb_vlan_tpid_set,
- .vlan_offload_set = eth_igb_vlan_offload_set,
- .rx_queue_setup = eth_igb_rx_queue_setup,
- .rx_queue_release = eth_igb_rx_queue_release,
- .rx_queue_count = eth_igb_rx_queue_count,
- .rx_descriptor_done = eth_igb_rx_descriptor_done,
- .tx_queue_setup = eth_igb_tx_queue_setup,
- .tx_queue_release = eth_igb_tx_queue_release,
- .dev_led_on = eth_igb_led_on,
- .dev_led_off = eth_igb_led_off,
- .flow_ctrl_get = eth_igb_flow_ctrl_get,
- .flow_ctrl_set = eth_igb_flow_ctrl_set,
- .mac_addr_add = eth_igb_rar_set,
- .mac_addr_remove = eth_igb_rar_clear,
- .reta_update = eth_igb_rss_reta_update,
- .reta_query = eth_igb_rss_reta_query,
- .rss_hash_update = eth_igb_rss_hash_update,
- .rss_hash_conf_get = eth_igb_rss_hash_conf_get,
- .add_syn_filter = eth_igb_add_syn_filter,
- .remove_syn_filter = eth_igb_remove_syn_filter,
- .get_syn_filter = eth_igb_get_syn_filter,
- .add_ethertype_filter = eth_igb_add_ethertype_filter,
- .remove_ethertype_filter = eth_igb_remove_ethertype_filter,
- .get_ethertype_filter = eth_igb_get_ethertype_filter,
- .add_2tuple_filter = eth_igb_add_2tuple_filter,
- .remove_2tuple_filter = eth_igb_remove_2tuple_filter,
- .get_2tuple_filter = eth_igb_get_2tuple_filter,
- .add_flex_filter = eth_igb_add_flex_filter,
- .remove_flex_filter = eth_igb_remove_flex_filter,
- .get_flex_filter = eth_igb_get_flex_filter,
- .add_5tuple_filter = eth_igb_add_5tuple_filter,
- .remove_5tuple_filter = eth_igb_remove_5tuple_filter,
- .get_5tuple_filter = eth_igb_get_5tuple_filter,
-};
-
-/*
- * dev_ops for virtual function, bare necessities for basic vf
- * operation have been implemented
- */
-static struct eth_dev_ops igbvf_eth_dev_ops = {
- .dev_configure = igbvf_dev_configure,
- .dev_start = igbvf_dev_start,
- .dev_stop = igbvf_dev_stop,
- .dev_close = igbvf_dev_close,
- .link_update = eth_igb_link_update,
- .stats_get = eth_igbvf_stats_get,
- .stats_reset = eth_igbvf_stats_reset,
- .vlan_filter_set = igbvf_vlan_filter_set,
- .dev_infos_get = eth_igbvf_infos_get,
- .rx_queue_setup = eth_igb_rx_queue_setup,
- .rx_queue_release = eth_igb_rx_queue_release,
- .tx_queue_setup = eth_igb_tx_queue_setup,
- .tx_queue_release = eth_igb_tx_queue_release,
-};
-
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- *   Pointer to the structure rte_eth_dev to read from.
- * @param link
- *   Pointer to the buffer where the read link status is saved.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- *   Pointer to the structure rte_eth_dev to write to.
- * @param link
- *   Pointer to the buffer holding the link status to be written.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
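
Both helpers treat the 8-byte struct rte_eth_link as a single 64-bit word so
that readers never observe a torn speed/duplex/status combination;
rte_atomic64_cmpset is simply the 64-bit atomic primitive used here. A rough
equivalent in portable C11 atomics, assuming an illustrative link descriptor
that likewise packs into 8 bytes (not the rte_eth_link definition itself):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <string.h>

    /* Illustrative 8-byte link descriptor: speed/duplex/status in 64 bits. */
    struct link_info { uint32_t speed; uint16_t duplex; uint16_t status; };

    /* Publish a new link state as one atomic 64-bit store. */
    static void link_write(_Atomic uint64_t *slot, const struct link_info *src)
    {
        uint64_t raw;

        memcpy(&raw, src, sizeof(raw));
        atomic_store(slot, raw);
    }

    /* Snapshot the current link state as one atomic 64-bit load. */
    static void link_read(_Atomic uint64_t *slot, struct link_info *dst)
    {
        uint64_t raw = atomic_load(slot);

        memcpy(dst, &raw, sizeof(raw));
    }

    int main(void)
    {
        _Atomic uint64_t slot = 0;
        struct link_info in = { 1000, 1, 1 }, out;

        link_write(&slot, &in);
        link_read(&slot, &out);
        return out.speed == 1000 ? 0 : 1;
    }
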
-
-static inline void
-igb_intr_enable(struct rte_eth_dev *dev)
-{
- struct e1000_interrupt *intr =
- E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
- E1000_WRITE_FLUSH(hw);
-}
-
-static void
-igb_intr_disable(struct e1000_hw *hw)
-{
- E1000_WRITE_REG(hw, E1000_IMC, ~0);
- E1000_WRITE_FLUSH(hw);
-}
-
-static inline int32_t
-igb_pf_reset_hw(struct e1000_hw *hw)
-{
- uint32_t ctrl_ext;
- int32_t status;
-
- status = e1000_reset_hw(hw);
-
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- /* Set PF Reset Done bit so PF/VF Mail Ops can work */
- ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
- E1000_WRITE_FLUSH(hw);
-
- return status;
-}
-
-static void
-igb_identify_hardware(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- hw->vendor_id = dev->pci_dev->id.vendor_id;
- hw->device_id = dev->pci_dev->id.device_id;
- hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
- hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
-
- e1000_set_mac_type(hw);
-
- /* need to check if it is a vf device below */
-}
-
-static int
-igb_reset_swfw_lock(struct e1000_hw *hw)
-{
- int ret_val;
-
- /*
- * Do mac ops initialization manually here, since we will need
- * some function pointers set by this call.
- */
- ret_val = e1000_init_mac_params(hw);
- if (ret_val)
- return ret_val;
-
- /*
- * SMBI lock should not fail in this early stage. If this is the case,
- * it is due to an improper exit of the application.
- * So force the release of the faulty lock.
- */
- if (e1000_get_hw_semaphore_generic(hw) < 0) {
- PMD_DRV_LOG(DEBUG, "SMBI lock released");
- }
- e1000_put_hw_semaphore_generic(hw);
-
- if (hw->mac.ops.acquire_swfw_sync != NULL) {
- uint16_t mask;
-
- /*
- * Phy lock should not fail in this early stage. If this is the case,
- * it is due to an improper exit of the application.
- * So force the release of the faulty lock.
- */
- mask = E1000_SWFW_PHY0_SM << hw->bus.func;
- if (hw->bus.func > E1000_FUNC_1)
- mask <<= 2;
- if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
- PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
- hw->bus.func);
- }
- hw->mac.ops.release_swfw_sync(hw, mask);
-
- /*
- * This one is more tricky since it is common to all ports; but
- * swfw_sync retries last long enough (1s) to be almost sure that if
- * the lock cannot be taken, it is due to an improper lock of the
- * semaphore.
- */
- mask = E1000_SWFW_EEP_SM;
- if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
- PMD_DRV_LOG(DEBUG, "SWFW common locks released");
- }
- hw->mac.ops.release_swfw_sync(hw, mask);
- }
-
- return E1000_SUCCESS;
-}
-
-static int
-eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
- struct rte_eth_dev *eth_dev)
-{
- int error = 0;
- struct rte_pci_device *pci_dev;
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- struct e1000_vfta * shadow_vfta =
- E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
- uint32_t ctrl_ext;
-
- pci_dev = eth_dev->pci_dev;
- eth_dev->dev_ops = &eth_igb_ops;
- eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
- eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
-
- /* for secondary processes, we don't initialise any further as primary
- * has already done this work. Only check we don't need a different
- * RX function */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY){
- if (eth_dev->data->scattered_rx)
- eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
- return 0;
- }
-
- hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
-
- igb_identify_hardware(eth_dev);
- if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
- error = -EIO;
- goto err_late;
- }
-
- e1000_get_bus_info(hw);
-
- /* Reset any pending lock */
- if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
- error = -EIO;
- goto err_late;
- }
-
- /* Finish initialization */
- if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
- error = -EIO;
- goto err_late;
- }
-
- hw->mac.autoneg = 1;
- hw->phy.autoneg_wait_to_complete = 0;
- hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
-
- /* Copper options */
- if (hw->phy.media_type == e1000_media_type_copper) {
- hw->phy.mdix = 0; /* AUTO_ALL_MODES */
- hw->phy.disable_polarity_correction = 0;
- hw->phy.ms_type = e1000_ms_hw_default;
- }
-
- /*
- * Start from a known state; this is important for reading the NVM
- * and MAC address afterwards.
- */
- igb_pf_reset_hw(hw);
-
- /* Make sure we have a good EEPROM before we read from it */
- if (e1000_validate_nvm_checksum(hw) < 0) {
- /*
- * Some PCI-E parts fail the first check due to
- * the link being in a sleep state. Call it again;
- * if it fails a second time, it's a real issue.
- */
- if (e1000_validate_nvm_checksum(hw) < 0) {
- PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
- error = -EIO;
- goto err_late;
- }
- }
-
- /* Read the permanent MAC address out of the EEPROM */
- if (e1000_read_mac_addr(hw) != 0) {
- PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
- error = -EIO;
- goto err_late;
- }
-
- /* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("e1000",
- ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
- if (eth_dev->data->mac_addrs == NULL) {
- PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
- "store MAC addresses",
- ETHER_ADDR_LEN * hw->mac.rar_entry_count);
- error = -ENOMEM;
- goto err_late;
- }
-
- /* Copy the permanent MAC address */
- ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);
-
- /* initialize the vfta */
- memset(shadow_vfta, 0, sizeof(*shadow_vfta));
-
- /* Now initialize the hardware */
- if (igb_hardware_init(hw) != 0) {
- PMD_INIT_LOG(ERR, "Hardware initialization failed");
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
- error = -ENODEV;
- goto err_late;
- }
- hw->mac.get_link_status = 1;
-
- /* Indicate SOL/IDER usage */
- if (e1000_check_reset_block(hw) < 0) {
- PMD_INIT_LOG(ERR, "PHY reset is blocked due to"
- "SOL/IDER session");
- }
-
- /* initialize PF if max_vfs not zero */
- igb_pf_host_init(eth_dev);
-
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- /* Set PF Reset Done bit so PF/VF Mail Ops can work */
- ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
- E1000_WRITE_FLUSH(hw);
-
- PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
- eth_dev->data->port_id, pci_dev->id.vendor_id,
- pci_dev->id.device_id);
-
- rte_intr_callback_register(&(pci_dev->intr_handle),
- eth_igb_interrupt_handler, (void *)eth_dev);
-
- /* enable uio intr after callback register */
- rte_intr_enable(&(pci_dev->intr_handle));
-
- /* enable support intr */
- igb_intr_enable(eth_dev);
-
- return 0;
-
-err_late:
- igb_hw_control_release(hw);
-
- return (error);
-}
-
-/*
- * Virtual Function device init
- */
-static int
-eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
- struct rte_eth_dev *eth_dev)
-{
- struct rte_pci_device *pci_dev;
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- int diag;
-
- PMD_INIT_FUNC_TRACE();
-
- eth_dev->dev_ops = &igbvf_eth_dev_ops;
- eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
- eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
-
- /* for secondary processes, we don't initialise any further as primary
- * has already done this work. Only check we don't need a different
- * RX function */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY){
- if (eth_dev->data->scattered_rx)
- eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
- return 0;
- }
-
- pci_dev = eth_dev->pci_dev;
-
- hw->device_id = pci_dev->id.device_id;
- hw->vendor_id = pci_dev->id.vendor_id;
- hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
-
- /* Initialize the shared code (base driver) */
- diag = e1000_setup_init_funcs(hw, TRUE);
- if (diag != 0) {
- PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
- diag);
- return -EIO;
- }
-
- /* init_mailbox_params */
- hw->mbx.ops.init_params(hw);
-
- /* Disable the interrupts for VF */
- igbvf_intr_disable(hw);
-
- diag = hw->mac.ops.reset_hw(hw);
-
- /* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
- hw->mac.rar_entry_count, 0);
- if (eth_dev->data->mac_addrs == NULL) {
- PMD_INIT_LOG(ERR,
- "Failed to allocate %d bytes needed to store MAC "
- "addresses",
- ETHER_ADDR_LEN * hw->mac.rar_entry_count);
- return -ENOMEM;
- }
-
- /* Copy the permanent MAC address */
- ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
- &eth_dev->data->mac_addrs[0]);
-
- PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
- "mac.type=%s",
- eth_dev->data->port_id, pci_dev->id.vendor_id,
- pci_dev->id.device_id, "igb_mac_82576_vf");
-
- return 0;
-}
-
-static struct eth_driver rte_igb_pmd = {
- {
- .name = "rte_igb_pmd",
- .id_table = pci_id_igb_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- },
- .eth_dev_init = eth_igb_dev_init,
- .dev_private_size = sizeof(struct e1000_adapter),
-};
-
-/*
- * virtual function driver struct
- */
-static struct eth_driver rte_igbvf_pmd = {
- {
- .name = "rte_igbvf_pmd",
- .id_table = pci_id_igbvf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- },
- .eth_dev_init = eth_igbvf_dev_init,
- .dev_private_size = sizeof(struct e1000_adapter),
-};
-
-static int
-rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
- rte_eth_driver_register(&rte_igb_pmd);
- return 0;
-}
-
-static void
-igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- /* RCTL: enable VLAN filter since VMDq always use VLAN filter */
- uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
- rctl |= E1000_RCTL_VFE;
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-}
-
-/*
- * VF Driver initialization routine.
- * Invoked once at EAL init time.
- * Registers itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
- */
-static int
-rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-
- rte_eth_driver_register(&rte_igbvf_pmd);
- return (0);
-}
-
-static int
-eth_igb_configure(struct rte_eth_dev *dev)
-{
- struct e1000_interrupt *intr =
- E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-
- PMD_INIT_FUNC_TRACE();
- intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
- PMD_INIT_FUNC_TRACE();
-
- return (0);
-}
-
-static int
-eth_igb_start(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int ret, i, mask;
- uint32_t ctrl_ext;
-
- PMD_INIT_FUNC_TRACE();
-
- /* Power up the phy. Needed to make the link go Up */
- e1000_power_up_phy(hw);
-
- /*
- * Packet Buffer Allocation (PBA)
- * Writing PBA sets the receive portion of the buffer;
- * the remainder is used for the transmit buffer.
- */
- if (hw->mac.type == e1000_82575) {
- uint32_t pba;
-
- pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
- E1000_WRITE_REG(hw, E1000_PBA, pba);
- }
-
- /* Put the address into the Receive Address Array */
- e1000_rar_set(hw, hw->mac.addr, 0);
-
- /* Initialize the hardware */
- if (igb_hardware_init(hw)) {
- PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
- return (-EIO);
- }
-
- E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
-
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- /* Set PF Reset Done bit so PF/VF Mail Ops can work */
- ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
- E1000_WRITE_FLUSH(hw);
-
- /* configure PF module if SRIOV enabled */
- igb_pf_host_configure(dev);
-
- /* Configure for OS presence */
- igb_init_manageability(hw);
-
- eth_igb_tx_init(dev);
-
- /* This can fail when allocating mbufs for descriptor rings */
- ret = eth_igb_rx_init(dev);
- if (ret) {
- PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
- igb_dev_clear_queues(dev);
- return ret;
- }
-
- e1000_clear_hw_cntrs_base_generic(hw);
-
- /*
- * VLAN Offload Settings
- */
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
- ETH_VLAN_EXTEND_MASK;
- eth_igb_vlan_offload_set(dev, mask);
-
- if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
- /* Enable VLAN filter since VMDq always use VLAN filter */
- igb_vmdq_vlan_hw_filter_enable(dev);
- }
-
- /*
- * Configure the Interrupt Moderation register (EITR) with the maximum
- * possible value (0xFFFF) to minimize "System Partial Write" issued by
- * spurious [DMA] memory updates of RX and TX ring descriptors.
- *
- * With an EITR granularity of 2 microseconds in the 82576, only 7-8
- * spurious memory updates per second should be expected:
- * ((65535 * 2) / (1000 * 1000) ~= 0.131 second between updates).
- *
- * Because interrupts are not used at all, the MSI-X is not activated
- * and interrupt moderation is controlled by EITR[0].
- *
- * Note that having [almost] disabled memory updates of RX and TX ring
- * descriptors through the Interrupt Moderation mechanism, memory
- * updates of ring descriptors are now moderated by the configurable
- * value of Write-Back Threshold registers.
- */
- if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
- (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
- (hw->mac.type == e1000_i211)) {
- uint32_t ivar;
-
- /* Enable all RX & TX queues in the IVAR registers */
- ivar = (uint32_t) ((E1000_IVAR_VALID << 16) | E1000_IVAR_VALID);
- for (i = 0; i < 8; i++)
- E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, ivar);
-
- /* Configure EITR with the maximum possible value (0xFFFF) */
- E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
- }
-
- /* Setup link speed and duplex */
- switch (dev->data->dev_conf.link_speed) {
- case ETH_LINK_SPEED_AUTONEG:
- if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
- hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
- else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
- hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
- else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
- hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
- else
- goto error_invalid_config;
- break;
- case ETH_LINK_SPEED_10:
- if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
- hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
- else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
- hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
- else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
- hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
- else
- goto error_invalid_config;
- break;
- case ETH_LINK_SPEED_100:
- if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
- hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
- else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
- hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
- else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
- hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
- else
- goto error_invalid_config;
- break;
- case ETH_LINK_SPEED_1000:
- if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
- (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
- hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
- else
- goto error_invalid_config;
- break;
- case ETH_LINK_SPEED_10000:
- default:
- goto error_invalid_config;
- }
- e1000_setup_link(hw);
-
- /* check if lsc interrupt feature is enabled */
- if (dev->data->dev_conf.intr_conf.lsc != 0)
- ret = eth_igb_lsc_interrupt_setup(dev);
-
- /* resume enabled intr since hw reset */
- igb_intr_enable(dev);
-
- PMD_INIT_LOG(DEBUG, "<<");
-
- return (0);
-
-error_invalid_config:
- PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
- dev->data->dev_conf.link_speed,
- dev->data->dev_conf.link_duplex, dev->data->port_id);
- igb_dev_clear_queues(dev);
- return (-EINVAL);
-}
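
The EITR comment in eth_igb_start compresses a small calculation worth
spelling out: at the 82576's 2 us granularity, the maximum EITR value of
0xFFFF yields the longest moderation interval, hence the "7-8 updates per
second" estimate. A tiny check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        /* EITR granularity on the 82576 is 2 us per unit; 0xFFFF is the
         * largest programmable value, i.e. the longest interval. */
        double interval_s = (65535.0 * 2.0) / (1000.0 * 1000.0);

        printf("interval ~= %.3f s, ~%.1f write-backs/s\n",
               interval_s, 1.0 / interval_s); /* ~0.131 s, ~7.6 per second */
        return 0;
    }
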
-
-/*********************************************************************
- *
- * This routine disables all traffic on the adapter by issuing a
- * global reset on the MAC.
- *
- **********************************************************************/
-static void
-eth_igb_stop(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_eth_link link;
-
- igb_intr_disable(hw);
- igb_pf_reset_hw(hw);
- E1000_WRITE_REG(hw, E1000_WUC, 0);
-
- /* Set bit for Go Link disconnect */
- if (hw->mac.type >= e1000_82580) {
- uint32_t phpm_reg;
-
- phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
- phpm_reg |= E1000_82580_PM_GO_LINKD;
- E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
- }
-
- /* Power down the phy. Needed to make the link go Down */
- e1000_power_down_phy(hw);
-
- igb_dev_clear_queues(dev);
-
- /* clear the recorded link status */
- memset(&link, 0, sizeof(link));
- rte_igb_dev_atomic_write_link_status(dev, &link);
-}
-
-static void
-eth_igb_close(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_eth_link link;
-
- eth_igb_stop(dev);
- e1000_phy_hw_reset(hw);
- igb_release_manageability(hw);
- igb_hw_control_release(hw);
-
- /* Clear bit for Go Link disconnect */
- if (hw->mac.type >= e1000_82580) {
- uint32_t phpm_reg;
-
- phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
- phpm_reg &= ~E1000_82580_PM_GO_LINKD;
- E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
- }
-
- igb_dev_clear_queues(dev);
-
- memset(&link, 0, sizeof(link));
- rte_igb_dev_atomic_write_link_status(dev, &link);
-}
-
-static int
-igb_get_rx_buffer_size(struct e1000_hw *hw)
-{
- uint32_t rx_buf_size;
- if (hw->mac.type == e1000_82576) {
- rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
- } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
- /* PBS needs to be translated according to a lookup table */
- rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
- rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
- rx_buf_size = (rx_buf_size << 10);
- } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
- rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
- } else {
- rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
- }
-
- return rx_buf_size;
-}
-
-/*********************************************************************
- *
- * Initialize the hardware
- *
- **********************************************************************/
-static int
-igb_hardware_init(struct e1000_hw *hw)
-{
- uint32_t rx_buf_size;
- int diag;
-
- /* Let the firmware know the OS is in control */
- igb_hw_control_acquire(hw);
-
- /*
- * These parameters control the automatic generation (Tx) and
- * response (Rx) to Ethernet PAUSE frames.
- * - High water mark should allow for at least two standard size (1518)
- * frames to be received after sending an XOFF.
- * - Low water mark works best when it is very near the high water mark.
- * This allows the receiver to restart by sending XON when it has
- * drained a bit. Here we use an arbitrary value of 1500 which will
- * restart after one full frame is pulled from the buffer. There
- * could be several smaller frames in the buffer and if so they will
- * not trigger the XON until their total number reduces the buffer
- * by 1500.
- * - The pause time is fairly large at 1000 x 512ns = 512 usec.
- */
- rx_buf_size = igb_get_rx_buffer_size(hw);
-
- hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
- hw->fc.low_water = hw->fc.high_water - 1500;
- hw->fc.pause_time = IGB_FC_PAUSE_TIME;
- hw->fc.send_xon = 1;
-
- /* Set Flow control, use the tunable location if sane */
- if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
- hw->fc.requested_mode = igb_fc_setting;
- else
- hw->fc.requested_mode = e1000_fc_none;
-
- /* Issue a global reset */
- igb_pf_reset_hw(hw);
- E1000_WRITE_REG(hw, E1000_WUC, 0);
-
- diag = e1000_init_hw(hw);
- if (diag < 0)
- return (diag);
-
- E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
- e1000_get_phy_info(hw);
- e1000_check_for_link(hw);
-
- return (0);
-}
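
igb_hardware_init derives the pause watermarks from the Rx packet buffer
size reported by the NIC, as described in the comment block above. With a
hypothetical 82576 reporting 64 KB in RXPBS, the numbers work out as below
(MY_ETHER_MAX_LEN stands in for ETHER_MAX_LEN, the standard 1518-byte
maximum frame):

    #include <stdint.h>
    #include <stdio.h>

    #define MY_ETHER_MAX_LEN 1518u /* standard max frame, as ETHER_MAX_LEN */

    int main(void)
    {
        /* Hypothetical 82576 reading: RXPBS low 16 bits report 64 (KB). */
        uint32_t rxpbs_kb    = 64;
        uint32_t rx_buf_size = rxpbs_kb << 10;             /* KB -> bytes */
        uint32_t high_water  = rx_buf_size - 2 * MY_ETHER_MAX_LEN;
        uint32_t low_water   = high_water - 1500;          /* ~1 frame below */

        /* prints: buf=65536 high=62500 low=61000 */
        printf("buf=%u high=%u low=%u\n", rx_buf_size, high_water, low_water);
        return 0;
    }
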
-
-/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
-static void
-eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct e1000_hw_stats *stats =
- E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
- int pause_frames;
-
- if(hw->phy.media_type == e1000_media_type_copper ||
- (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
- stats->symerrs +=
- E1000_READ_REG(hw,E1000_SYMERRS);
- stats->sec += E1000_READ_REG(hw, E1000_SEC);
- }
-
- stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
- stats->mpc += E1000_READ_REG(hw, E1000_MPC);
- stats->scc += E1000_READ_REG(hw, E1000_SCC);
- stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
-
- stats->mcc += E1000_READ_REG(hw, E1000_MCC);
- stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
- stats->colc += E1000_READ_REG(hw, E1000_COLC);
- stats->dc += E1000_READ_REG(hw, E1000_DC);
- stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
- stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
- stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
- /*
- ** For watchdog management we need to know if we have been
- ** paused during the last interval, so capture that here.
- */
- pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
- stats->xoffrxc += pause_frames;
- stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
- stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
- stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
- stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
- stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
- stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
- stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
- stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
- stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
- stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
- stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
- stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
-
- /* For the 64-bit byte counters the low dword must be read first. */
- /* Both registers clear on the read of the high dword */
-
- stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
- stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
- stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
- stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
-
- stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
- stats->ruc += E1000_READ_REG(hw, E1000_RUC);
- stats->rfc += E1000_READ_REG(hw, E1000_RFC);
- stats->roc += E1000_READ_REG(hw, E1000_ROC);
- stats->rjc += E1000_READ_REG(hw, E1000_RJC);
-
- stats->tor += E1000_READ_REG(hw, E1000_TORH);
- stats->tot += E1000_READ_REG(hw, E1000_TOTH);
-
- stats->tpr += E1000_READ_REG(hw, E1000_TPR);
- stats->tpt += E1000_READ_REG(hw, E1000_TPT);
- stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
- stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
- stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
- stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
- stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
- stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
- stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
- stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
-
- /* Interrupt Counts */
-
- stats->iac += E1000_READ_REG(hw, E1000_IAC);
- stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
- stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
- stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
- stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
- stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
- stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
- stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
- stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
-
- /* Host to Card Statistics */
-
- stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
- stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
- stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
- stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
- stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
- stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
- stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
- stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
- stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
- stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
- stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
- stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
- stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
- stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
-
- stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
- stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
- stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
- stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
- stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
- stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
-
- if (rte_stats == NULL)
- return;
-
- /* Rx Errors */
- rte_stats->ibadcrc = stats->crcerrs;
- rte_stats->ibadlen = stats->rlec + stats->ruc + stats->roc;
- rte_stats->imissed = stats->mpc;
- rte_stats->ierrors = rte_stats->ibadcrc +
- rte_stats->ibadlen +
- rte_stats->imissed +
- stats->rxerrc + stats->algnerrc + stats->cexterr;
-
- /* Tx Errors */
- rte_stats->oerrors = stats->ecol + stats->latecol;
-
- /* XON/XOFF pause frames */
- rte_stats->tx_pause_xon = stats->xontxc;
- rte_stats->rx_pause_xon = stats->xonrxc;
- rte_stats->tx_pause_xoff = stats->xofftxc;
- rte_stats->rx_pause_xoff = stats->xoffrxc;
-
- rte_stats->ipackets = stats->gprc;
- rte_stats->opackets = stats->gptc;
- rte_stats->ibytes = stats->gorc;
- rte_stats->obytes = stats->gotc;
-}
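
One detail in eth_igb_stats_get worth calling out: for the 64-bit octet
counters the low dword must be read before the high dword, because the
high-dword read is what clears the register pair. A sketch of that pattern
with a stubbed register read (reg_read32 and the MY_GORC* names are
placeholders, not the e1000 API):

    #include <stdint.h>
    #include <stdio.h>

    enum { MY_GORCL, MY_GORCH };

    /* Stub standing in for E1000_READ_REG: returns canned values and
     * clears the pair on the high-dword read, as the hardware does. */
    static uint32_t regs[2] = { 0x12345678u, 0x00000002u };

    static uint32_t reg_read32(int reg)
    {
        uint32_t v = regs[reg];

        if (reg == MY_GORCH) /* high-dword read clears both */
            regs[MY_GORCL] = regs[MY_GORCH] = 0;
        return v;
    }

    static uint64_t read_good_octets(void)
    {
        uint64_t lo = reg_read32(MY_GORCL); /* low dword must come first */
        uint64_t hi = reg_read32(MY_GORCH); /* this read clears the pair */

        return (hi << 32) | lo;
    }

    int main(void)
    {
        printf("octets = 0x%llx\n",
               (unsigned long long)read_good_octets()); /* 0x212345678 */
        return 0;
    }
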
-
-static void
-eth_igb_stats_reset(struct rte_eth_dev *dev)
-{
- struct e1000_hw_stats *hw_stats =
- E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
-
- /* HW registers are cleared on read */
- eth_igb_stats_get(dev, NULL);
-
- /* Reset software totals */
- memset(hw_stats, 0, sizeof(*hw_stats));
-}
-
-static void
-eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
- E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
-
- /* Good Rx packets, include VF loopback */
- UPDATE_VF_STAT(E1000_VFGPRC,
- hw_stats->last_gprc, hw_stats->gprc);
-
- /* Good Rx octets, include VF loopback */
- UPDATE_VF_STAT(E1000_VFGORC,
- hw_stats->last_gorc, hw_stats->gorc);
-
- /* Good Tx packets, include VF loopback */
- UPDATE_VF_STAT(E1000_VFGPTC,
- hw_stats->last_gptc, hw_stats->gptc);
-
- /* Good Tx octets, include VF loopback */
- UPDATE_VF_STAT(E1000_VFGOTC,
- hw_stats->last_gotc, hw_stats->gotc);
-
- /* Rx Multicast packets */
- UPDATE_VF_STAT(E1000_VFMPRC,
- hw_stats->last_mprc, hw_stats->mprc);
-
- /* Good Rx loopback packets */
- UPDATE_VF_STAT(E1000_VFGPRLBC,
- hw_stats->last_gprlbc, hw_stats->gprlbc);
-
- /* Good Rx loopback octets */
- UPDATE_VF_STAT(E1000_VFGORLBC,
- hw_stats->last_gorlbc, hw_stats->gorlbc);
-
- /* Good Tx loopback packets */
- UPDATE_VF_STAT(E1000_VFGPTLBC,
- hw_stats->last_gptlbc, hw_stats->gptlbc);
-
- /* Good Tx loopback octets */
- UPDATE_VF_STAT(E1000_VFGOTLBC,
- hw_stats->last_gotlbc, hw_stats->gotlbc);
-
- if (rte_stats == NULL)
- return;
-
- memset(rte_stats, 0, sizeof(*rte_stats));
- rte_stats->ipackets = hw_stats->gprc;
- rte_stats->ibytes = hw_stats->gorc;
- rte_stats->opackets = hw_stats->gptc;
- rte_stats->obytes = hw_stats->gotc;
- rte_stats->imcasts = hw_stats->mprc;
- rte_stats->ilbpackets = hw_stats->gprlbc;
- rte_stats->ilbbytes = hw_stats->gorlbc;
- rte_stats->olbpackets = hw_stats->gptlbc;
- rte_stats->olbbytes = hw_stats->gotlbc;
-
-}
-
-static void
-eth_igbvf_stats_reset(struct rte_eth_dev *dev)
-{
- struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
- E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
-
- /* Sync HW register to the last stats */
- eth_igbvf_stats_get(dev, NULL);
-
- /* reset HW current stats */
- memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
- offsetof(struct e1000_vf_stats, gprc));
-
-}
-
-static void
-eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
- dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
- dev_info->max_mac_addrs = hw->mac.rar_entry_count;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM;
-
- switch (hw->mac.type) {
- case e1000_82575:
- dev_info->max_rx_queues = 4;
- dev_info->max_tx_queues = 4;
- dev_info->max_vmdq_pools = 0;
- break;
-
- case e1000_82576:
- dev_info->max_rx_queues = 16;
- dev_info->max_tx_queues = 16;
- dev_info->max_vmdq_pools = ETH_8_POOLS;
- dev_info->vmdq_queue_num = 16;
- break;
-
- case e1000_82580:
- dev_info->max_rx_queues = 8;
- dev_info->max_tx_queues = 8;
- dev_info->max_vmdq_pools = ETH_8_POOLS;
- dev_info->vmdq_queue_num = 8;
- break;
-
- case e1000_i350:
- dev_info->max_rx_queues = 8;
- dev_info->max_tx_queues = 8;
- dev_info->max_vmdq_pools = ETH_8_POOLS;
- dev_info->vmdq_queue_num = 8;
- break;
-
- case e1000_i354:
- dev_info->max_rx_queues = 8;
- dev_info->max_tx_queues = 8;
- break;
-
- case e1000_i210:
- dev_info->max_rx_queues = 4;
- dev_info->max_tx_queues = 4;
- dev_info->max_vmdq_pools = 0;
- break;
-
- case e1000_i211:
- dev_info->max_rx_queues = 2;
- dev_info->max_tx_queues = 2;
- dev_info->max_vmdq_pools = 0;
- break;
-
- default:
- /* Should not happen */
- break;
- }
- dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
-
- dev_info->default_rxconf = (struct rte_eth_rxconf) {
- .rx_thresh = {
- .pthresh = IGB_DEFAULT_RX_PTHRESH,
- .hthresh = IGB_DEFAULT_RX_HTHRESH,
- .wthresh = IGB_DEFAULT_RX_WTHRESH,
- },
- .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
- .rx_drop_en = 0,
- };
-
- dev_info->default_txconf = (struct rte_eth_txconf) {
- .tx_thresh = {
- .pthresh = IGB_DEFAULT_TX_PTHRESH,
- .hthresh = IGB_DEFAULT_TX_HTHRESH,
- .wthresh = IGB_DEFAULT_TX_WTHRESH,
- },
- .txq_flags = 0,
- };
-}
-
-static void
-eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
- dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
- dev_info->max_mac_addrs = hw->mac.rar_entry_count;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM;
- switch (hw->mac.type) {
- case e1000_vfadapt:
- dev_info->max_rx_queues = 2;
- dev_info->max_tx_queues = 2;
- break;
- case e1000_vfadapt_i350:
- dev_info->max_rx_queues = 1;
- dev_info->max_tx_queues = 1;
- break;
- default:
- /* Should not happen */
- break;
- }
-
- dev_info->default_rxconf = (struct rte_eth_rxconf) {
- .rx_thresh = {
- .pthresh = IGB_DEFAULT_RX_PTHRESH,
- .hthresh = IGB_DEFAULT_RX_HTHRESH,
- .wthresh = IGB_DEFAULT_RX_WTHRESH,
- },
- .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
- .rx_drop_en = 0,
- };
-
- dev_info->default_txconf = (struct rte_eth_txconf) {
- .tx_thresh = {
- .pthresh = IGB_DEFAULT_TX_PTHRESH,
- .hthresh = IGB_DEFAULT_TX_HTHRESH,
- .wthresh = IGB_DEFAULT_TX_WTHRESH,
- },
- .txq_flags = 0,
- };
-}
-
-/* return 0 means link status changed, -1 means not changed */
-static int
-eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_eth_link link, old;
- int link_check, count;
-
- link_check = 0;
- hw->mac.get_link_status = 1;
-
- /* possible wait-to-complete in up to 1 second (10 x 100ms) */
- for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
- /* Read the real link status */
- switch (hw->phy.media_type) {
- case e1000_media_type_copper:
- /* Do the work to read phy */
- e1000_check_for_link(hw);
- link_check = !hw->mac.get_link_status;
- break;
-
- case e1000_media_type_fiber:
- e1000_check_for_link(hw);
- link_check = (E1000_READ_REG(hw, E1000_STATUS) &
- E1000_STATUS_LU);
- break;
-
- case e1000_media_type_internal_serdes:
- e1000_check_for_link(hw);
- link_check = hw->mac.serdes_has_link;
- break;
-
- /* VF device is type_unknown */
- case e1000_media_type_unknown:
- eth_igbvf_link_update(hw);
- link_check = !hw->mac.get_link_status;
- break;
-
- default:
- break;
- }
- if (link_check || wait_to_complete == 0)
- break;
- rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
- }
- memset(&link, 0, sizeof(link));
- rte_igb_dev_atomic_read_link_status(dev, &link);
- old = link;
-
- /* Now we check if a transition has happened */
- if (link_check) {
- hw->mac.ops.get_link_up_info(hw, &link.link_speed,
- &link.link_duplex);
- link.link_status = 1;
- } else if (!link_check) {
- link.link_speed = 0;
- link.link_duplex = 0;
- link.link_status = 0;
- }
- rte_igb_dev_atomic_write_link_status(dev, &link);
-
- /* not changed */
- if (old.link_status == link.link_status)
- return -1;
-
- /* changed */
- return 0;
-}
-
-/*
- * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means
- * that the driver is loaded.
- */
-static void
-igb_hw_control_acquire(struct e1000_hw *hw)
-{
- uint32_t ctrl_ext;
-
- /* Let firmware know the driver has taken over */
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
-}
-
-/*
- * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that the
- * driver is no longer loaded.
- */
-static void
-igb_hw_control_release(struct e1000_hw *hw)
-{
- uint32_t ctrl_ext;
-
- /* Let firmware take over control of h/w */
- ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- E1000_WRITE_REG(hw, E1000_CTRL_EXT,
- ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
-}
-
-/*
- * Bit of a misnomer, what this really means is
- * to enable OS management of the system... aka
- * to disable special hardware management features.
- */
-static void
-igb_init_manageability(struct e1000_hw *hw)
-{
- if (e1000_enable_mng_pass_thru(hw)) {
- uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
- uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
-
- /* disable hardware interception of ARP */
- manc &= ~(E1000_MANC_ARP_EN);
-
- /* enable receiving management packets to the host */
- manc |= E1000_MANC_EN_MNG2HOST;
- manc2h |= 1 << 5; /* Mng Port 623 */
- manc2h |= 1 << 6; /* Mng Port 664 */
- E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
- E1000_WRITE_REG(hw, E1000_MANC, manc);
- }
-}
-
-static void
-igb_release_manageability(struct e1000_hw *hw)
-{
- if (e1000_enable_mng_pass_thru(hw)) {
- uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
-
- manc |= E1000_MANC_ARP_EN;
- manc &= ~E1000_MANC_EN_MNG2HOST;
-
- E1000_WRITE_REG(hw, E1000_MANC, manc);
- }
-}
-
-static void
-eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t rctl;
-
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-}
-
-static void
-eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t rctl;
-
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- rctl &= (~E1000_RCTL_UPE);
- if (dev->data->all_multicast == 1)
- rctl |= E1000_RCTL_MPE;
- else
- rctl &= (~E1000_RCTL_MPE);
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-}
-
-static void
-eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t rctl;
-
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- rctl |= E1000_RCTL_MPE;
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-}
-
-static void
-eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t rctl;
-
- if (dev->data->promiscuous == 1)
- return; /* must remain in all_multicast mode */
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- rctl &= (~E1000_RCTL_MPE);
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-}
-
-static int
-eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct e1000_vfta * shadow_vfta =
- E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
- uint32_t vfta;
- uint32_t vid_idx;
- uint32_t vid_bit;
-
- vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
- E1000_VFTA_ENTRY_MASK);
- vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
- vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
- if (on)
- vfta |= vid_bit;
- else
- vfta &= ~vid_bit;
- E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
-
- /* update local VFTA copy */
- shadow_vfta->vfta[vid_idx] = vfta;
-
- return 0;
-}
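
The VFTA bookkeeping above maps the 4096 possible VLAN IDs onto 128 32-bit
words: bits 11..5 of the VLAN ID select the word and bits 4..0 select the
bit, which is what E1000_VFTA_ENTRY_SHIFT and the two masks encode. A worked
example (the literal 5, 0x7F and 0x1F are assumed to match the driver's
constants):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t vlan_id = 100; /* arbitrary example */
        uint32_t vid_idx = (vlan_id >> 5) & 0x7F;  /* which 32-bit word */
        uint32_t vid_bit = 1u << (vlan_id & 0x1F); /* which bit in it   */

        /* prints: vlan 100 -> vfta[3] bit 0x00000010 */
        printf("vlan %u -> vfta[%u] bit 0x%08x\n", vlan_id, vid_idx, vid_bit);
        return 0;
    }
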
-
-static void
-eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t reg = ETHER_TYPE_VLAN;
-
- reg |= (tpid << 16);
- E1000_WRITE_REG(hw, E1000_VET, reg);
-}
-
-static void
-igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t reg;
-
- /* Filter Table Disable */
- reg = E1000_READ_REG(hw, E1000_RCTL);
- reg &= ~E1000_RCTL_CFIEN;
- reg &= ~E1000_RCTL_VFE;
- E1000_WRITE_REG(hw, E1000_RCTL, reg);
-}
-
-static void
-igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct e1000_vfta * shadow_vfta =
- E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
- uint32_t reg;
- int i;
-
- /* Filter Table Enable, CFI not used for packet acceptance */
- reg = E1000_READ_REG(hw, E1000_RCTL);
- reg &= ~E1000_RCTL_CFIEN;
- reg |= E1000_RCTL_VFE;
- E1000_WRITE_REG(hw, E1000_RCTL, reg);
-
- /* restore VFTA table */
- for (i = 0; i < IGB_VFTA_SIZE; i++)
- E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
-}
-
-static void
-igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t reg;
-
- /* VLAN Mode Disable */
- reg = E1000_READ_REG(hw, E1000_CTRL);
- reg &= ~E1000_CTRL_VME;
- E1000_WRITE_REG(hw, E1000_CTRL, reg);
-}
-
-static void
-igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t reg;
-
- /* VLAN Mode Enable */
- reg = E1000_READ_REG(hw, E1000_CTRL);
- reg |= E1000_CTRL_VME;
- E1000_WRITE_REG(hw, E1000_CTRL, reg);
-}
-
-static void
-igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t reg;
-
- /* CTRL_EXT: Extended VLAN */
- reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
- reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
-
- /* Update maximum packet length */
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
- E1000_WRITE_REG(hw, E1000_RLPML,
- dev->data->dev_conf.rxmode.max_rx_pkt_len +
- VLAN_TAG_SIZE);
-}
-
-static void
-igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t reg;
-
- /* CTRL_EXT: Extended VLAN */
- reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
- reg |= E1000_CTRL_EXT_EXTEND_VLAN;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
-
- /* Update maximum packet length */
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
- E1000_WRITE_REG(hw, E1000_RLPML,
- dev->data->dev_conf.rxmode.max_rx_pkt_len +
- 2 * VLAN_TAG_SIZE);
-}
-
-static void
-eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
-{
- if(mask & ETH_VLAN_STRIP_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
- igb_vlan_hw_strip_enable(dev);
- else
- igb_vlan_hw_strip_disable(dev);
- }
-
- if(mask & ETH_VLAN_FILTER_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
- igb_vlan_hw_filter_enable(dev);
- else
- igb_vlan_hw_filter_disable(dev);
- }
-
- if(mask & ETH_VLAN_EXTEND_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
- igb_vlan_hw_extend_enable(dev);
- else
- igb_vlan_hw_extend_disable(dev);
- }
-}
-
-
-/**
- * It enables the interrupt mask and then enables the interrupt.
- *
- * @param dev
- * Pointer to struct rte_eth_dev.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
-{
- struct e1000_interrupt *intr =
- E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-
- intr->mask |= E1000_ICR_LSC;
-
- return 0;
-}
-
-/*
- * It reads ICR to get the interrupt causes, checks them, and sets bit
- * flags to update the link status.
- *
- * @param dev
- * Pointer to struct rte_eth_dev.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
-{
- uint32_t icr;
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct e1000_interrupt *intr =
- E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-
- igb_intr_disable(hw);
-
- /* read-on-clear nic registers here */
- icr = E1000_READ_REG(hw, E1000_ICR);
-
- intr->flags = 0;
- if (icr & E1000_ICR_LSC) {
- intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
- }
-
- if (icr & E1000_ICR_VMMB)
- intr->flags |= E1000_FLAG_MAILBOX;
-
- return 0;
-}
-
-/*
- * It executes link_update after an interrupt is known to be present.
- *
- * @param dev
- * Pointer to struct rte_eth_dev.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_interrupt_action(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct e1000_interrupt *intr =
- E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- uint32_t tctl, rctl;
- struct rte_eth_link link;
- int ret;
-
- if (intr->flags & E1000_FLAG_MAILBOX) {
- igb_pf_mbx_process(dev);
- intr->flags &= ~E1000_FLAG_MAILBOX;
- }
-
- igb_intr_enable(dev);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
-
- if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
- intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
-
- /* set get_link_status to check register later */
- hw->mac.get_link_status = 1;
- ret = eth_igb_link_update(dev, 0);
-
- /* check if link has changed */
- if (ret < 0)
- return 0;
-
- memset(&link, 0, sizeof(link));
- rte_igb_dev_atomic_read_link_status(dev, &link);
- if (link.link_status) {
- PMD_INIT_LOG(INFO,
- " Port %d: Link Up - speed %u Mbps - %s",
- dev->data->port_id,
- (unsigned)link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
- "full-duplex" : "half-duplex");
- } else {
- PMD_INIT_LOG(INFO, " Port %d: Link Down",
- dev->data->port_id);
- }
- PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
- dev->pci_dev->addr.domain,
- dev->pci_dev->addr.bus,
- dev->pci_dev->addr.devid,
- dev->pci_dev->addr.function);
- tctl = E1000_READ_REG(hw, E1000_TCTL);
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- if (link.link_status) {
- /* enable Tx/Rx */
- tctl |= E1000_TCTL_EN;
- rctl |= E1000_RCTL_EN;
- } else {
- /* disable Tx/Rx */
- tctl &= ~E1000_TCTL_EN;
- rctl &= ~E1000_RCTL_EN;
- }
- E1000_WRITE_REG(hw, E1000_TCTL, tctl);
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- E1000_WRITE_FLUSH(hw);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
- }
-
- return 0;
-}
-
-/**
- * Interrupt handler, which shall be registered first.
- *
- * @param handle
- * Pointer to interrupt handle.
- * @param param
- * The address of the parameter (struct rte_eth_dev *) registered before.
- *
- * @return
- * void
- */
-static void
-eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
-{
- struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-
- eth_igb_interrupt_get_status(dev);
- eth_igb_interrupt_action(dev);
-}
-
-static int
-eth_igb_led_on(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw;
-
- hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
-}
-
-static int
-eth_igb_led_off(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw;
-
- hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
-}
-
-static int
-eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
-{
- struct e1000_hw *hw;
- uint32_t ctrl;
- int tx_pause;
- int rx_pause;
-
- hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- fc_conf->pause_time = hw->fc.pause_time;
- fc_conf->high_water = hw->fc.high_water;
- fc_conf->low_water = hw->fc.low_water;
- fc_conf->send_xon = hw->fc.send_xon;
- fc_conf->autoneg = hw->mac.autoneg;
-
- /*
- * Return rx_pause and tx_pause status according to actual setting of
- * the TFCE and RFCE bits in the CTRL register.
- */
- ctrl = E1000_READ_REG(hw, E1000_CTRL);
- if (ctrl & E1000_CTRL_TFCE)
- tx_pause = 1;
- else
- tx_pause = 0;
-
- if (ctrl & E1000_CTRL_RFCE)
- rx_pause = 1;
- else
- rx_pause = 0;
-
- if (rx_pause && tx_pause)
- fc_conf->mode = RTE_FC_FULL;
- else if (rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
- else if (tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
- else
- fc_conf->mode = RTE_FC_NONE;
-
- return 0;
-}
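
eth_igb_flow_ctrl_get decodes the two CTRL pause bits into the four rte_eth
flow-control modes. The same decision can be written as a 4-entry lookup
keyed by the two bits; a small sketch (the enum names here are illustrative,
not the rte_eth definitions):

    #include <stdio.h>

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    /* Same decision as above, as a table keyed by a 2-bit index:
     * bit 0 = rx_pause (RFCE), bit 1 = tx_pause (TFCE). */
    static enum fc_mode decode_fc(int rx_pause, int tx_pause)
    {
        static const enum fc_mode map[4] =
            { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

        return map[(tx_pause ? 2 : 0) | (rx_pause ? 1 : 0)];
    }

    int main(void)
    {
        /* prints: 0 1 2 3 -> NONE, RX_PAUSE, TX_PAUSE, FULL */
        printf("%d %d %d %d\n", decode_fc(0, 0), decode_fc(1, 0),
               decode_fc(0, 1), decode_fc(1, 1));
        return 0;
    }
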
-
-static int
-eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
-{
- struct e1000_hw *hw;
- int err;
- enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
- e1000_fc_none,
- e1000_fc_rx_pause,
- e1000_fc_tx_pause,
- e1000_fc_full
- };
- uint32_t rx_buf_size;
- uint32_t max_high_water;
- uint32_t rctl;
-
- hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (fc_conf->autoneg != hw->mac.autoneg)
- return -ENOTSUP;
- rx_buf_size = igb_get_rx_buffer_size(hw);
- PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
-
- /* At least reserve one Ethernet frame for watermark */
- max_high_water = rx_buf_size - ETHER_MAX_LEN;
- if ((fc_conf->high_water > max_high_water) ||
- (fc_conf->high_water < fc_conf->low_water)) {
- PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
- PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
- return (-EINVAL);
- }
-
- hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
- hw->fc.pause_time = fc_conf->pause_time;
- hw->fc.high_water = fc_conf->high_water;
- hw->fc.low_water = fc_conf->low_water;
- hw->fc.send_xon = fc_conf->send_xon;
-
- err = e1000_setup_link_generic(hw);
- if (err == E1000_SUCCESS) {
-
- /* check if we want to forward MAC frames - driver doesn't have native
- * capability to do that, so we'll write the registers ourselves */
-
- rctl = E1000_READ_REG(hw, E1000_RCTL);
-
- /* set or clear MFLCN.PMCF bit depending on configuration */
- if (fc_conf->mac_ctrl_frame_fwd != 0)
- rctl |= E1000_RCTL_PMCF;
- else
- rctl &= ~E1000_RCTL_PMCF;
-
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- E1000_WRITE_FLUSH(hw);
-
- return 0;
- }
-
- PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
- return (-EIO);
-}
-
-#define E1000_RAH_POOLSEL_SHIFT (18)
-static void
-eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
- uint32_t index, __rte_unused uint32_t pool)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t rah;
-
- e1000_rar_set(hw, mac_addr->addr_bytes, index);
- rah = E1000_READ_REG(hw, E1000_RAH(index));
- rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
- E1000_WRITE_REG(hw, E1000_RAH(index), rah);
-}
-
-static void
-eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
-{
- uint8_t addr[ETHER_ADDR_LEN];
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- memset(addr, 0, sizeof(addr));
-
- e1000_rar_set(hw, addr, index);
-}
-
-/*
- * Virtual Function operations
- */
-static void
-igbvf_intr_disable(struct e1000_hw *hw)
-{
- PMD_INIT_FUNC_TRACE();
-
- /* Clear the interrupt mask to stop interrupts from being generated */
- E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
-
- E1000_WRITE_FLUSH(hw);
-}
-
-static void
-igbvf_stop_adapter(struct rte_eth_dev *dev)
-{
- u32 reg_val;
- u16 i;
- struct rte_eth_dev_info dev_info;
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- memset(&dev_info, 0, sizeof(dev_info));
- eth_igbvf_infos_get(dev, &dev_info);
-
- /* Clear the interrupt mask to stop interrupts from being generated */
- igbvf_intr_disable(hw);
-
- /* Clear any pending interrupts, flush previous writes */
- E1000_READ_REG(hw, E1000_EICR);
-
- /* Disable the transmit unit. Each queue must be disabled. */
- for (i = 0; i < dev_info.max_tx_queues; i++)
- E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
-
- /* Disable the receive unit by stopping each queue */
- for (i = 0; i < dev_info.max_rx_queues; i++) {
- reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
- reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
- E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
- while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
- ;
- }
-
- /* flush all queues disables */
- E1000_WRITE_FLUSH(hw);
- msec_delay(2);
-}
-
-static int eth_igbvf_link_update(struct e1000_hw *hw)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- struct e1000_mac_info *mac = &hw->mac;
- int ret_val = E1000_SUCCESS;
-
- PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
-
- /*
- * We only want to run this if a reset has been asserted.
- * In this case that could mean a link change, a device reset,
- * or a virtual function reset.
- */
-
- /* If we were hit with a reset or timeout drop the link */
- if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
- mac->get_link_status = TRUE;
-
- if (!mac->get_link_status)
- goto out;
-
- /* if link status is down no point in checking to see if pf is up */
- if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
- goto out;
-
- /* if we passed all the tests above then the link is up and we no
- * longer need to check for link */
- mac->get_link_status = FALSE;
-
-out:
- return ret_val;
-}
-
-
-static int
-igbvf_dev_configure(struct rte_eth_dev *dev)
-{
- struct rte_eth_conf* conf = &dev->data->dev_conf;
-
- PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
- dev->data->port_id);
-
- /*
- * The VF has no ability to enable/disable HW CRC stripping;
- * keep its behavior consistent with the host PF.
- */
-#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
- if (!conf->rxmode.hw_strip_crc) {
- PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 1;
- }
-#else
- if (conf->rxmode.hw_strip_crc) {
- PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 0;
- }
-#endif
-
- return 0;
-}
-
-static int
-igbvf_dev_start(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int ret;
-
- PMD_INIT_FUNC_TRACE();
-
- hw->mac.ops.reset_hw(hw);
-
- /* Set all vfta */
- igbvf_set_vfta_all(dev,1);
-
- eth_igbvf_tx_init(dev);
-
- /* This can fail when allocating mbufs for descriptor rings */
- ret = eth_igbvf_rx_init(dev);
- if (ret) {
- PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
- igb_dev_clear_queues(dev);
- return ret;
- }
-
- return 0;
-}
-
-static void
-igbvf_dev_stop(struct rte_eth_dev *dev)
-{
- PMD_INIT_FUNC_TRACE();
-
- igbvf_stop_adapter(dev);
-
- /*
- * Clear what we set, but we still keep shadow_vfta to
- * restore after device starts
- */
- igbvf_set_vfta_all(dev,0);
-
- igb_dev_clear_queues(dev);
-}
-
-static void
-igbvf_dev_close(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- PMD_INIT_FUNC_TRACE();
-
- e1000_reset_hw(hw);
-
- igbvf_dev_stop(dev);
-}
-
-static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
-{
- struct e1000_mbx_info *mbx = &hw->mbx;
- uint32_t msgbuf[2];
-
- /* After setting a VLAN, VLAN stripping is also enabled by the igb driver */
- msgbuf[0] = E1000_VF_SET_VLAN;
- msgbuf[1] = vid;
- /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
- if (on)
- msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
-
- return (mbx->ops.write_posted(hw, msgbuf, 2, 0));
-}
-
-static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct e1000_vfta * shadow_vfta =
- E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
- int i = 0, j = 0, vfta = 0, mask = 1;
-
- for (i = 0; i < IGB_VFTA_SIZE; i++){
- vfta = shadow_vfta->vfta[i];
- if(vfta){
- mask = 1;
- for (j = 0; j < 32; j++){
- if(vfta & mask)
- igbvf_set_vfta(hw,
- (uint16_t)((i<<5)+j), on);
- mask<<=1;
- }
- }
- }
-
-}
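
/*
 * Editor's note: a minimal standalone sketch (not part of the driver) of
 * the VFTA bit math used by the functions above and below. A 12-bit VLAN
 * ID maps to word index (vid >> 5) and bit position (vid & 0x1F), and
 * the walk reconstructs the ID as (i << 5) + j. VFTA_WORDS = 128 is an
 * assumption (4096 VLAN IDs / 32 bits per word); no DPDK headers needed.
 */
#include <stdint.h>
#include <stdio.h>

#define VFTA_WORDS 128

int main(void)
{
	uint32_t vfta[VFTA_WORDS] = {0};
	uint16_t vid = 100;

	/* set: same indexing as igbvf_vlan_filter_set() below */
	vfta[(vid >> 5) & 0x7F] |= (uint32_t)1 << (vid & 0x1F);

	/* walk: same reconstruction as igbvf_set_vfta_all() above */
	for (int i = 0; i < VFTA_WORDS; i++)
		for (int j = 0; j < 32; j++)
			if (vfta[i] & ((uint32_t)1 << j))
				printf("vlan id %d is set\n", (i << 5) + j);
	return 0;
}
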
-
-static int
-igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
-{
- struct e1000_hw *hw =
- E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct e1000_vfta * shadow_vfta =
- E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
- uint32_t vid_idx = 0;
- uint32_t vid_bit = 0;
- int ret = 0;
-
- PMD_INIT_FUNC_TRACE();
-
- /*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/
- ret = igbvf_set_vfta(hw, vlan_id, !!on);
- if(ret){
- PMD_INIT_LOG(ERR, "Unable to set VF vlan");
- return ret;
- }
- vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
- vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
-
-	/* Save what we set and restore it after device reset */
- if (on)
- shadow_vfta->vfta[vid_idx] |= vid_bit;
- else
- shadow_vfta->vfta[vid_idx] &= ~vid_bit;
-
- return 0;
-}
-
-static int
-eth_igb_rss_reta_update(struct rte_eth_dev *dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
-{
- uint8_t i, j, mask;
- uint32_t reta, r;
- uint16_t idx, shift;
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
- PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
- "(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
- return -EINVAL;
- }
-
- for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
- mask = (uint8_t)((reta_conf[idx].mask >> shift) &
- IGB_4_BIT_MASK);
- if (!mask)
- continue;
- if (mask == IGB_4_BIT_MASK)
- r = 0;
- else
- r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
- for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
- if (mask & (0x1 << j))
- reta |= reta_conf[idx].reta[shift + j] <<
- (CHAR_BIT * j);
- else
- reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
- }
- E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
- }
-
- return 0;
-}
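
/*
 * Editor's note: a standalone sketch of the RETA byte packing performed
 * above, assuming IGB_4_BIT_WIDTH is 4 and IGB_8_BIT_MASK is 0xFF: four
 * one-byte queue indices share one 32-bit RETA register, with byte j at
 * bit offset CHAR_BIT * j. Illustration only, not the driver's code.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t entries[4] = {0, 1, 2, 3};
	uint32_t reta = 0;

	/* pack, as in the update path above */
	for (int j = 0; j < 4; j++)
		reta |= (uint32_t)entries[j] << (CHAR_BIT * j);
	printf("packed RETA word: 0x%08x\n", (unsigned)reta);	/* 0x03020100 */

	/* unpack, as in the query path below */
	for (int j = 0; j < 4; j++)
		printf("entry %d -> queue %u\n", j,
		       (unsigned)((reta >> (CHAR_BIT * j)) & 0xFFu));
	return 0;
}
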
-
-static int
-eth_igb_rss_reta_query(struct rte_eth_dev *dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
-{
- uint8_t i, j, mask;
- uint32_t reta;
- uint16_t idx, shift;
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
- PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
- "(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
- return -EINVAL;
- }
-
- for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
- mask = (uint8_t)((reta_conf[idx].mask >> shift) &
- IGB_4_BIT_MASK);
- if (!mask)
- continue;
- reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
- for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
- if (mask & (0x1 << j))
- reta_conf[idx].reta[shift + j] =
- ((reta >> (CHAR_BIT * j)) &
- IGB_8_BIT_MASK);
- }
- }
-
- return 0;
-}
-
-#define MAC_TYPE_FILTER_SUP(type) do {\
- if ((type) != e1000_82580 && (type) != e1000_i350 &&\
- (type) != e1000_82576)\
- return -ENOSYS;\
-} while (0)
-
-/*
- * add the syn filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * filter: pointer to the filter that will be added.
- * rx_queue: the queue id the filter is assigned to.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_add_syn_filter(struct rte_eth_dev *dev,
- struct rte_syn_filter *filter, uint16_t rx_queue)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t synqf, rfctl;
-
- MAC_TYPE_FILTER_SUP(hw->mac.type);
-
- if (rx_queue >= IGB_MAX_RX_QUEUE_NUM)
- return -EINVAL;
-
- synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
- if (synqf & E1000_SYN_FILTER_ENABLE)
- return -EINVAL;
-
- synqf = (uint32_t)(((rx_queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
- E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
-
- rfctl = E1000_READ_REG(hw, E1000_RFCTL);
- if (filter->hig_pri)
- rfctl |= E1000_RFCTL_SYNQFP;
- else
- rfctl &= ~E1000_RFCTL_SYNQFP;
-
- E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
- E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
- return 0;
-}
-
-/*
- * remove the syn filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_remove_syn_filter(struct rte_eth_dev *dev)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- MAC_TYPE_FILTER_SUP(hw->mac.type);
-
- E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
- return 0;
-}
-
-/*
- * get the syn filter's info
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * filter: pointer to the filter that is returned.
- * *rx_queue: pointer to the queue id the filter is assigned to.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_get_syn_filter(struct rte_eth_dev *dev,
- struct rte_syn_filter *filter, uint16_t *rx_queue)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t synqf, rfctl;
-
- MAC_TYPE_FILTER_SUP(hw->mac.type);
- synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
- if (synqf & E1000_SYN_FILTER_ENABLE) {
- rfctl = E1000_READ_REG(hw, E1000_RFCTL);
- filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
- *rx_queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
- E1000_SYN_FILTER_QUEUE_SHIFT);
- return 0;
- }
- return -ENOENT;
-}
-
-/*
- * add an ethertype filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- * filter: pointer to the filter that will be added.
- * rx_queue: the queue id the filter is assigned to.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_add_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
- struct rte_ethertype_filter *filter, uint16_t rx_queue)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t etqf;
-
- MAC_TYPE_FILTER_SUP(hw->mac.type);
-
- if (index >= E1000_MAX_ETQF_FILTERS || rx_queue >= IGB_MAX_RX_QUEUE_NUM)
- return -EINVAL;
-
- etqf = E1000_READ_REG(hw, E1000_ETQF(index));
- if (etqf & E1000_ETQF_FILTER_ENABLE)
- return -EINVAL; /* filter index is in use. */
- else
- etqf = 0;
-
- etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
- etqf |= (uint32_t)(filter->ethertype & E1000_ETQF_ETHERTYPE);
- etqf |= rx_queue << E1000_ETQF_QUEUE_SHIFT;
-
- if (filter->priority_en) {
- PMD_INIT_LOG(ERR, "vlan and priority (%d) is not supported"
- " in E1000.", filter->priority);
- return -EINVAL;
- }
-
- E1000_WRITE_REG(hw, E1000_ETQF(index), etqf);
- return 0;
-}
-
-/*
- * remove an ethertype filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_remove_ethertype_filter(struct rte_eth_dev *dev, uint16_t index)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- MAC_TYPE_FILTER_SUP(hw->mac.type);
-
- if (index >= E1000_MAX_ETQF_FILTERS)
- return -EINVAL;
-
- E1000_WRITE_REG(hw, E1000_ETQF(index), 0);
- return 0;
-}
-
-/*
- * get an ethertype filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- * filter: pointer to the filter that is returned.
- * *rx_queue: pointer to the queue id the filter is assigned to.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_get_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
- struct rte_ethertype_filter *filter, uint16_t *rx_queue)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t etqf;
-
- MAC_TYPE_FILTER_SUP(hw->mac.type);
-
- if (index >= E1000_MAX_ETQF_FILTERS)
- return -EINVAL;
-
- etqf = E1000_READ_REG(hw, E1000_ETQF(index));
- if (etqf & E1000_ETQF_FILTER_ENABLE) {
- filter->ethertype = etqf & E1000_ETQF_ETHERTYPE;
- filter->priority_en = 0;
- *rx_queue = (etqf & E1000_ETQF_QUEUE) >> E1000_ETQF_QUEUE_SHIFT;
- return 0;
- }
- return -ENOENT;
-}
-
-#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
- if ((type) != e1000_82580 && (type) != e1000_i350)\
- return -ENOSYS; \
-} while (0)
-
-/*
- * add a 2tuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- * filter: pointer to the filter that will be added.
- * rx_queue: the queue id the filter is assigned to.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_add_2tuple_filter(struct rte_eth_dev *dev, uint16_t index,
- struct rte_2tuple_filter *filter, uint16_t rx_queue)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t ttqf, imir = 0;
- uint32_t imir_ext = 0;
-
- MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
-
- if (index >= E1000_MAX_TTQF_FILTERS ||
- rx_queue >= IGB_MAX_RX_QUEUE_NUM ||
- filter->priority > E1000_2TUPLE_MAX_PRI)
- return -EINVAL; /* filter index is out of range. */
- if (filter->tcp_flags > TCP_FLAG_ALL)
- return -EINVAL; /* flags is invalid. */
-
- ttqf = E1000_READ_REG(hw, E1000_TTQF(index));
- if (ttqf & E1000_TTQF_QUEUE_ENABLE)
- return -EINVAL; /* filter index is in use. */
-
- imir = (uint32_t)(filter->dst_port & E1000_IMIR_DSTPORT);
- if (filter->dst_port_mask == 1) /* 1b means not compare. */
- imir |= E1000_IMIR_PORT_BP;
- else
- imir &= ~E1000_IMIR_PORT_BP;
-
- imir |= filter->priority << E1000_IMIR_PRIORITY_SHIFT;
-
- ttqf = 0;
- ttqf |= E1000_TTQF_QUEUE_ENABLE;
- ttqf |= (uint32_t)(rx_queue << E1000_TTQF_QUEUE_SHIFT);
- ttqf |= (uint32_t)(filter->protocol & E1000_TTQF_PROTOCOL_MASK);
- if (filter->protocol_mask == 1)
- ttqf |= E1000_TTQF_MASK_ENABLE;
- else
- ttqf &= ~E1000_TTQF_MASK_ENABLE;
-
- imir_ext |= E1000_IMIR_EXT_SIZE_BP;
- /* tcp flags bits setting. */
- if (filter->tcp_flags & TCP_FLAG_ALL) {
- if (filter->tcp_flags & TCP_UGR_FLAG)
- imir_ext |= E1000_IMIR_EXT_CTRL_UGR;
- if (filter->tcp_flags & TCP_ACK_FLAG)
- imir_ext |= E1000_IMIR_EXT_CTRL_ACK;
- if (filter->tcp_flags & TCP_PSH_FLAG)
- imir_ext |= E1000_IMIR_EXT_CTRL_PSH;
- if (filter->tcp_flags & TCP_RST_FLAG)
- imir_ext |= E1000_IMIR_EXT_CTRL_RST;
- if (filter->tcp_flags & TCP_SYN_FLAG)
- imir_ext |= E1000_IMIR_EXT_CTRL_SYN;
- if (filter->tcp_flags & TCP_FIN_FLAG)
- imir_ext |= E1000_IMIR_EXT_CTRL_FIN;
- imir_ext &= ~E1000_IMIR_EXT_CTRL_BP;
- } else
- imir_ext |= E1000_IMIR_EXT_CTRL_BP;
- E1000_WRITE_REG(hw, E1000_IMIR(index), imir);
- E1000_WRITE_REG(hw, E1000_TTQF(index), ttqf);
- E1000_WRITE_REG(hw, E1000_IMIREXT(index), imir_ext);
- return 0;
-}
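
/*
 * Editor's note: a hedged, table-driven alternative to the if-chain above
 * mapping TCP flag bits to IMIREXT control bits. All constants here are
 * illustrative placeholders, not the real e1000 register encoding.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum { TCP_FIN = 0x01, TCP_SYN = 0x02, TCP_RST = 0x04,
       TCP_PSH = 0x08, TCP_ACK = 0x10, TCP_UGR = 0x20 };
enum { CTRL_FIN = 0x01, CTRL_SYN = 0x02, CTRL_RST = 0x04,
       CTRL_PSH = 0x08, CTRL_ACK = 0x10, CTRL_UGR = 0x20 };

static const struct { uint32_t tcp_flag, ctrl_bit; } flag_map[] = {
	{ TCP_UGR, CTRL_UGR }, { TCP_ACK, CTRL_ACK }, { TCP_PSH, CTRL_PSH },
	{ TCP_RST, CTRL_RST }, { TCP_SYN, CTRL_SYN }, { TCP_FIN, CTRL_FIN },
};

int main(void)
{
	uint32_t tcp_flags = TCP_SYN | TCP_ACK;
	uint32_t imir_ext = 0;

	for (size_t i = 0; i < sizeof(flag_map) / sizeof(flag_map[0]); i++)
		if (tcp_flags & flag_map[i].tcp_flag)
			imir_ext |= flag_map[i].ctrl_bit;
	printf("imir_ext control bits: 0x%02x\n", (unsigned)imir_ext);	/* 0x12 */
	return 0;
}
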
-
-/*
- * remove a 2tuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_remove_2tuple_filter(struct rte_eth_dev *dev,
- uint16_t index)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
-
- if (index >= E1000_MAX_TTQF_FILTERS)
- return -EINVAL; /* filter index is out of range */
-
- E1000_WRITE_REG(hw, E1000_TTQF(index), 0);
- E1000_WRITE_REG(hw, E1000_IMIR(index), 0);
- E1000_WRITE_REG(hw, E1000_IMIREXT(index), 0);
- return 0;
-}
-
-/*
- * get a 2tuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- * filter: pointer to the filter that is returned.
- * *rx_queue: pointer to the queue id the filter is assigned to.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_get_2tuple_filter(struct rte_eth_dev *dev, uint16_t index,
- struct rte_2tuple_filter *filter, uint16_t *rx_queue)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t imir, ttqf, imir_ext;
-
- MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
-
- if (index >= E1000_MAX_TTQF_FILTERS)
- return -EINVAL; /* filter index is out of range. */
-
- ttqf = E1000_READ_REG(hw, E1000_TTQF(index));
- if (ttqf & E1000_TTQF_QUEUE_ENABLE) {
- imir = E1000_READ_REG(hw, E1000_IMIR(index));
- filter->protocol = ttqf & E1000_TTQF_PROTOCOL_MASK;
- filter->protocol_mask = (ttqf & E1000_TTQF_MASK_ENABLE) ? 1 : 0;
- *rx_queue = (ttqf & E1000_TTQF_RX_QUEUE_MASK) >>
- E1000_TTQF_QUEUE_SHIFT;
- filter->dst_port = (uint16_t)(imir & E1000_IMIR_DSTPORT);
- filter->dst_port_mask = (imir & E1000_IMIR_PORT_BP) ? 1 : 0;
- filter->priority = (imir & E1000_IMIR_PRIORITY) >>
- E1000_IMIR_PRIORITY_SHIFT;
-
- imir_ext = E1000_READ_REG(hw, E1000_IMIREXT(index));
- if (!(imir_ext & E1000_IMIR_EXT_CTRL_BP)) {
- if (imir_ext & E1000_IMIR_EXT_CTRL_UGR)
- filter->tcp_flags |= TCP_UGR_FLAG;
- if (imir_ext & E1000_IMIR_EXT_CTRL_ACK)
- filter->tcp_flags |= TCP_ACK_FLAG;
- if (imir_ext & E1000_IMIR_EXT_CTRL_PSH)
- filter->tcp_flags |= TCP_PSH_FLAG;
- if (imir_ext & E1000_IMIR_EXT_CTRL_RST)
- filter->tcp_flags |= TCP_RST_FLAG;
- if (imir_ext & E1000_IMIR_EXT_CTRL_SYN)
- filter->tcp_flags |= TCP_SYN_FLAG;
- if (imir_ext & E1000_IMIR_EXT_CTRL_FIN)
- filter->tcp_flags |= TCP_FIN_FLAG;
- } else
- filter->tcp_flags = 0;
- return 0;
- }
- return -ENOENT;
-}
-
-/*
- * add a flex filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- * filter: pointer to the filter that will be added.
- * rx_queue: the queue id the filter is assigned to.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_add_flex_filter(struct rte_eth_dev *dev, uint16_t index,
- struct rte_flex_filter *filter, uint16_t rx_queue)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t wufc, en_bits = 0;
- uint32_t queueing = 0;
- uint32_t reg_off = 0;
- uint8_t i, j = 0;
-
- MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
-
- if (index >= E1000_MAX_FLEXIBLE_FILTERS)
- return -EINVAL; /* filter index is out of range. */
-
- if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
- filter->len % 8 != 0 ||
- filter->priority > E1000_MAX_FLEX_FILTER_PRI)
- return -EINVAL;
-
- wufc = E1000_READ_REG(hw, E1000_WUFC);
- en_bits = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << index);
- if ((wufc & en_bits) == en_bits)
- return -EINVAL; /* the filter is in use. */
-
- E1000_WRITE_REG(hw, E1000_WUFC,
- wufc | E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << index));
-
- j = 0;
- if (index < E1000_MAX_FHFT)
- reg_off = E1000_FHFT(index);
- else
- reg_off = E1000_FHFT_EXT(index - E1000_MAX_FHFT);
-
- for (i = 0; i < 16; i++) {
- E1000_WRITE_REG(hw, reg_off + i*4*4, filter->dwords[j]);
- E1000_WRITE_REG(hw, reg_off + (i*4+1)*4, filter->dwords[++j]);
- E1000_WRITE_REG(hw, reg_off + (i*4+2)*4,
- (uint32_t)filter->mask[i]);
- ++j;
- }
- queueing |= filter->len |
- (rx_queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
- (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
- E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET, queueing);
- return 0;
-}
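
/*
 * Editor's note: a standalone sketch of the flex-filter table (FHFT)
 * layout written above. Each of the 16 rows is 16 bytes: two data dwords
 * (dwords[2i], dwords[2i+1]) at byte offsets 16*i and 16*i + 4, the mask
 * dword at 16*i + 8, and an unused slot at 16*i + 12. Inferred from the
 * register arithmetic above, not from a datasheet.
 */
#include <stdio.h>

int main(void)
{
	for (int i = 0; i < 16; i++) {
		unsigned row = (unsigned)i * 16;	/* i*4*4 in the code above */
		printf("row %2d: dwords %2d,%2d at +0x%02x,+0x%02x, mask[%2d] at +0x%02x\n",
		       i, 2 * i, 2 * i + 1, row, row + 4, i, row + 8);
	}
	return 0;
}
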
-
-/*
- * remove a flex filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_remove_flex_filter(struct rte_eth_dev *dev,
- uint16_t index)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t wufc, reg_off = 0;
- uint8_t i;
-
- MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
-
- if (index >= E1000_MAX_FLEXIBLE_FILTERS)
- return -EINVAL; /* filter index is out of range. */
-
- wufc = E1000_READ_REG(hw, E1000_WUFC);
- E1000_WRITE_REG(hw, E1000_WUFC, wufc & (~(E1000_WUFC_FLX0 << index)));
-
- if (index < E1000_MAX_FHFT)
- reg_off = E1000_FHFT(index);
- else
- reg_off = E1000_FHFT_EXT(index - E1000_MAX_FHFT);
-
- for (i = 0; i < 64; i++)
- E1000_WRITE_REG(hw, reg_off + i*4, 0);
- return 0;
-}
-
-/*
- * get a flex filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- * filter: pointer to the filter that is returned.
- * *rx_queue: pointer to the queue id the filter is assigned to.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_get_flex_filter(struct rte_eth_dev *dev, uint16_t index,
- struct rte_flex_filter *filter, uint16_t *rx_queue)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t wufc, queueing, wufc_en = 0;
- uint8_t i, j;
-
- MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
-
- if (index >= E1000_MAX_FLEXIBLE_FILTERS)
- return -EINVAL; /* filter index is out of range. */
-
- wufc = E1000_READ_REG(hw, E1000_WUFC);
- wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << index);
-
- if ((wufc & wufc_en) == wufc_en) {
- uint32_t reg_off = 0;
- j = 0;
- if (index < E1000_MAX_FHFT)
- reg_off = E1000_FHFT(index);
- else
- reg_off = E1000_FHFT_EXT(index - E1000_MAX_FHFT);
-
- for (i = 0; i < 16; i++, j = i * 2) {
- filter->dwords[j] =
- E1000_READ_REG(hw, reg_off + i*4*4);
- filter->dwords[j+1] =
- E1000_READ_REG(hw, reg_off + (i*4+1)*4);
- filter->mask[i] =
- E1000_READ_REG(hw, reg_off + (i*4+2)*4);
- }
- queueing = E1000_READ_REG(hw,
- reg_off + E1000_FHFT_QUEUEING_OFFSET);
- filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
- filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
- E1000_FHFT_QUEUEING_PRIO_SHIFT;
- *rx_queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
- E1000_FHFT_QUEUEING_QUEUE_SHIFT;
- return 0;
- }
- return -ENOENT;
-}
-
-/*
- * add a 5tuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- * filter: pointer to the filter that will be added.
- * rx_queue: the queue id the filter is assigned to.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
- struct rte_5tuple_filter *filter, uint16_t rx_queue)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t ftqf, spqf = 0;
- uint32_t imir = 0;
- uint32_t imir_ext = 0;
-
- if (hw->mac.type != e1000_82576)
- return -ENOSYS;
-
- if (index >= E1000_MAX_FTQF_FILTERS ||
- rx_queue >= IGB_MAX_RX_QUEUE_NUM_82576)
- return -EINVAL; /* filter index is out of range. */
-
- ftqf = E1000_READ_REG(hw, E1000_FTQF(index));
- if (ftqf & E1000_FTQF_QUEUE_ENABLE)
- return -EINVAL; /* filter index is in use. */
-
- ftqf = 0;
- ftqf |= filter->protocol & E1000_FTQF_PROTOCOL_MASK;
- if (filter->src_ip_mask == 1) /* 1b means not compare. */
- ftqf |= E1000_FTQF_SOURCE_ADDR_MASK;
- if (filter->dst_ip_mask == 1)
- ftqf |= E1000_FTQF_DEST_ADDR_MASK;
- if (filter->src_port_mask == 1)
- ftqf |= E1000_FTQF_SOURCE_PORT_MASK;
- if (filter->protocol_mask == 1)
- ftqf |= E1000_FTQF_PROTOCOL_COMP_MASK;
- ftqf |= (rx_queue << E1000_FTQF_QUEUE_SHIFT) & E1000_FTQF_QUEUE_MASK;
- ftqf |= E1000_FTQF_VF_MASK_EN;
- ftqf |= E1000_FTQF_QUEUE_ENABLE;
- E1000_WRITE_REG(hw, E1000_FTQF(index), ftqf);
- E1000_WRITE_REG(hw, E1000_DAQF(index), filter->dst_ip);
- E1000_WRITE_REG(hw, E1000_SAQF(index), filter->src_ip);
-
- spqf |= filter->src_port & E1000_SPQF_SRCPORT;
- E1000_WRITE_REG(hw, E1000_SPQF(index), spqf);
-
- imir |= (uint32_t)(filter->dst_port & E1000_IMIR_DSTPORT);
- if (filter->dst_port_mask == 1) /* 1b means not compare. */
- imir |= E1000_IMIR_PORT_BP;
- else
- imir &= ~E1000_IMIR_PORT_BP;
- imir |= filter->priority << E1000_IMIR_PRIORITY_SHIFT;
-
- imir_ext |= E1000_IMIR_EXT_SIZE_BP;
- /* tcp flags bits setting. */
- if (filter->tcp_flags & TCP_FLAG_ALL) {
- if (filter->tcp_flags & TCP_UGR_FLAG)
- imir_ext |= E1000_IMIR_EXT_CTRL_UGR;
- if (filter->tcp_flags & TCP_ACK_FLAG)
- imir_ext |= E1000_IMIR_EXT_CTRL_ACK;
- if (filter->tcp_flags & TCP_PSH_FLAG)
- imir_ext |= E1000_IMIR_EXT_CTRL_PSH;
- if (filter->tcp_flags & TCP_RST_FLAG)
- imir_ext |= E1000_IMIR_EXT_CTRL_RST;
- if (filter->tcp_flags & TCP_SYN_FLAG)
- imir_ext |= E1000_IMIR_EXT_CTRL_SYN;
- if (filter->tcp_flags & TCP_FIN_FLAG)
- imir_ext |= E1000_IMIR_EXT_CTRL_FIN;
- } else
- imir_ext |= E1000_IMIR_EXT_CTRL_BP;
- E1000_WRITE_REG(hw, E1000_IMIR(index), imir);
- E1000_WRITE_REG(hw, E1000_IMIREXT(index), imir_ext);
- return 0;
-}
-
-/*
- * remove a 5tuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_remove_5tuple_filter(struct rte_eth_dev *dev,
- uint16_t index)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (hw->mac.type != e1000_82576)
- return -ENOSYS;
-
- if (index >= E1000_MAX_FTQF_FILTERS)
- return -EINVAL; /* filter index is out of range. */
-
- E1000_WRITE_REG(hw, E1000_FTQF(index), 0);
- E1000_WRITE_REG(hw, E1000_DAQF(index), 0);
- E1000_WRITE_REG(hw, E1000_SAQF(index), 0);
- E1000_WRITE_REG(hw, E1000_SPQF(index), 0);
- E1000_WRITE_REG(hw, E1000_IMIR(index), 0);
- E1000_WRITE_REG(hw, E1000_IMIREXT(index), 0);
- return 0;
-}
-
-/*
- * get a 5tuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- * filter: pointer to the filter that is returned.
- * *rx_queue: pointer to the queue id the filter is assigned to.
- *
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-static int
-eth_igb_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
- struct rte_5tuple_filter *filter, uint16_t *rx_queue)
-{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t spqf, ftqf, imir, imir_ext;
-
- if (hw->mac.type != e1000_82576)
- return -ENOSYS;
-
- if (index >= E1000_MAX_FTQF_FILTERS)
- return -EINVAL; /* filter index is out of range. */
-
- ftqf = E1000_READ_REG(hw, E1000_FTQF(index));
- if (ftqf & E1000_FTQF_QUEUE_ENABLE) {
- filter->src_ip_mask =
- (ftqf & E1000_FTQF_SOURCE_ADDR_MASK) ? 1 : 0;
- filter->dst_ip_mask =
- (ftqf & E1000_FTQF_DEST_ADDR_MASK) ? 1 : 0;
- filter->src_port_mask =
- (ftqf & E1000_FTQF_SOURCE_PORT_MASK) ? 1 : 0;
- filter->protocol_mask =
- (ftqf & E1000_FTQF_PROTOCOL_COMP_MASK) ? 1 : 0;
- filter->protocol =
- (uint8_t)ftqf & E1000_FTQF_PROTOCOL_MASK;
- *rx_queue = (uint16_t)((ftqf & E1000_FTQF_QUEUE_MASK) >>
- E1000_FTQF_QUEUE_SHIFT);
-
- spqf = E1000_READ_REG(hw, E1000_SPQF(index));
- filter->src_port = spqf & E1000_SPQF_SRCPORT;
-
- filter->dst_ip = E1000_READ_REG(hw, E1000_DAQF(index));
- filter->src_ip = E1000_READ_REG(hw, E1000_SAQF(index));
-
- imir = E1000_READ_REG(hw, E1000_IMIR(index));
- filter->dst_port_mask = (imir & E1000_IMIR_PORT_BP) ? 1 : 0;
- filter->dst_port = (uint16_t)(imir & E1000_IMIR_DSTPORT);
- filter->priority = (imir & E1000_IMIR_PRIORITY) >>
- E1000_IMIR_PRIORITY_SHIFT;
-
- imir_ext = E1000_READ_REG(hw, E1000_IMIREXT(index));
- if (!(imir_ext & E1000_IMIR_EXT_CTRL_BP)) {
- if (imir_ext & E1000_IMIR_EXT_CTRL_UGR)
- filter->tcp_flags |= TCP_UGR_FLAG;
- if (imir_ext & E1000_IMIR_EXT_CTRL_ACK)
- filter->tcp_flags |= TCP_ACK_FLAG;
- if (imir_ext & E1000_IMIR_EXT_CTRL_PSH)
- filter->tcp_flags |= TCP_PSH_FLAG;
- if (imir_ext & E1000_IMIR_EXT_CTRL_RST)
- filter->tcp_flags |= TCP_RST_FLAG;
- if (imir_ext & E1000_IMIR_EXT_CTRL_SYN)
- filter->tcp_flags |= TCP_SYN_FLAG;
- if (imir_ext & E1000_IMIR_EXT_CTRL_FIN)
- filter->tcp_flags |= TCP_FIN_FLAG;
- } else
- filter->tcp_flags = 0;
- return 0;
- }
- return -ENOENT;
-}
-
-static int
-eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
-{
- uint32_t rctl;
- struct e1000_hw *hw;
- struct rte_eth_dev_info dev_info;
- uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
- VLAN_TAG_SIZE);
-
- hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-#ifdef RTE_LIBRTE_82571_SUPPORT
- /* XXX: not bigger than max_rx_pktlen */
- if (hw->mac.type == e1000_82571)
- return -ENOTSUP;
-#endif
- eth_igb_infos_get(dev, &dev_info);
-
- /* check that mtu is within the allowed range */
- if ((mtu < ETHER_MIN_MTU) ||
- (frame_size > dev_info.max_rx_pktlen))
- return -EINVAL;
-
-	/* refuse an mtu that requires scattered packet support when that
-	 * feature has not been enabled beforehand. */
- if (!dev->data->scattered_rx &&
- frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
- return -EINVAL;
-
- rctl = E1000_READ_REG(hw, E1000_RCTL);
-
- /* switch to jumbo mode if needed */
- if (frame_size > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
- rctl |= E1000_RCTL_LPE;
- } else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
- rctl &= ~E1000_RCTL_LPE;
- }
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-
- /* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
- E1000_WRITE_REG(hw, E1000_RLPML,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
-
- return 0;
-}
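
/*
 * Editor's note: a standalone sketch of the MTU -> frame-size arithmetic
 * above, using conventional values (14-byte header, 4-byte CRC, 4-byte
 * VLAN tag, 1518 for ETHER_MAX_LEN). These constants are assumptions for
 * illustration, not taken from the project's headers. Note that with the
 * VLAN allowance, even the standard 1500-byte MTU lands above 1518.
 */
#include <stdio.h>

int main(void)
{
	const unsigned hdr = 14, crc = 4, vlan = 4, ether_max_len = 1518;
	const unsigned mtus[] = { 1400, 1500, 9000 };

	for (int i = 0; i < 3; i++) {
		unsigned frame = mtus[i] + hdr + crc + vlan;
		printf("mtu %u -> frame %u -> jumbo (LPE) %s\n",
		       mtus[i], frame, frame > ether_max_len ? "on" : "off");
	}
	return 0;	/* 1400 -> 1422 -> off; 1500 -> 1522 -> on; 9000 -> 9022 -> on */
}
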
-
-static struct rte_driver pmd_igb_drv = {
- .type = PMD_PDEV,
- .init = rte_igb_pmd_init,
-};
-
-static struct rte_driver pmd_igbvf_drv = {
- .type = PMD_PDEV,
- .init = rte_igbvf_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(pmd_igb_drv);
-PMD_REGISTER_DRIVER(pmd_igbvf_drv);
diff --git a/src/dpdk_lib18/librte_pmd_enic/Makefile b/src/dpdk_lib18/librte_pmd_enic/Makefile
deleted file mode 100755
index a2a623f3..00000000
--- a/src/dpdk_lib18/librte_pmd_enic/Makefile
+++ /dev/null
@@ -1,67 +0,0 @@
-#
-# Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
-# Copyright 2007 Nuova Systems, Inc. All rights reserved.
-#
-# Copyright (c) 2014, Cisco Systems, Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_pmd_enic.a
-
-CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_enic/vnic/
-CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_enic/
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS) -Wno-strict-aliasing
-
-VPATH += $(RTE_SDK)/lib/librte_pmd_enic/src
-
-#
-# all source are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c
-SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c
-SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c
-SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c
-SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += vnic/vnic_cq.c
-SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += vnic/vnic_wq.c
-SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += vnic/vnic_dev.c
-SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += vnic/vnic_intr.c
-SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += vnic/vnic_rq.c
-SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += vnic/vnic_rss.c
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_net lib/librte_malloc
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_hash
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_i40e/Makefile b/src/dpdk_lib18/librte_pmd_i40e/Makefile
deleted file mode 100755
index 98e4bdf2..00000000
--- a/src/dpdk_lib18/librte_pmd_i40e/Makefile
+++ /dev/null
@@ -1,101 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_pmd_i40e.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-#
-# Add extra flags for base driver files (also known as shared code)
-# to disable warnings
-#
-ifeq ($(CC), icc)
-CFLAGS_BASE_DRIVER = -wd593
-else ifeq ($(CC), clang)
-CFLAGS_BASE_DRIVER += -Wno-sign-compare
-CFLAGS_BASE_DRIVER += -Wno-unused-value
-CFLAGS_BASE_DRIVER += -Wno-unused-parameter
-CFLAGS_BASE_DRIVER += -Wno-strict-aliasing
-CFLAGS_BASE_DRIVER += -Wno-format
-CFLAGS_BASE_DRIVER += -Wno-missing-field-initializers
-CFLAGS_BASE_DRIVER += -Wno-pointer-to-int-cast
-CFLAGS_BASE_DRIVER += -Wno-format-nonliteral
-else
-CFLAGS_BASE_DRIVER = -Wno-sign-compare
-CFLAGS_BASE_DRIVER += -Wno-unused-value
-CFLAGS_BASE_DRIVER += -Wno-unused-parameter
-CFLAGS_BASE_DRIVER += -Wno-strict-aliasing
-CFLAGS_BASE_DRIVER += -Wno-format
-CFLAGS_BASE_DRIVER += -Wno-missing-field-initializers
-CFLAGS_BASE_DRIVER += -Wno-pointer-to-int-cast
-CFLAGS_BASE_DRIVER += -Wno-format-nonliteral
-CFLAGS_BASE_DRIVER += -Wno-format-security
-
-ifeq ($(shell test $(GCC_MAJOR_VERSION) -ge 4 -a $(GCC_MINOR_VERSION) -ge 4 && echo 1), 1)
-CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
-endif
-
-CFLAGS_i40e_lan_hmc.o += -Wno-error
-endif
-OBJS_BASE_DRIVER=$(patsubst %.c,%.o,$(notdir $(wildcard $(RTE_SDK)/lib/librte_pmd_i40e/i40e/*.c)))
-$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
-
-VPATH += $(RTE_SDK)/lib/librte_pmd_i40e/i40e
-
-#
-# all source are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_adminq.c
-SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_common.c
-SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_diag.c
-SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_hmc.c
-SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_lan_hmc.c
-SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_nvm.c
-SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_dcb.c
-
-SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
-SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
-SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
-SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
-SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_net lib/librte_malloc
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_dcb.c b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_dcb.c
deleted file mode 100755
index d0670287..00000000
--- a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_dcb.c
+++ /dev/null
@@ -1,479 +0,0 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#include "i40e_adminq.h"
-#include "i40e_prototype.h"
-#include "i40e_dcb.h"
-
-/**
- * i40e_get_dcbx_status
- * @hw: pointer to the hw struct
- * @status: Embedded DCBX Engine Status
- *
- * Get the DCBX status from the Firmware
- **/
-enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
-{
- u32 reg;
-
- if (!status)
- return I40E_ERR_PARAM;
-
- reg = rd32(hw, I40E_PRTDCB_GENS);
- *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
- I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
-
- return I40E_SUCCESS;
-}
-
-/**
- * i40e_parse_ieee_etscfg_tlv
- * @tlv: IEEE 802.1Qaz ETS CFG TLV
- * @dcbcfg: Local store to update ETS CFG data
- *
- * Parses IEEE 802.1Qaz ETS CFG TLV
- **/
-static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
- struct i40e_dcbx_config *dcbcfg)
-{
- struct i40e_ieee_ets_config *etscfg;
- u8 *buf = tlv->tlvinfo;
- u16 offset = 0;
- u8 priority;
- int i;
-
- /* First Octet post subtype
- * --------------------------
- * |will-|CBS | Re- | Max |
- * |ing | |served| TCs |
- * --------------------------
- * |1bit | 1bit|3 bits|3bits|
- */
- etscfg = &dcbcfg->etscfg;
- etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
- I40E_IEEE_ETS_WILLING_SHIFT);
- etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
- I40E_IEEE_ETS_CBS_SHIFT);
- etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
- I40E_IEEE_ETS_MAXTC_SHIFT);
-
- /* Move offset to Priority Assignment Table */
- offset++;
-
- /* Priority Assignment Table (4 octets)
- * Octets:| 1 | 2 | 3 | 4 |
- * -----------------------------------------
- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
- * -----------------------------------------
- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
- * -----------------------------------------
- */
- for (i = 0; i < 4; i++) {
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
- I40E_IEEE_ETS_PRIO_1_SHIFT);
- etscfg->prioritytable[i * 2] = priority;
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
- I40E_IEEE_ETS_PRIO_0_SHIFT);
- etscfg->prioritytable[i * 2 + 1] = priority;
- offset++;
- }
-
- /* TC Bandwidth Table (8 octets)
- * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
- * ---------------------------------
- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
- * ---------------------------------
- */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
- etscfg->tcbwtable[i] = buf[offset++];
-
- /* TSA Assignment Table (8 octets)
- * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
- * ---------------------------------
- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
- * ---------------------------------
- */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
- etscfg->tsatable[i] = buf[offset++];
-}
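
/*
 * Editor's note: a standalone sketch of the priority-table nibble decode
 * above: each octet carries two priorities, one in bits 7:4 and one in
 * bits 3:0. The 0xF0/0x0F masks and shifts are assumed stand-ins for
 * I40E_IEEE_ETS_PRIO_1_MASK / I40E_IEEE_ETS_PRIO_0_MASK.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t buf[4] = { 0x01, 0x23, 0x45, 0x67 };
	uint8_t prio[8];

	for (int i = 0; i < 4; i++) {
		prio[i * 2]     = (buf[i] & 0xF0) >> 4;	/* PRIO_1: high nibble */
		prio[i * 2 + 1] =  buf[i] & 0x0F;	/* PRIO_0: low nibble */
	}
	for (int i = 0; i < 8; i++)
		printf("priority slot %d -> value %u\n", i, (unsigned)prio[i]);
	return 0;	/* prints 0..7 in order for this buffer */
}
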
-
-/**
- * i40e_parse_ieee_etsrec_tlv
- * @tlv: IEEE 802.1Qaz ETS REC TLV
- * @dcbcfg: Local store to update ETS REC data
- *
- * Parses IEEE 802.1Qaz ETS REC TLV
- **/
-static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
- struct i40e_dcbx_config *dcbcfg)
-{
- u8 *buf = tlv->tlvinfo;
- u16 offset = 0;
- u8 priority;
- int i;
-
- /* Move offset to priority table */
- offset++;
-
- /* Priority Assignment Table (4 octets)
- * Octets:| 1 | 2 | 3 | 4 |
- * -----------------------------------------
- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
- * -----------------------------------------
- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
- * -----------------------------------------
- */
- for (i = 0; i < 4; i++) {
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
- I40E_IEEE_ETS_PRIO_1_SHIFT);
- dcbcfg->etsrec.prioritytable[i*2] = priority;
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
- I40E_IEEE_ETS_PRIO_0_SHIFT);
- dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
- offset++;
- }
-
- /* TC Bandwidth Table (8 octets)
- * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
- * ---------------------------------
- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
- * ---------------------------------
- */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
- dcbcfg->etsrec.tcbwtable[i] = buf[offset++];
-
- /* TSA Assignment Table (8 octets)
- * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
- * ---------------------------------
- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
- * ---------------------------------
- */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
- dcbcfg->etsrec.tsatable[i] = buf[offset++];
-}
-
-/**
- * i40e_parse_ieee_pfccfg_tlv
- * @tlv: IEEE 802.1Qaz PFC CFG TLV
- * @dcbcfg: Local store to update PFC CFG data
- *
- * Parses IEEE 802.1Qaz PFC CFG TLV
- **/
-static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
- struct i40e_dcbx_config *dcbcfg)
-{
- u8 *buf = tlv->tlvinfo;
-
- /* ----------------------------------------
- * |will-|MBC | Re- | PFC | PFC Enable |
- * |ing | |served| cap | |
- * -----------------------------------------
- * |1bit | 1bit|2 bits|4bits| 1 octet |
- */
- dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
- I40E_IEEE_PFC_WILLING_SHIFT);
- dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
- I40E_IEEE_PFC_MBC_SHIFT);
- dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
- I40E_IEEE_PFC_CAP_SHIFT);
- dcbcfg->pfc.pfcenable = buf[1];
-}
-
-/**
- * i40e_parse_ieee_app_tlv
- * @tlv: IEEE 802.1Qaz APP TLV
- * @dcbcfg: Local store to update APP PRIO data
- *
- * Parses IEEE 802.1Qaz APP PRIO TLV
- **/
-static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
- struct i40e_dcbx_config *dcbcfg)
-{
- u16 typelength;
- u16 offset = 0;
- u16 length;
- int i = 0;
- u8 *buf;
-
- typelength = I40E_NTOHS(tlv->typelength);
- length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
- buf = tlv->tlvinfo;
-
- /* The App priority table starts 5 octets after TLV header */
- length -= (sizeof(tlv->ouisubtype) + 1);
-
- /* Move offset to App Priority Table */
- offset++;
-
- /* Application Priority Table (3 octets)
- * Octets:| 1 | 2 | 3 |
- * -----------------------------------------
- * |Priority|Rsrvd| Sel | Protocol ID |
- * -----------------------------------------
- * Bits:|23 21|20 19|18 16|15 0|
- * -----------------------------------------
- */
- while (offset < length) {
- dcbcfg->app[i].priority = (u8)((buf[offset] &
- I40E_IEEE_APP_PRIO_MASK) >>
- I40E_IEEE_APP_PRIO_SHIFT);
- dcbcfg->app[i].selector = (u8)((buf[offset] &
- I40E_IEEE_APP_SEL_MASK) >>
- I40E_IEEE_APP_SEL_SHIFT);
- dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
- buf[offset + 2];
- /* Move to next app */
- offset += 3;
- i++;
- if (i >= I40E_DCBX_MAX_APPS)
- break;
- }
-
- dcbcfg->numapps = i;
-}
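
/*
 * Editor's note: a standalone sketch decoding one 3-octet APP priority
 * entry per the layout commented above: priority in bits 23:21, selector
 * in bits 18:16, protocol ID in bits 15:0. The masks and the sample FCoE
 * ethertype 0x8906 are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* example entry: priority 3, selector 1, protocol 0x8906 */
	uint8_t buf[3] = { (3 << 5) | 1, 0x89, 0x06 };

	uint8_t  priority   = (buf[0] & 0xE0) >> 5;	/* bits 23:21 */
	uint8_t  selector   =  buf[0] & 0x07;		/* bits 18:16 */
	uint16_t protocolid = (uint16_t)((buf[1] << 8) | buf[2]);

	printf("prio %u sel %u proto 0x%04x\n", (unsigned)priority,
	       (unsigned)selector, (unsigned)protocolid);
	return 0;
}
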
-
-/**
- * i40e_parse_ieee_tlv
- * @tlv: IEEE 802.1Qaz TLV
- * @dcbcfg: Local store to update DCBX configuration data
- *
- * Get the TLV subtype and dispatch it to the parsing function
- * based on the subtype value
- **/
-static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
- struct i40e_dcbx_config *dcbcfg)
-{
- u32 ouisubtype;
- u8 subtype;
-
- ouisubtype = I40E_NTOHL(tlv->ouisubtype);
- subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
- I40E_LLDP_TLV_SUBTYPE_SHIFT);
- switch (subtype) {
- case I40E_IEEE_SUBTYPE_ETS_CFG:
- i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
- break;
- case I40E_IEEE_SUBTYPE_ETS_REC:
- i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg);
- break;
- case I40E_IEEE_SUBTYPE_PFC_CFG:
- i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
- break;
- case I40E_IEEE_SUBTYPE_APP_PRI:
- i40e_parse_ieee_app_tlv(tlv, dcbcfg);
- break;
- default:
- break;
- }
-}
-
-/**
- * i40e_parse_org_tlv
- * @tlv: Organization specific TLV
- * @dcbcfg: Local store to update DCBX configuration data
- *
- * Currently only the IEEE 802.1Qaz TLV is supported; all others
- * are ignored
- **/
-static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
- struct i40e_dcbx_config *dcbcfg)
-{
- u32 ouisubtype;
- u32 oui;
-
- ouisubtype = I40E_NTOHL(tlv->ouisubtype);
- oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
- I40E_LLDP_TLV_OUI_SHIFT);
- switch (oui) {
- case I40E_IEEE_8021QAZ_OUI:
- i40e_parse_ieee_tlv(tlv, dcbcfg);
- break;
- default:
- break;
- }
-}
-
-/**
- * i40e_lldp_to_dcb_config
- * @lldpmib: LLDPDU to be parsed
- * @dcbcfg: store for LLDPDU data
- *
- * Parse DCB configuration from the LLDPDU
- **/
-enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
- struct i40e_dcbx_config *dcbcfg)
-{
- enum i40e_status_code ret = I40E_SUCCESS;
- struct i40e_lldp_org_tlv *tlv;
- u16 type;
- u16 length;
- u16 typelength;
- u16 offset = 0;
-
- if (!lldpmib || !dcbcfg)
- return I40E_ERR_PARAM;
-
- /* set to the start of LLDPDU */
- lldpmib += I40E_LLDP_MIB_HLEN;
- tlv = (struct i40e_lldp_org_tlv *)lldpmib;
- while (1) {
- typelength = I40E_NTOHS(tlv->typelength);
- type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
- I40E_LLDP_TLV_TYPE_SHIFT);
- length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
- offset += sizeof(typelength) + length;
-
- /* END TLV or beyond LLDPDU size */
- if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
- break;
-
- switch (type) {
- case I40E_TLV_TYPE_ORG:
- i40e_parse_org_tlv(tlv, dcbcfg);
- break;
- default:
- break;
- }
-
- /* Move to next TLV */
- tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
- sizeof(tlv->typelength) +
- length);
- }
-
- return ret;
-}
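
/*
 * Editor's note: a standalone sketch of the 16-bit TLV header decode that
 * drives the walk above: the big-endian type/length word keeps the type
 * in the top 7 bits and the length in the low 9 bits. The 0xFE00/0x01FF
 * masks and the shift are assumed stand-ins for the I40E_LLDP_TLV_* macros.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t wire[2] = { 0xFE, 0x20 };	/* header as seen on the wire */
	uint16_t typelength = (uint16_t)((wire[0] << 8) | wire[1]);	/* ntohs */

	uint16_t type   = (typelength & 0xFE00) >> 9;	/* 127 = org specific */
	uint16_t length = typelength & 0x01FF;		/* 32-byte value */

	printf("type %u, length %u\n", (unsigned)type, (unsigned)length);
	return 0;
}
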
-
-/**
- * i40e_aq_get_dcb_config
- * @hw: pointer to the hw struct
- * @mib_type: mib type for the query
- * @bridgetype: bridge type for the query (remote)
- * @dcbcfg: store for LLDPDU data
- *
- * Query DCB configuration from the Firmware
- **/
-enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
- u8 bridgetype,
- struct i40e_dcbx_config *dcbcfg)
-{
- enum i40e_status_code ret = I40E_SUCCESS;
- struct i40e_virt_mem mem;
- u8 *lldpmib;
-
- /* Allocate the LLDPDU */
- ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
- if (ret)
- return ret;
-
- lldpmib = (u8 *)mem.va;
- ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
- (void *)lldpmib, I40E_LLDPDU_SIZE,
- NULL, NULL, NULL);
- if (ret)
- goto free_mem;
-
- /* Parse LLDP MIB to get dcb configuration */
- ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);
-
-free_mem:
- i40e_free_virt_mem(hw, &mem);
- return ret;
-}
-
-/**
- * i40e_get_dcb_config
- * @hw: pointer to the hw struct
- *
- * Get DCB configuration from the Firmware
- **/
-enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw)
-{
- enum i40e_status_code ret = I40E_SUCCESS;
-
- /* Get Local DCB Config */
- ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
- &hw->local_dcbx_config);
- if (ret)
- goto out;
-
- /* Get Remote DCB Config */
- ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
- I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
- &hw->remote_dcbx_config);
-out:
- return ret;
-}
-
-/**
- * i40e_init_dcb
- * @hw: pointer to the hw struct
- *
- * Update DCB configuration from the Firmware
- **/
-enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw)
-{
- enum i40e_status_code ret = I40E_SUCCESS;
-
- if (!hw->func_caps.dcb)
- return ret;
-
- /* Get DCBX status */
- ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
- if (ret)
- return ret;
-
- /* Check the DCBX Status */
- switch (hw->dcbx_status) {
- case I40E_DCBX_STATUS_DONE:
- case I40E_DCBX_STATUS_IN_PROGRESS:
- /* Get current DCBX configuration */
- ret = i40e_get_dcb_config(hw);
- break;
- case I40E_DCBX_STATUS_DISABLED:
- return ret;
- case I40E_DCBX_STATUS_NOT_STARTED:
- case I40E_DCBX_STATUS_MULTIPLE_PEERS:
- default:
- break;
- }
-
- /* Configure the LLDP MIB change event */
- ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
- if (ret)
- return ret;
-
- return ret;
-}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/Makefile b/src/dpdk_lib18/librte_pmd_ixgbe/Makefile
deleted file mode 100755
index 35880479..00000000
--- a/src/dpdk_lib18/librte_pmd_ixgbe/Makefile
+++ /dev/null
@@ -1,117 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_pmd_ixgbe.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-ifeq ($(CC), icc)
-#
-# CFLAGS for icc
-#
-CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
-
-else ifeq ($(CC), clang)
-#
-# CFLAGS for clang
-#
-CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
-CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
-
-else
-#
-# CFLAGS for gcc
-#
-ifneq ($(shell test $(GCC_MAJOR_VERSION) -le 4 -a $(GCC_MINOR_VERSION) -le 3 && echo 1), 1)
-CFLAGS += -Wno-deprecated
-endif
-CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
-CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
-
-ifeq ($(shell test $(GCC_MAJOR_VERSION) -ge 4 -a $(GCC_MINOR_VERSION) -ge 6 && echo 1), 1)
-CFLAGS_ixgbe_common.o += -Wno-unused-but-set-variable
-CFLAGS_ixgbe_x550.o += -Wno-unused-but-set-variable -Wno-maybe-uninitialized
-endif
-
-ifeq ($(shell test $(GCC_MAJOR_VERSION) -le 4 -a $(GCC_MINOR_VERSION) -le 6 && echo 1), 1)
-CFLAGS_ixgbe_x550.o += -Wno-uninitialized
-CFLAGS_ixgbe_phy.o += -Wno-uninitialized
-endif
-endif
-
-#
-# Add extra flags for base driver files (also known as shared code)
-# to disable warnings in them
-#
-BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(RTE_SDK)/lib/librte_pmd_ixgbe/ixgbe/*.c)))
-$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
-
-VPATH += $(RTE_SDK)/lib/librte_pmd_ixgbe/ixgbe
-
-#
-# all source are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_common.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82598.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x540.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x550.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_phy.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_api.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82599.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82598.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_mbx.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_pf.c
-SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec.c
-
-ifeq ($(CONFIG_RTE_NIC_BYPASS),y)
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
-endif
-
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_net lib/librte_malloc
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x550.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x550.c
deleted file mode 100755
index 06d66dd5..00000000
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x550.c
+++ /dev/null
@@ -1,1809 +0,0 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#include "ixgbe_x550.h"
-#include "ixgbe_x540.h"
-#include "ixgbe_type.h"
-#include "ixgbe_api.h"
-#include "ixgbe_common.h"
-#include "ixgbe_phy.h"
-
-/**
- * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
- * @hw: pointer to hardware structure
- *
- * Initialize the function pointers and assign the MAC type for X550.
- * Does not touch the hardware.
- **/
-s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
-{
- struct ixgbe_mac_info *mac = &hw->mac;
- struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- s32 ret_val;
-
- DEBUGFUNC("ixgbe_init_ops_X550");
-
- ret_val = ixgbe_init_ops_X540(hw);
- mac->ops.dmac_config = &ixgbe_dmac_config_X550;
- mac->ops.dmac_config_tcs = &ixgbe_dmac_config_tcs_X550;
- mac->ops.dmac_update_tcs = &ixgbe_dmac_update_tcs_X550;
- mac->ops.setup_eee = &ixgbe_setup_eee_X550;
- mac->ops.set_source_address_pruning =
- &ixgbe_set_source_address_pruning_X550;
- mac->ops.set_ethertype_anti_spoofing =
- &ixgbe_set_ethertype_anti_spoofing_X550;
-
- mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;
- eeprom->ops.init_params = &ixgbe_init_eeprom_params_X550;
- eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X550;
- eeprom->ops.read = &ixgbe_read_ee_hostif_X550;
- eeprom->ops.read_buffer = &ixgbe_read_ee_hostif_buffer_X550;
- eeprom->ops.write = &ixgbe_write_ee_hostif_X550;
- eeprom->ops.write_buffer = &ixgbe_write_ee_hostif_buffer_X550;
- eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X550;
- eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X550;
-
- mac->ops.disable_mdd = &ixgbe_disable_mdd_X550;
- mac->ops.enable_mdd = &ixgbe_enable_mdd_X550;
- mac->ops.mdd_event = &ixgbe_mdd_event_X550;
- mac->ops.restore_mdd_vf = &ixgbe_restore_mdd_vf_X550;
- mac->ops.disable_rx = &ixgbe_disable_rx_x550;
- return ret_val;
-}
-
-/**
- * ixgbe_identify_phy_x550em - Get PHY type based on device id
- * @hw: pointer to hardware structure
- *
- * Returns error code
- */
-STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
-{
- u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-
- switch (hw->device_id) {
- case IXGBE_DEV_ID_X550EM_X_SFP:
- /* set up for CS4227 usage */
- hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) &
- IXGBE_STATUS_LAN_ID_1;
- hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
- if (hw->phy.lan_id) {
-
- esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
- esdp |= IXGBE_ESDP_SDP1_DIR;
- }
- esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-
-		return ixgbe_identify_module_generic(hw);
- case IXGBE_DEV_ID_X550EM_X_KX4:
- hw->phy.type = ixgbe_phy_x550em_kx4;
- break;
- case IXGBE_DEV_ID_X550EM_X_KR:
- case IXGBE_DEV_ID_X550EM_X:
- hw->phy.type = ixgbe_phy_x550em_kr;
- break;
- default:
- break;
- }
- return IXGBE_SUCCESS;
-}
-
-STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 *phy_data)
-{
- UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
- return IXGBE_NOT_IMPLEMENTED;
-}
-
-STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data)
-{
- UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
- return IXGBE_NOT_IMPLEMENTED;
-}
-
-/**
-* ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
-* @hw: pointer to hardware structure
-*
-* Initialize the function pointers and assign the MAC type for X550EM.
-* Does not touch the hardware.
-**/
-s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
-{
- struct ixgbe_mac_info *mac = &hw->mac;
- struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
-
- DEBUGFUNC("ixgbe_init_ops_X550EM");
-
- /* Similar to X550 so start there. */
- ret_val = ixgbe_init_ops_X550(hw);
-
- /* Since this function eventually calls
- * ixgbe_init_ops_X540 by design, we are setting
- * the pointers to NULL explicitly here to overwrite
- * the values being set in the x540 function.
- */
- /* Thermal sensor not supported in x550EM */
- mac->ops.get_thermal_sensor_data = NULL;
- mac->ops.init_thermal_sensor_thresh = NULL;
- mac->thermal_sensor_enabled = false;
-
- /* FCOE not supported in x550EM */
- mac->ops.get_san_mac_addr = NULL;
- mac->ops.set_san_mac_addr = NULL;
- mac->ops.get_wwn_prefix = NULL;
- mac->ops.get_fcoe_boot_status = NULL;
-
- /* IPsec not supported in x550EM */
- mac->ops.disable_sec_rx_path = NULL;
- mac->ops.enable_sec_rx_path = NULL;
-
- /* PCIe bus info not supported in X550EM */
- mac->ops.get_bus_info = NULL;
-
- mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
- mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
- mac->ops.get_media_type = &ixgbe_get_media_type_X550em;
- mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_X550em;
- mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_X550em;
- mac->ops.reset_hw = &ixgbe_reset_hw_X550em;
- mac->ops.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_X550em;
-
- /* PHY */
- phy->ops.init = &ixgbe_init_phy_ops_X550em;
- phy->ops.identify = &ixgbe_identify_phy_x550em;
- phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
- phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
- phy->ops.setup_link = ixgbe_setup_kr_x550em;
-
-
- /* EEPROM */
- eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540;
- eeprom->ops.read = &ixgbe_read_ee_hostif_X550;
- eeprom->ops.read_buffer = &ixgbe_read_ee_hostif_buffer_X550;
- eeprom->ops.write = &ixgbe_write_ee_hostif_X550;
- eeprom->ops.write_buffer = &ixgbe_write_ee_hostif_buffer_X550;
- eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X550;
- eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X550;
- eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X550;
-
- return ret_val;
-}
-
-/**
- * ixgbe_dmac_config_X550
- * @hw: pointer to hardware structure
- *
- * Configures DMA coalescing. When enabling dmac, dmac is activated.
- * When disabling dmac, the dmac enable bit is cleared.
- **/
-s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
-{
- u32 reg, high_pri_tc;
-
- DEBUGFUNC("ixgbe_dmac_config_X550");
-
- /* Disable DMA coalescing before configuring */
- reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
- reg &= ~IXGBE_DMACR_DMAC_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
-
- /* Disable DMA Coalescing if the watchdog timer is 0 */
- if (!hw->mac.dmac_config.watchdog_timer)
- goto out;
-
- ixgbe_dmac_config_tcs_X550(hw);
-
- /* Configure DMA Coalescing Control Register */
- reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
-
- /* Set the watchdog timer in units of 40.96 usec */
- reg &= ~IXGBE_DMACR_DMACWT_MASK;
- reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
-
- reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
- /* If fcoe is enabled, set high priority traffic class */
- if (hw->mac.dmac_config.fcoe_en) {
- high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
- reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
- IXGBE_DMACR_HIGH_PRI_TC_MASK);
- }
- reg |= IXGBE_DMACR_EN_MNG_IND;
-
- /* Enable DMA coalescing after configuration */
- reg |= IXGBE_DMACR_DMAC_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
-
-out:
- return IXGBE_SUCCESS;
-}
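
The watchdog field above is programmed in hardware units of 40.96 usec; multiplying by 100 and dividing by 4096 converts the configured value to those units using only integer math. A small sketch of the conversion (assuming, as the register comment suggests, that watchdog_timer is expressed in microseconds):

    #include <stdio.h>
    #include <stdint.h>

    /* Convert a watchdog interval in microseconds to 40.96 usec hardware
     * units with the driver's integer arithmetic: (us * 100) / 4096. */
    static uint32_t dmac_watchdog_units(uint32_t usec)
    {
        return (usec * 100u) / 4096u;
    }

    int main(void)
    {
        /* 4096 us is exactly 100 units of 40.96 usec each. */
        printf("%u units\n", dmac_watchdog_units(4096u)); /* 100 units */
        return 0;
    }
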
-
-/**
- * ixgbe_dmac_config_tcs_X550
- * @hw: pointer to hardware structure
- *
- * Configure DMA coalescing threshold per TC. The dmac enable bit must
- * be cleared before configuring.
- **/
-s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
-{
- u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
-
- DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
-
- /* Select Rx packet buffer headroom based on link speed */
- switch (hw->mac.dmac_config.link_speed) {
- case IXGBE_LINK_SPEED_100_FULL:
- pb_headroom = IXGBE_DMACRXT_100M;
- break;
- case IXGBE_LINK_SPEED_1GB_FULL:
- pb_headroom = IXGBE_DMACRXT_1G;
- break;
- default:
- pb_headroom = IXGBE_DMACRXT_10G;
- break;
- }
-
- maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
- IXGBE_MHADD_MFS_SHIFT) / 1024);
-
- /* Set the per Rx packet buffer receive threshold */
- for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
- reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
- reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
-
- if (tc < hw->mac.dmac_config.num_tcs) {
- /* Get Rx PB size */
- rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
- rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
- IXGBE_RXPBSIZE_SHIFT;
-
- /* Calculate receive buffer threshold in kilobytes */
- if (rx_pb_size > pb_headroom)
- rx_pb_size = rx_pb_size - pb_headroom;
- else
- rx_pb_size = 0;
-
- /* Minimum of MFS shall be set for DMCTH */
- reg |= (rx_pb_size > maxframe_size_kb) ?
- rx_pb_size : maxframe_size_kb;
- }
- IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
- }
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_dmac_update_tcs_X550
- * @hw: pointer to hardware structure
- *
- * Disables dmac, updates per TC settings, and then enables dmac.
- **/
-s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
-{
- u32 reg;
-
- DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
-
- /* Disable DMA coalescing before configuring */
- reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
- reg &= ~IXGBE_DMACR_DMAC_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
-
- ixgbe_dmac_config_tcs_X550(hw);
-
- /* Enable DMA coalescing after configuration */
- reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
- reg |= IXGBE_DMACR_DMAC_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
- * @hw: pointer to hardware structure
- *
- * Initializes the EEPROM parameters ixgbe_eeprom_info within the
- * ixgbe_hw struct in order to set up EEPROM access.
- **/
-s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
-{
- struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- u32 eec;
- u16 eeprom_size;
-
- DEBUGFUNC("ixgbe_init_eeprom_params_X550");
-
- if (eeprom->type == ixgbe_eeprom_uninitialized) {
- eeprom->semaphore_delay = 10;
- eeprom->type = ixgbe_flash;
-
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
- eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
- eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
-
- DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
- eeprom->type, eeprom->word_size);
- }
-
- return IXGBE_SUCCESS;
-}
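
ixgbe_init_eeprom_params_X550 derives the EEPROM word count from a size field packed inside the EEC register: extract the field, then compute 1 << (field + shift). A self-contained sketch of that decode (the mask and shift values below are placeholders, not the real IXGBE_EEC_* definitions):

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical field layout standing in for IXGBE_EEC_SIZE/_SHIFT. */
    #define EEC_SIZE_MASK   0x7800u
    #define EEC_SIZE_SHIFT  11
    #define WORD_SIZE_SHIFT 6   /* stand-in for IXGBE_EEPROM_WORD_SIZE_SHIFT */

    static uint32_t eeprom_word_size(uint32_t eec)
    {
        uint16_t size_field = (uint16_t)((eec & EEC_SIZE_MASK) >> EEC_SIZE_SHIFT);

        return 1u << (size_field + WORD_SIZE_SHIFT);
    }

    int main(void)
    {
        /* A size field of 4 maps to 1 << (4 + 6) = 1024 words. */
        printf("%u words\n", eeprom_word_size(4u << EEC_SIZE_SHIFT));
        return 0;
    }
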
-
-/**
- * ixgbe_setup_eee_X550 - Enable/disable EEE support
- * @hw: pointer to the HW structure
- * @enable_eee: boolean flag to enable EEE
- *
- * Enable/disable EEE based on enable_eee flag.
- * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
- * are modified.
- *
- **/
-s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
-{
- u32 eeer;
- u16 autoneg_eee_reg;
- u32 link_reg;
- s32 status;
-
- DEBUGFUNC("ixgbe_setup_eee_X550");
-
- eeer = IXGBE_READ_REG(hw, IXGBE_EEER);
- /* Enable or disable EEE per flag */
- if (enable_eee) {
- eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
-
- if (hw->device_id == IXGBE_DEV_ID_X550T) {
- /* Advertise EEE capability */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
-
- autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
- IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
- IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
-
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
- } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR ||
- hw->device_id == IXGBE_DEV_ID_X550EM_X) {
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
-
- link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
-
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
- }
- } else {
- eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
-
- if (hw->device_id == IXGBE_DEV_ID_X550T) {
- /* Disable advertised EEE capability */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
-
- autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
- IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
- IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
-
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
- } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR ||
- hw->device_id == IXGBE_DEV_ID_X550EM_X) {
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
-
- link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX);
-
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
- }
- }
- IXGBE_WRITE_REG(hw, IXGBE_EEER, eeer);
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
- * @hw: pointer to hardware structure
- * @enable: enable or disable source address pruning
- * @pool: Rx pool to set source address pruning for
- **/
-void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
- unsigned int pool)
-{
- u64 pfflp;
-
- /* max rx pool is 63 */
- if (pool > 63)
- return;
-
- pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
- pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
-
- if (enable)
- pfflp |= (1ULL << pool);
- else
- pfflp &= ~(1ULL << pool);
-
- IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
- IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
-}
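
The pruning bitmap above is logically 64 bits wide (one bit per Rx pool) but lives in two 32-bit registers, so the code reassembles it, flips one bit, and splits it back. The same round trip in isolation (plain uint32_t variables standing in for the PFFLPL/PFFLPH registers):

    #include <stdio.h>
    #include <stdint.h>

    /* One bit per Rx pool, split across a low and a high 32-bit word. */
    static void set_pool_bit(uint32_t *lo, uint32_t *hi, unsigned pool, int enable)
    {
        uint64_t map = (uint64_t)*lo | ((uint64_t)*hi << 32);

        if (pool > 63)          /* max rx pool is 63 */
            return;
        if (enable)
            map |= 1ULL << pool;
        else
            map &= ~(1ULL << pool);

        *lo = (uint32_t)map;
        *hi = (uint32_t)(map >> 32);
    }

    int main(void)
    {
        uint32_t lo = 0, hi = 0;

        set_pool_bit(&lo, &hi, 40, 1); /* pool 40 lands in the high word */
        printf("lo=%08x hi=%08x\n", (unsigned)lo, (unsigned)hi);
        /* lo=00000000 hi=00000100 */
        return 0;
    }
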
-
-/**
- * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
- * @hw: pointer to hardware structure
- * @enable: enable or disable switch for Ethertype anti-spoofing
- * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
- *
- **/
-void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
- bool enable, int vf)
-{
- int vf_target_reg = vf >> 3;
- int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
- u32 pfvfspoof;
-
- DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
-
- pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
- if (enable)
- pfvfspoof |= (1 << vf_target_shift);
- else
- pfvfspoof &= ~(1 << vf_target_shift);
-
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
-}
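
The anti-spoofing bit for a VF is addressed in two steps: vf >> 3 picks one of the PFVFSPOOF registers (eight VFs each), and vf % 8 plus the field's base shift picks the bit inside it. A sketch of just that index arithmetic (the shift value below is a placeholder for IXGBE_SPOOF_ETHERTYPEAS_SHIFT):

    #include <stdio.h>

    #define ETHERTYPEAS_SHIFT 16  /* placeholder, not the real definition */

    /* Eight VFs share one register; compute which register and which bit
     * within it hold a given VF's ethertype anti-spoof flag. */
    static void vf_spoof_bit(int vf, int *reg_idx, int *bit)
    {
        *reg_idx = vf >> 3;                    /* 8 VFs per register */
        *bit     = vf % 8 + ETHERTYPEAS_SHIFT; /* offset into the field */
    }

    int main(void)
    {
        int reg, bit;

        vf_spoof_bit(13, &reg, &bit);
        printf("vf 13 -> reg %d, bit %d\n", reg, bit); /* reg 1, bit 21 */
        return 0;
    }
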
-
-/**
- * ixgbe_write_iosf_sb_reg_x550 - Writes a value to the specified register of
- * the IOSF device
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit PHY register to write
- * @device_type: 3 bit device type
- * @data: Data to write to the register
- **/
-s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 data)
-{
- u32 i, command, error;
-
- command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
- (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
-
- /* Write IOSF control register */
- IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
-
- /* Write IOSF data register */
- IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
- /*
- * Check every 10 usec to see if the address cycle completed.
- * The SB IOSF BUSY bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- usec_delay(10);
-
- command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
- if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
- break;
- }
-
- if ((command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) != 0) {
- error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
- IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
- ERROR_REPORT2(IXGBE_ERROR_POLLING,
- "Failed to write, error %x\n", error);
- return IXGBE_ERR_PHY;
- }
-
- if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
- ERROR_REPORT1(IXGBE_ERROR_POLLING, "Write timed out\n");
- return IXGBE_ERR_PHY;
- }
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_read_iosf_sb_reg_x550 - Reads a value from the specified register of
- * the IOSF device
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit PHY register to read
- * @device_type: 3 bit device type
- * @data: pointer used to return the register value
- **/
-s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 *data)
-{
- u32 i, command, error;
-
- command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
- (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
-
- /* Write IOSF control register */
- IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
-
- /*
- * Check every 10 usec to see if the address cycle completed.
- * The SB IOSF BUSY bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- usec_delay(10);
-
- command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
- if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
- break;
- }
-
- if ((command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) != 0) {
- error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
- IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
- ERROR_REPORT2(IXGBE_ERROR_POLLING,
- "Failed to read, error %x\n", error);
- return IXGBE_ERR_PHY;
- }
-
- if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
- ERROR_REPORT1(IXGBE_ERROR_POLLING, "Read timed out\n");
- return IXGBE_ERR_PHY;
- }
-
- *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
-
- return IXGBE_SUCCESS;
-}
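
Both IOSF accessors share the same completion idiom: poll the control register every 10 usec until the BUSY bit clears, then distinguish success from timeout by whether the loop ran to its bound. A minimal sketch of that poll loop (the fake read_ctrl below reports busy twice, then done):

    #include <stdio.h>
    #include <stdint.h>

    #define BUSY_BIT 0x1u
    #define TIMEOUT  100  /* stand-in for IXGBE_MDIO_COMMAND_TIMEOUT */

    /* Hypothetical register read standing in for IXGBE_READ_REG. */
    static uint32_t read_ctrl(void)
    {
        static int calls;

        return (++calls < 3) ? BUSY_BIT : 0; /* busy twice, then done */
    }

    /* Spin until BUSY clears or attempts run out; -1 means timeout. */
    static int wait_not_busy(void)
    {
        uint32_t ctrl = 0;
        int i;

        for (i = 0; i < TIMEOUT; i++) {
            /* usec_delay(10) would go here in the driver */
            ctrl = read_ctrl();
            if ((ctrl & BUSY_BIT) == 0)
                break;
        }
        return (i == TIMEOUT) ? -1 : 0;
    }

    int main(void)
    {
        printf("%s\n", wait_not_busy() == 0 ? "completed" : "timed out");
        return 0;
    }
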
-
-/**
- * ixgbe_disable_mdd_X550
- * @hw: pointer to hardware structure
- *
- * Disable malicious driver detection
- **/
-void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
-{
- u32 reg;
-
- DEBUGFUNC("ixgbe_disable_mdd_X550");
-
- /* Disable MDD for TX DMA and interrupt */
- reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
- reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
- IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
-
- /* Disable MDD for RX and interrupt */
- reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
- IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
-}
-
-/**
- * ixgbe_enable_mdd_X550
- * @hw: pointer to hardware structure
- *
- * Enable malicious driver detection
- **/
-void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
-{
- u32 reg;
-
- DEBUGFUNC("ixgbe_enable_mdd_X550");
-
- /* Enable MDD for TX DMA and interrupt */
- reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
- reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
- IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
-
- /* Enable MDD for RX and interrupt */
- reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
- IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
-}
-
-/**
- * ixgbe_restore_mdd_vf_X550
- * @hw: pointer to hardware structure
- * @vf: vf index
- *
- * Restore VF that was disabled during malicious driver detection event
- **/
-void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
-{
- u32 idx, reg, num_qs, start_q, bitmask;
-
- DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
-
- /* Map VF to queues */
- reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
- switch (reg & IXGBE_MRQC_MRQE_MASK) {
- case IXGBE_MRQC_VMDQRT8TCEN:
- num_qs = 8; /* 16 VFs / pools */
- bitmask = 0x000000FF;
- break;
- case IXGBE_MRQC_VMDQRSS32EN:
- case IXGBE_MRQC_VMDQRT4TCEN:
- num_qs = 4; /* 32 VFs / pools */
- bitmask = 0x0000000F;
- break;
- default: /* 64 VFs / pools */
- num_qs = 2;
- bitmask = 0x00000003;
- break;
- }
- start_q = vf * num_qs;
-
- /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
- idx = start_q / 32;
- reg = 0;
- reg |= (bitmask << (start_q % 32));
- IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
- IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
-}
-
-/**
- * ixgbe_mdd_event_X550
- * @hw: pointer to hardware structure
- * @vf_bitmap: vf bitmap of malicious vfs
- *
- * Handle malicious driver detection event.
- **/
-void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
-{
- u32 wqbr;
- u32 i, j, reg, q, shift, vf, idx;
-
- DEBUGFUNC("ixgbe_mdd_event_X550");
-
- /* figure out pool size for mapping to vf's */
- reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
- switch (reg & IXGBE_MRQC_MRQE_MASK) {
- case IXGBE_MRQC_VMDQRT8TCEN:
- shift = 3; /* 16 VFs / pools */
- break;
- case IXGBE_MRQC_VMDQRSS32EN:
- case IXGBE_MRQC_VMDQRT4TCEN:
- shift = 2; /* 32 VFs / pools */
- break;
- default:
- shift = 1; /* 64 VFs / pools */
- break;
- }
-
- /* Read WQBR_TX and WQBR_RX and check for malicious queues */
- for (i = 0; i < 4; i++) {
- wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
- wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
-
- if (!wqbr)
- continue;
-
- /* Get malicious queue */
- for (j = 0; j < 32 && wqbr; j++) {
-
- if (!(wqbr & (1 << j)))
- continue;
-
- /* Get queue from bitmask */
- q = j + (i * 32);
-
- /* Map queue to vf */
- vf = (q >> shift);
-
- /* Set vf bit in vf_bitmap */
- idx = vf / 32;
- vf_bitmap[idx] |= (1 << (vf % 32));
- wqbr &= ~(1 << j);
- }
- }
-}
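
ixgbe_mdd_event_X550 walks the four 32-bit WQBR words as one 128-bit queue bitmap and converts each flagged queue to a VF with a right shift that encodes the pool size. The same walk in standalone form (hypothetical 2-queues-per-VF layout in the example run):

    #include <stdio.h>
    #include <stdint.h>

    /* Map each flagged queue in a 4 x 32-bit bitmap to its owning VF. */
    static void flag_vfs(const uint32_t wqbr[4], int shift, uint32_t vf_bitmap[2])
    {
        for (unsigned i = 0; i < 4; i++) {
            uint32_t w = wqbr[i];

            for (unsigned j = 0; w && j < 32; j++) {
                if (!(w & (1u << j)))
                    continue;
                unsigned q  = j + i * 32; /* absolute queue number */
                unsigned vf = q >> shift; /* 2^shift queues per VF */

                vf_bitmap[vf / 32] |= 1u << (vf % 32);
                w &= ~(1u << j);
            }
        }
    }

    int main(void)
    {
        uint32_t wqbr[4] = { 0, 1u << 4, 0, 0 }; /* queue 36 flagged */
        uint32_t vfs[2] = { 0, 0 };

        flag_vfs(wqbr, 1, vfs);                  /* 2 queues per VF */
        printf("vf bitmap: %08x %08x\n", (unsigned)vfs[0], (unsigned)vfs[1]);
        /* queue 36 >> 1 = vf 18, so bit 18: 00040000 00000000 */
        return 0;
    }
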
-
-/**
- * ixgbe_get_media_type_X550em - Get media type
- * @hw: pointer to hardware structure
- *
- * Returns the media type (fiber, copper, backplane)
- */
-enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
-{
- enum ixgbe_media_type media_type;
-
- DEBUGFUNC("ixgbe_get_media_type_X550em");
-
- /* Detect if there is a copper PHY attached. */
- switch (hw->device_id) {
- case IXGBE_DEV_ID_X550EM_X:
- case IXGBE_DEV_ID_X550EM_X_KR:
- case IXGBE_DEV_ID_X550EM_X_KX4:
- media_type = ixgbe_media_type_backplane;
- break;
- case IXGBE_DEV_ID_X550EM_X_SFP:
- media_type = ixgbe_media_type_fiber;
- break;
- default:
- media_type = ixgbe_media_type_unknown;
- break;
- }
- return media_type;
-}
-
-/**
- * ixgbe_setup_sfp_modules_X550em - Setup SFP module
- * @hw: pointer to hardware structure
- */
-s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
-{
- bool setup_linear;
- u16 reg_slice, edc_mode;
-
- DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
-
- switch (hw->phy.sfp_type) {
- case ixgbe_sfp_type_unknown:
- return IXGBE_SUCCESS;
- case ixgbe_sfp_type_not_present:
- return IXGBE_ERR_SFP_NOT_PRESENT;
- case ixgbe_sfp_type_da_cu_core0:
- case ixgbe_sfp_type_da_cu_core1:
- setup_linear = true;
- break;
- case ixgbe_sfp_type_srlr_core0:
- case ixgbe_sfp_type_srlr_core1:
- case ixgbe_sfp_type_da_act_lmt_core0:
- case ixgbe_sfp_type_da_act_lmt_core1:
- case ixgbe_sfp_type_1g_sx_core0:
- case ixgbe_sfp_type_1g_sx_core1:
- case ixgbe_sfp_type_1g_lx_core0:
- case ixgbe_sfp_type_1g_lx_core1:
- setup_linear = false;
- break;
- default:
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
- }
-
- ixgbe_init_mac_link_ops_X550em(hw);
- hw->phy.ops.reset = NULL;
-
- /* The CS4227 slice address is the base address + the port-pair reg
- * offset. I.e. Slice 0 = 0x0000 and slice 1 = 0x1000.
- */
- reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->phy.lan_id << 12);
-
- if (setup_linear)
- edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- else
- edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
-
- /* Configure CS4227 for connection type. */
- return hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227,
- reg_slice, edc_mode);
-}
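
The slice arithmetic used for the CS4227 above is just base + (lan_id << 12), putting each port pair 0x1000 apart in the register map. A tiny sketch (the base value here is a stand-in, not necessarily the real IXGBE_CS4227_SPARE24_LSB):

    #include <stdio.h>
    #include <stdint.h>

    #define SPARE24_LSB 0x12B0u  /* placeholder base register address */

    /* lan_id selects the 0x1000-byte slice, as in the function above. */
    static uint16_t cs4227_slice_reg(unsigned lan_id)
    {
        return (uint16_t)(SPARE24_LSB + (lan_id << 12));
    }

    int main(void)
    {
        printf("lan0: 0x%04x  lan1: 0x%04x\n",
               cs4227_slice_reg(0), cs4227_slice_reg(1));
        /* lan0: 0x12b0  lan1: 0x22b0 */
        return 0;
    }
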
-
-/**
- * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
- * @hw: pointer to hardware structure
- */
-void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
-{
- struct ixgbe_mac_info *mac = &hw->mac;
-
- DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
-
- /* CS4227 does not support autoneg, so disable the laser control
- * functions for SFP+ fiber
- */
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
- mac->ops.disable_tx_laser = NULL;
- mac->ops.enable_tx_laser = NULL;
- mac->ops.flap_tx_laser = NULL;
- }
-}
-
-/**
- * ixgbe_get_link_capabilities_X550em - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: true when autoneg or autotry is enabled
- */
-s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
-{
- DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
-
- /* SFP */
- if (hw->phy.media_type == ixgbe_media_type_fiber) {
-
- /* CS4227 SFP must not enable auto-negotiation */
- *autoneg = false;
-
- /* Check if 1G SFP module. */
- if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- return IXGBE_SUCCESS;
- }
-
- /* Link capabilities are based on SFP */
- if (hw->phy.multispeed_fiber)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
- else
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- } else {
- *speed |= IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
- *autoneg = true;
- }
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
- * @hw: pointer to hardware structure
- *
- * Initialize any function pointers that were not able to be
- * set during init_shared_code because the PHY/SFP type was
- * not known. Perform the SFP init if necessary.
- */
-s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
-{
- struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
- u32 esdp;
-
- DEBUGFUNC("ixgbe_init_phy_ops_X550em");
-
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
- esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
- phy->lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) &
- IXGBE_STATUS_LAN_ID_1;
- phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
- if (phy->lan_id) {
- esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
- esdp |= IXGBE_ESDP_SDP1_DIR;
- }
- esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
- }
-
- /* Identify the PHY or SFP module */
- ret_val = phy->ops.identify(hw);
- if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
- return ret_val;
-
- /* Setup function pointers based on detected SFP module and speeds */
- ixgbe_init_mac_link_ops_X550em(hw);
- if (phy->sfp_type != ixgbe_sfp_type_unknown)
- phy->ops.reset = NULL;
-
- /* Set functions pointers based on phy type */
- switch (hw->phy.type) {
- case ixgbe_phy_x550em_kr:
- phy->ops.setup_link = ixgbe_setup_kr_x550em;
- break;
- default:
- break;
- }
- return ret_val;
-}
-
-/**
- * ixgbe_reset_hw_X550em - Perform hardware reset
- * @hw: pointer to hardware structure
- *
- * Resets the hardware by resetting the transmit and receive units, masks
- * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
- * reset.
- */
-s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
-{
- ixgbe_link_speed link_speed;
- s32 status;
- u32 ctrl = 0;
- u32 i;
- bool link_up = false;
-
- DEBUGFUNC("ixgbe_reset_hw_X550em");
-
- /* Call adapter stop to disable Tx/Rx and clear interrupts */
- status = hw->mac.ops.stop_adapter(hw);
- if (status != IXGBE_SUCCESS)
- return status;
-
- /* flush pending Tx transactions */
- ixgbe_clear_tx_pending(hw);
-
- /* PHY ops must be identified and initialized prior to reset */
-
- /* Identify PHY and related function pointers */
- status = hw->phy.ops.init(hw);
-
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
- return status;
-
- /* Setup SFP module if there is one present. */
- if (hw->phy.sfp_setup_needed) {
- status = hw->mac.ops.setup_sfp(hw);
- hw->phy.sfp_setup_needed = false;
- }
-
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
- return status;
-
- /* Reset PHY */
- if (!hw->phy.reset_disable && hw->phy.ops.reset)
- hw->phy.ops.reset(hw);
-
-mac_reset_top:
- /* Issue global reset to the MAC. Needs to be SW reset if link is up.
- * If link reset is used when link is up, it might reset the PHY when
- * mng is using it. If link is down or the flag to force full link
- * reset is set, then perform link reset.
- */
- ctrl = IXGBE_CTRL_LNK_RST;
- if (!hw->force_full_reset) {
- hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
- if (link_up)
- ctrl = IXGBE_CTRL_RST;
- }
-
- ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
- IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
- IXGBE_WRITE_FLUSH(hw);
-
- /* Poll for reset bit to self-clear meaning reset is complete */
- for (i = 0; i < 10; i++) {
- usec_delay(1);
- ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- if (!(ctrl & IXGBE_CTRL_RST_MASK))
- break;
- }
-
- if (ctrl & IXGBE_CTRL_RST_MASK) {
- status = IXGBE_ERR_RESET_FAILED;
- DEBUGOUT("Reset polling failed to complete.\n");
- }
-
- msec_delay(50);
-
- /* Double resets are required for recovery from certain error
- * conditions. Between resets, it is necessary to stall to
- * allow time for any pending HW events to complete.
- */
- if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
- hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
- goto mac_reset_top;
- }
-
- /* Store the permanent mac address */
- hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
- /* Store MAC address from RAR0, clear receive address registers, and
- * clear the multicast table. Also reset num_rar_entries to 128,
- * since we modify this value when programming the SAN MAC address.
- */
- hw->mac.num_rar_entries = 128;
- hw->mac.ops.init_rx_addrs(hw);
-
- return status;
-}
-
-/**
- * ixgbe_setup_kr_x550em - Configure the KR PHY.
- * @hw: pointer to hardware structure
- *
- * Configures the integrated KR PHY.
- **/
-s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
-{
- s32 status;
- u32 reg_val;
-
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
- reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
-
- /* Advertise 10G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
-
- /* Advertise 1G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
-
- /* Restart auto-negotiation. */
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- return status;
-}
-
-/**
- * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI.
- * @hw: pointer to hardware structure
- *
- * Configures the integrated KR PHY to use iXFI mode.
- **/
-s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw)
-{
- s32 status;
- u32 reg_val;
-
- /* Disable AN and force speed to 10G Serial. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
-
- /* Disable training protocol FSM. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
-
- /* Disable Flex from training TXFFE. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_DSP_TXFFE_STATE_4(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_DSP_TXFFE_STATE_4(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_DSP_TXFFE_STATE_5(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_DSP_TXFFE_STATE_5(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
-
- /* Enable override for coefficients. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_TX_COEFF_CTRL_1(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
- reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
- reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
- reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_TX_COEFF_CTRL_1(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
-
- /* Toggle port SW reset by AN reset. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- return status;
-}
-
-/**
- * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
- * @hw: pointer to hardware structure
- *
- * Configures the integrated KR PHY to use internal loopback mode.
- **/
-s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
-{
- s32 status;
- u32 reg_val;
-
- /* Disable AN and force speed to 10G Serial. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
-
- /* Set near-end loopback clocks. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
- reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
-
- /* Set loopback enable. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PMD_DFX_BURNIN(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PMD_DFX_BURNIN(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
-
- /* Training bypass. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->phy.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- return status;
-}
-
-/**
- * ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
- * command, assuming that the semaphore is already obtained.
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
- * @data: word read from the EEPROM
- *
- * Reads a 16 bit word from the EEPROM using the hostif.
- **/
-s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
- u16 *data)
-{
- s32 status;
- struct ixgbe_hic_read_shadow_ram buffer;
-
- DEBUGFUNC("ixgbe_read_ee_hostif_data_X550");
- buffer.hdr.cmd = FW_READ_SHADOW_RAM_CMD;
- buffer.hdr.buf_len1 = 0;
- buffer.hdr.buf_len2 = FW_READ_SHADOW_RAM_LEN;
- buffer.hdr.checksum = FW_DEFAULT_CHECKSUM;
-
- /* convert offset from words to bytes */
- buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
- /* one word */
- buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
-
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer), false);
-
- if (status)
- return status;
-
- *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- FW_NVM_DATA_OFFSET);
-
- return 0;
-}
-
-/**
- * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
- * @data: word read from the EEPROM
- *
- * Reads a 16 bit word from the EEPROM using the hostif.
- **/
-s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
- u16 *data)
-{
- s32 status = IXGBE_SUCCESS;
-
- DEBUGFUNC("ixgbe_read_ee_hostif_X550");
-
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
- IXGBE_SUCCESS) {
- status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- } else {
- status = IXGBE_ERR_SWFW_SYNC;
- }
-
- return status;
-}
-
-/**
- * ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
- * @words: number of words
- * @data: word(s) read from the EEPROM
- *
- * Reads 16 bit word(s) from the EEPROM using the hostif.
- **/
-s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
- u16 offset, u16 words, u16 *data)
-{
- struct ixgbe_hic_read_shadow_ram buffer;
- u32 current_word = 0;
- u16 words_to_read;
- s32 status;
- u32 i;
-
- DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
-
- /* Take semaphore for the entire operation. */
- status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- if (status) {
- DEBUGOUT("EEPROM read buffer - semaphore failed\n");
- return status;
- }
- while (words) {
- if (words > FW_MAX_READ_BUFFER_SIZE / 2)
- words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
- else
- words_to_read = words;
-
- buffer.hdr.cmd = FW_READ_SHADOW_RAM_CMD;
- buffer.hdr.buf_len1 = 0;
- buffer.hdr.buf_len2 = FW_READ_SHADOW_RAM_LEN;
- buffer.hdr.checksum = FW_DEFAULT_CHECKSUM;
-
- /* convert offset from words to bytes */
- buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
- buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
-
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer), false);
-
- if (status) {
- DEBUGOUT("Host interface command failed\n");
- goto out;
- }
-
- for (i = 0; i < words_to_read; i++) {
- u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
- 2 * i;
- u32 value = IXGBE_READ_REG(hw, reg);
-
- data[current_word] = (u16)(value & 0xffff);
- current_word++;
- i++;
- if (i < words_to_read) {
- value >>= 16;
- data[current_word] = (u16)(value & 0xffff);
- current_word++;
- }
- }
- words -= words_to_read;
- }
-
-out:
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- return status;
-}
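
The unpacking loop above is easy to misread because it advances i twice per pass: each 32-bit management-area read carries two 16-bit EEPROM words, low half first. An equivalent standalone version of the unpack (a plain array in place of the register reads):

    #include <stdio.h>
    #include <stdint.h>

    /* Unpack nwords 16-bit words from packed 32-bit reads, low half first. */
    static unsigned unpack_words(const uint32_t *regs, unsigned nwords,
                                 uint16_t *out)
    {
        unsigned w = 0;

        for (unsigned i = 0; w < nwords; i++) {
            uint32_t v = regs[i];

            out[w++] = (uint16_t)(v & 0xffff);
            if (w < nwords)
                out[w++] = (uint16_t)(v >> 16);
        }
        return w;
    }

    int main(void)
    {
        uint32_t regs[2] = { 0xBBBBAAAA, 0x0000CCCC };
        uint16_t words[3];

        unpack_words(regs, 3, words);
        printf("%04x %04x %04x\n", words[0], words[1], words[2]);
        /* AAAA BBBB CCCC */
        return 0;
    }
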
-
-/**
- * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to write
- * @data: word to write to the EEPROM
- *
- * Write a 16 bit word to the EEPROM using the hostif.
- **/
-s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
- u16 data)
-{
- s32 status;
- struct ixgbe_hic_write_shadow_ram buffer;
-
- DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
-
- buffer.hdr.cmd = FW_WRITE_SHADOW_RAM_CMD;
- buffer.hdr.buf_len1 = 0;
- buffer.hdr.buf_len2 = FW_WRITE_SHADOW_RAM_LEN;
- buffer.hdr.checksum = FW_DEFAULT_CHECKSUM;
-
- /* one word */
- buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
- buffer.data = data;
- buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
-
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer), false);
-
- return status;
-}
-
-/**
- * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to write
- * @data: word to write to the EEPROM
- *
- * Write a 16 bit word to the EEPROM using the hostif.
- **/
-s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
- u16 data)
-{
- s32 status = IXGBE_SUCCESS;
-
- DEBUGFUNC("ixgbe_write_ee_hostif_X550");
-
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
- IXGBE_SUCCESS) {
- status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- } else {
- DEBUGOUT("write ee hostif failed to get semaphore");
- status = IXGBE_ERR_SWFW_SYNC;
- }
-
- return status;
-}
-
-/**
- * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to write
- * @words: number of words
- * @data: word(s) to write to the EEPROM
- *
- * Writes 16 bit word(s) to the EEPROM using the hostif.
- **/
-s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
- u16 offset, u16 words, u16 *data)
-{
- s32 status = IXGBE_SUCCESS;
- u32 i = 0;
-
- DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
-
- /* Take semaphore for the entire operation. */
- status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- if (status != IXGBE_SUCCESS) {
- DEBUGOUT("EEPROM write buffer - semaphore failed\n");
- goto out;
- }
-
- for (i = 0; i < words; i++) {
- status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
- data[i]);
-
- if (status != IXGBE_SUCCESS) {
- DEBUGOUT("Eeprom buffered write failed\n");
- break;
- }
- }
-
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-out:
-
- return status;
-}
-
-/**
- * ixgbe_checksum_ptr_x550 - Checksum one pointer region
- * @hw: pointer to hardware structure
- * @ptr: pointer offset in eeprom
- * @size: size of the section pointed to by ptr; if 0, the first word is used as the size
- * @csum: address of checksum to update
- *
- * Returns error status for any failure
- */
-STATIC s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
- u16 size, u16 *csum)
-{
- u16 buf[256];
- s32 status;
- u16 length, bufsz, i, start;
-
- bufsz = sizeof(buf) / sizeof(buf[0]);
-
- /* Read a chunk at the pointer location */
- status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
- if (status) {
- DEBUGOUT("Failed to read EEPROM image\n");
- return status;
- }
-
- if (size) {
- start = 0;
- length = size;
- } else {
- start = 1;
- length = buf[0];
-
- /* Skip pointer section if length is invalid. */
- if (length == 0xFFFF || length == 0 ||
- (ptr + length) >= hw->eeprom.word_size)
- return IXGBE_SUCCESS;
- }
-
- for (i = start; length; i++, length--) {
- if (i == bufsz) {
- ptr += bufsz;
- i = 0;
- if (length < bufsz)
- bufsz = length;
-
- /* Read a chunk at the pointer location */
- status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
- bufsz, buf);
- if (status) {
- DEBUGOUT("Failed to read EEPROM image\n");
- return status;
- }
- }
- *csum += buf[i];
- }
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
- * @hw: pointer to hardware structure
- *
- * Returns a negative error code on error, or the 16-bit checksum
- **/
-s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
-{
- u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
- s32 status;
- u16 checksum = 0;
- u16 pointer, i, size;
-
- DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
-
- hw->eeprom.ops.init_params(hw);
-
- /* Read pointer area */
- status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
- IXGBE_EEPROM_LAST_WORD + 1,
- eeprom_ptrs);
- if (status) {
- DEBUGOUT("Failed to read EEPROM image\n");
- return status;
- }
-
- /*
- * For X550 hardware include 0x0-0x41 in the checksum, skip the
- * checksum word itself
- */
- for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
- if (i != IXGBE_EEPROM_CHECKSUM)
- checksum += eeprom_ptrs[i];
-
- /*
- * Include all data from pointers 0x3, 0x6-0xE. This excludes the
- * FW, PHY module, and PCIe Expansion/Option ROM pointers.
- */
- for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
- if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
- continue;
-
- pointer = eeprom_ptrs[i];
-
- /* Skip pointer section if the pointer is invalid. */
- if (pointer == 0xFFFF || pointer == 0 ||
- pointer >= hw->eeprom.word_size)
- continue;
-
- switch (i) {
- case IXGBE_PCIE_GENERAL_PTR:
- size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
- break;
- case IXGBE_PCIE_CONFIG0_PTR:
- case IXGBE_PCIE_CONFIG1_PTR:
- size = IXGBE_PCIE_CONFIG_SIZE;
- break;
- default:
- size = 0;
- break;
- }
-
- status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum);
- if (status)
- return status;
- }
-
- checksum = (u16)IXGBE_EEPROM_SUM - checksum;
-
- return (s32)checksum;
-}
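
The final subtraction makes the checksum self-verifying: the stored word is chosen so that every covered word plus the checksum itself sums to IXGBE_EEPROM_SUM modulo 2^16. A short sketch of computing and then verifying that invariant (the constant below is a stand-in for IXGBE_EEPROM_SUM):

    #include <stdio.h>
    #include <stdint.h>

    #define EEPROM_SUM 0xBABAu  /* stand-in for IXGBE_EEPROM_SUM */

    /* Choose the checksum so that covered words + checksum == EEPROM_SUM. */
    static uint16_t calc_checksum(const uint16_t *words, unsigned n)
    {
        uint16_t sum = 0;

        for (unsigned i = 0; i < n; i++)
            sum += words[i];
        return (uint16_t)(EEPROM_SUM - sum);
    }

    int main(void)
    {
        uint16_t image[4] = { 0x1234, 0x5678, 0x9ABC, 0 };
        uint16_t total = 0;

        image[3] = calc_checksum(image, 3); /* fill the checksum word */
        for (unsigned i = 0; i < 4; i++)
            total += image[i];              /* wraps mod 2^16 */
        printf("%s\n", total == EEPROM_SUM ? "valid" : "invalid");
        return 0;
    }
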
-
-/**
- * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
- * @hw: pointer to hardware structure
- * @checksum_val: calculated checksum
- *
- * Performs checksum calculation and validates the EEPROM checksum. If the
- * caller does not need checksum_val, the value can be NULL.
- **/
-s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
- u16 *checksum_val)
-{
- s32 status;
- u16 checksum;
- u16 read_checksum = 0;
-
- DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
-
- /* Read the first word from the EEPROM. If this times out or fails, do
- * not continue or we could be in for a very long wait while every
- * EEPROM read fails
- */
- status = hw->eeprom.ops.read(hw, 0, &checksum);
- if (status) {
- DEBUGOUT("EEPROM read failed\n");
- return status;
- }
-
- status = hw->eeprom.ops.calc_checksum(hw);
- if (status < 0)
- return status;
-
- checksum = (u16)(status & 0xffff);
-
- status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
- &read_checksum);
- if (status)
- return status;
-
- /* Verify read checksum from EEPROM is the same as
- * calculated checksum
- */
- if (read_checksum != checksum) {
- status = IXGBE_ERR_EEPROM_CHECKSUM;
- ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
- "Invalid EEPROM checksum");
- }
-
- /* If the user cares, return the calculated checksum */
- if (checksum_val)
- *checksum_val = checksum;
-
- return status;
-}
-
-/**
- * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
- * @hw: pointer to hardware structure
- *
- * After writing the EEPROM to shadow RAM using the EEWR register, software
- * calculates the checksum, updates the EEPROM, and instructs the hardware
- * to update the flash.
- **/
-s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
-{
- s32 status;
- u16 checksum = 0;
-
- DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
-
- /* Read the first word from the EEPROM. If this times out or fails, do
- * not continue or we could be in for a very long wait while every
- * EEPROM read fails
- */
- status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
- if (status) {
- DEBUGOUT("EEPROM read failed\n");
- return status;
- }
-
- status = ixgbe_calc_eeprom_checksum_X550(hw);
- if (status < 0)
- return status;
-
- checksum = (u16)(status & 0xffff);
-
- status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
- checksum);
- if (status)
- return status;
-
- status = ixgbe_update_flash_X550(hw);
-
- return status;
-}
-
-/**
- * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
- * @hw: pointer to hardware structure
- *
- * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
- **/
-s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
-{
- s32 status = IXGBE_SUCCESS;
- struct ixgbe_hic_hdr2 buffer;
-
- DEBUGFUNC("ixgbe_update_flash_X550");
-
- buffer.cmd = FW_SHADOW_RAM_DUMP_CMD;
- buffer.buf_len1 = 0;
- buffer.buf_len2 = FW_SHADOW_RAM_DUMP_LEN;
- buffer.checksum = FW_DEFAULT_CHECKSUM;
-
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer), false);
-
- return status;
-}
-
-/**
- * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
- * @hw: pointer to hardware structure
- *
- * Determines physical layer capabilities of the current configuration.
- **/
-u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
-{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-
- DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
-
- hw->phy.ops.identify(hw);
-
- switch (hw->phy.type) {
- case ixgbe_phy_x550em_kr:
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
- IXGBE_PHYSICAL_LAYER_1000BASE_KX;
- break;
- case ixgbe_phy_x550em_kx4:
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
- IXGBE_PHYSICAL_LAYER_1000BASE_KX;
- break;
- default:
- break;
- }
-
- if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
- physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
-
- return physical_layer;
-}
-
-/**
- * ixgbe_disable_rx_x550 - Disable RX unit
- * @hw: pointer to hardware structure
- *
- * Disables the Rx DMA unit for x550
- **/
-void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
-{
- u32 rxctrl, pfdtxgswc;
- s32 status;
- struct ixgbe_hic_disable_rxen fw_cmd;
-
- DEBUGFUNC("ixgbe_enable_rx_dma_x550");
-
- rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- if (rxctrl & IXGBE_RXCTRL_RXEN) {
- pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
- if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
- pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
- IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
- hw->mac.set_lben = true;
- } else {
- hw->mac.set_lben = false;
- }
-
- fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
- fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
- fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
- fw_cmd.port_number = hw->phy.lan_id;
-
- status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
- sizeof(struct ixgbe_hic_disable_rxen),
- true);
-
- /* If we fail - disable RX using register write */
- if (status) {
- rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- if (rxctrl & IXGBE_RXCTRL_RXEN) {
- rxctrl &= ~IXGBE_RXCTRL_RXEN;
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
- }
- }
- }
-}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_82599_bypass.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_82599_bypass.c
deleted file mode 100755
index 12cc01d5..00000000
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_82599_bypass.c
+++ /dev/null
@@ -1,314 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "ixgbe/ixgbe_type.h"
-#include "ixgbe/ixgbe_82599.h"
-#include "ixgbe/ixgbe_api.h"
-#include "ixgbe/ixgbe_common.h"
-#include "ixgbe/ixgbe_phy.h"
-#include "ixgbe_bypass_defines.h"
-#include "ixgbe_bypass.h"
-
-/**
- * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
- * @hw: pointer to hardware structure
- * @speed: link speed to set
- *
- * We set the module speed differently for fixed fiber. As with other
- * multi-speed devices, we don't have an error value to return, so if we
- * detect an error we just log it and exit.
- */
-static void
-ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
-{
- s32 status;
- u8 rs, eeprom_data;
-
- switch (speed) {
- case IXGBE_LINK_SPEED_10GB_FULL:
- /* one bit mask same as setting on */
- rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
- break;
- case IXGBE_LINK_SPEED_1GB_FULL:
- rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
- break;
- default:
- PMD_DRV_LOG(ERR, "Invalid fixed module speed");
- return;
- }
-
- /* Set RS0 */
- status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- &eeprom_data);
- if (status) {
- PMD_DRV_LOG(ERR, "Failed to read Rx Rate Select RS0");
- goto out;
- }
-
- eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
-
- status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- eeprom_data);
- if (status) {
- PMD_DRV_LOG(ERR, "Failed to write Rx Rate Select RS0");
- goto out;
- }
-
- /* Set RS1 */
- status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- &eeprom_data);
- if (status) {
- PMD_DRV_LOG(ERR, "Failed to read Rx Rate Select RS1");
- goto out;
- }
-
- eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
-
- status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- eeprom_data);
- if (status) {
- PMD_DRV_LOG(ERR, "Failed to write Rx Rate Select RS1");
- goto out;
- }
-out:
- return;
-}
-
-/**
- * ixgbe_setup_mac_link_multispeed_fixed_fiber - Set MAC link speed
- * @hw: pointer to hardware structure
- * @speed: new link speed
- * @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- * Set the link speed in the AUTOC register and restarts link.
- **/
-static s32
-ixgbe_setup_mac_link_multispeed_fixed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
-{
- s32 status = IXGBE_SUCCESS;
- ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- u32 speedcnt = 0;
- u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
- u32 i = 0;
- bool link_up = false;
- bool negotiation;
-
- PMD_INIT_FUNC_TRACE();
-
- /* Mask off requested but non-supported speeds */
- status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
- if (status != IXGBE_SUCCESS)
- return status;
-
- speed &= link_speed;
-
- /*
- * Try each speed one by one, highest priority first. We do this in
- * software because 10gb fiber doesn't support speed autonegotiation.
- */
- if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
- speedcnt++;
- highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
-
- /* If we already have link at this speed, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, false);
- if (status != IXGBE_SUCCESS)
- return status;
-
- if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
- goto out;
- /* Set the module link speed */
- ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_10GB_FULL);
-
- /* Set the module link speed */
- esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
-
- /* Allow module to change analog characteristics (1G->10G) */
- msec_delay(40);
-
- status = ixgbe_setup_mac_link_82599(hw,
- IXGBE_LINK_SPEED_10GB_FULL,
- autoneg_wait_to_complete);
- if (status != IXGBE_SUCCESS)
- return status;
-
- /* Flap the tx laser if it has not already been done */
- ixgbe_flap_tx_laser(hw);
-
- /*
- * Wait for the controller to acquire link. Per IEEE 802.3ap,
- * Section 73.10.2, we may have to wait up to 500ms if KR is
- * attempted. 82599 uses the same timing for 10g SFI.
- */
- for (i = 0; i < 5; i++) {
- /* Wait for the link partner to also set speed */
- msec_delay(100);
-
- /* If we have link, just jump out */
- status = ixgbe_check_link(hw, &link_speed,
- &link_up, false);
- if (status != IXGBE_SUCCESS)
- return status;
-
- if (link_up)
- goto out;
- }
- }
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
- speedcnt++;
- if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
- highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
-
- /* If we already have link at this speed, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, false);
- if (status != IXGBE_SUCCESS)
- return status;
-
- if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
- goto out;
-
- /* Set the module link speed */
- ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_1GB_FULL);
-
- /* Allow module to change analog characteristics (10G->1G) */
- msec_delay(40);
-
- status = ixgbe_setup_mac_link_82599(hw,
- IXGBE_LINK_SPEED_1GB_FULL,
- autoneg_wait_to_complete);
- if (status != IXGBE_SUCCESS)
- return status;
-
- /* Flap the tx laser if it has not already been done */
- ixgbe_flap_tx_laser(hw);
-
- /* Wait for the link partner to also set speed */
- msec_delay(100);
-
- /* If we have link, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, false);
- if (status != IXGBE_SUCCESS)
- return status;
-
- if (link_up)
- goto out;
- }
-
- /*
- * We didn't get link. Configure back to the highest speed we tried
- * (if there was more than one). We call ourselves back with just the
- * single highest speed that the user requested.
- */
- if (speedcnt > 1)
- status = ixgbe_setup_mac_link_multispeed_fixed_fiber(hw,
- highest_link_speed, autoneg_wait_to_complete);
-
-out:
- /* Set autoneg_advertised value based on input link speed */
- hw->phy.autoneg_advertised = 0;
-
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
-
- return status;
-}
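
Stripped of the hardware calls, ixgbe_setup_mac_link_multispeed_fixed_fiber is a priority loop: try each requested speed from fastest to slowest and stop at the first one that links up. A skeleton of that control flow (hypothetical speed bits and a faked link check):

    #include <stdio.h>

    enum { SPEED_10G = 0x80, SPEED_1G = 0x20 }; /* hypothetical speed bits */

    /* Fake link check: pretend only 1G comes up. */
    static int try_link(int speed) { return speed == SPEED_1G; }

    /* Try requested speeds highest-first; return the first that links. */
    static int setup_multispeed(int requested)
    {
        static const int order[] = { SPEED_10G, SPEED_1G };

        for (unsigned i = 0; i < 2; i++) {
            if (!(requested & order[i]))
                continue;
            if (try_link(order[i]))
                return order[i]; /* linked at this speed */
        }
        return 0;                /* no link at any requested speed */
    }

    int main(void)
    {
        printf("linked at 0x%x\n", setup_multispeed(SPEED_10G | SPEED_1G));
        /* linked at 0x20 */
        return 0;
    }
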
-
-static enum ixgbe_media_type
-ixgbe_bypass_get_media_type(struct ixgbe_hw *hw)
-{
- enum ixgbe_media_type media_type;
-
- PMD_INIT_FUNC_TRACE();
-
- if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
- media_type = ixgbe_media_type_fiber;
- } else {
- media_type = ixgbe_get_media_type_82599(hw);
- }
- return (media_type);
-}
-
-/*
- * Wrapper around shared code (base driver) to support BYPASS nic.
- */
-s32
-ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw)
-{
- s32 ret_val;
-
- if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
- hw->mac.type = ixgbe_mac_82599EB;
- }
-
- ret_val = ixgbe_init_shared_code(hw);
- if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
- hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type;
- ixgbe_init_mac_link_ops_82599(hw);
- }
-
- return ret_val;
-}
-
-s32
-ixgbe_bypass_init_hw(struct ixgbe_hw *hw)
-{
- int rc;
-
- if ((rc = ixgbe_init_hw(hw)) == 0 &&
- hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
-
- hw->mac.ops.setup_link =
- &ixgbe_setup_mac_link_multispeed_fixed_fiber;
-
- hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type;
-
- hw->mac.ops.disable_tx_laser = NULL;
- hw->mac.ops.enable_tx_laser = NULL;
- hw->mac.ops.flap_tx_laser = NULL;
- }
-
- return (rc);
-}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass.c
deleted file mode 100755
index 832f4156..00000000
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <time.h>
-#include <rte_atomic.h>
-#include <rte_ethdev.h>
-#include "ixgbe_ethdev.h"
-#include "ixgbe_bypass_api.h"
-
-#define BYPASS_STATUS_OFF_MASK 3
-
-/* Macros to check for invalid function pointers. */
-#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
- if ((func) == NULL) { \
- PMD_DRV_LOG(ERR, "%s:%d function not supported", \
- __func__, __LINE__); \
- return retval; \
- } \
-} while(0)
-
-#define FUNC_PTR_OR_RET(func) do { \
- if ((func) == NULL) { \
- PMD_DRV_LOG(ERR, "%s:%d function not supported", \
- __func__, __LINE__); \
- return; \
- } \
-} while(0)
-
-
-/**
- * ixgbe_bypass_set_time - Set bypass FW time epoch.
- *
- * @adapter: pointer to the ixgbe_adapter structure
- *
- * This function will sync the FW date stamp with that of the
- * system clock.
- **/
-static void
-ixgbe_bypass_set_time(struct ixgbe_adapter *adapter)
-{
- u32 mask, value;
- u32 sec;
- struct ixgbe_hw *hw = &adapter->hw;
-
- sec = 0;
-
- /*
- * Send the FW our current time and turn on time_valid and
- * timer_reset bits.
- */
- mask = BYPASS_CTL1_TIME_M |
- BYPASS_CTL1_VALID_M |
- BYPASS_CTL1_OFFTRST_M;
- value = (sec & BYPASS_CTL1_TIME_M) |
- BYPASS_CTL1_VALID |
- BYPASS_CTL1_OFFTRST;
-
- FUNC_PTR_OR_RET(adapter->bps.ops.bypass_set);
-
- /* Store FW reset time (in seconds from epoch). */
- adapter->bps.reset_tm = time(NULL);
-
- /* reset FW timer. */
- adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL1, mask, value);
-}
-
-/**
- * ixgbe_bypass_init - Make some environment changes for bypass
- *
- * @adapter: pointer to ixgbe_adapter structure for access to state bits
- *
- * This function collects all the modifications needed by the bypass
- * driver.
- **/
-void
-ixgbe_bypass_init(struct rte_eth_dev *dev)
-{
- struct ixgbe_adapter *adapter;
- struct ixgbe_hw *hw;
-
- adapter = IXGBE_DEV_TO_ADPATER(dev);
- hw = &adapter->hw;
-
- /* Only allow BYPASS ops on the first port */
- if (hw->device_id != IXGBE_DEV_ID_82599_BYPASS ||
- hw->bus.func != 0) {
- PMD_DRV_LOG(ERR, "bypass function is not supported on that device");
- return;
- }
-
- /* set bypass ops. */
- adapter->bps.ops.bypass_rw = &ixgbe_bypass_rw_generic;
- adapter->bps.ops.bypass_valid_rd = &ixgbe_bypass_valid_rd_generic;
- adapter->bps.ops.bypass_set = &ixgbe_bypass_set_generic;
- adapter->bps.ops.bypass_rd_eep = &ixgbe_bypass_rd_eep_generic;
-
- /* set the time for logging. */
- ixgbe_bypass_set_time(adapter);
-
- /* Don't have the SDP to the laser */
- hw->mac.ops.disable_tx_laser = NULL;
- hw->mac.ops.enable_tx_laser = NULL;
- hw->mac.ops.flap_tx_laser = NULL;
-}
-
-s32
-ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state)
-{
- struct ixgbe_hw *hw;
- s32 ret_val;
- u32 cmd;
- u32 by_ctl = 0;
- struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
-
- hw = &adapter->hw;
- FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
-
- cmd = BYPASS_PAGE_CTL0;
- ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);
-
-	/* Assume bypass_rw didn't error out; if it did, state will
- * be ignored anyway.
- */
- *state = (by_ctl >> BYPASS_STATUS_OFF_SHIFT) & BYPASS_STATUS_OFF_MASK;
-
- return (ret_val);
-}
-
-
-s32
-ixgbe_bypass_state_store(struct rte_eth_dev *dev, u32 *new_state)
-{
- struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
- struct ixgbe_hw *hw;
- s32 ret_val;
-
- hw = &adapter->hw;
- FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
-
- /* Set the new state */
- ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
- BYPASS_MODE_OFF_M, *new_state);
- if (ret_val)
- goto exit;
-
- /* Set AUTO back on so FW can receive events */
- ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
- BYPASS_MODE_OFF_M, BYPASS_AUTO);
-
-exit:
- return ret_val;
-
-}
-
-s32
-ixgbe_bypass_event_show(struct rte_eth_dev *dev, u32 event,
- u32 *state)
-{
- struct ixgbe_hw *hw;
- s32 ret_val;
- u32 shift;
- u32 cmd;
- u32 by_ctl = 0;
- struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
-
- hw = &adapter->hw;
- FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
-
- cmd = BYPASS_PAGE_CTL0;
- ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);
-
-	/* Assume bypass_rw didn't error out; if it did, event will
- * be ignored anyway.
- */
- switch (event) {
- case BYPASS_EVENT_WDT_TO:
- shift = BYPASS_WDTIMEOUT_SHIFT;
- break;
- case BYPASS_EVENT_MAIN_ON:
- shift = BYPASS_MAIN_ON_SHIFT;
- break;
- case BYPASS_EVENT_MAIN_OFF:
- shift = BYPASS_MAIN_OFF_SHIFT;
- break;
- case BYPASS_EVENT_AUX_ON:
- shift = BYPASS_AUX_ON_SHIFT;
- break;
- case BYPASS_EVENT_AUX_OFF:
- shift = BYPASS_AUX_OFF_SHIFT;
- break;
- default:
- return EINVAL;
- }
-
- *state = (by_ctl >> shift) & 0x3;
-
- return ret_val;
-}
-
-s32
-ixgbe_bypass_event_store(struct rte_eth_dev *dev, u32 event,
- u32 state)
-{
- struct ixgbe_hw *hw;
- u32 status;
- u32 off;
- s32 ret_val;
- struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
-
- hw = &adapter->hw;
- FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
-
- switch (event) {
- case BYPASS_EVENT_WDT_TO:
- off = BYPASS_WDTIMEOUT_M;
- status = state << BYPASS_WDTIMEOUT_SHIFT;
- break;
- case BYPASS_EVENT_MAIN_ON:
- off = BYPASS_MAIN_ON_M;
- status = state << BYPASS_MAIN_ON_SHIFT;
- break;
- case BYPASS_EVENT_MAIN_OFF:
- off = BYPASS_MAIN_OFF_M;
- status = state << BYPASS_MAIN_OFF_SHIFT;
- break;
- case BYPASS_EVENT_AUX_ON:
- off = BYPASS_AUX_ON_M;
- status = state << BYPASS_AUX_ON_SHIFT;
- break;
- case BYPASS_EVENT_AUX_OFF:
- off = BYPASS_AUX_OFF_M;
- status = state << BYPASS_AUX_OFF_SHIFT;
- break;
- default:
- return EINVAL;
- }
-
- ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
- off, status);
-
- return ret_val;
-}
-
-s32
-ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout)
-{
- struct ixgbe_hw *hw;
- u32 status;
- u32 mask;
- s32 ret_val;
- struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
-
- hw = &adapter->hw;
- FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
-
- /* disable the timer with timeout of zero */
- if (timeout == RTE_BYPASS_TMT_OFF) {
- status = 0x0; /* WDG enable off */
- mask = BYPASS_WDT_ENABLE_M;
- } else {
- /* set time out value */
- mask = BYPASS_WDT_VALUE_M;
-
- /* enable the timer */
- status = timeout << BYPASS_WDT_TIME_SHIFT;
- status |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
- mask |= BYPASS_WDT_ENABLE_M;
- }
-
- ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
- mask, status);
-
- return ret_val;
-}
-
-s32
-ixgbe_bypass_ver_show(struct rte_eth_dev *dev, u32 *ver)
-{
- struct ixgbe_hw *hw;
- u32 cmd;
- u32 status;
- s32 ret_val;
- struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
-
- hw = &adapter->hw;
- FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
-
- cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
- cmd |= (BYPASS_EEPROM_VER_ADD << BYPASS_CTL2_OFFSET_SHIFT) &
- BYPASS_CTL2_OFFSET_M;
- ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
- if (ret_val)
- goto exit;
-
- /* wait for the write to stick */
- msleep(100);
-
- /* Now read the results */
- cmd &= ~BYPASS_WE;
- ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
- if (ret_val)
- goto exit;
-
-	*ver = status & BYPASS_CTL2_DATA_M;      /* only one byte of data */
-
-exit:
- return ret_val;
-}
-
-s32
-ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout)
-{
- struct ixgbe_hw *hw;
- u32 by_ctl = 0;
- u32 cmd;
- u32 wdg;
- s32 ret_val;
- struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
-
- hw = &adapter->hw;
- FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
-
- cmd = BYPASS_PAGE_CTL0;
- ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);
-
- wdg = by_ctl & BYPASS_WDT_ENABLE_M;
- if (!wdg)
- *wd_timeout = RTE_BYPASS_TMT_OFF;
- else
- *wd_timeout = (by_ctl >> BYPASS_WDT_TIME_SHIFT) &
- BYPASS_WDT_MASK;
-
- return ret_val;
-}
-
-s32
-ixgbe_bypass_wd_reset(struct rte_eth_dev *dev)
-{
- u32 cmd;
- u32 status;
- u32 sec;
- u32 count = 0;
- s32 ret_val;
- struct ixgbe_hw *hw;
- struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
-
- hw = &adapter->hw;
-
- FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
- FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_valid_rd, -ENOTSUP);
-
- /* Use the lower level bit-bang functions since we don't need
-	 * to read the register first to get its current state as we
-	 * are setting everything in this write.
- */
- /* Set up WD pet */
- cmd = BYPASS_PAGE_CTL1 | BYPASS_WE | BYPASS_CTL1_WDT_PET;
-
- /* Resync the FW time while writing to CTL1 anyway */
- adapter->bps.reset_tm = time(NULL);
- sec = 0;
-
- cmd |= (sec & BYPASS_CTL1_TIME_M) | BYPASS_CTL1_VALID;
-
- /* reset FW timer offset since we are resetting the clock */
- cmd |= BYPASS_CTL1_OFFTRST;
-
- ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
-
- /* Read until it matches what we wrote, or we time out */
- do {
- if (count++ > 10) {
- ret_val = IXGBE_BYPASS_FW_WRITE_FAILURE;
- break;
- }
-
- if (adapter->bps.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &status)) {
- ret_val = IXGBE_ERR_INVALID_ARGUMENT;
- break;
- }
- } while (!adapter->bps.ops.bypass_valid_rd(cmd, status));
-
- return ret_val;
-}
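
The write-then-verify loop in ixgbe_bypass_wd_reset() above is how this driver talks to the bypass firmware: issue the command, then re-read until the validator accepts the echo or the retry budget runs out. A minimal standalone sketch of that pattern, with made-up stand-ins for the bypass_rw/bypass_valid_rd ops rather than the real driver API:

#include <stdio.h>

/* Stand-in for bypass_rw(): pretend the FW simply echoes the command. */
static int fake_bypass_rw(unsigned int cmd, unsigned int *status)
{
	*status = cmd;
	return 0;
}

/* Stand-in for bypass_valid_rd(): accept only an exact echo. */
static int fake_valid_rd(unsigned int cmd, unsigned int status)
{
	return cmd == status;
}

int main(void)
{
	unsigned int cmd = 0xabcd, status = 0;
	int count = 0, ret;

	ret = fake_bypass_rw(cmd, &status);
	/* Read until it matches what we wrote, or we run out of retries. */
	while (ret == 0 && !fake_valid_rd(cmd, status)) {
		if (count++ > 10) {
			ret = -1;	/* FW write failure */
			break;
		}
		ret = fake_bypass_rw(cmd, &status);
	}
	printf("ret=%d after %d retries\n", ret, count);
	return 0;
}
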
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_fdir.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_fdir.c
deleted file mode 100755
index 571decc5..00000000
--- a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_fdir.c
+++ /dev/null
@@ -1,922 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <stdint.h>
-#include <stdarg.h>
-#include <errno.h>
-#include <sys/queue.h>
-
-#include <rte_interrupts.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_pci.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-
-#include "ixgbe_logs.h"
-#include "ixgbe/ixgbe_api.h"
-#include "ixgbe/ixgbe_common.h"
-#include "ixgbe_ethdev.h"
-
-/* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */
-#define FDIRCTRL_PBALLOC_MASK 0x03
-
-/* For calculating memory required for FDIR filters */
-#define PBALLOC_SIZE_SHIFT 15
-
-/* Number of bits used to mask bucket hash for different pballoc sizes */
-#define PERFECT_BUCKET_64KB_HASH_MASK 0x07FF /* 11 bits */
-#define PERFECT_BUCKET_128KB_HASH_MASK 0x0FFF /* 12 bits */
-#define PERFECT_BUCKET_256KB_HASH_MASK 0x1FFF /* 13 bits */
-#define SIG_BUCKET_64KB_HASH_MASK 0x1FFF /* 13 bits */
-#define SIG_BUCKET_128KB_HASH_MASK 0x3FFF /* 14 bits */
-#define SIG_BUCKET_256KB_HASH_MASK 0x7FFF /* 15 bits */
-
-/**
- * This function is based on ixgbe_fdir_enable_82599() in ixgbe/ixgbe_82599.c.
- * It adds extra configuration of fdirctrl that is common for all filter types.
- *
- * Initialize Flow Director control registers
- * @hw: pointer to hardware structure
- * @fdirctrl: value to write to flow director control register
- **/
-static void fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
-{
- int i;
-
- PMD_INIT_FUNC_TRACE();
-
- /* Prime the keys for hashing */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
-
- /*
- * Continue setup of fdirctrl register bits:
- * Set the maximum length per hash bucket to 0xA filters
- * Send interrupt when 64 filters are left
- */
- fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
- (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
-
- /*
- * Poll init-done after we write the register. Estimated times:
- * 10G: PBALLOC = 11b, timing is 60us
- * 1G: PBALLOC = 11b, timing is 600us
- * 100M: PBALLOC = 11b, timing is 6ms
- *
-	 * Multiply these timings by 4 if under full Rx load
- *
- * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
- * 1 msec per poll time. If we're at line rate and drop to 100M, then
- * this might not finish in our poll time, but we can live with that
- * for now.
- */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
- IXGBE_WRITE_FLUSH(hw);
- for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
- if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
- break;
- msec_delay(1);
- }
-
- if (i >= IXGBE_FDIR_INIT_DONE_POLL)
- PMD_INIT_LOG(WARNING, "Flow Director poll time exceeded!");
-}
-
-/*
- * Set appropriate bits in fdirctrl for: variable reporting levels, moving
- * flexbytes matching field, and drop queue (only for perfect matching mode).
- */
-static int
-configure_fdir_flags(struct rte_fdir_conf *conf, uint32_t *fdirctrl)
-{
- *fdirctrl = 0;
-
- switch (conf->pballoc) {
- case RTE_FDIR_PBALLOC_64K:
- /* 8k - 1 signature filters */
- *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
- break;
- case RTE_FDIR_PBALLOC_128K:
- /* 16k - 1 signature filters */
- *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
- break;
- case RTE_FDIR_PBALLOC_256K:
- /* 32k - 1 signature filters */
- *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
- break;
- default:
- /* bad value */
- PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
- return -EINVAL;
-	}
-
- /* status flags: write hash & swindex in the rx descriptor */
- switch (conf->status) {
- case RTE_FDIR_NO_REPORT_STATUS:
- /* do nothing, default mode */
- break;
- case RTE_FDIR_REPORT_STATUS:
- /* report status when the packet matches a fdir rule */
- *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
- break;
- case RTE_FDIR_REPORT_STATUS_ALWAYS:
- /* always report status */
- *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
- break;
- default:
- /* bad value */
- PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
- return -EINVAL;
-	}
-
- *fdirctrl |= (conf->flexbytes_offset << IXGBE_FDIRCTRL_FLEX_SHIFT);
-
- if (conf->mode == RTE_FDIR_MODE_PERFECT) {
- *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
- *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
- }
-
- return 0;
-}
-
-int
-ixgbe_fdir_configure(struct rte_eth_dev *dev)
-{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int err;
- uint32_t fdirctrl, pbsize;
- int i;
-
- PMD_INIT_FUNC_TRACE();
-
- if (hw->mac.type != ixgbe_mac_82599EB &&
- hw->mac.type != ixgbe_mac_X540 &&
- hw->mac.type != ixgbe_mac_X550 &&
- hw->mac.type != ixgbe_mac_X550EM_x)
- return -ENOSYS;
-
- err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
- if (err)
- return err;
-
- /*
- * Before enabling Flow Director, the Rx Packet Buffer size
- * must be reduced. The new value is the current size minus
- * flow director memory usage size.
- */
- pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
- (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
-
- /*
- * The defaults in the HW for RX PB 1-7 are not zero and so should be
-	 * initialized to zero for non-DCB mode, otherwise the actual total RX PB
- * would be bigger than programmed and filter space would run into
- * the PB 0 region.
- */
- for (i = 1; i < 8; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-
- fdir_enable_82599(hw, fdirctrl);
- return 0;
-}
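
For a feel of the numbers: the pbsize subtracted from RXPBSIZE(0) above comes straight from the low PBALLOC bits of fdirctrl. A quick arithmetic sketch, assuming the usual encodings (64K=1, 128K=2, 256K=3):

#include <stdio.h>

#define PBALLOC_SIZE_SHIFT 15

int main(void)
{
	unsigned int enc;

	/* encoding 1 -> 64KB, 2 -> 128KB, 3 -> 256KB of filter memory */
	for (enc = 1; enc <= 3; enc++)
		printf("pballoc=%u -> pbsize=%u bytes\n",
		       enc, 1u << (PBALLOC_SIZE_SHIFT + enc));
	return 0;
}
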
-
-/*
- * The below function is taken from the FreeBSD IXGBE drivers release
- * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
 * before returning, as the signature hash can use 16 bits.
- *
- * The newer driver has optimised functions for calculating bucket and
- * signature hashes. However they don't support IPv6 type packets for signature
- * filters so are not used here.
- *
- * Note that the bkt_hash field in the ixgbe_atr_input structure is also never
- * set.
- *
- * Compute the hashes for SW ATR
- * @stream: input bitstream to compute the hash on
- * @key: 32-bit hash key
- **/
-static u32
-ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
- u32 key)
-{
- /*
- * The algorithm is as follows:
- * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
- * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
- * and A[n] x B[n] is bitwise AND between same length strings
- *
- * K[n] is 16 bits, defined as:
- * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
- * for n modulo 32 < 15, K[n] =
- * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
- *
- * S[n] is 16 bits, defined as:
- * for n >= 15, S[n] = S[n:n - 15]
- * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
- *
- * To simplify for programming, the algorithm is implemented
- * in software this way:
- *
- * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
- *
- * for (i = 0; i < 352; i+=32)
- * hi_hash_dword[31:0] ^= Stream[(i+31):i];
- *
- * lo_hash_dword[15:0] ^= Stream[15:0];
- * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
- * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
- *
- * hi_hash_dword[31:0] ^= Stream[351:320];
- *
- * if(key[0])
- * hash[15:0] ^= Stream[15:0];
- *
- * for (i = 0; i < 16; i++) {
- * if (key[i])
- * hash[15:0] ^= lo_hash_dword[(i+15):i];
- * if (key[i + 16])
- * hash[15:0] ^= hi_hash_dword[(i+15):i];
- * }
- *
- */
- __be32 common_hash_dword = 0;
- u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
- u32 hash_result = 0;
- u8 i;
-
- /* record the flow_vm_vlan bits as they are a key part to the hash */
- flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);
-
- /* generate common hash dword */
- for (i = 1; i <= 13; i++)
- common_hash_dword ^= atr_input->dword_stream[i];
-
- hi_hash_dword = IXGBE_NTOHL(common_hash_dword);
-
- /* low dword is word swapped version of common */
- lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
-
- /* apply flow ID/VM pool/VLAN ID bits to hash words */
- hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
-
- /* Process bits 0 and 16 */
- if (key & 0x0001) hash_result ^= lo_hash_dword;
- if (key & 0x00010000) hash_result ^= hi_hash_dword;
-
- /*
- * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
- * delay this because bit 0 of the stream should not be processed
- * so we do not add the vlan until after bit 0 was processed
- */
- lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
-
-
- /* process the remaining 30 bits in the key 2 bits at a time */
- for (i = 15; i; i-- ) {
- if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
- if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
- }
-
- return hash_result;
-}
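
The key-bit folding loop that the long comment describes can be tried in isolation. A minimal sketch on made-up lo/hi hash dwords and an arbitrary 32-bit key (the real folding of the ixgbe_atr_input stream is omitted); like the function above, the result is deliberately left unmasked so all 16 signature bits survive:

#include <stdio.h>
#include <stdint.h>

/* Fold shifted windows of the lo/hi dwords into the hash, one window
 * per set bit of the 32-bit key (bits 0-15 select lo, 16-31 select hi). */
static uint32_t fold_hash(uint32_t key, uint32_t lo, uint32_t hi)
{
	uint32_t hash = 0;
	int i;

	if (key & 0x0001)
		hash ^= lo;
	if (key & 0x00010000)
		hash ^= hi;
	for (i = 15; i; i--) {
		if (key & (0x0001u << i))
			hash ^= lo >> i;
		if (key & (0x00010000u << i))
			hash ^= hi >> i;
	}
	return hash;	/* deliberately not masked down to 15 bits */
}

int main(void)
{
	printf("0x%08x\n", fold_hash(0x174d3614u, 0xdeadbeefu, 0x01234567u));
	return 0;
}
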
-
-/*
- * Calculate the hash value needed for signature-match filters. In the FreeBSD
- * driver, this is done by the optimised function
- * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
- * doesn't support calculating a hash for an IPv6 filter.
- */
-static uint32_t
-atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc)
-{
- uint32_t bucket_hash, sig_hash;
-
- if (pballoc == RTE_FDIR_PBALLOC_256K)
- bucket_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY) &
- SIG_BUCKET_256KB_HASH_MASK;
- else if (pballoc == RTE_FDIR_PBALLOC_128K)
- bucket_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY) &
- SIG_BUCKET_128KB_HASH_MASK;
- else
- bucket_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY) &
- SIG_BUCKET_64KB_HASH_MASK;
-
- sig_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_SIGNATURE_HASH_KEY);
-
- return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
-}
-
-/**
- * This function is based on ixgbe_atr_add_signature_filter_82599() in
- * ixgbe/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
- * setting extra fields in the FDIRCMD register, and removes the code that was
- * verifying the flow_type field. According to the documentation, a flow type of
 * 00 (i.e. not TCP, UDP, or SCTP) is not supported; however, it appears to
- * work ok...
- *
- * Adds a signature hash filter
- * @hw: pointer to hardware structure
- * @input: unique input dword
- * @queue: queue index to direct traffic to
- * @fdircmd: any extra flags to set in fdircmd register
- * @fdirhash: pre-calculated hash value for the filter
- **/
-static void
-fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input, u8 queue, u32 fdircmd,
- u32 fdirhash)
-{
- u64 fdirhashcmd;
-
- PMD_INIT_FUNC_TRACE();
-
- /* configure FDIRCMD register */
- fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
- fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
- fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
-
- /*
- * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
- * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
- */
- fdirhashcmd = (u64)fdircmd << 32;
- fdirhashcmd |= fdirhash;
- IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
-
- PMD_INIT_LOG(DEBUG, "Tx Queue=%x hash=%x", queue, (u32)fdirhashcmd);
-}
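
The 64-bit write above is just two adjacent 32-bit registers packed into one access: FDIRHASH in the low half, FDIRCMD in the high half. A trivial sketch of the packing, with dummy values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t fdircmd = 0x00008001u;		/* dummy command bits */
	uint32_t fdirhash = 0x12345678u;	/* dummy hash value */

	/* FDIRHASH occupies the low 32 bits, FDIRCMD the high 32 bits */
	uint64_t fdirhashcmd = ((uint64_t)fdircmd << 32) | fdirhash;

	printf("0x%016llx\n", (unsigned long long)fdirhashcmd);
	return 0;
}
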
-
-/*
- * Convert DPDK rte_fdir_filter struct to ixgbe_atr_input union that is used
- * by the IXGBE driver code.
- */
-static int
-fdir_filter_to_atr_input(struct rte_fdir_filter *fdir_filter,
- union ixgbe_atr_input *input)
-{
- if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP ||
- fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) &&
- (fdir_filter->port_src || fdir_filter->port_dst)) {
- PMD_INIT_LOG(ERR, "Invalid fdir_filter");
- return -EINVAL;
- }
-
- memset(input, 0, sizeof(*input));
-
- input->formatted.vlan_id = fdir_filter->vlan_id;
- input->formatted.src_port = fdir_filter->port_src;
- input->formatted.dst_port = fdir_filter->port_dst;
- input->formatted.flex_bytes = fdir_filter->flex_bytes;
-
- switch (fdir_filter->l4type) {
- case RTE_FDIR_L4TYPE_TCP:
- input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
- break;
- case RTE_FDIR_L4TYPE_UDP:
- input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
- break;
- case RTE_FDIR_L4TYPE_SCTP:
- input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
- break;
- case RTE_FDIR_L4TYPE_NONE:
- input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
- break;
- default:
- PMD_INIT_LOG(ERR, " Error on l4type input");
- return -EINVAL;
- }
-
- if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6) {
- input->formatted.flow_type |= IXGBE_ATR_L4TYPE_IPV6_MASK;
-
- input->formatted.src_ip[0] = fdir_filter->ip_src.ipv6_addr[0];
- input->formatted.src_ip[1] = fdir_filter->ip_src.ipv6_addr[1];
- input->formatted.src_ip[2] = fdir_filter->ip_src.ipv6_addr[2];
- input->formatted.src_ip[3] = fdir_filter->ip_src.ipv6_addr[3];
-
- input->formatted.dst_ip[0] = fdir_filter->ip_dst.ipv6_addr[0];
- input->formatted.dst_ip[1] = fdir_filter->ip_dst.ipv6_addr[1];
- input->formatted.dst_ip[2] = fdir_filter->ip_dst.ipv6_addr[2];
- input->formatted.dst_ip[3] = fdir_filter->ip_dst.ipv6_addr[3];
-
- } else {
- input->formatted.src_ip[0] = fdir_filter->ip_src.ipv4_addr;
- input->formatted.dst_ip[0] = fdir_filter->ip_dst.ipv4_addr;
- }
-
- return 0;
-}
-
-/*
- * Adds or updates a signature filter.
- *
- * dev: ethernet device to add filter to
- * fdir_filter: filter details
- * queue: queue index to direct traffic to
- * update: 0 to add a new filter, otherwise update existing.
- */
-static int
-fdir_add_update_signature_filter(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_filter, uint8_t queue, int update)
-{
- struct ixgbe_hw *hw= IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
- uint32_t fdirhash;
- union ixgbe_atr_input input;
- int err;
-
- if (hw->mac.type != ixgbe_mac_82599EB &&
- hw->mac.type != ixgbe_mac_X540 &&
- hw->mac.type != ixgbe_mac_X550 &&
- hw->mac.type != ixgbe_mac_X550EM_x)
- return -ENOSYS;
-
- err = fdir_filter_to_atr_input(fdir_filter, &input);
- if (err)
- return err;
-
- fdirhash = atr_compute_sig_hash_82599(&input,
- dev->data->dev_conf.fdir_conf.pballoc);
- fdir_add_signature_filter_82599(hw, &input, queue, fdircmd_flags,
- fdirhash);
- return 0;
-}
-
-int
-ixgbe_fdir_add_signature_filter(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_filter, uint8_t queue)
-{
- PMD_INIT_FUNC_TRACE();
- return fdir_add_update_signature_filter(dev, fdir_filter, queue, 0);
-}
-
-int
-ixgbe_fdir_update_signature_filter(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_filter, uint8_t queue)
-{
- PMD_INIT_FUNC_TRACE();
- return fdir_add_update_signature_filter(dev, fdir_filter, queue, 1);
-}
-
-/*
- * This is based on ixgbe_fdir_erase_perfect_filter_82599() in
- * ixgbe/ixgbe_82599.c. It is modified to take in the hash as a parameter so
- * that it can be used for removing signature and perfect filters.
- */
-static s32
-fdir_erase_filter_82599(struct ixgbe_hw *hw,
- __rte_unused union ixgbe_atr_input *input, uint32_t fdirhash)
-{
- u32 fdircmd = 0;
- u32 retry_count;
- s32 err = 0;
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
-
- /* flush hash to HW */
- IXGBE_WRITE_FLUSH(hw);
-
- /* Query if filter is present */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
-
- for (retry_count = 10; retry_count; retry_count--) {
- /* allow 10us for query to process */
- usec_delay(10);
- /* verify query completed successfully */
- fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
- if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
- break;
- }
-
- if (!retry_count) {
- PMD_INIT_LOG(ERR, "Timeout querying for flow director filter");
- err = -EIO;
- }
-
- /* if filter exists in hardware then remove it */
- if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
- IXGBE_WRITE_FLUSH(hw);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
- IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
- }
-
- return err;
-}
-
-int
-ixgbe_fdir_remove_signature_filter(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_filter)
-{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- union ixgbe_atr_input input;
- int err;
-
- PMD_INIT_FUNC_TRACE();
-
- if (hw->mac.type != ixgbe_mac_82599EB &&
- hw->mac.type != ixgbe_mac_X540 &&
- hw->mac.type != ixgbe_mac_X550 &&
- hw->mac.type != ixgbe_mac_X550EM_x)
- return -ENOSYS;
-
- err = fdir_filter_to_atr_input(fdir_filter, &input);
- if (err)
- return err;
-
- return fdir_erase_filter_82599(hw, &input,
- atr_compute_sig_hash_82599(&input,
- dev->data->dev_conf.fdir_conf.pballoc));
-}
-
-/**
- * Reverse the bits in FDIR registers that store 2 x 16 bit masks.
- *
- * @hi_dword: Bits 31:16 mask to be bit swapped.
- * @lo_dword: Bits 15:0 mask to be bit swapped.
- *
- * Flow director uses several registers to store 2 x 16 bit masks with the
- * bits reversed such as FDIRTCPM, FDIRUDPM and FDIRIP6M. The LS bit of the
- * mask affects the MS bit/byte of the target. This function reverses the
- * bits in these masks.
- **/
-static uint32_t
-reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
-{
- u32 mask = hi_dword << 16;
- mask |= lo_dword;
- mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
- mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
- mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
- return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
-}
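
A standalone check of those swap stages: applying the 1-, 2-, 4- and 8-bit swaps reverses the bit order within each 16-bit half while leaving the halves in place, which is exactly the layout the bit-reversed FDIR mask registers expect. Illustrative test values:

#include <stdio.h>
#include <stdint.h>

static uint32_t reverse_halves(uint32_t m)
{
	m = ((m & 0x55555555u) << 1) | ((m & 0xAAAAAAAAu) >> 1);
	m = ((m & 0x33333333u) << 2) | ((m & 0xCCCCCCCCu) >> 2);
	m = ((m & 0x0F0F0F0Fu) << 4) | ((m & 0xF0F0F0F0u) >> 4);
	return ((m & 0x00FF00FFu) << 8) | ((m & 0xFF00FF00u) >> 8);
}

int main(void)
{
	/* the LSB of each 16-bit half moves to the MSB of that half */
	printf("0x%08x\n", reverse_halves(0x00010001u)); /* 0x80008000 */
	printf("0x%08x\n", reverse_halves(0x00000001u)); /* 0x00008000 */
	return 0;
}
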
-
-/*
- * This macro exists in ixgbe/ixgbe_82599.c, however in that file it reverses
- * the bytes, and then reverses them again. So here it does nothing.
- */
-#define IXGBE_WRITE_REG_BE32 IXGBE_WRITE_REG
-
-/*
- * This is based on ixgbe_fdir_set_input_mask_82599() in ixgbe/ixgbe_82599.c,
- * but makes use of the rte_fdir_masks structure to see which bits to set.
- */
-static int
-fdir_set_input_mask_82599(struct ixgbe_hw *hw,
- struct rte_fdir_masks *input_mask)
-{
- /* mask VM pool since it is currently not supported */
- u32 fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
- u32 fdirtcpm; /* TCP source and destination port masks. */
- //u32 fdiripv6m; /* IPv6 source and destination masks. */
-
- PMD_INIT_FUNC_TRACE();
-
- /*
- * Program the relevant mask registers. If src/dst_port or src/dst_addr
- * are zero, then assume a full mask for that field. Also assume that
- * a VLAN of 0 is unspecified, so mask that out as well. L4type
- * cannot be masked out in this implementation.
- */
- if (input_mask->only_ip_flow) {
- /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
- fdirm |= IXGBE_FDIRM_L4P;
- if (input_mask->dst_port_mask || input_mask->src_port_mask) {
- PMD_INIT_LOG(ERR, " Error on src/dst port mask");
- return -EINVAL;
- }
- }
-
- //if (!input_mask->comp_ipv6_dst)
- /* mask DIPV6 */
- // fdirm |= IXGBE_FDIRM_DIPv6;
-
- if (!input_mask->vlan_id)
-		/* mask VLAN ID */
- fdirm |= IXGBE_FDIRM_VLANID;
-
- if (!input_mask->vlan_prio)
- /* mask VLAN priority */
- fdirm |= IXGBE_FDIRM_VLANP;
-
- if (!input_mask->flexbytes)
- /* Mask Flex Bytes */
- fdirm |= IXGBE_FDIRM_FLEX;
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
-
- /* store the TCP/UDP port masks, bit reversed from port layout */
- fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask,
- input_mask->src_port_mask);
-
- /* write both the same so that UDP and TCP use the same mask */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
-
- // if (!input_mask->set_ipv6_mask) {
- /* Store source and destination IPv4 masks (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
- IXGBE_NTOHL(~input_mask->src_ipv4_mask));
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
- IXGBE_NTOHL(~input_mask->dst_ipv4_mask));
- //}
- //else {
- /* Store source and destination IPv6 masks (bit reversed) */
- // fdiripv6m = reverse_fdir_bitmasks(input_mask->dst_ipv6_mask,
- // input_mask->src_ipv6_mask);
-
- // IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
- //}
- /* store IPv6 mask */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, 0xffffffff);
-
-
- return IXGBE_SUCCESS;
-}
-
-int
-ixgbe_fdir_set_masks(struct rte_eth_dev *dev, struct rte_fdir_masks *fdir_masks)
-{
- struct ixgbe_hw *hw;
- int err;
-
- PMD_INIT_FUNC_TRACE();
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (hw->mac.type != ixgbe_mac_82599EB &&
- hw->mac.type != ixgbe_mac_X540 &&
- hw->mac.type != ixgbe_mac_X550 &&
- hw->mac.type != ixgbe_mac_X550EM_x)
- return -ENOSYS;
-
- err = ixgbe_reinit_fdir_tables_82599(hw);
- if (err) {
- PMD_INIT_LOG(ERR, "reinit of fdir tables failed");
- return -EIO;
- }
-
- return fdir_set_input_mask_82599(hw, fdir_masks);
-}
-
-static uint32_t
-atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc)
-{
- if (pballoc == RTE_FDIR_PBALLOC_256K)
- return ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY) &
- PERFECT_BUCKET_256KB_HASH_MASK;
- else if (pballoc == RTE_FDIR_PBALLOC_128K)
- return ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY) &
- PERFECT_BUCKET_128KB_HASH_MASK;
- else
- return ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY) &
- PERFECT_BUCKET_64KB_HASH_MASK;
-}
-
-/*
- * This is based on ixgbe_fdir_write_perfect_filter_82599() in
- * ixgbe/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register
- * added, and IPv6 support also added. The hash value is also pre-calculated
- * as the pballoc value is needed to do it.
- */
-static void
-fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input,
- uint16_t soft_id, uint8_t queue, uint32_t fdircmd,
- uint32_t fdirhash)
-{
- u32 fdirport, fdirvlan;
-
- /* record the source address (big-endian) */
- if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), input->formatted.src_ip[0]);
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), input->formatted.src_ip[1]);
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.src_ip[2]);
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[3]);
- }
- else {
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
- }
-
- /* record the first 32 bits of the destination address (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
-
- /* record source and destination port (little-endian)*/
- fdirport = IXGBE_NTOHS(input->formatted.dst_port);
- fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
- fdirport |= IXGBE_NTOHS(input->formatted.src_port);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
-
- /* record vlan (little-endian) and flex_bytes(big-endian) */
- fdirvlan = input->formatted.flex_bytes;
- fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
- fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
-
- /* configure FDIRHASH register */
- fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
-
- /*
- * flush all previous writes to make certain registers are
- * programmed prior to issuing the command
- */
- IXGBE_WRITE_FLUSH(hw);
-
- /* configure FDIRCMD register */
- fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
- fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
- fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
- fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
-}
-
-/*
- * Adds or updates a perfect filter.
- *
- * dev: ethernet device to add filter to
- * fdir_filter: filter details
- * soft_id: software index for the filters
- * queue: queue index to direct traffic to
- * drop: non-zero if packets should be sent to the drop queue
- * update: 0 to add a new filter, otherwise update existing.
- */
-static int
-fdir_add_update_perfect_filter(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
- uint8_t queue, int drop, int update)
-{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
- uint32_t fdirhash;
- union ixgbe_atr_input input;
- int err;
-
- if (hw->mac.type != ixgbe_mac_82599EB &&
- hw->mac.type != ixgbe_mac_X540 &&
- hw->mac.type != ixgbe_mac_X550 &&
- hw->mac.type != ixgbe_mac_X550EM_x)
- return -ENOSYS;
-
- err = fdir_filter_to_atr_input(fdir_filter, &input);
- if (err)
- return err;
-
- if (drop) {
- queue = dev->data->dev_conf.fdir_conf.drop_queue;
- fdircmd_flags |= IXGBE_FDIRCMD_DROP;
- }
-
- fdirhash = atr_compute_perfect_hash_82599(&input,
- dev->data->dev_conf.fdir_conf.pballoc);
-
- fdir_write_perfect_filter_82599(hw, &input, soft_id, queue,
- fdircmd_flags, fdirhash);
- return 0;
-}
-
-int
-ixgbe_fdir_add_perfect_filter(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
- uint8_t queue, uint8_t drop)
-{
- PMD_INIT_FUNC_TRACE();
- return fdir_add_update_perfect_filter(dev, fdir_filter, soft_id, queue,
- drop, 0);
-}
-
-int
-ixgbe_fdir_update_perfect_filter(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
- uint8_t queue, uint8_t drop)
-{
- PMD_INIT_FUNC_TRACE();
- return fdir_add_update_perfect_filter(dev, fdir_filter, soft_id, queue,
- drop, 1);
-}
-
-int
-ixgbe_fdir_remove_perfect_filter(struct rte_eth_dev *dev,
- struct rte_fdir_filter *fdir_filter,
- uint16_t soft_id)
-{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- union ixgbe_atr_input input;
- uint32_t fdirhash;
- int err;
-
- PMD_INIT_FUNC_TRACE();
-
- if (hw->mac.type != ixgbe_mac_82599EB &&
- hw->mac.type != ixgbe_mac_X540 &&
- hw->mac.type != ixgbe_mac_X550 &&
- hw->mac.type != ixgbe_mac_X550EM_x)
- return -ENOSYS;
-
- err = fdir_filter_to_atr_input(fdir_filter, &input);
- if (err)
- return err;
-
- /* configure FDIRHASH register */
- fdirhash = atr_compute_perfect_hash_82599(&input,
- dev->data->dev_conf.fdir_conf.pballoc);
- fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
-
- return fdir_erase_filter_82599(hw, &input, fdirhash);
-}
-
-void
-ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir *fdir)
-{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_hw_fdir_info *info =
- IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
- uint32_t reg;
-
- if (hw->mac.type != ixgbe_mac_82599EB &&
- hw->mac.type != ixgbe_mac_X540 &&
- hw->mac.type != ixgbe_mac_X550 &&
- hw->mac.type != ixgbe_mac_X550EM_x)
- return;
-
- /* Get the information from registers */
- reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
- info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
- IXGBE_FDIRFREE_COLL_SHIFT);
- info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
- IXGBE_FDIRFREE_FREE_SHIFT);
-
- reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
- info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
- IXGBE_FDIRLEN_MAXHASH_SHIFT);
- info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
- IXGBE_FDIRLEN_MAXLEN_SHIFT);
-
- reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
- info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
- IXGBE_FDIRUSTAT_REMOVE_SHIFT;
- info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
- IXGBE_FDIRUSTAT_ADD_SHIFT;
-
- reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
- info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
- IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
- info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
- IXGBE_FDIRFSTAT_FADD_SHIFT;
-
-	/* Copy the new information into the fdir parameter */
- fdir->collision = info->collision;
- fdir->free = info->free;
- fdir->maxhash = info->maxhash;
- fdir->maxlen = info->maxlen;
- fdir->remove = info->remove;
- fdir->add = info->add;
- fdir->f_remove = info->f_remove;
- fdir->f_add = info->f_add;
-}
diff --git a/src/dpdk_lib18/librte_pmd_pcap/Makefile b/src/dpdk_lib18/librte_pmd_pcap/Makefile
deleted file mode 100755
index c5c214dc..00000000
--- a/src/dpdk_lib18/librte_pmd_pcap/Makefile
+++ /dev/null
@@ -1,59 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# Copyright(c) 2014 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_pmd_pcap.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-#
-# all source are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += rte_eth_pcap.c
-
-#
-# Export include files
-#
-SYMLINK-y-include +=
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_malloc
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_kvargs
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_pcap/rte_eth_pcap.c b/src/dpdk_lib18/librte_pmd_pcap/rte_eth_pcap.c
deleted file mode 100755
index f12d1e7e..00000000
--- a/src/dpdk_lib18/librte_pmd_pcap/rte_eth_pcap.c
+++ /dev/null
@@ -1,936 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 6WIND S.A.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <time.h>
-#include <rte_mbuf.h>
-#include <rte_ethdev.h>
-#include <rte_malloc.h>
-#include <rte_memcpy.h>
-#include <rte_string_fns.h>
-#include <rte_cycles.h>
-#include <rte_kvargs.h>
-#include <rte_dev.h>
-
-#include <net/if.h>
-
-#include <pcap.h>
-
-#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
-#define RTE_ETH_PCAP_SNAPLEN 4096
-#define RTE_ETH_PCAP_PROMISC 1
-#define RTE_ETH_PCAP_TIMEOUT -1
-#define ETH_PCAP_RX_PCAP_ARG "rx_pcap"
-#define ETH_PCAP_TX_PCAP_ARG "tx_pcap"
-#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
-#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
-#define ETH_PCAP_IFACE_ARG "iface"
-
-static char errbuf[PCAP_ERRBUF_SIZE];
-static struct timeval start_time;
-static uint64_t start_cycles;
-static uint64_t hz;
-
-struct pcap_rx_queue {
- pcap_t *pcap;
- uint8_t in_port;
- struct rte_mempool *mb_pool;
- volatile unsigned long rx_pkts;
- volatile unsigned long err_pkts;
- const char *name;
- const char *type;
-};
-
-struct pcap_tx_queue {
- pcap_dumper_t *dumper;
- pcap_t *pcap;
- volatile unsigned long tx_pkts;
- volatile unsigned long err_pkts;
- const char *name;
- const char *type;
-};
-
-struct rx_pcaps {
- unsigned num_of_rx;
- pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS];
- const char *names[RTE_PMD_RING_MAX_RX_RINGS];
- const char *types[RTE_PMD_RING_MAX_RX_RINGS];
-};
-
-struct tx_pcaps {
- unsigned num_of_tx;
- pcap_dumper_t *dumpers[RTE_PMD_RING_MAX_TX_RINGS];
- pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS];
- const char *names[RTE_PMD_RING_MAX_RX_RINGS];
- const char *types[RTE_PMD_RING_MAX_RX_RINGS];
-};
-
-struct pmd_internals {
- struct pcap_rx_queue rx_queue[RTE_PMD_RING_MAX_RX_RINGS];
- struct pcap_tx_queue tx_queue[RTE_PMD_RING_MAX_TX_RINGS];
- unsigned nb_rx_queues;
- unsigned nb_tx_queues;
- int if_index;
- int single_iface;
-};
-
-const char *valid_arguments[] = {
- ETH_PCAP_RX_PCAP_ARG,
- ETH_PCAP_TX_PCAP_ARG,
- ETH_PCAP_RX_IFACE_ARG,
- ETH_PCAP_TX_IFACE_ARG,
- ETH_PCAP_IFACE_ARG,
- NULL
-};
-
-static int open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper);
-static int open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap);
-static int open_single_iface(const char *iface, pcap_t **pcap);
-
-static struct ether_addr eth_addr = { .addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 } };
-static const char *drivername = "Pcap PMD";
-static struct rte_eth_link pmd_link = {
- .link_speed = 10000,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = 0
-};
-
-
-static uint16_t
-eth_pcap_rx(void *queue,
- struct rte_mbuf **bufs,
- uint16_t nb_pkts)
-{
- unsigned i;
- struct pcap_pkthdr header;
- const u_char *packet;
- struct rte_mbuf *mbuf;
- struct pcap_rx_queue *pcap_q = queue;
- struct rte_pktmbuf_pool_private *mbp_priv;
- uint16_t num_rx = 0;
- uint16_t buf_size;
-
- if (unlikely(pcap_q->pcap == NULL || nb_pkts == 0))
- return 0;
-
- /* Reads the given number of packets from the pcap file one by one
- * and copies the packet data into a newly allocated mbuf to return.
- */
- for (i = 0; i < nb_pkts; i++) {
- /* Get the next PCAP packet */
- packet = pcap_next(pcap_q->pcap, &header);
- if (unlikely(packet == NULL))
- break;
- else
- mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
- if (unlikely(mbuf == NULL))
- break;
-
- /* Now get the space available for data in the mbuf */
- mbp_priv = rte_mempool_get_priv(pcap_q->mb_pool);
- buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM);
-
- if (header.len <= buf_size) {
- /* pcap packet will fit in the mbuf, go ahead and copy */
- rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
- header.len);
- mbuf->data_len = (uint16_t)header.len;
- mbuf->pkt_len = mbuf->data_len;
- mbuf->port = pcap_q->in_port;
- bufs[num_rx] = mbuf;
- num_rx++;
- } else {
- /* pcap packet will not fit in the mbuf, so drop packet */
- RTE_LOG(ERR, PMD,
- "PCAP packet %d bytes will not fit in mbuf (%d bytes)\n",
- header.len, buf_size);
- rte_pktmbuf_free(mbuf);
- }
- }
- pcap_q->rx_pkts += num_rx;
- return num_rx;
-}
-
-static inline void
-calculate_timestamp(struct timeval *ts) {
- uint64_t cycles;
- struct timeval cur_time;
-
- cycles = rte_get_timer_cycles() - start_cycles;
- cur_time.tv_sec = cycles / hz;
-	cur_time.tv_usec = (cycles % hz) * 1e6 / hz;
- timeradd(&start_time, &cur_time, ts);
-}
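
The conversion above splits the elapsed timer-cycle count into whole seconds plus a sub-second remainder scaled to microseconds (1e6 microseconds per second). A standalone sketch under an assumed 2.4 GHz timer and an arbitrary start time:

#define _DEFAULT_SOURCE		/* for the timeradd() macro on glibc */
#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>

int main(void)
{
	uint64_t hz = 2400000000ULL;		/* assumed 2.4 GHz timer */
	uint64_t cycles = 3 * hz + hz / 4;	/* 3.25 s of elapsed cycles */
	struct timeval start = { 1000, 0 };	/* arbitrary start time */
	struct timeval cur, ts;

	cur.tv_sec = cycles / hz;
	cur.tv_usec = (cycles % hz) * 1000000ULL / hz;
	timeradd(&start, &cur, &ts);

	printf("%ld.%06ld\n", (long)ts.tv_sec, (long)ts.tv_usec);
	return 0;				/* prints 1003.250000 */
}
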
-
-/*
- * Callback to handle writing packets to a pcap file.
- */
-static uint16_t
-eth_pcap_tx_dumper(void *queue,
- struct rte_mbuf **bufs,
- uint16_t nb_pkts)
-{
- unsigned i;
- struct rte_mbuf *mbuf;
- struct pcap_tx_queue *dumper_q = queue;
- uint16_t num_tx = 0;
- struct pcap_pkthdr header;
-
- if (dumper_q->dumper == NULL || nb_pkts == 0)
- return 0;
-
- /* writes the nb_pkts packets to the previously opened pcap file dumper */
- for (i = 0; i < nb_pkts; i++) {
- mbuf = bufs[i];
- calculate_timestamp(&header.ts);
- header.len = mbuf->data_len;
- header.caplen = header.len;
- pcap_dump((u_char *)dumper_q->dumper, &header,
- rte_pktmbuf_mtod(mbuf, void*));
- rte_pktmbuf_free(mbuf);
- num_tx++;
- }
-
- /*
- * Since there's no place to hook a callback when the forwarding
- * process stops and to make sure the pcap file is actually written,
- * we flush the pcap dumper within each burst.
- */
- pcap_dump_flush(dumper_q->dumper);
- dumper_q->tx_pkts += num_tx;
- dumper_q->err_pkts += nb_pkts - num_tx;
- return num_tx;
-}
-
-/*
- * Callback to handle sending packets through a real NIC.
- */
-static uint16_t
-eth_pcap_tx(void *queue,
- struct rte_mbuf **bufs,
- uint16_t nb_pkts)
-{
- unsigned i;
- int ret;
- struct rte_mbuf *mbuf;
- struct pcap_tx_queue *tx_queue = queue;
- uint16_t num_tx = 0;
-
- if (unlikely(nb_pkts == 0 || tx_queue->pcap == NULL))
- return 0;
-
- for (i = 0; i < nb_pkts; i++) {
- mbuf = bufs[i];
- ret = pcap_sendpacket(tx_queue->pcap,
- rte_pktmbuf_mtod(mbuf, u_char *),
- mbuf->data_len);
- if (unlikely(ret != 0))
- break;
- num_tx++;
- rte_pktmbuf_free(mbuf);
- }
-
- tx_queue->tx_pkts += num_tx;
- tx_queue->err_pkts += nb_pkts - num_tx;
- return num_tx;
-}
-
-static int
-eth_dev_start(struct rte_eth_dev *dev)
-{
- unsigned i;
- struct pmd_internals *internals = dev->data->dev_private;
- struct pcap_tx_queue *tx;
- struct pcap_rx_queue *rx;
-
- /* Special iface case. Single pcap is open and shared between tx/rx. */
- if (internals->single_iface) {
- tx = &internals->tx_queue[0];
- rx = &internals->rx_queue[0];
-
- if (!tx->pcap && strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
- if (open_single_iface(tx->name, &tx->pcap) < 0)
- return -1;
- rx->pcap = tx->pcap;
- }
- goto status_up;
- }
-
- /* If not open already, open tx pcaps/dumpers */
- for (i = 0; i < internals->nb_tx_queues; i++) {
- tx = &internals->tx_queue[i];
-
- if (!tx->dumper && strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
- if (open_single_tx_pcap(tx->name, &tx->dumper) < 0)
- return -1;
- }
-
- else if (!tx->pcap && strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
- if (open_single_iface(tx->name, &tx->pcap) < 0)
- return -1;
- }
- }
-
- /* If not open already, open rx pcaps */
- for (i = 0; i < internals->nb_rx_queues; i++) {
- rx = &internals->rx_queue[i];
-
- if (rx->pcap != NULL)
- continue;
-
- if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
- if (open_single_rx_pcap(rx->name, &rx->pcap) < 0)
- return -1;
- }
-
- else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
- if (open_single_iface(rx->name, &rx->pcap) < 0)
- return -1;
- }
- }
-
-status_up:
-
- dev->data->dev_link.link_status = 1;
- return 0;
-}
-
-/*
- * This function gets called when the current port gets stopped.
- * It is the only place for us to close all the tx stream dumpers.
- * If not called, the dumpers are only flushed within each tx burst.
- */
-static void
-eth_dev_stop(struct rte_eth_dev *dev)
-{
- unsigned i;
- struct pmd_internals *internals = dev->data->dev_private;
- struct pcap_tx_queue *tx;
- struct pcap_rx_queue *rx;
-
- /* Special iface case. Single pcap is open and shared between tx/rx. */
- if (internals->single_iface) {
- tx = &internals->tx_queue[0];
- rx = &internals->rx_queue[0];
- pcap_close(tx->pcap);
- tx->pcap = NULL;
- rx->pcap = NULL;
- goto status_down;
- }
-
- for (i = 0; i < internals->nb_tx_queues; i++) {
- tx = &internals->tx_queue[i];
-
- if (tx->dumper != NULL) {
- pcap_dump_close(tx->dumper);
- tx->dumper = NULL;
- }
-
- if (tx->pcap != NULL) {
- pcap_close(tx->pcap);
- tx->pcap = NULL;
- }
- }
-
- for (i = 0; i < internals->nb_rx_queues; i++) {
- rx = &internals->rx_queue[i];
-
- if (rx->pcap != NULL) {
- pcap_close(rx->pcap);
- rx->pcap = NULL;
- }
- }
-
-status_down:
- dev->data->dev_link.link_status = 0;
-}
-
-static int
-eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
-{
- return 0;
-}
-
-static void
-eth_dev_info(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info)
-{
- struct pmd_internals *internals = dev->data->dev_private;
- dev_info->driver_name = drivername;
- dev_info->if_index = internals->if_index;
- dev_info->max_mac_addrs = 1;
- dev_info->max_rx_pktlen = (uint32_t) -1;
- dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
- dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
- dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
-}
-
-static void
-eth_stats_get(struct rte_eth_dev *dev,
- struct rte_eth_stats *igb_stats)
-{
- unsigned i;
- unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
- const struct pmd_internals *internal = dev->data->dev_private;
-
- memset(igb_stats, 0, sizeof(*igb_stats));
- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_rx_queues;
- i++) {
- igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
- rx_total += igb_stats->q_ipackets[i];
- }
-
- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_tx_queues;
- i++) {
- igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
- igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
- tx_total += igb_stats->q_opackets[i];
- tx_err_total += igb_stats->q_errors[i];
- }
-
- igb_stats->ipackets = rx_total;
- igb_stats->opackets = tx_total;
- igb_stats->oerrors = tx_err_total;
-}
-
-static void
-eth_stats_reset(struct rte_eth_dev *dev)
-{
- unsigned i;
- struct pmd_internals *internal = dev->data->dev_private;
- for (i = 0; i < internal->nb_rx_queues; i++)
- internal->rx_queue[i].rx_pkts = 0;
- for (i = 0; i < internal->nb_tx_queues; i++) {
- internal->tx_queue[i].tx_pkts = 0;
- internal->tx_queue[i].err_pkts = 0;
- }
-}
-
-static void
-eth_dev_close(struct rte_eth_dev *dev __rte_unused)
-{
-}
-
-static void
-eth_queue_release(void *q __rte_unused)
-{
-}
-
-static int
-eth_link_update(struct rte_eth_dev *dev __rte_unused,
- int wait_to_complete __rte_unused)
-{
- return 0;
-}
-
-static int
-eth_rx_queue_setup(struct rte_eth_dev *dev,
- uint16_t rx_queue_id,
- uint16_t nb_rx_desc __rte_unused,
- unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
- struct rte_mempool *mb_pool)
-{
- struct pmd_internals *internals = dev->data->dev_private;
- struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
- pcap_q->mb_pool = mb_pool;
- dev->data->rx_queues[rx_queue_id] = pcap_q;
- pcap_q->in_port = dev->data->port_id;
- return 0;
-}
-
-static int
-eth_tx_queue_setup(struct rte_eth_dev *dev,
- uint16_t tx_queue_id,
- uint16_t nb_tx_desc __rte_unused,
- unsigned int socket_id __rte_unused,
- const struct rte_eth_txconf *tx_conf __rte_unused)
-{
-
- struct pmd_internals *internals = dev->data->dev_private;
- dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
- return 0;
-}
-
-static struct eth_dev_ops ops = {
- .dev_start = eth_dev_start,
- .dev_stop = eth_dev_stop,
- .dev_close = eth_dev_close,
- .dev_configure = eth_dev_configure,
- .dev_infos_get = eth_dev_info,
- .rx_queue_setup = eth_rx_queue_setup,
- .tx_queue_setup = eth_tx_queue_setup,
- .rx_queue_release = eth_queue_release,
- .tx_queue_release = eth_queue_release,
- .link_update = eth_link_update,
- .stats_get = eth_stats_get,
- .stats_reset = eth_stats_reset,
-};
-
-/*
- * Function handler that opens the pcap file for reading and stores a
- * reference to it for later use.
- */
-static int
-open_rx_pcap(const char *key, const char *value, void *extra_args)
-{
- unsigned i;
- const char *pcap_filename = value;
- struct rx_pcaps *pcaps = extra_args;
- pcap_t *pcap = NULL;
-
- for (i = 0; i < pcaps->num_of_rx; i++) {
- if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
- return -1;
-
- pcaps->pcaps[i] = pcap;
- pcaps->names[i] = pcap_filename;
- pcaps->types[i] = key;
- }
-
- return 0;
-}
-
-static int
-open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
-{
- if ((*pcap = pcap_open_offline(pcap_filename, errbuf)) == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename, errbuf);
- return -1;
- }
- return 0;
-}
-
-/*
- * Opens a pcap file for writing and stores a reference to it
- * for later use.
- */
-static int
-open_tx_pcap(const char *key, const char *value, void *extra_args)
-{
- unsigned i;
- const char *pcap_filename = value;
- struct tx_pcaps *dumpers = extra_args;
- pcap_dumper_t *dumper;
-
- for (i = 0; i < dumpers->num_of_tx; i++) {
- if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
- return -1;
-
- dumpers->dumpers[i] = dumper;
- dumpers->names[i] = pcap_filename;
- dumpers->types[i] = key;
- }
-
- return 0;
-}
-
-static int
-open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
-{
- pcap_t *tx_pcap;
- /*
- * We need to create a dummy empty pcap_t to use it
-	 * with pcap_dump_open(), so we create a big enough Ethernet
-	 * pcap holder.
- */
-
- if ((tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN))
- == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n");
- return -1;
- }
-
- /* The dumper is created using the previous pcap_t reference */
- if ((*dumper = pcap_dump_open(tx_pcap, pcap_filename)) == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n", pcap_filename);
- return -1;
- }
-
- return 0;
-}
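
/* A minimal standalone sketch of the same pcap_open_dead()/pcap_dump_open()
 * pairing used above; the 65535 snapshot length and the zeroed sample frame
 * are illustrative, not taken from this driver. */
#include <pcap/pcap.h>
#include <string.h>
#include <sys/time.h>

static int dump_one_packet(const char *path)
{
	pcap_t *dead;
	pcap_dumper_t *dumper;
	struct pcap_pkthdr hdr;
	unsigned char frame[60];

	memset(frame, 0, sizeof(frame));	/* zero-padded minimal frame */
	/* the dead handle only supplies link type + snaplen for the file header */
	if ((dead = pcap_open_dead(DLT_EN10MB, 65535)) == NULL)
		return -1;
	if ((dumper = pcap_dump_open(dead, path)) == NULL) {
		pcap_close(dead);
		return -1;
	}
	gettimeofday(&hdr.ts, NULL);
	hdr.caplen = hdr.len = sizeof(frame);
	pcap_dump((unsigned char *)dumper, &hdr, frame);
	pcap_dump_close(dumper);
	pcap_close(dead);
	return 0;
}
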
-
-/*
- * pcap_open_live wrapper function
- */
-static inline int
-open_iface_live(const char *iface, pcap_t **pcap) {
- *pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
- RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);
-
- if (*pcap == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf);
- return -1;
- }
- return 0;
-}
-
-/*
- * Opens an interface for reading and writing
- */
-static inline int
-open_rx_tx_iface(const char *key, const char *value, void *extra_args)
-{
- const char *iface = value;
- struct rx_pcaps *pcaps = extra_args;
- pcap_t *pcap = NULL;
-
- if (open_single_iface(iface, &pcap) < 0)
- return -1;
-
- pcaps->pcaps[0] = pcap;
- pcaps->names[0] = iface;
- pcaps->types[0] = key;
-
- return 0;
-}
-
-/*
- * Opens a NIC for reading packets from it
- */
-static inline int
-open_rx_iface(const char *key, const char *value, void *extra_args)
-{
- unsigned i;
- const char *iface = value;
- struct rx_pcaps *pcaps = extra_args;
- pcap_t *pcap = NULL;
-
- for (i = 0; i < pcaps->num_of_rx; i++) {
- if (open_single_iface(iface, &pcap) < 0)
- return -1;
- pcaps->pcaps[i] = pcap;
- pcaps->names[i] = iface;
- pcaps->types[i] = key;
- }
-
- return 0;
-}
-
-/*
- * Opens a NIC for writing packets to it
- */
-static int
-open_tx_iface(const char *key, const char *value, void *extra_args)
-{
- unsigned i;
- const char *iface = value;
- struct tx_pcaps *pcaps = extra_args;
- pcap_t *pcap;
-
- for (i = 0; i < pcaps->num_of_tx; i++) {
- if (open_single_iface(iface, &pcap) < 0)
- return -1;
- pcaps->pcaps[i] = pcap;
- pcaps->names[i] = iface;
- pcaps->types[i] = key;
- }
-
- return 0;
-}
-
-static int
-open_single_iface(const char *iface, pcap_t **pcap)
-{
- if (open_iface_live(iface, pcap) < 0) {
- RTE_LOG(ERR, PMD, "Couldn't open interface %s\n", iface);
- return -1;
- }
-
- return 0;
-}
-
-static int
-rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
- const unsigned nb_tx_queues,
- const unsigned numa_node,
- struct pmd_internals **internals,
- struct rte_eth_dev **eth_dev,
- struct rte_kvargs *kvlist)
-{
- struct rte_eth_dev_data *data = NULL;
- struct rte_pci_device *pci_dev = NULL;
- unsigned k_idx;
- struct rte_kvargs_pair *pair = NULL;
-
- for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
- pair = &kvlist->pairs[k_idx];
- if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL)
- break;
- }
-
- RTE_LOG(INFO, PMD,
- "Creating pcap-backed ethdev on numa socket %u\n", numa_node);
-
- /* now do all data allocation - for eth_dev structure, dummy pci driver
- * and internal (private) data
- */
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL)
- goto error;
-
- pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
- if (pci_dev == NULL)
- goto error;
-
- *internals = rte_zmalloc_socket(name, sizeof(**internals), 0, numa_node);
- if (*internals == NULL)
- goto error;
-
- /* reserve an ethdev entry */
- *eth_dev = rte_eth_dev_allocate(name);
- if (*eth_dev == NULL)
- goto error;
-
- /* now put it all together
- * - store queue data in internals,
- * - store numa_node info in pci_driver
- * - point eth_dev_data to internals and pci_driver
- * - and point eth_dev structure to new eth_dev_data structure
- */
- /* NOTE: we replace the data element of the originally allocated eth_dev
- * so that the rings are local, per-process */
-
- (*internals)->nb_rx_queues = nb_rx_queues;
- (*internals)->nb_tx_queues = nb_tx_queues;
-
- if (pair == NULL)
- (*internals)->if_index = 0;
- else
- (*internals)->if_index = if_nametoindex(pair->value);
-
- pci_dev->numa_node = numa_node;
-
- data->dev_private = *internals;
- data->port_id = (*eth_dev)->data->port_id;
- data->nb_rx_queues = (uint16_t)nb_rx_queues;
- data->nb_tx_queues = (uint16_t)nb_tx_queues;
- data->dev_link = pmd_link;
- data->mac_addrs = &eth_addr;
-
- (*eth_dev)->data = data;
- (*eth_dev)->dev_ops = &ops;
- (*eth_dev)->pci_dev = pci_dev;
-
- return 0;
-
- error: if (data)
- rte_free(data);
- if (pci_dev)
- rte_free(pci_dev);
- if (*internals)
- rte_free(*internals);
- return -1;
-}
-
-static int
-rte_eth_from_pcaps_n_dumpers(const char *name,
- struct rx_pcaps *rx_queues,
- const unsigned nb_rx_queues,
- struct tx_pcaps *tx_queues,
- const unsigned nb_tx_queues,
- const unsigned numa_node,
- struct rte_kvargs *kvlist)
-{
- struct pmd_internals *internals = NULL;
- struct rte_eth_dev *eth_dev = NULL;
- unsigned i;
-
- /* do some parameter checking */
- if (rx_queues == NULL && nb_rx_queues > 0)
- return -1;
- if (tx_queues == NULL && nb_tx_queues > 0)
- return -1;
-
- if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
- &internals, &eth_dev, kvlist) < 0)
- return -1;
-
- for (i = 0; i < nb_rx_queues; i++) {
- internals->rx_queue[i].pcap = rx_queues->pcaps[i];
- internals->rx_queue[i].name = rx_queues->names[i];
- internals->rx_queue[i].type = rx_queues->types[i];
- }
- for (i = 0; i < nb_tx_queues; i++) {
- internals->tx_queue[i].dumper = tx_queues->dumpers[i];
- internals->tx_queue[i].name = tx_queues->names[i];
- internals->tx_queue[i].type = tx_queues->types[i];
- }
-
- /* using multiple pcaps/interfaces */
- internals->single_iface = 0;
-
- eth_dev->rx_pkt_burst = eth_pcap_rx;
- eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
-
- return 0;
-}
-
-static int
-rte_eth_from_pcaps(const char *name,
- struct rx_pcaps *rx_queues,
- const unsigned nb_rx_queues,
- struct tx_pcaps *tx_queues,
- const unsigned nb_tx_queues,
- const unsigned numa_node,
- struct rte_kvargs *kvlist,
- int single_iface)
-{
- struct pmd_internals *internals = NULL;
- struct rte_eth_dev *eth_dev = NULL;
- unsigned i;
-
- /* do some parameter checking */
- if (rx_queues == NULL && nb_rx_queues > 0)
- return -1;
- if (tx_queues == NULL && nb_tx_queues > 0)
- return -1;
-
- if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
- &internals, &eth_dev, kvlist) < 0)
- return -1;
-
- for (i = 0; i < nb_rx_queues; i++) {
- internals->rx_queue[i].pcap = rx_queues->pcaps[i];
- internals->rx_queue[i].name = rx_queues->names[i];
- internals->rx_queue[i].type = rx_queues->types[i];
- }
- for (i = 0; i < nb_tx_queues; i++) {
- internals->tx_queue[i].pcap = tx_queues->pcaps[i];
- internals->tx_queue[i].name = tx_queues->names[i];
- internals->tx_queue[i].type = tx_queues->types[i];
- }
-
- /* store whether we are using a single interface for rx/tx or not */
- internals->single_iface = single_iface;
-
- eth_dev->rx_pkt_burst = eth_pcap_rx;
- eth_dev->tx_pkt_burst = eth_pcap_tx;
-
- return 0;
-}
-
-
-static int
-rte_pmd_pcap_devinit(const char *name, const char *params)
-{
- unsigned numa_node, using_dumpers = 0;
- int ret;
- struct rte_kvargs *kvlist;
- struct rx_pcaps pcaps;
- struct tx_pcaps dumpers;
-
- RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);
-
- numa_node = rte_socket_id();
-
- gettimeofday(&start_time, NULL);
- start_cycles = rte_get_timer_cycles();
- hz = rte_get_timer_hz();
-
- kvlist = rte_kvargs_parse(params, valid_arguments);
- if (kvlist == NULL)
- return -1;
-
- /*
- * If the iface argument is passed, we open the NIC and use it for
- * both reading and writing
- */
- if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {
-
- ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
- &open_rx_tx_iface, &pcaps);
- if (ret < 0)
- return -1;
- dumpers.pcaps[0] = pcaps.pcaps[0];
- dumpers.names[0] = pcaps.names[0];
- dumpers.types[0] = pcaps.types[0];
- return rte_eth_from_pcaps(name, &pcaps, 1, &dumpers, 1,
- numa_node, kvlist, 1);
- }
-
- /*
- * We check whether we want to open an RX stream from a real NIC or a
- * pcap file
- */
- if ((pcaps.num_of_rx = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG))) {
- ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
- &open_rx_pcap, &pcaps);
- } else {
- pcaps.num_of_rx = rte_kvargs_count(kvlist,
- ETH_PCAP_RX_IFACE_ARG);
- ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
- &open_rx_iface, &pcaps);
- }
-
- if (ret < 0)
- return -1;
-
- /*
- * We check whether we want to open a TX stream to a real NIC or a
- * pcap file
- */
- if ((dumpers.num_of_tx = rte_kvargs_count(kvlist,
- ETH_PCAP_TX_PCAP_ARG))) {
- ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
- &open_tx_pcap, &dumpers);
- using_dumpers = 1;
- } else {
- dumpers.num_of_tx = rte_kvargs_count(kvlist,
- ETH_PCAP_TX_IFACE_ARG);
- ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
- &open_tx_iface, &dumpers);
- }
-
- if (ret < 0)
- return -1;
-
- if (using_dumpers)
- return rte_eth_from_pcaps_n_dumpers(name, &pcaps, pcaps.num_of_rx,
- &dumpers, dumpers.num_of_tx, numa_node, kvlist);
-
- return rte_eth_from_pcaps(name, &pcaps, pcaps.num_of_rx, &dumpers,
- dumpers.num_of_tx, numa_node, kvlist, 0);
-
-}
-
-static struct rte_driver pmd_pcap_drv = {
- .name = "eth_pcap",
- .type = PMD_VDEV,
- .init = rte_pmd_pcap_devinit,
-};
-
-PMD_REGISTER_DRIVER(pmd_pcap_drv);
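
The devinit path above is driven by EAL --vdev strings. Below is a hedged
sketch of an init call exercising the pcap-file branch; the literal key names
"rx_pcap" and "tx_pcap" are the conventional values of the ETH_PCAP_*_ARG
macros, which are defined outside this hunk, so treat them as assumptions:

#include <rte_eal.h>

int main(void)
{
	char *argv[] = {
		"app", "-c", "0x3", "-n", "4",
		"--vdev", "eth_pcap0,rx_pcap=/tmp/in.pcap,tx_pcap=/tmp/out.pcap",
	};

	/* rte_pmd_pcap_devinit("eth_pcap0", "rx_pcap=...,tx_pcap=...") is
	 * invoked from rte_eal_init() via the PMD_REGISTER_DRIVER hook above;
	 * an 'iface=ethX' argument would take the single-interface branch. */
	return rte_eal_init(sizeof(argv) / sizeof(argv[0]), argv) < 0 ? 1 : 0;
}
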
diff --git a/src/dpdk_lib18/librte_pmd_ring/Makefile b/src/dpdk_lib18/librte_pmd_ring/Makefile
deleted file mode 100755
index b57e4210..00000000
--- a/src/dpdk_lib18/librte_pmd_ring/Makefile
+++ /dev/null
@@ -1,57 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_pmd_ring.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-#
-# all sources are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += rte_eth_ring.c
-
-#
-# Export include files
-#
-SYMLINK-y-include += rte_eth_ring.h
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += lib/librte_eal lib/librte_ring
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += lib/librte_mbuf lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += lib/librte_kvargs
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_virtio/Makefile b/src/dpdk_lib18/librte_pmd_virtio/Makefile
deleted file mode 100755
index 456095b3..00000000
--- a/src/dpdk_lib18/librte_pmd_virtio/Makefile
+++ /dev/null
@@ -1,57 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_pmd_virtio_uio.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-
-#
-# all sources are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtqueue.c
-SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_pci.c
-SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx.c
-SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_ethdev.c
-
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_net lib/librte_malloc
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/Makefile b/src/dpdk_lib18/librte_pmd_vmxnet3/Makefile
deleted file mode 100755
index 6872c747..00000000
--- a/src/dpdk_lib18/librte_pmd_vmxnet3/Makefile
+++ /dev/null
@@ -1,80 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_pmd_vmxnet3_uio.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-ifeq ($(CC), icc)
-#
-# CFLAGS for icc
-#
-CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
-
-else ifeq ($(CC), clang)
-#
-# CFLAGS for clang
-#
-CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
-CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
-
-else
-#
-# CFLAGS for gcc
-#
-ifneq ($(shell test $(GCC_MAJOR_VERSION) -le 4 -a $(GCC_MINOR_VERSION) -le 3 && echo 1), 1)
-CFLAGS += -Wno-deprecated
-endif
-CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
-CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
-
-endif
-
-VPATH += $(RTE_SDK)/lib/librte_pmd_vmxnet3/vmxnet3
-
-#
-# all sources are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_rxtx.c
-SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_ethdev.c
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_net lib/librte_malloc
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/Makefile b/src/dpdk_lib18/librte_pmd_xenvirt/Makefile
deleted file mode 100755
index 01bfcaa9..00000000
--- a/src/dpdk_lib18/librte_pmd_xenvirt/Makefile
+++ /dev/null
@@ -1,58 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_pmd_xenvirt.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-#
-# all sources are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += rte_eth_xenvirt.c rte_mempool_gntalloc.c rte_xen_lib.c
-
-#
-# Export include files
-#
-SYMLINK-y-include += rte_eth_xenvirt.h
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_net lib/librte_malloc
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_cmdline
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/rte_eth_xenvirt.c b/src/dpdk_lib18/librte_pmd_xenvirt/rte_eth_xenvirt.c
deleted file mode 100755
index 04e30c94..00000000
--- a/src/dpdk_lib18/librte_pmd_xenvirt/rte_eth_xenvirt.c
+++ /dev/null
@@ -1,716 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdint.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <sys/mman.h>
-#include <errno.h>
-#include <sys/user.h>
-#include <linux/binfmts.h>
-#include <xen/xen-compat.h>
-#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200
-#include <xs.h>
-#else
-#include <xenstore.h>
-#endif
-#include <linux/virtio_ring.h>
-
-#include <rte_mbuf.h>
-#include <rte_ethdev.h>
-#include <rte_malloc.h>
-#include <rte_memcpy.h>
-#include <rte_string_fns.h>
-#include <rte_dev.h>
-#include <cmdline_parse.h>
-#include <cmdline_parse_etheraddr.h>
-
-#include "rte_xen_lib.h"
-#include "virtqueue.h"
-#include "rte_eth_xenvirt.h"
-
-#define VQ_DESC_NUM 256
-#define VIRTIO_MBUF_BURST_SZ 64
-
-/* virtio_idx is increased after new device is created.*/
-static int virtio_idx = 0;
-
-static const char *drivername = "xen dummy virtio PMD";
-
-static struct rte_eth_link pmd_link = {
- .link_speed = 10000,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = 0
-};
-
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
-
- return m;
-}
-
-
-static uint16_t
-eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
-{
- struct virtqueue *rxvq = q;
- struct rte_mbuf *rxm, *new_mbuf;
- uint16_t nb_used, num;
- uint32_t len[VIRTIO_MBUF_BURST_SZ];
- uint32_t i;
- struct pmd_internals *pi = rxvq->internals;
-
- nb_used = VIRTQUEUE_NUSED(rxvq);
-
- rte_compiler_barrier(); /* rmb */
- num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
- num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
- if (unlikely(num == 0)) return 0;
-
- num = virtqueue_dequeue_burst(rxvq, rx_pkts, len, num);
- PMD_RX_LOG(DEBUG, "used:%d dequeue:%d\n", nb_used, num);
- for (i = 0; i < num ; i ++) {
- rxm = rx_pkts[i];
- PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
- rxm->next = NULL;
- rxm->data_off = RTE_PKTMBUF_HEADROOM;
- rxm->data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
- rxm->nb_segs = 1;
- rxm->port = pi->port_id;
- rxm->pkt_len = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
- }
- /* allocate new mbuf for the used descriptor */
- while (likely(!virtqueue_full(rxvq))) {
- new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
- if (unlikely(new_mbuf == NULL)) {
- break;
- }
- if (unlikely(virtqueue_enqueue_recv_refill(rxvq, new_mbuf))) {
- rte_pktmbuf_free_seg(new_mbuf);
- break;
- }
- }
- pi->eth_stats.ipackets += num;
- return num;
-}
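
/* VIRTQUEUE_NUSED is defined in virtqueue.h, outside this hunk; in DPDK of
 * this era it is essentially the index arithmetic below (a hedged sketch,
 * relying on uint16_t wrap-around): the number of descriptors the host has
 * marked used that the guest has not consumed yet. */
#define VIRTQUEUE_NUSED_SKETCH(vq) \
	((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
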
-
-static uint16_t
-eth_xenvirt_tx(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- struct virtqueue *txvq = tx_queue;
- struct rte_mbuf *txm;
- uint16_t nb_used, nb_tx, num, i;
- int error;
- uint32_t len[VIRTIO_MBUF_BURST_SZ];
- struct rte_mbuf *snd_pkts[VIRTIO_MBUF_BURST_SZ];
- struct pmd_internals *pi = txvq->internals;
-
- nb_tx = 0;
-
- if (unlikely(nb_pkts == 0))
- return 0;
-
- PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
- nb_used = VIRTQUEUE_NUSED(txvq);
-
- rte_compiler_barrier(); /* rmb */
-
- num = (uint16_t)(likely(nb_used <= VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
- num = virtqueue_dequeue_burst(txvq, snd_pkts, len, num);
-
- for (i = 0; i < num ; i ++) {
- /* mergable not supported, one segment only */
- rte_pktmbuf_free_seg(snd_pkts[i]);
- }
-
- while (nb_tx < nb_pkts) {
- if (likely(!virtqueue_full(txvq))) {
- /* TODO drop tx_pkts if it contains multiple segments */
- txm = tx_pkts[nb_tx];
- error = virtqueue_enqueue_xmit(txvq, txm);
- if (unlikely(error)) {
- if (error == ENOSPC)
- PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0\n");
- else if (error == EMSGSIZE)
- PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1\n");
- else
- PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d\n", error);
- break;
- }
- nb_tx++;
- } else {
- PMD_TX_LOG(ERR, "No free tx descriptors to transmit\n");
- /* virtqueue_notify not needed in our para-virt solution */
- break;
- }
- }
- pi->eth_stats.opackets += nb_tx;
- return nb_tx;
-}
-
-static int
-eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
-{
- RTE_LOG(ERR, PMD, "%s\n", __func__);
- return 0;
-}
-
-/*
- * Create a shared page between guest and host.
- * The host monitors this page; if it is cleared on unmap, the host
- * performs the necessary cleanup.
- */
-static void
-gntalloc_vring_flag(int vtidx)
-{
- char key_str[PATH_MAX];
- char val_str[PATH_MAX];
- uint32_t gref_tmp;
- void *ptr;
-
- if (grefwatch_from_alloc(&gref_tmp, &ptr)) {
- RTE_LOG(ERR, PMD, "grefwatch_from_alloc error\n");
- exit(0);
- }
-
- *(uint8_t *)ptr = MAP_FLAG;
- snprintf(val_str, sizeof(val_str), "%u", gref_tmp);
- snprintf(key_str, sizeof(key_str),
- DPDK_XENSTORE_PATH"%d"VRING_FLAG_STR, vtidx);
- xenstore_write(key_str, val_str);
-}
-
-/*
- * Notify host this virtio device is started.
- * Host could start polling this device.
- */
-static void
-dev_start_notify(int vtidx)
-{
- char key_str[PATH_MAX];
- char val_str[PATH_MAX];
-
- RTE_LOG(INFO, PMD, "%s: virtio %d is started\n", __func__, vtidx);
- gntalloc_vring_flag(vtidx);
-
- snprintf(key_str, sizeof(key_str), "%s%s%d",
- DPDK_XENSTORE_PATH, EVENT_TYPE_START_STR,
- vtidx);
- snprintf(val_str, sizeof(val_str), "1");
- xenstore_write(key_str, val_str);
-}
-
-/*
- * Notify host this virtio device is stopped.
- * Host could stop polling this device.
- */
-static void
-dev_stop_notify(int vtidx)
-{
- RTE_SET_USED(vtidx);
-}
-
-
-static int
-update_mac_address(struct ether_addr *mac_addrs, int vtidx)
-{
- char key_str[PATH_MAX];
- char val_str[PATH_MAX];
- int rv;
-
- if (mac_addrs == NULL) {
- RTE_LOG(ERR, PMD, "%s: NULL pointer mac specified\n", __func__);
- return -1;
- }
- rv = snprintf(key_str, sizeof(key_str),
- DPDK_XENSTORE_PATH"%d_ether_addr", vtidx);
- if (rv == -1)
- return rv;
- rv = snprintf(val_str, sizeof(val_str), "%02x:%02x:%02x:%02x:%02x:%02x",
- mac_addrs->addr_bytes[0],
- mac_addrs->addr_bytes[1],
- mac_addrs->addr_bytes[2],
- mac_addrs->addr_bytes[3],
- mac_addrs->addr_bytes[4],
- mac_addrs->addr_bytes[5]);
- if (rv == -1)
- return rv;
- if (xenstore_write(key_str, val_str))
- return rv;
- return 0;
-}
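
/* Hedged illustration only: with vtidx == 0 and a guest MAC of
 * 00:16:3e:5a:12:34, the snprintf()/xenstore_write() sequence above amounts
 * to the call below (the key prefix comes from DPDK_XENSTORE_PATH, which is
 * defined outside this hunk, so it is an assumption here):
 *
 *   xenstore_write("/control/dpdk/0_ether_addr", "00:16:3e:5a:12:34");
 */
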
-
-
-static int
-eth_dev_start(struct rte_eth_dev *dev)
-{
- struct virtqueue *rxvq = dev->data->rx_queues[0];
- struct virtqueue *txvq = dev->data->tx_queues[0];
- struct rte_mbuf *m;
- struct pmd_internals *pi = (struct pmd_internals *)dev->data->dev_private;
- int rv;
-
- dev->data->dev_link.link_status = 1;
- while (!virtqueue_full(rxvq)) {
- m = rte_rxmbuf_alloc(rxvq->mpool);
- if (m == NULL)
- break;
- /* Enqueue allocated buffers. */
- if (virtqueue_enqueue_recv_refill(rxvq, m)) {
- rte_pktmbuf_free_seg(m);
- break;
- }
- }
-
- rxvq->internals = pi;
- txvq->internals = pi;
-
- rv = update_mac_address(dev->data->mac_addrs, pi->virtio_idx);
- if (rv)
- return -1;
- dev_start_notify(pi->virtio_idx);
-
- return 0;
-}
-
-static void
-eth_dev_stop(struct rte_eth_dev *dev)
-{
- struct pmd_internals *pi = (struct pmd_internals *)dev->data->dev_private;
-
- dev->data->dev_link.link_status = 0;
- dev_stop_notify(pi->virtio_idx);
-}
-
-/*
- * Notify host this virtio device is closed.
- * The host can then do any necessary cleanup for this device.
- */
-static void
-eth_dev_close(struct rte_eth_dev *dev)
-{
- RTE_SET_USED(dev);
-}
-
-static void
-eth_dev_info(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info)
-{
- struct pmd_internals *internals = dev->data->dev_private;
-
- RTE_SET_USED(internals);
- dev_info->driver_name = drivername;
- dev_info->max_mac_addrs = 1;
- dev_info->max_rx_pktlen = (uint32_t)2048;
- dev_info->max_rx_queues = (uint16_t)1;
- dev_info->max_tx_queues = (uint16_t)1;
- dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
-}
-
-static void
-eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
-{
- struct pmd_internals *internals = dev->data->dev_private;
- if(stats)
- rte_memcpy(stats, &internals->eth_stats, sizeof(*stats));
-}
-
-static void
-eth_stats_reset(struct rte_eth_dev *dev)
-{
- struct pmd_internals *internals = dev->data->dev_private;
- /* Reset software totals */
- memset(&internals->eth_stats, 0, sizeof(internals->eth_stats));
-}
-
-static void
-eth_queue_release(void *q __rte_unused)
-{
-}
-
-static int
-eth_link_update(struct rte_eth_dev *dev __rte_unused,
- int wait_to_complete __rte_unused)
-{
- return 0;
-}
-
-/*
- * Create shared vring between guest and host.
- * Memory is allocated through the gntalloc driver, so it is not physically contiguous.
- */
-static void *
-gntalloc_vring_create(int queue_type, uint32_t size, int vtidx)
-{
- char key_str[PATH_MAX] = {0};
- char val_str[PATH_MAX] = {0};
- void *va = NULL;
- int pg_size;
- uint32_t pg_num;
- uint32_t *gref_arr = NULL;
- phys_addr_t *pa_arr = NULL;
- uint64_t start_index;
- int rv;
-
- pg_size = getpagesize();
- size = RTE_ALIGN_CEIL(size, pg_size);
- pg_num = size / pg_size;
-
- gref_arr = calloc(pg_num, sizeof(gref_arr[0]));
- pa_arr = calloc(pg_num, sizeof(pa_arr[0]));
-
- if (gref_arr == NULL || pa_arr == NULL) {
- RTE_LOG(ERR, PMD, "%s: calloc failed\n", __func__);
- goto out;
- }
-
- va = gntalloc(size, gref_arr, &start_index);
- if (va == NULL) {
- RTE_LOG(ERR, PMD, "%s: gntalloc failed\n", __func__);
- goto out;
- }
-
- if (get_phys_map(va, pa_arr, pg_num, pg_size))
- goto out;
-
- /* write in xenstore gref and pfn for each page of vring */
- if (grant_node_create(pg_num, gref_arr, pa_arr, val_str, sizeof(val_str))) {
- gntfree(va, size, start_index);
- va = NULL;
- goto out;
- }
-
- if (queue_type == VTNET_RQ)
- rv = snprintf(key_str, sizeof(key_str), DPDK_XENSTORE_PATH"%d"RXVRING_XENSTORE_STR, vtidx);
- else
- rv = snprintf(key_str, sizeof(key_str), DPDK_XENSTORE_PATH"%d"TXVRING_XENSTORE_STR, vtidx);
- if (rv == -1 || xenstore_write(key_str, val_str) == -1) {
- gntfree(va, size, start_index);
- va = NULL;
- }
-out:
- if (pa_arr)
- free(pa_arr);
- if (gref_arr)
- free(gref_arr);
-
- return va;
-}
-
-
-
-static struct virtqueue *
-virtio_queue_setup(struct rte_eth_dev *dev, int queue_type)
-{
- struct virtqueue *vq = NULL;
- uint16_t vq_size = VQ_DESC_NUM;
- int i = 0;
- char vq_name[VIRTQUEUE_MAX_NAME_SZ];
- size_t size;
- struct vring *vr;
-
- /* Allocate memory for virtqueue. */
- if (queue_type == VTNET_RQ) {
- snprintf(vq_name, sizeof(vq_name), "port%d_rvq",
- dev->data->port_id);
- vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
- vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
- if (vq == NULL) {
- RTE_LOG(ERR, PMD, "%s: unabled to allocate virtqueue\n", __func__);
- return NULL;
- }
- memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
- } else if(queue_type == VTNET_TQ) {
- snprintf(vq_name, sizeof(vq_name), "port%d_tvq",
- dev->data->port_id);
- vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
- vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
- if (vq == NULL) {
- RTE_LOG(ERR, PMD, "%s: unabled to allocate virtqueue\n", __func__);
- return NULL;
- }
- memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
- }
-
- vq->vq_alignment = VIRTIO_PCI_VRING_ALIGN;
- vq->vq_nentries = vq_size;
- vq->vq_free_cnt = vq_size;
- /* Calculate vring size according to the virtio spec */
- size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
- vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
- /* Allocate memory for the virtio vring through the gntalloc driver */
- vq->vq_ring_virt_mem = gntalloc_vring_create(queue_type, vq->vq_ring_size,
- ((struct pmd_internals *)dev->data->dev_private)->virtio_idx);
- memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
- vr = &vq->vq_ring;
- vring_init(vr, vq_size, vq->vq_ring_virt_mem, vq->vq_alignment);
- /*
- * Locally maintained last consumed index; this index trails
- * vq_ring.used->idx.
- */
- vq->vq_used_cons_idx = 0;
- vq->vq_desc_head_idx = 0;
- vq->vq_free_cnt = vq->vq_nentries;
- memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
-
- /* Chain all the descriptors in the ring with an END */
- for (i = 0; i < vq_size - 1; i++)
- vr->desc[i].next = (uint16_t)(i + 1);
- vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
-
- return vq;
-}
-
-static int
-eth_rx_queue_setup(struct rte_eth_dev *dev,uint16_t rx_queue_id,
- uint16_t nb_rx_desc __rte_unused,
- unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
- struct rte_mempool *mb_pool)
-{
- struct virtqueue *vq;
- vq = dev->data->rx_queues[rx_queue_id] = virtio_queue_setup(dev, VTNET_RQ);
- vq->mpool = mb_pool;
- return 0;
-}
-
-static int
-eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
- uint16_t nb_tx_desc __rte_unused,
- unsigned int socket_id __rte_unused,
- const struct rte_eth_txconf *tx_conf __rte_unused)
-{
- dev->data->tx_queues[tx_queue_id] = virtio_queue_setup(dev, VTNET_TQ);
- return 0;
-}
-
-
-
-static struct eth_dev_ops ops = {
- .dev_start = eth_dev_start,
- .dev_stop = eth_dev_stop,
- .dev_close = eth_dev_close,
- .dev_configure = eth_dev_configure,
- .dev_infos_get = eth_dev_info,
- .rx_queue_setup = eth_rx_queue_setup,
- .tx_queue_setup = eth_tx_queue_setup,
- .rx_queue_release = eth_queue_release,
- .tx_queue_release = eth_queue_release,
- .link_update = eth_link_update,
- .stats_get = eth_stats_get,
- .stats_reset = eth_stats_reset,
-};
-
-
-static int
-rte_eth_xenvirt_parse_args(struct xenvirt_dict *dict,
- const char *name, const char *params)
-{
- int i;
- char *pairs[RTE_ETH_XENVIRT_MAX_ARGS];
- int num_of_pairs;
- char *pair[2];
- char *args;
- int ret = -1;
-
- if (params == NULL)
- return 0;
-
- args = rte_zmalloc(NULL, strlen(params) + 1, RTE_CACHE_LINE_SIZE);
- if (args == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't parse %s device \n", name);
- return -1;
- }
- rte_memcpy(args, params, strlen(params));
-
- num_of_pairs = rte_strsplit(args, strnlen(args, MAX_ARG_STRLEN),
- pairs,
- RTE_ETH_XENVIRT_MAX_ARGS ,
- RTE_ETH_XENVIRT_PAIRS_DELIM);
-
- for (i = 0; i < num_of_pairs; i++) {
- pair[0] = NULL;
- pair[1] = NULL;
- rte_strsplit(pairs[i], strnlen(pairs[i], MAX_ARG_STRLEN),
- pair, 2,
- RTE_ETH_XENVIRT_KEY_VALUE_DELIM);
-
- if (pair[0] == NULL || pair[1] == NULL || pair[0][0] == 0
- || pair[1][0] == 0) {
- RTE_LOG(ERR, PMD,
- "Couldn't parse %s device,"
- "wrong key or value \n", name);
- goto err;
- }
-
- if (!strncmp(pair[0], RTE_ETH_XENVIRT_MAC_PARAM,
- sizeof(RTE_ETH_XENVIRT_MAC_PARAM))) {
- if (cmdline_parse_etheraddr(NULL,
- pair[1],
- &dict->addr,
- sizeof(dict->addr)) < 0) {
- RTE_LOG(ERR, PMD,
- "Invalid %s device ether address\n",
- name);
- goto err;
- }
-
- dict->addr_valid = 1;
- }
- }
-
- ret = 0;
-err:
- rte_free(args);
- return ret;
-}
-
-enum dev_action {
- DEV_CREATE,
- DEV_ATTACH
-};
-
-
-static int
-eth_dev_xenvirt_create(const char *name, const char *params,
- const unsigned numa_node,
- enum dev_action action)
-{
- struct rte_eth_dev_data *data = NULL;
- struct rte_pci_device *pci_dev = NULL;
- struct pmd_internals *internals = NULL;
- struct rte_eth_dev *eth_dev = NULL;
- struct xenvirt_dict dict;
- bzero(&dict, sizeof(struct xenvirt_dict));
-
- RTE_LOG(INFO, PMD, "Creating virtio rings backed ethdev on numa socket %u\n",
- numa_node);
- RTE_SET_USED(action);
-
- if (rte_eth_xenvirt_parse_args(&dict, name, params) < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to parse ethdev parameters\n", __func__);
- return -1;
- }
-
- /* now do all data allocation - for eth_dev structure, dummy pci driver
- * and internal (private) data
- */
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL)
- goto err;
-
- pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
- if (pci_dev == NULL)
- goto err;
-
- internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
- if (internals == NULL)
- goto err;
-
- /* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(name);
- if (eth_dev == NULL)
- goto err;
-
- pci_dev->numa_node = numa_node;
-
- data->dev_private = internals;
- data->port_id = eth_dev->data->port_id;
- data->nb_rx_queues = (uint16_t)1;
- data->nb_tx_queues = (uint16_t)1;
- data->dev_link = pmd_link;
- data->mac_addrs = rte_zmalloc("xen_virtio", ETHER_ADDR_LEN, 0);
-
- if(dict.addr_valid)
- memcpy(&data->mac_addrs->addr_bytes, &dict.addr, sizeof(struct ether_addr));
- else
- eth_random_addr(&data->mac_addrs->addr_bytes[0]);
-
- eth_dev->data = data;
- eth_dev->dev_ops = &ops;
- eth_dev->pci_dev = pci_dev;
-
- eth_dev->rx_pkt_burst = eth_xenvirt_rx;
- eth_dev->tx_pkt_burst = eth_xenvirt_tx;
-
- internals->virtio_idx = virtio_idx++;
- internals->port_id = eth_dev->data->port_id;
-
- return 0;
-
-err:
- if (data)
- rte_free(data);
- if (pci_dev)
- rte_free(pci_dev);
- if (internals)
- rte_free(internals);
- return -1;
-}
-
-
-/*TODO: Support multiple process model */
-static int
-rte_pmd_xenvirt_devinit(const char *name, const char *params)
-{
- if (virtio_idx == 0) {
- if (xenstore_init() != 0) {
- RTE_LOG(ERR, PMD, "%s: xenstore init failed\n", __func__);
- return -1;
- }
- if (gntalloc_open() != 0) {
- RTE_LOG(ERR, PMD, "%s: grant init failed\n", __func__);
- return -1;
- }
- }
- eth_dev_xenvirt_create(name, params, rte_socket_id(), DEV_CREATE);
- return 0;
-}
-
-static struct rte_driver pmd_xenvirt_drv = {
- .name = "eth_xenvirt",
- .type = PMD_VDEV,
- .init = rte_pmd_xenvirt_devinit,
-};
-
-PMD_REGISTER_DRIVER(pmd_xenvirt_drv);
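
Like the pcap PMD, this driver is instantiated from an EAL --vdev argument.
A hedged example string ("mac" is assumed to be the value of
RTE_ETH_XENVIRT_MAC_PARAM, which is defined outside this hunk):

	--vdev 'eth_xenvirt0,mac=00:16:3e:5a:12:34'

rte_pmd_xenvirt_devinit() then runs from rte_eal_init() via the
PMD_REGISTER_DRIVER hook above, parsing the MAC through
rte_eth_xenvirt_parse_args().
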
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/rte_mempool_gntalloc.c b/src/dpdk_lib18/librte_pmd_xenvirt/rte_mempool_gntalloc.c
deleted file mode 100755
index 3a650e8d..00000000
--- a/src/dpdk_lib18/librte_pmd_xenvirt/rte_mempool_gntalloc.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdint.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <sys/mman.h>
-#include <sys/ioctl.h>
-#include <string.h>
-#include <xen/sys/gntalloc.h>
-
-#include <rte_common.h>
-#include <rte_mempool.h>
-#include <rte_memory.h>
-#include <rte_errno.h>
-
-#include "rte_xen_lib.h"
-#include "rte_eth_xenvirt.h"
-
-struct _gntarr {
- uint32_t gref;
- phys_addr_t pa;
- uint64_t index;
- void *va;
-};
-
-struct _mempool_gntalloc_info {
- struct rte_mempool *mp;
- uint32_t pg_num;
- uint32_t *gref_arr;
- phys_addr_t *pa_arr;
- void *va;
- uint32_t mempool_idx;
- uint64_t start_index;
-};
-
-
-static rte_atomic32_t global_xenvirt_mempool_idx = RTE_ATOMIC32_INIT(-1);
-
-static int
-compare(const void *p1, const void *p2)
-{
- return ((const struct _gntarr *)p1)->pa - ((const struct _gntarr *)p2)->pa;
-}
-
-
-static struct _mempool_gntalloc_info
-_create_mempool(const char *name, unsigned elt_num, unsigned elt_size,
- unsigned cache_size, unsigned private_data_size,
- rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags)
-{
- struct _mempool_gntalloc_info mgi;
- struct rte_mempool *mp = NULL;
- struct rte_mempool_objsz objsz;
- uint32_t pg_num, rpg_num, pg_shift, pg_sz;
- char *va, *orig_va, *uv; /* uv: address from which unused pages can be freed */
- ssize_t sz, usz; /* usz: unused size */
- /*
- * for each page allocated through the xen_gntalloc driver:
- * gref_arr: stores grant references,
- * pa_arr: stores physical addresses,
- * gnt_arr: stores all metadata
- */
- uint32_t *gref_arr = NULL;
- phys_addr_t *pa_arr = NULL;
- struct _gntarr *gnt_arr = NULL;
- /* start index of the grant references, used for dealloc */
- uint64_t start_index;
- uint32_t i, j;
- int rv = 0;
- struct ioctl_gntalloc_dealloc_gref arg;
-
- mgi.mp = NULL;
- va = orig_va = uv = NULL;
- pg_num = rpg_num = 0;
- sz = 0;
-
- pg_sz = getpagesize();
- if (rte_is_power_of_2(pg_sz) == 0) {
- goto out;
- }
- pg_shift = rte_bsf32(pg_sz);
-
- rte_mempool_calc_obj_size(elt_size, flags, &objsz);
- sz = rte_mempool_xmem_size(elt_num, objsz.total_size, pg_shift);
- pg_num = sz >> pg_shift;
-
- pa_arr = calloc(pg_num, sizeof(pa_arr[0]));
- gref_arr = calloc(pg_num, sizeof(gref_arr[0]));
- gnt_arr = calloc(pg_num, sizeof(gnt_arr[0]));
- if ((gnt_arr == NULL) || (gref_arr == NULL) || (pa_arr == NULL))
- goto out;
-
- /* grant index is continuous in ascending order */
- orig_va = gntalloc(sz, gref_arr, &start_index);
- if (orig_va == NULL)
- goto out;
-
- get_phys_map(orig_va, pa_arr, pg_num, pg_sz);
- for (i = 0; i < pg_num; i++) {
- gnt_arr[i].index = start_index + i * pg_sz;
- gnt_arr[i].gref = gref_arr[i];
- gnt_arr[i].pa = pa_arr[i];
- gnt_arr[i].va = RTE_PTR_ADD(orig_va, i * pg_sz);
- }
- qsort(gnt_arr, pg_num, sizeof(struct _gntarr), compare);
-
- va = get_xen_virtual(sz, pg_sz);
- if (va == NULL) {
- goto out;
- }
-
- /*
- * map page by page, as the indices are no longer contiguous.
- * This creates pg_num VMAs; doesn't Linux have a limit on this?
- */
- for (i = 0; i < pg_num; i++) {
- /* update gref_arr and pa_arr after sort */
- gref_arr[i] = gnt_arr[i].gref;
- pa_arr[i] = gnt_arr[i].pa;
- gnt_arr[i].va = mmap(va + i * pg_sz, pg_sz, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_FIXED, gntalloc_fd, gnt_arr[i].index);
- if ((gnt_arr[i].va == MAP_FAILED) || (gnt_arr[i].va != (va + i * pg_sz))) {
- RTE_LOG(ERR, PMD, "failed to map %d pages\n", i);
- goto mmap_failed;
- }
- }
-
- /*
- * Check that the allocated size is big enough to hold elt_num
- * objects and calculate how many bytes are actually required.
- */
- usz = rte_mempool_xmem_usage(va, elt_num, objsz.total_size, pa_arr, pg_num, pg_shift);
- if (usz < 0) {
- mp = NULL;
- i = pg_num;
- goto mmap_failed;
- } else {
- /* unmap unused pages if any */
- uv = RTE_PTR_ADD(va, usz);
- if ((usz = va + sz - uv) > 0) {
-
- RTE_LOG(ERR, PMD,
- "%s(%s): unmap unused %zu of %zu "
- "mmaped bytes @%p orig:%p\n",
- __func__, name, usz, sz, uv, va);
- munmap(uv, usz);
- i = (sz - usz) / pg_sz;
- for (; i < pg_num; i++) {
- arg.count = 1;
- arg.index = gnt_arr[i].index;
- rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg);
- if (rv) {
- /* shouldn't fail here */
- RTE_LOG(ERR, PMD, "va=%p pa=%p index=%p %s\n",
- gnt_arr[i].va,
- (void *)gnt_arr[i].pa,
- (void *)arg.index, strerror(errno));
- rte_panic("gntdealloc failed when freeing pages\n");
- }
- }
-
- rpg_num = (sz - usz) >> pg_shift;
- } else
- rpg_num = pg_num;
-
- mp = rte_mempool_xmem_create(name, elt_num, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags, va, pa_arr, rpg_num, pg_shift);
-
- RTE_VERIFY(elt_num == mp->size);
- }
- mgi.mp = mp;
- mgi.pg_num = rpg_num;
- mgi.gref_arr = gref_arr;
- mgi.pa_arr = pa_arr;
- if (mp)
- mgi.mempool_idx = rte_atomic32_add_return(&global_xenvirt_mempool_idx, 1);
- mgi.start_index = start_index;
- mgi.va = va;
-
- if (mp == NULL) {
- i = pg_num;
- goto mmap_failed;
- }
-
-/*
- * unmap only, without deallocating the grant references.
- * Unused pages have already been unmapped;
- * unmapping them twice will fail, but it is safe.
- */
-mmap_failed:
- for (j = 0; j < i; j++) {
- if (gnt_arr[j].va)
- munmap(gnt_arr[j].va, pg_sz);
- }
-out:
- if (gnt_arr)
- free(gnt_arr);
- if (orig_va)
- munmap(orig_va, sz);
- if (mp == NULL) {
- if (gref_arr)
- free(gref_arr);
- if (pa_arr)
- free(pa_arr);
-
- /* some grefs may already have been deallocated from the list in the
- * driver, so dealloc one by one; deallocating twice is safe
- */
- if (orig_va) {
- for (i = 0; i < pg_num; i++) {
- arg.count = 1;
- arg.index = start_index + i * pg_sz;
- rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg);
- }
- }
- }
- return mgi;
-}
-
-struct rte_mempool *
-rte_mempool_gntalloc_create(const char *name, unsigned elt_num, unsigned elt_size,
- unsigned cache_size, unsigned private_data_size,
- rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags)
-{
- int rv;
- uint32_t i;
- struct _mempool_gntalloc_info mgi;
- struct ioctl_gntalloc_dealloc_gref arg;
- int pg_sz = getpagesize();
-
- mgi = _create_mempool(name, elt_num, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags);
- if (mgi.mp) {
- rv = grant_gntalloc_mbuf_pool(mgi.mp,
- mgi.pg_num,
- mgi.gref_arr,
- mgi.pa_arr,
- mgi.mempool_idx);
- free(mgi.gref_arr);
- free(mgi.pa_arr);
- if (rv == 0)
- return mgi.mp;
- /*
- * in _create_mempool, unused pages have already been unmapped and
- * deallocated; unmap and dealloc the remaining ones here.
- */
- munmap(mgi.va, pg_sz * mgi.pg_num);
- for (i = 0; i < mgi.pg_num; i++) {
- arg.count = 1;
- arg.index = mgi.start_index + i * pg_sz;
- rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg);
- }
- return NULL;
- }
- return NULL;
-}
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/rte_xen_lib.c b/src/dpdk_lib18/librte_pmd_xenvirt/rte_xen_lib.c
deleted file mode 100755
index b3932f0e..00000000
--- a/src/dpdk_lib18/librte_pmd_xenvirt/rte_xen_lib.c
+++ /dev/null
@@ -1,428 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-#include <sys/types.h>
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/ioctl.h>
-#include <xen/xen-compat.h>
-#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200
-#include <xs.h>
-#else
-#include <xenstore.h>
-#endif
-#include <xen/sys/gntalloc.h>
-
-#include <rte_common.h>
-#include <rte_string_fns.h>
-
-#include "rte_xen_lib.h"
-
-/*
- * The grant node format in xenstore for vring/mpool is:
- * 0_rx_vring_gref = "gref1#, gref2#, gref3#"
- * 0_mempool_gref = "gref1#, gref2#, gref3#"
- * each gref# is a grant reference for a shared page.
- * In each shared page, we store the grant_node_item items.
- */
-struct grant_node_item {
- uint32_t gref;
- uint32_t pfn;
-} __attribute__((packed));
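
/* To make the node format above concrete, a hedged sketch of how one shared
 * page is filled with grant_node_item records; this mirrors the inner loop
 * of grant_node_create() further down, and the helper name is invented for
 * illustration. */
static void fill_grant_page(void *page, uint32_t pg_size,
			    const uint32_t *gref_arr, const phys_addr_t *pa_arr,
			    uint32_t pg_shift, uint32_t n)
{
	struct grant_node_item *it = page;
	uint32_t i, max = pg_size / sizeof(*it);

	for (i = 0; i < n && i < max; i++) {
		it[i].gref = gref_arr[i];
		it[i].pfn = (uint32_t)(pa_arr[i] >> pg_shift);
	}
}
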
-
-/* fd for xen_gntalloc driver, used to allocate grant pages*/
-int gntalloc_fd = -1;
-
-/* xenstore path for local domain, now it is '/local/domain/domid/' */
-static char *dompath = NULL;
-/* handle to xenstore read/write operations */
-static struct xs_handle *xs = NULL;
-
-/*
- * Reserve a virtual address space.
- * On success, returns the pointer. On failure, returns NULL.
- */
-void *
-get_xen_virtual(size_t size, size_t page_sz)
-{
- void *addr;
- uintptr_t aligned_addr;
-
- addr = mmap(NULL, size + page_sz, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
- if (addr == MAP_FAILED) {
- RTE_LOG(ERR, PMD, "failed get a virtual area\n");
- return NULL;
- }
-
- aligned_addr = RTE_ALIGN_CEIL((uintptr_t)addr, page_sz);
- addr = (void *)(aligned_addr);
-
- return addr;
-}
-
-/*
- * Get the physical address for virtual memory starting at va.
- */
-int
-get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num, uint32_t pg_sz)
-{
- int32_t fd, rc = 0;
- uint32_t i, nb;
- off_t ofs;
-
- ofs = (uintptr_t)va / pg_sz * sizeof(*pa);
- nb = pg_num * sizeof(*pa);
-
- if ((fd = open(PAGEMAP_FNAME, O_RDONLY)) < 0 ||
- (rc = pread(fd, pa, nb, ofs)) < 0 ||
- (rc -= nb) != 0) {
- RTE_LOG(ERR, PMD, "%s: failed read of %u bytes from \'%s\' "
- "at offset %zu, error code: %d\n",
- __func__, nb, PAGEMAP_FNAME, ofs, errno);
- rc = ENOENT;
- }
-
- close(fd);
- for (i = 0; i != pg_num; i++)
- pa[i] = (pa[i] & PAGEMAP_PFN_MASK) * pg_sz;
-
- return rc;
-}
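
/* get_phys_map() relies on the Linux /proc/<pid>/pagemap format: one 64-bit
 * record per virtual page, with the page frame number in the low bits
 * (bits 0-54 on current kernels). PAGEMAP_FNAME and PAGEMAP_PFN_MASK are
 * defined in rte_xen_lib.h, outside this hunk, so this sketch restates the
 * arithmetic under that assumption. */
static inline phys_addr_t
pagemap_entry_to_pa(uint64_t entry, uint32_t pg_sz)
{
	const uint64_t pfn_mask = (1ULL << 55) - 1;	/* bits 0-54: PFN */

	/* the record offset for address va is (va / pg_sz) * sizeof(uint64_t) */
	return (entry & pfn_mask) * pg_sz;
}
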
-
-int
-gntalloc_open(void)
-{
- gntalloc_fd = open(XEN_GNTALLOC_FNAME, O_RDWR);
- return (gntalloc_fd != -1) ? 0 : -1;
-}
-
-void
-gntalloc_close(void)
-{
- if (gntalloc_fd != -1)
- close(gntalloc_fd);
- gntalloc_fd = -1;
-}
-
-void *
-gntalloc(size_t size, uint32_t *gref, uint64_t *start_index)
-{
- int page_size = getpagesize();
- uint32_t i, pg_num;
- void *va;
- int rv;
- struct ioctl_gntalloc_alloc_gref *arg;
- struct ioctl_gntalloc_dealloc_gref arg_d;
-
- if (size % page_size) {
- RTE_LOG(ERR, PMD, "%s: %zu isn't multiple of page size\n",
- __func__, size);
- return NULL;
- }
-
- pg_num = size / page_size;
- arg = malloc(sizeof(*arg) + (pg_num - 1) * sizeof(uint32_t));
- if (arg == NULL)
- return NULL;
- arg->domid = DOM0_DOMID;
- arg->flags = GNTALLOC_FLAG_WRITABLE;
- arg->count = pg_num;
-
- rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_ALLOC_GREF, arg);
- if (rv) {
- RTE_LOG(ERR, PMD, "%s: ioctl error\n", __func__);
- free(arg);
- return NULL;
- }
-
- va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, gntalloc_fd, arg->index);
- if (va == MAP_FAILED) {
- RTE_LOG(ERR, PMD, "%s: mmap failed\n", __func__);
- arg_d.count = pg_num;
- arg_d.index = arg->index;
- ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, arg_d);
- free(arg);
- return NULL;
- }
-
- if (gref) {
- for (i = 0; i < pg_num; i++) {
- gref[i] = arg->gref_ids[i];
- }
- }
- if (start_index)
- *start_index = arg->index;
-
- free(arg);
-
- return va;
-}
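
/* A minimal usage sketch pairing gntalloc() with gntfree() (declared in
 * rte_xen_lib.h); it assumes gntalloc_open() already succeeded, and the
 * two-page size is arbitrary. */
static int
share_two_pages(void)
{
	size_t sz = 2 * (size_t)getpagesize();
	uint32_t gref[2];
	uint64_t start_index;
	void *va;

	va = gntalloc(sz, gref, &start_index);
	if (va == NULL)
		return -1;
	/* ... publish gref[0]/gref[1] via xenstore_write() and use the pages ... */
	gntfree(va, sz, start_index);
	return 0;
}
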
-
-int
-grefwatch_from_alloc(uint32_t *gref, void **pptr)
-{
- int rv;
- void *ptr;
- int pg_size = getpagesize();
- struct ioctl_gntalloc_alloc_gref arg = {
- .domid = DOM0_DOMID,
- .flags = GNTALLOC_FLAG_WRITABLE,
- .count = 1
- };
- struct ioctl_gntalloc_dealloc_gref arg_d;
- struct ioctl_gntalloc_unmap_notify notify = {
- .action = UNMAP_NOTIFY_CLEAR_BYTE
- };
-
- rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_ALLOC_GREF, &arg);
- if (rv) {
- RTE_LOG(ERR, PMD, "%s: ioctl error\n", __func__);
- return -1;
- }
-
- ptr = (void *)mmap(NULL, pg_size, PROT_READ|PROT_WRITE, MAP_SHARED, gntalloc_fd, arg.index);
- arg_d.index = arg.index;
- arg_d.count = 1;
- if (ptr == MAP_FAILED) {
- RTE_LOG(ERR, PMD, "%s: mmap failed\n", __func__);
- ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);
- return -1;
- }
- if (pptr)
- *pptr = ptr;
- if (gref)
- *gref = arg.gref_ids[0];
-
- notify.index = arg.index;
- rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_SET_UNMAP_NOTIFY, &notify);
- if (rv) {
- RTE_LOG(ERR, PMD, "%s: unmap notify failed\n", __func__);
- munmap(ptr, pg_size);
- ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);
- return -1;
- }
-
- return 0;
-}
-
-void
-gntfree(void *va, size_t sz, uint64_t start_index)
-{
- struct ioctl_gntalloc_dealloc_gref arg_d;
-
- if (va && sz) {
- munmap(va, sz);
- arg_d.count = sz / getpagesize();
- arg_d.index = start_index;
- ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);
- }
-}
-
-static int
-xenstore_cleanup(void)
-{
- char store_path[PATH_MAX] = {0};
-
- if (snprintf(store_path, sizeof(store_path),
- "%s%s", dompath, DPDK_XENSTORE_NODE) == -1)
- return -1;
-
- if (xs_rm(xs, XBT_NULL, store_path) == false) {
- RTE_LOG(ERR, PMD, "%s: failed cleanup node\n", __func__);
- return -1;
- }
-
- return 0;
-}
-
-int
-xenstore_init(void)
-{
- unsigned int len, domid;
- char *buf;
- static int cleanup = 0;
- char *end;
-
- xs = xs_domain_open();
- if (xs == NULL) {
- RTE_LOG(ERR, PMD,"%s: xs_domain_open failed\n", __func__);
- return -1;
- }
- buf = xs_read(xs, XBT_NULL, "domid", &len);
- if (buf == NULL) {
- RTE_LOG(ERR, PMD, "%s: failed read domid\n", __func__);
- return -1;
- }
- errno = 0;
- domid = strtoul(buf, &end, 0);
- if (errno != 0 || end == NULL || end == buf || domid == 0)
- return -1;
-
- RTE_LOG(INFO, PMD, "retrieved dom ID = %d\n", domid);
-
- dompath = xs_get_domain_path(xs, domid);
- if (dompath == NULL)
- return -1;
-
- xs_transaction_start(xs); /* XXX: when should this transaction end? */
-
- if (cleanup == 0) {
- if (xenstore_cleanup())
- return -1;
- cleanup = 1;
- }
-
- return 0;
-}
-
-int
-xenstore_write(const char *key_str, const char *val_str)
-{
- char grant_path[PATH_MAX];
- int rv, len;
-
- if (xs == NULL) {
- RTE_LOG(ERR, PMD, "%s: xenstore init failed\n", __func__);
- return -1;
- }
- rv = snprintf(grant_path, sizeof(grant_path), "%s%s", dompath, key_str);
- if (rv == -1) {
- RTE_LOG(ERR, PMD, "%s: snprintf %s %s failed\n",
- __func__, dompath, key_str);
- return -1;
- }
- len = strnlen(val_str, PATH_MAX);
-
- if (xs_write(xs, XBT_NULL, grant_path, val_str, len) == false) {
- RTE_LOG(ERR, PMD, "%s: xs_write failed\n", __func__);
- return -1;
- }
-
- return 0;
-}
-
-int
-grant_node_create(uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, char *val_str, size_t str_size)
-{
- uint64_t start_index;
- int pg_size;
- uint32_t pg_shift;
- void *ptr = NULL;
- uint32_t count, entries_per_pg;
- uint32_t i, j = 0, k = 0;
- uint32_t *gref_tmp;
- int first = 1;
- char tmp_str[PATH_MAX] = {0};
- int rv = -1;
-
- pg_size = getpagesize();
- if (rte_is_power_of_2(pg_size) == 0) {
- return -1;
- }
- pg_shift = rte_bsf32(pg_size);
- if (pg_size % sizeof(struct grant_node_item)) {
- RTE_LOG(ERR, PMD, "pg_size isn't a multiple of grant node item\n");
- return -1;
- }
-
- entries_per_pg = pg_size / sizeof(struct grant_node_item);
-	count = (pg_num + entries_per_pg - 1) / entries_per_pg;
- gref_tmp = malloc(count * sizeof(uint32_t));
- if (gref_tmp == NULL)
- return -1;
- ptr = gntalloc(pg_size * count, gref_tmp, &start_index);
- if (ptr == NULL) {
- RTE_LOG(ERR, PMD, "%s: gntalloc error of %d pages\n", __func__, count);
- free(gref_tmp);
- return -1;
- }
-
- while (j < pg_num) {
- if (first) {
- rv = snprintf(val_str, str_size, "%u", gref_tmp[k]);
- first = 0;
- } else {
- snprintf(tmp_str, PATH_MAX, "%s", val_str);
- rv = snprintf(val_str, str_size, "%s,%u", tmp_str, gref_tmp[k]);
- }
- k++;
- if (rv == -1)
- break;
-
- for (i = 0; i < entries_per_pg && j < pg_num ; i++) {
- ((struct grant_node_item *)ptr)->gref = gref_arr[j];
- ((struct grant_node_item *)ptr)->pfn = pa_arr[j] >> pg_shift;
- ptr = RTE_PTR_ADD(ptr, sizeof(struct grant_node_item));
- j++;
- }
- }
- if (rv == -1) {
- gntfree(ptr, pg_size * count, start_index);
- } else
- rv = 0;
- free(gref_tmp);
- return rv;
-}
-
-
-int
-grant_gntalloc_mbuf_pool(struct rte_mempool *mpool, uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, int mempool_idx)
-{
- char key_str[PATH_MAX] = {0};
- char val_str[PATH_MAX] = {0};
-
- if (grant_node_create(pg_num, gref_arr, pa_arr, val_str, sizeof(val_str))) {
- return -1;
- }
-
- if (snprintf(key_str, sizeof(key_str),
- DPDK_XENSTORE_PATH"%d"MEMPOOL_XENSTORE_STR, mempool_idx) == -1)
- return -1;
- if (xenstore_write(key_str, val_str) == -1)
- return -1;
-
- if (snprintf(key_str, sizeof(key_str),
- DPDK_XENSTORE_PATH"%d"MEMPOOL_VA_XENSTORE_STR, mempool_idx) == -1)
- return -1;
- if (snprintf(val_str, sizeof(val_str), "%"PRIxPTR, (uintptr_t)mpool->elt_va_start) == -1)
- return -1;
- if (xenstore_write(key_str, val_str) == -1)
- return -1;
-
- return 0;
-}
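
The removed helpers wrap /dev/xen/gntalloc: gntalloc() and grefwatch_from_alloc() allocate grant references and mmap the backing pages, while grant_node_create() packs {gref, pfn} pairs into granted pages whose own references are then published through xenstore. A minimal, hedged sketch of the underlying ioctl/mmap sequence follows; it assumes a Xen domU with /dev/xen/gntalloc present, uses the same <xen/sys/gntalloc.h> header as the removed file, and abbreviates error handling.

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <xen/sys/gntalloc.h>

    int main(void)
    {
        int fd = open("/dev/xen/gntalloc", O_RDWR);
        if (fd < 0)
            return 1;

        /* Ask the kernel for one page granted to dom0 (domid 0). */
        struct ioctl_gntalloc_alloc_gref req = {
            .domid = 0,
            .flags = GNTALLOC_FLAG_WRITABLE,
            .count = 1,
        };
        if (ioctl(fd, IOCTL_GNTALLOC_ALLOC_GREF, &req)) {
            close(fd);
            return 1;
        }

        /* req.index is the offset at which the granted page can be mapped. */
        void *va = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
                        MAP_SHARED, fd, req.index);
        if (va != MAP_FAILED) {
            printf("gref %u mapped at %p\n", req.gref_ids[0], va);
            munmap(va, getpagesize());
        }

        /* Release the grant once the mapping is gone. */
        struct ioctl_gntalloc_dealloc_gref rel = {
            .index = req.index,
            .count = 1,
        };
        ioctl(fd, IOCTL_GNTALLOC_DEALLOC_GREF, &rel);
        close(fd);
        return 0;
    }
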
diff --git a/src/dpdk_lib18/librte_port/Makefile b/src/dpdk_lib18/librte_port/Makefile
deleted file mode 100755
index 82b51929..00000000
--- a/src/dpdk_lib18/librte_port/Makefile
+++ /dev/null
@@ -1,77 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_port.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-#
-# all source are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_ethdev.c
-SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_ring.c
-ifeq ($(CONFIG_RTE_LIBRTE_IP_FRAG),y)
-ifeq ($(CONFIG_RTE_MBUF_REFCNT),y)
-SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_frag.c
-endif
-SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_ras.c
-endif
-SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_sched.c
-SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_source_sink.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port.h
-SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_ethdev.h
-SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_ring.h
-ifeq ($(CONFIG_RTE_LIBRTE_IP_FRAG),y)
-ifeq ($(CONFIG_RTE_MBUF_REFCNT),y)
-SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_frag.h
-endif
-SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_ras.h
-endif
-SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_sched.h
-SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_source_sink.h
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) := lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_malloc
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_ip_frag
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_port/rte_port_ethdev.c b/src/dpdk_lib18/librte_port/rte_port_ethdev.c
deleted file mode 100755
index d0149133..00000000
--- a/src/dpdk_lib18/librte_port/rte_port_ethdev.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <string.h>
-
-#include <rte_mbuf.h>
-#include <rte_ethdev.h>
-#include <rte_malloc.h>
-
-#include "rte_port_ethdev.h"
-
-/*
- * Port ETHDEV Reader
- */
-struct rte_port_ethdev_reader {
- uint16_t queue_id;
- uint8_t port_id;
-};
-
-static void *
-rte_port_ethdev_reader_create(void *params, int socket_id)
-{
- struct rte_port_ethdev_reader_params *conf =
- (struct rte_port_ethdev_reader_params *) params;
- struct rte_port_ethdev_reader *port;
-
- /* Check input parameters */
- if (conf == NULL) {
- RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
- return NULL;
- }
-
- /* Memory allocation */
- port = rte_zmalloc_socket("PORT", sizeof(*port),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
- return NULL;
- }
-
- /* Initialization */
- port->port_id = conf->port_id;
- port->queue_id = conf->queue_id;
-
- return port;
-}
-
-static int
-rte_port_ethdev_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
-{
- struct rte_port_ethdev_reader *p =
- (struct rte_port_ethdev_reader *) port;
-
- return rte_eth_rx_burst(p->port_id, p->queue_id, pkts, n_pkts);
-}
-
-static int
-rte_port_ethdev_reader_free(void *port)
-{
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
- return -EINVAL;
- }
-
- rte_free(port);
-
- return 0;
-}
-
-/*
- * Port ETHDEV Writer
- */
-#define RTE_PORT_ETHDEV_WRITER_APPROACH 1
-
-struct rte_port_ethdev_writer {
- struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
- uint32_t tx_burst_sz;
- uint16_t tx_buf_count;
- uint64_t bsz_mask;
- uint16_t queue_id;
- uint8_t port_id;
-};
-
-static void *
-rte_port_ethdev_writer_create(void *params, int socket_id)
-{
- struct rte_port_ethdev_writer_params *conf =
- (struct rte_port_ethdev_writer_params *) params;
- struct rte_port_ethdev_writer *port;
-
- /* Check input parameters */
- if ((conf == NULL) ||
- (conf->tx_burst_sz == 0) ||
- (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
- (!rte_is_power_of_2(conf->tx_burst_sz))) {
- RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
- return NULL;
- }
-
- /* Memory allocation */
- port = rte_zmalloc_socket("PORT", sizeof(*port),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
- return NULL;
- }
-
- /* Initialization */
- port->port_id = conf->port_id;
- port->queue_id = conf->queue_id;
- port->tx_burst_sz = conf->tx_burst_sz;
- port->tx_buf_count = 0;
- port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
-
- return port;
-}
-
-static inline void
-send_burst(struct rte_port_ethdev_writer *p)
-{
- uint32_t nb_tx;
-
- nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id,
- p->tx_buf, p->tx_buf_count);
-
- for ( ; nb_tx < p->tx_buf_count; nb_tx++)
- rte_pktmbuf_free(p->tx_buf[nb_tx]);
-
- p->tx_buf_count = 0;
-}
-
-static int
-rte_port_ethdev_writer_tx(void *port, struct rte_mbuf *pkt)
-{
- struct rte_port_ethdev_writer *p =
- (struct rte_port_ethdev_writer *) port;
-
- p->tx_buf[p->tx_buf_count++] = pkt;
- if (p->tx_buf_count >= p->tx_burst_sz)
- send_burst(p);
-
- return 0;
-}
-
-#if RTE_PORT_ETHDEV_WRITER_APPROACH == 0
-
-static int
-rte_port_ethdev_writer_tx_bulk(void *port,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask)
-{
- struct rte_port_ethdev_writer *p =
- (struct rte_port_ethdev_writer *) port;
-
- if ((pkts_mask & (pkts_mask + 1)) == 0) {
- uint64_t n_pkts = __builtin_popcountll(pkts_mask);
- uint32_t i;
-
- for (i = 0; i < n_pkts; i++) {
- struct rte_mbuf *pkt = pkts[i];
-
- p->tx_buf[p->tx_buf_count++] = pkt;
- if (p->tx_buf_count >= p->tx_burst_sz)
- send_burst(p);
- }
- } else {
- for ( ; pkts_mask; ) {
- uint32_t pkt_index = __builtin_ctzll(pkts_mask);
- uint64_t pkt_mask = 1LLU << pkt_index;
- struct rte_mbuf *pkt = pkts[pkt_index];
-
- p->tx_buf[p->tx_buf_count++] = pkt;
- if (p->tx_buf_count >= p->tx_burst_sz)
- send_burst(p);
- pkts_mask &= ~pkt_mask;
- }
- }
-
- return 0;
-}
-
-#elif RTE_PORT_ETHDEV_WRITER_APPROACH == 1
-
-static int
-rte_port_ethdev_writer_tx_bulk(void *port,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask)
-{
- struct rte_port_ethdev_writer *p =
- (struct rte_port_ethdev_writer *) port;
-	uint64_t bsz_mask = p->bsz_mask; /* keep full 64-bit mask: tx_burst_sz may exceed 32 */
- uint32_t tx_buf_count = p->tx_buf_count;
- uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
- ((pkts_mask & bsz_mask) ^ bsz_mask);
-
- if (expr == 0) {
- uint64_t n_pkts = __builtin_popcountll(pkts_mask);
- uint32_t n_pkts_ok;
-
- if (tx_buf_count)
- send_burst(p);
-
- n_pkts_ok = rte_eth_tx_burst(p->port_id, p->queue_id, pkts,
- n_pkts);
-
- for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
- struct rte_mbuf *pkt = pkts[n_pkts_ok];
-
- rte_pktmbuf_free(pkt);
- }
- } else {
- for ( ; pkts_mask; ) {
- uint32_t pkt_index = __builtin_ctzll(pkts_mask);
- uint64_t pkt_mask = 1LLU << pkt_index;
- struct rte_mbuf *pkt = pkts[pkt_index];
-
- p->tx_buf[tx_buf_count++] = pkt;
- pkts_mask &= ~pkt_mask;
- }
-
- p->tx_buf_count = tx_buf_count;
- if (tx_buf_count >= p->tx_burst_sz)
- send_burst(p);
- }
-
- return 0;
-}
-
-#else
-
-#error Invalid value for RTE_PORT_ETHDEV_WRITER_APPROACH
-
-#endif
-
-static int
-rte_port_ethdev_writer_flush(void *port)
-{
- struct rte_port_ethdev_writer *p =
- (struct rte_port_ethdev_writer *) port;
-
- if (p->tx_buf_count > 0)
- send_burst(p);
-
- return 0;
-}
-
-static int
-rte_port_ethdev_writer_free(void *port)
-{
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
- return -EINVAL;
- }
-
- rte_port_ethdev_writer_flush(port);
- rte_free(port);
-
- return 0;
-}
-
-/*
- * Summary of port operations
- */
-struct rte_port_in_ops rte_port_ethdev_reader_ops = {
- .f_create = rte_port_ethdev_reader_create,
- .f_free = rte_port_ethdev_reader_free,
- .f_rx = rte_port_ethdev_reader_rx,
-};
-
-struct rte_port_out_ops rte_port_ethdev_writer_ops = {
- .f_create = rte_port_ethdev_writer_create,
- .f_free = rte_port_ethdev_writer_free,
- .f_tx = rte_port_ethdev_writer_tx,
- .f_tx_bulk = rte_port_ethdev_writer_tx_bulk,
- .f_flush = rte_port_ethdev_writer_flush,
-};
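
Both writer approaches pivot on a single predicate: pkts_mask & (pkts_mask + 1) is zero only when the mask is a contiguous run of ones starting at bit 0, and (pkts_mask & bsz_mask) ^ bsz_mask is zero only when bit tx_burst_sz - 1 is set. Their OR is therefore zero exactly when at least one full burst of contiguous packets arrived, which is what lets approach 1 hand the array straight to rte_eth_tx_burst() and skip the staging buffer. A small self-contained check of that predicate (mask values are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors the expr computed by rte_port_ethdev_writer_tx_bulk(). */
    static uint64_t full_burst(uint64_t pkts_mask, uint32_t tx_burst_sz)
    {
        uint64_t bsz_mask = 1ULL << (tx_burst_sz - 1);

        return (pkts_mask & (pkts_mask + 1)) |
               ((pkts_mask & bsz_mask) ^ bsz_mask);
    }

    int main(void)
    {
        assert(full_burst(0xFFFFFFFFULL, 32) == 0); /* one full burst: fast path */
        assert(full_burst(0x0000FFFFULL, 32) != 0); /* contiguous but too short */
        assert(full_burst(0x0000F0FFULL, 8) != 0);  /* long enough, not contiguous */
        return 0;
    }
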
diff --git a/src/dpdk_lib18/librte_port/rte_port_frag.c b/src/dpdk_lib18/librte_port/rte_port_frag.c
deleted file mode 100755
index ff0ab9b8..00000000
--- a/src/dpdk_lib18/librte_port/rte_port_frag.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <string.h>
-
-#include <rte_ether.h>
-#include <rte_ip_frag.h>
-#include <rte_memory.h>
-
-#include "rte_port_frag.h"
-
-/* Default byte size for the IPv4 Maximum Transfer Unit (MTU).
- * This value includes the size of IPv4 header. */
-#define IPV4_MTU_DEFAULT ETHER_MTU
-
-/* Max number of fragments per packet allowed */
-#define IPV4_MAX_FRAGS_PER_PACKET 0x80
-
-struct rte_port_ring_reader_ipv4_frag {
- /* Input parameters */
- struct rte_ring *ring;
- uint32_t mtu;
- uint32_t metadata_size;
- struct rte_mempool *pool_direct;
- struct rte_mempool *pool_indirect;
-
- /* Internal buffers */
- struct rte_mbuf *pkts[RTE_PORT_IN_BURST_SIZE_MAX];
- struct rte_mbuf *frags[IPV4_MAX_FRAGS_PER_PACKET];
- uint32_t n_pkts;
- uint32_t pos_pkts;
- uint32_t n_frags;
- uint32_t pos_frags;
-} __rte_cache_aligned;
-
-static void *
-rte_port_ring_reader_ipv4_frag_create(void *params, int socket_id)
-{
- struct rte_port_ring_reader_ipv4_frag_params *conf =
- (struct rte_port_ring_reader_ipv4_frag_params *) params;
- struct rte_port_ring_reader_ipv4_frag *port;
-
- /* Check input parameters */
- if (conf == NULL) {
- RTE_LOG(ERR, PORT, "%s: Parameter conf is NULL\n", __func__);
- return NULL;
- }
- if (conf->ring == NULL) {
- RTE_LOG(ERR, PORT, "%s: Parameter ring is NULL\n", __func__);
- return NULL;
- }
- if (conf->mtu == 0) {
- RTE_LOG(ERR, PORT, "%s: Parameter mtu is invalid\n", __func__);
- return NULL;
- }
- if (conf->pool_direct == NULL) {
- RTE_LOG(ERR, PORT, "%s: Parameter pool_direct is NULL\n",
- __func__);
- return NULL;
- }
- if (conf->pool_indirect == NULL) {
- RTE_LOG(ERR, PORT, "%s: Parameter pool_indirect is NULL\n",
- __func__);
- return NULL;
- }
-
- /* Memory allocation */
- port = rte_zmalloc_socket("PORT", sizeof(*port), RTE_CACHE_LINE_SIZE,
- socket_id);
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
- return NULL;
- }
-
- /* Initialization */
- port->ring = conf->ring;
- port->mtu = conf->mtu;
- port->metadata_size = conf->metadata_size;
- port->pool_direct = conf->pool_direct;
- port->pool_indirect = conf->pool_indirect;
-
- port->n_pkts = 0;
- port->pos_pkts = 0;
- port->n_frags = 0;
- port->pos_frags = 0;
-
- return port;
-}
-
-static int
-rte_port_ring_reader_ipv4_frag_rx(void *port,
- struct rte_mbuf **pkts,
- uint32_t n_pkts)
-{
- struct rte_port_ring_reader_ipv4_frag *p =
- (struct rte_port_ring_reader_ipv4_frag *) port;
- uint32_t n_pkts_out;
-
- n_pkts_out = 0;
-
- /* Get packets from the "frag" buffer */
- if (p->n_frags >= n_pkts) {
- memcpy(pkts, &p->frags[p->pos_frags], n_pkts * sizeof(void *));
- p->pos_frags += n_pkts;
- p->n_frags -= n_pkts;
-
- return n_pkts;
- }
-
- memcpy(pkts, &p->frags[p->pos_frags], p->n_frags * sizeof(void *));
- n_pkts_out = p->n_frags;
- p->n_frags = 0;
-
- /* Look to "pkts" buffer to get more packets */
- for ( ; ; ) {
- struct rte_mbuf *pkt;
- uint32_t n_pkts_to_provide, i;
- int status;
-
- /* If "pkts" buffer is empty, read packet burst from ring */
- if (p->n_pkts == 0) {
- p->n_pkts = rte_ring_sc_dequeue_burst(p->ring,
- (void **) p->pkts, RTE_PORT_IN_BURST_SIZE_MAX);
- if (p->n_pkts == 0)
- return n_pkts_out;
- p->pos_pkts = 0;
- }
-
- /* Read next packet from "pkts" buffer */
- pkt = p->pkts[p->pos_pkts++];
- p->n_pkts--;
-
- /* If not jumbo, pass current packet to output */
- if (pkt->pkt_len <= IPV4_MTU_DEFAULT) {
- pkts[n_pkts_out++] = pkt;
-
- n_pkts_to_provide = n_pkts - n_pkts_out;
- if (n_pkts_to_provide == 0)
- return n_pkts;
-
- continue;
- }
-
- /* Fragment current packet into the "frags" buffer */
- status = rte_ipv4_fragment_packet(
- pkt,
- p->frags,
- IPV4_MAX_FRAGS_PER_PACKET,
- p->mtu,
- p->pool_direct,
- p->pool_indirect
- );
-
- if (status < 0) {
- rte_pktmbuf_free(pkt);
- continue;
- }
-
- p->n_frags = (uint32_t) status;
- p->pos_frags = 0;
-
- /* Copy meta-data from input jumbo packet to its fragments */
- for (i = 0; i < p->n_frags; i++) {
- uint8_t *src = RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);
- uint8_t *dst =
- RTE_MBUF_METADATA_UINT8_PTR(p->frags[i], 0);
-
- memcpy(dst, src, p->metadata_size);
- }
-
- /* Free input jumbo packet */
- rte_pktmbuf_free(pkt);
-
- /* Get packets from "frag" buffer */
- n_pkts_to_provide = n_pkts - n_pkts_out;
- if (p->n_frags >= n_pkts_to_provide) {
- memcpy(&pkts[n_pkts_out], p->frags,
- n_pkts_to_provide * sizeof(void *));
- p->n_frags -= n_pkts_to_provide;
- p->pos_frags += n_pkts_to_provide;
-
- return n_pkts;
- }
-
- memcpy(&pkts[n_pkts_out], p->frags,
- p->n_frags * sizeof(void *));
- n_pkts_out += p->n_frags;
- p->n_frags = 0;
- }
-}
-
-static int
-rte_port_ring_reader_ipv4_frag_free(void *port)
-{
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Parameter port is NULL\n", __func__);
- return -1;
- }
-
- rte_free(port);
-
- return 0;
-}
-
-/*
- * Summary of port operations
- */
-struct rte_port_in_ops rte_port_ring_reader_ipv4_frag_ops = {
- .f_create = rte_port_ring_reader_ipv4_frag_create,
- .f_free = rte_port_ring_reader_ipv4_frag_free,
- .f_rx = rte_port_ring_reader_ipv4_frag_rx,
-};
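
The reader drains fragments left over from the previous call before dequeuing a fresh burst, so a fragmented jumbo frame can never overflow the caller's n_pkts budget. Instantiating it needs only a ring plus the two mempools that rte_ipv4_fragment_packet() consumes; a hedged sketch, with ring and pool creation not shown and all names illustrative:

    #include <rte_ring.h>
    #include <rte_mempool.h>
    #include <rte_lcore.h>
    #include "rte_port_frag.h"

    void *make_frag_reader(struct rte_ring *ring,
                           struct rte_mempool *direct,
                           struct rte_mempool *indirect)
    {
        struct rte_port_ring_reader_ipv4_frag_params p = {
            .ring = ring,
            .mtu = 1500,               /* output fragments capped at this size */
            .metadata_size = 0,        /* no per-packet metadata to copy */
            .pool_direct = direct,     /* fragment headers */
            .pool_indirect = indirect, /* attach-only segments for payload */
        };

        return rte_port_ring_reader_ipv4_frag_ops.f_create(&p, rte_socket_id());
    }
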
diff --git a/src/dpdk_lib18/librte_port/rte_port_ras.c b/src/dpdk_lib18/librte_port/rte_port_ras.c
deleted file mode 100755
index b6ab67ab..00000000
--- a/src/dpdk_lib18/librte_port/rte_port_ras.c
+++ /dev/null
@@ -1,252 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <string.h>
-
-#include <rte_ether.h>
-#include <rte_ip_frag.h>
-#include <rte_cycles.h>
-#include <rte_log.h>
-
-#include "rte_port_ras.h"
-
-#ifndef IPV4_RAS_N_BUCKETS
-#define IPV4_RAS_N_BUCKETS 4094
-#endif
-
-#ifndef IPV4_RAS_N_ENTRIES_PER_BUCKET
-#define IPV4_RAS_N_ENTRIES_PER_BUCKET 8
-#endif
-
-#ifndef IPV4_RAS_N_ENTRIES
-#define IPV4_RAS_N_ENTRIES (IPV4_RAS_N_BUCKETS * IPV4_RAS_N_ENTRIES_PER_BUCKET)
-#endif
-
-struct rte_port_ring_writer_ipv4_ras {
- struct rte_mbuf *tx_buf[RTE_PORT_IN_BURST_SIZE_MAX];
- struct rte_ring *ring;
- uint32_t tx_burst_sz;
- uint32_t tx_buf_count;
- struct rte_ip_frag_tbl *frag_tbl;
- struct rte_ip_frag_death_row death_row;
-};
-
-static void *
-rte_port_ring_writer_ipv4_ras_create(void *params, int socket_id)
-{
- struct rte_port_ring_writer_ipv4_ras_params *conf =
- (struct rte_port_ring_writer_ipv4_ras_params *) params;
- struct rte_port_ring_writer_ipv4_ras *port;
- uint64_t frag_cycles;
-
- /* Check input parameters */
- if (conf == NULL) {
- RTE_LOG(ERR, PORT, "%s: Parameter conf is NULL\n", __func__);
- return NULL;
- }
- if (conf->ring == NULL) {
- RTE_LOG(ERR, PORT, "%s: Parameter ring is NULL\n", __func__);
- return NULL;
- }
- if ((conf->tx_burst_sz == 0) ||
- (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
- RTE_LOG(ERR, PORT, "%s: Parameter tx_burst_sz is invalid\n",
- __func__);
- return NULL;
- }
-
- /* Memory allocation */
- port = rte_zmalloc_socket("PORT", sizeof(*port),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Failed to allocate socket\n", __func__);
- return NULL;
- }
-
- /* Create fragmentation table */
- frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S * MS_PER_S;
- frag_cycles *= 100;
-
- port->frag_tbl = rte_ip_frag_table_create(
- IPV4_RAS_N_BUCKETS,
- IPV4_RAS_N_ENTRIES_PER_BUCKET,
- IPV4_RAS_N_ENTRIES,
- frag_cycles,
- socket_id);
-
- if (port->frag_tbl == NULL) {
- RTE_LOG(ERR, PORT, "%s: rte_ip_frag_table_create failed\n",
- __func__);
- rte_free(port);
- return NULL;
- }
-
- /* Initialization */
- port->ring = conf->ring;
- port->tx_burst_sz = conf->tx_burst_sz;
- port->tx_buf_count = 0;
-
- return port;
-}
-
-static inline void
-send_burst(struct rte_port_ring_writer_ipv4_ras *p)
-{
- uint32_t nb_tx;
-
- nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
-
- for ( ; nb_tx < p->tx_buf_count; nb_tx++)
- rte_pktmbuf_free(p->tx_buf[nb_tx]);
-
- p->tx_buf_count = 0;
-}
-
-static inline void
-process_one(struct rte_port_ring_writer_ipv4_ras *p, struct rte_mbuf *pkt)
-{
- /* Assume there is no ethernet header */
- struct ipv4_hdr *pkt_hdr = (struct ipv4_hdr *)
- (rte_pktmbuf_mtod(pkt, unsigned char *));
-
- /* Get "Do not fragment" flag and fragment offset */
- uint16_t frag_field = rte_be_to_cpu_16(pkt_hdr->fragment_offset);
- uint16_t frag_offset = (uint16_t)(frag_field & IPV4_HDR_OFFSET_MASK);
- uint16_t frag_flag = (uint16_t)(frag_field & IPV4_HDR_MF_FLAG);
-
-	/* Non-fragments bypass reassembly; fragments go to the table */
- if ((frag_flag == 0) && (frag_offset == 0))
- p->tx_buf[p->tx_buf_count++] = pkt;
- else {
- struct rte_mbuf *mo;
- struct rte_ip_frag_tbl *tbl = p->frag_tbl;
- struct rte_ip_frag_death_row *dr = &p->death_row;
-
- /* Process this fragment */
- mo = rte_ipv4_frag_reassemble_packet(tbl, dr, pkt, rte_rdtsc(), pkt_hdr);
- if (mo != NULL)
- p->tx_buf[p->tx_buf_count++] = mo;
-
- rte_ip_frag_free_death_row(&p->death_row, 3);
- }
-}
-
-static int
-rte_port_ring_writer_ipv4_ras_tx(void *port, struct rte_mbuf *pkt)
-{
- struct rte_port_ring_writer_ipv4_ras *p =
- (struct rte_port_ring_writer_ipv4_ras *) port;
-
- process_one(p, pkt);
- if (p->tx_buf_count >= p->tx_burst_sz)
- send_burst(p);
-
- return 0;
-}
-
-static int
-rte_port_ring_writer_ipv4_ras_tx_bulk(void *port,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask)
-{
- struct rte_port_ring_writer_ipv4_ras *p =
- (struct rte_port_ring_writer_ipv4_ras *) port;
-
- if ((pkts_mask & (pkts_mask + 1)) == 0) {
- uint64_t n_pkts = __builtin_popcountll(pkts_mask);
- uint32_t i;
-
- for (i = 0; i < n_pkts; i++) {
- struct rte_mbuf *pkt = pkts[i];
-
- process_one(p, pkt);
- if (p->tx_buf_count >= p->tx_burst_sz)
- send_burst(p);
- }
- } else {
- for ( ; pkts_mask; ) {
- uint32_t pkt_index = __builtin_ctzll(pkts_mask);
- uint64_t pkt_mask = 1LLU << pkt_index;
- struct rte_mbuf *pkt = pkts[pkt_index];
-
- process_one(p, pkt);
- if (p->tx_buf_count >= p->tx_burst_sz)
- send_burst(p);
-
- pkts_mask &= ~pkt_mask;
- }
- }
-
- return 0;
-}
-
-static int
-rte_port_ring_writer_ipv4_ras_flush(void *port)
-{
- struct rte_port_ring_writer_ipv4_ras *p =
- (struct rte_port_ring_writer_ipv4_ras *) port;
-
- if (p->tx_buf_count > 0)
- send_burst(p);
-
- return 0;
-}
-
-static int
-rte_port_ring_writer_ipv4_ras_free(void *port)
-{
- struct rte_port_ring_writer_ipv4_ras *p =
- (struct rte_port_ring_writer_ipv4_ras *) port;
-
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Parameter port is NULL\n", __func__);
- return -1;
- }
-
- rte_port_ring_writer_ipv4_ras_flush(port);
- rte_ip_frag_table_destroy(p->frag_tbl);
- rte_free(port);
-
- return 0;
-}
-
-/*
- * Summary of port operations
- */
-struct rte_port_out_ops rte_port_ring_writer_ipv4_ras_ops = {
- .f_create = rte_port_ring_writer_ipv4_ras_create,
- .f_free = rte_port_ring_writer_ipv4_ras_free,
- .f_tx = rte_port_ring_writer_ipv4_ras_tx,
- .f_tx_bulk = rte_port_ring_writer_ipv4_ras_tx_bulk,
- .f_flush = rte_port_ring_writer_ipv4_ras_flush,
-};
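
The classification in process_one() needs only two header bits: a datagram is a fragment iff the More-Fragments flag is set or the fragment offset is non-zero, and everything else passes straight to the TX buffer. The test in isolation, as a hedged standalone helper using the struct and macros from rte_ip.h:

    #include <stdint.h>
    #include <rte_byteorder.h>
    #include <rte_ip.h>

    /* Returns non-zero iff the packet must go through reassembly. */
    static int ipv4_needs_reassembly(const struct ipv4_hdr *h)
    {
        uint16_t ff = rte_be_to_cpu_16(h->fragment_offset);

        return (ff & IPV4_HDR_MF_FLAG) != 0 ||
               (ff & IPV4_HDR_OFFSET_MASK) != 0;
    }
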
diff --git a/src/dpdk_lib18/librte_port/rte_port_ring.c b/src/dpdk_lib18/librte_port/rte_port_ring.c
deleted file mode 100755
index fa3d77b4..00000000
--- a/src/dpdk_lib18/librte_port/rte_port_ring.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <string.h>
-
-#include <rte_mbuf.h>
-#include <rte_ring.h>
-#include <rte_malloc.h>
-
-#include "rte_port_ring.h"
-
-/*
- * Port RING Reader
- */
-struct rte_port_ring_reader {
- struct rte_ring *ring;
-};
-
-static void *
-rte_port_ring_reader_create(void *params, int socket_id)
-{
- struct rte_port_ring_reader_params *conf =
- (struct rte_port_ring_reader_params *) params;
- struct rte_port_ring_reader *port;
-
- /* Check input parameters */
- if (conf == NULL) {
- RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
- return NULL;
- }
-
- /* Memory allocation */
- port = rte_zmalloc_socket("PORT", sizeof(*port),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
- return NULL;
- }
-
- /* Initialization */
- port->ring = conf->ring;
-
- return port;
-}
-
-static int
-rte_port_ring_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
-{
- struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
-
- return rte_ring_sc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
-}
-
-static int
-rte_port_ring_reader_free(void *port)
-{
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
- return -EINVAL;
- }
-
- rte_free(port);
-
- return 0;
-}
-
-/*
- * Port RING Writer
- */
-struct rte_port_ring_writer {
- struct rte_mbuf *tx_buf[RTE_PORT_IN_BURST_SIZE_MAX];
- struct rte_ring *ring;
- uint32_t tx_burst_sz;
- uint32_t tx_buf_count;
-};
-
-static void *
-rte_port_ring_writer_create(void *params, int socket_id)
-{
- struct rte_port_ring_writer_params *conf =
- (struct rte_port_ring_writer_params *) params;
- struct rte_port_ring_writer *port;
-
- /* Check input parameters */
- if ((conf == NULL) ||
- (conf->ring == NULL) ||
- (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
- RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
- return NULL;
- }
-
- /* Memory allocation */
- port = rte_zmalloc_socket("PORT", sizeof(*port),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
- return NULL;
- }
-
- /* Initialization */
- port->ring = conf->ring;
- port->tx_burst_sz = conf->tx_burst_sz;
- port->tx_buf_count = 0;
-
- return port;
-}
-
-static inline void
-send_burst(struct rte_port_ring_writer *p)
-{
- uint32_t nb_tx;
-
- nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
-
- for ( ; nb_tx < p->tx_buf_count; nb_tx++)
- rte_pktmbuf_free(p->tx_buf[nb_tx]);
-
- p->tx_buf_count = 0;
-}
-
-static int
-rte_port_ring_writer_tx(void *port, struct rte_mbuf *pkt)
-{
- struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;
-
- p->tx_buf[p->tx_buf_count++] = pkt;
- if (p->tx_buf_count >= p->tx_burst_sz)
- send_burst(p);
-
- return 0;
-}
-
-static int
-rte_port_ring_writer_tx_bulk(void *port,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask)
-{
- struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;
-
- if ((pkts_mask & (pkts_mask + 1)) == 0) {
- uint64_t n_pkts = __builtin_popcountll(pkts_mask);
- uint32_t i;
-
- for (i = 0; i < n_pkts; i++) {
- struct rte_mbuf *pkt = pkts[i];
-
- p->tx_buf[p->tx_buf_count++] = pkt;
- if (p->tx_buf_count >= p->tx_burst_sz)
- send_burst(p);
- }
- } else {
- for ( ; pkts_mask; ) {
- uint32_t pkt_index = __builtin_ctzll(pkts_mask);
- uint64_t pkt_mask = 1LLU << pkt_index;
- struct rte_mbuf *pkt = pkts[pkt_index];
-
- p->tx_buf[p->tx_buf_count++] = pkt;
- if (p->tx_buf_count >= p->tx_burst_sz)
- send_burst(p);
- pkts_mask &= ~pkt_mask;
- }
- }
-
- return 0;
-}
-
-static int
-rte_port_ring_writer_flush(void *port)
-{
- struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;
-
- if (p->tx_buf_count > 0)
- send_burst(p);
-
- return 0;
-}
-
-static int
-rte_port_ring_writer_free(void *port)
-{
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
- return -EINVAL;
- }
-
- rte_port_ring_writer_flush(port);
- rte_free(port);
-
- return 0;
-}
-
-/*
- * Summary of port operations
- */
-struct rte_port_in_ops rte_port_ring_reader_ops = {
- .f_create = rte_port_ring_reader_create,
- .f_free = rte_port_ring_reader_free,
- .f_rx = rte_port_ring_reader_rx,
-};
-
-struct rte_port_out_ops rte_port_ring_writer_ops = {
- .f_create = rte_port_ring_writer_create,
- .f_free = rte_port_ring_writer_free,
- .f_tx = rte_port_ring_writer_tx,
- .f_tx_bulk = rte_port_ring_writer_tx_bulk,
- .f_flush = rte_port_ring_writer_flush,
-};
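
The ring reader and writer use the single-producer/single-consumer enqueue and dequeue calls, so a ring shared between two pipeline stages should be created with RING_F_SP_ENQ | RING_F_SC_DEQ. A hedged wiring sketch, with the ring name and size purely illustrative:

    #include <rte_ring.h>
    #include <rte_lcore.h>
    #include "rte_port_ring.h"

    void *ring_in, *ring_out;

    int make_ring_ports(void)
    {
        struct rte_ring *r = rte_ring_create("stage0", 1024, rte_socket_id(),
                                             RING_F_SP_ENQ | RING_F_SC_DEQ);
        struct rte_port_ring_writer_params wp = { .ring = r, .tx_burst_sz = 32 };
        struct rte_port_ring_reader_params rp = { .ring = r };

        if (r == NULL)
            return -1;

        ring_out = rte_port_ring_writer_ops.f_create(&wp, rte_socket_id());
        ring_in = rte_port_ring_reader_ops.f_create(&rp, rte_socket_id());
        return (ring_in != NULL && ring_out != NULL) ? 0 : -1;
    }
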
diff --git a/src/dpdk_lib18/librte_port/rte_port_sched.c b/src/dpdk_lib18/librte_port/rte_port_sched.c
deleted file mode 100755
index 2107f4c8..00000000
--- a/src/dpdk_lib18/librte_port/rte_port_sched.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <string.h>
-
-#include <rte_mbuf.h>
-#include <rte_malloc.h>
-
-#include "rte_port_sched.h"
-
-/*
- * Reader
- */
-struct rte_port_sched_reader {
- struct rte_sched_port *sched;
-};
-
-static void *
-rte_port_sched_reader_create(void *params, int socket_id)
-{
- struct rte_port_sched_reader_params *conf =
- (struct rte_port_sched_reader_params *) params;
- struct rte_port_sched_reader *port;
-
- /* Check input parameters */
- if ((conf == NULL) ||
- (conf->sched == NULL)) {
- RTE_LOG(ERR, PORT, "%s: Invalid params\n", __func__);
- return NULL;
- }
-
- /* Memory allocation */
- port = rte_zmalloc_socket("PORT", sizeof(*port),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
- return NULL;
- }
-
- /* Initialization */
- port->sched = conf->sched;
-
- return port;
-}
-
-static int
-rte_port_sched_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
-{
- struct rte_port_sched_reader *p = (struct rte_port_sched_reader *) port;
-
- return rte_sched_port_dequeue(p->sched, pkts, n_pkts);
-}
-
-static int
-rte_port_sched_reader_free(void *port)
-{
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
- return -EINVAL;
- }
-
- rte_free(port);
-
- return 0;
-}
-
-/*
- * Writer
- */
-struct rte_port_sched_writer {
- struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
- struct rte_sched_port *sched;
- uint32_t tx_burst_sz;
- uint32_t tx_buf_count;
- uint64_t bsz_mask;
-};
-
-static void *
-rte_port_sched_writer_create(void *params, int socket_id)
-{
- struct rte_port_sched_writer_params *conf =
- (struct rte_port_sched_writer_params *) params;
- struct rte_port_sched_writer *port;
-
- /* Check input parameters */
- if ((conf == NULL) ||
- (conf->sched == NULL) ||
- (conf->tx_burst_sz == 0) ||
- (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
- (!rte_is_power_of_2(conf->tx_burst_sz))) {
- RTE_LOG(ERR, PORT, "%s: Invalid params\n", __func__);
- return NULL;
- }
-
- /* Memory allocation */
- port = rte_zmalloc_socket("PORT", sizeof(*port),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
- return NULL;
- }
-
- /* Initialization */
- port->sched = conf->sched;
- port->tx_burst_sz = conf->tx_burst_sz;
- port->tx_buf_count = 0;
- port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
-
- return port;
-}
-
-static int
-rte_port_sched_writer_tx(void *port, struct rte_mbuf *pkt)
-{
- struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;
-
- p->tx_buf[p->tx_buf_count++] = pkt;
- if (p->tx_buf_count >= p->tx_burst_sz) {
- rte_sched_port_enqueue(p->sched, p->tx_buf, p->tx_buf_count);
- p->tx_buf_count = 0;
- }
-
- return 0;
-}
-
-static int
-rte_port_sched_writer_tx_bulk(void *port,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask)
-{
- struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;
-	uint64_t bsz_mask = p->bsz_mask; /* keep full 64-bit mask: tx_burst_sz may exceed 32 */
- uint32_t tx_buf_count = p->tx_buf_count;
- uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
- ((pkts_mask & bsz_mask) ^ bsz_mask);
-
- if (expr == 0) {
- uint64_t n_pkts = __builtin_popcountll(pkts_mask);
-
- if (tx_buf_count) {
- rte_sched_port_enqueue(p->sched, p->tx_buf,
- tx_buf_count);
- p->tx_buf_count = 0;
- }
-
- rte_sched_port_enqueue(p->sched, pkts, n_pkts);
- } else {
- for ( ; pkts_mask; ) {
- uint32_t pkt_index = __builtin_ctzll(pkts_mask);
- uint64_t pkt_mask = 1LLU << pkt_index;
- struct rte_mbuf *pkt = pkts[pkt_index];
-
- p->tx_buf[tx_buf_count++] = pkt;
- pkts_mask &= ~pkt_mask;
- }
- p->tx_buf_count = tx_buf_count;
-
- if (tx_buf_count >= p->tx_burst_sz) {
- rte_sched_port_enqueue(p->sched, p->tx_buf,
- tx_buf_count);
- p->tx_buf_count = 0;
- }
- }
-
- return 0;
-}
-
-static int
-rte_port_sched_writer_flush(void *port)
-{
- struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;
-
- if (p->tx_buf_count) {
- rte_sched_port_enqueue(p->sched, p->tx_buf, p->tx_buf_count);
- p->tx_buf_count = 0;
- }
-
- return 0;
-}
-
-static int
-rte_port_sched_writer_free(void *port)
-{
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
- return -EINVAL;
- }
-
- rte_port_sched_writer_flush(port);
- rte_free(port);
-
- return 0;
-}
-
-/*
- * Summary of port operations
- */
-struct rte_port_in_ops rte_port_sched_reader_ops = {
- .f_create = rte_port_sched_reader_create,
- .f_free = rte_port_sched_reader_free,
- .f_rx = rte_port_sched_reader_rx,
-};
-
-struct rte_port_out_ops rte_port_sched_writer_ops = {
- .f_create = rte_port_sched_writer_create,
- .f_free = rte_port_sched_writer_free,
- .f_tx = rte_port_sched_writer_tx,
- .f_tx_bulk = rte_port_sched_writer_tx_bulk,
- .f_flush = rte_port_sched_writer_flush,
-};
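
The sched writer stages up to one burst and hands it to rte_sched_port_enqueue() in a single call; unlike the ethdev writer it never frees packets itself, since the scheduler takes ownership of whatever it accepts. Note that f_create() rejects a tx_burst_sz that is not a power of two. A hedged sketch of adapting an already configured rte_sched_port (port configuration not shown):

    #include <rte_sched.h>
    #include <rte_lcore.h>
    #include "rte_port_sched.h"

    void *make_sched_writer(struct rte_sched_port *sched)
    {
        struct rte_port_sched_writer_params p = {
            .sched = sched,
            .tx_burst_sz = 32, /* must be a power of two */
        };

        return rte_port_sched_writer_ops.f_create(&p, rte_socket_id());
    }
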
diff --git a/src/dpdk_lib18/librte_port/rte_port_source_sink.c b/src/dpdk_lib18/librte_port/rte_port_source_sink.c
deleted file mode 100755
index b9a25bb0..00000000
--- a/src/dpdk_lib18/librte_port/rte_port_source_sink.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <stdint.h>
-#include <string.h>
-
-#include <rte_mbuf.h>
-#include <rte_mempool.h>
-#include <rte_malloc.h>
-
-#include "rte_port_source_sink.h"
-
-/*
- * Port SOURCE
- */
-struct rte_port_source {
- struct rte_mempool *mempool;
-};
-
-static void *
-rte_port_source_create(void *params, int socket_id)
-{
- struct rte_port_source_params *p =
- (struct rte_port_source_params *) params;
- struct rte_port_source *port;
-
- /* Check input arguments*/
- if ((p == NULL) || (p->mempool == NULL)) {
- RTE_LOG(ERR, PORT, "%s: Invalid params\n", __func__);
- return NULL;
- }
-
- /* Memory allocation */
- port = rte_zmalloc_socket("PORT", sizeof(*port),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
- return NULL;
- }
-
- /* Initialization */
- port->mempool = (struct rte_mempool *) p->mempool;
-
- return port;
-}
-
-static int
-rte_port_source_free(void *port)
-{
- /* Check input parameters */
- if (port == NULL)
- return 0;
-
- rte_free(port);
-
- return 0;
-}
-
-static int
-rte_port_source_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
-{
- struct rte_port_source *p = (struct rte_port_source *) port;
-
- if (rte_mempool_get_bulk(p->mempool, (void **) pkts, n_pkts) != 0)
- return 0;
-
- return n_pkts;
-}
-
-/*
- * Port SINK
- */
-static void *
-rte_port_sink_create(__rte_unused void *params, __rte_unused int socket_id)
-{
- return (void *) 1;
-}
-
-static int
-rte_port_sink_tx(__rte_unused void *port, struct rte_mbuf *pkt)
-{
- rte_pktmbuf_free(pkt);
-
- return 0;
-}
-
-static int
-rte_port_sink_tx_bulk(__rte_unused void *port, struct rte_mbuf **pkts,
- uint64_t pkts_mask)
-{
- if ((pkts_mask & (pkts_mask + 1)) == 0) {
- uint64_t n_pkts = __builtin_popcountll(pkts_mask);
- uint32_t i;
-
- for (i = 0; i < n_pkts; i++) {
- struct rte_mbuf *pkt = pkts[i];
-
- rte_pktmbuf_free(pkt);
- }
- } else {
- for ( ; pkts_mask; ) {
- uint32_t pkt_index = __builtin_ctzll(pkts_mask);
- uint64_t pkt_mask = 1LLU << pkt_index;
- struct rte_mbuf *pkt = pkts[pkt_index];
-
- rte_pktmbuf_free(pkt);
- pkts_mask &= ~pkt_mask;
- }
- }
-
- return 0;
-}
-
-/*
- * Summary of port operations
- */
-struct rte_port_in_ops rte_port_source_ops = {
- .f_create = rte_port_source_create,
- .f_free = rte_port_source_free,
- .f_rx = rte_port_source_rx,
-};
-
-struct rte_port_out_ops rte_port_sink_ops = {
- .f_create = rte_port_sink_create,
- .f_free = NULL,
- .f_tx = rte_port_sink_tx,
- .f_tx_bulk = rte_port_sink_tx_bulk,
- .f_flush = NULL,
-};
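
Source and sink are the test endpoints of the framework: the source materializes raw mbufs straight from a mempool (all-or-nothing, via rte_mempool_get_bulk()), while the sink frees whatever reaches it and is stateless, which is why its create returns a dummy non-NULL handle and its f_free/f_flush are NULL. A hedged round-trip sketch, assuming the pool already exists:

    #include <rte_mbuf.h>
    #include <rte_mempool.h>
    #include <rte_lcore.h>
    #include "rte_port_source_sink.h"

    /* Pull one burst out of the source and drop it into the sink. */
    void drain_once(struct rte_mempool *pool)
    {
        struct rte_port_source_params sp = { .mempool = pool };
        void *src = rte_port_source_ops.f_create(&sp, rte_socket_id());
        void *snk = rte_port_sink_ops.f_create(NULL, rte_socket_id());
        struct rte_mbuf *burst[32];
        int i, n;

        n = rte_port_source_ops.f_rx(src, burst, 32);
        for (i = 0; i < n; i++)
            rte_port_sink_ops.f_tx(snk, burst[i]);

        rte_port_source_ops.f_free(src);
        /* No sink cleanup: f_free is NULL and the handle is a dummy. */
    }
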
diff --git a/src/dpdk_lib18/librte_power/Makefile b/src/dpdk_lib18/librte_power/Makefile
deleted file mode 100755
index d672a5a6..00000000
--- a/src/dpdk_lib18/librte_power/Makefile
+++ /dev/null
@@ -1,49 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_power.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 -fno-strict-aliasing
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_POWER) := rte_power.c rte_power_acpi_cpufreq.c
-SRCS-$(CONFIG_RTE_LIBRTE_POWER) += rte_power_kvm_vm.c guest_channel.c
-
-# install this header file
-SYMLINK-$(CONFIG_RTE_LIBRTE_POWER)-include := rte_power.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_POWER) += lib/librte_eal
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_power/guest_channel.c b/src/dpdk_lib18/librte_power/guest_channel.c
deleted file mode 100755
index 22956657..00000000
--- a/src/dpdk_lib18/librte_power/guest_channel.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <signal.h>
-#include <limits.h>
-#include <fcntl.h>
-#include <string.h>
-#include <errno.h>
-
-
-#include <rte_log.h>
-#include <rte_config.h>
-
-#include "guest_channel.h"
-#include "channel_commands.h"
-
-#define RTE_LOGTYPE_GUEST_CHANNEL RTE_LOGTYPE_USER1
-
-static int global_fds[RTE_MAX_LCORE];
-
-int
-guest_channel_host_connect(const char *path, unsigned lcore_id)
-{
- int flags, ret;
- struct channel_packet pkt;
- char fd_path[PATH_MAX];
- int fd = -1;
-
- if (lcore_id >= RTE_MAX_LCORE) {
- RTE_LOG(ERR, GUEST_CHANNEL, "Channel(%u) is out of range 0...%d\n",
- lcore_id, RTE_MAX_LCORE-1);
- return -1;
- }
- /* check if path is already open */
- if (global_fds[lcore_id] != 0) {
- RTE_LOG(ERR, GUEST_CHANNEL, "Channel(%u) is already open with fd %d\n",
- lcore_id, global_fds[lcore_id]);
- return -1;
- }
-
- snprintf(fd_path, PATH_MAX, "%s.%u", path, lcore_id);
- RTE_LOG(INFO, GUEST_CHANNEL, "Opening channel '%s' for lcore %u\n",
- fd_path, lcore_id);
- fd = open(fd_path, O_RDWR);
- if (fd < 0) {
- RTE_LOG(ERR, GUEST_CHANNEL, "Unable to to connect to '%s' with error "
- "%s\n", fd_path, strerror(errno));
- return -1;
- }
-
- flags = fcntl(fd, F_GETFL, 0);
- if (flags < 0) {
- RTE_LOG(ERR, GUEST_CHANNEL, "Failed on fcntl get flags for file %s\n",
- fd_path);
- goto error;
- }
-
- flags |= O_NONBLOCK;
- if (fcntl(fd, F_SETFL, flags) < 0) {
- RTE_LOG(ERR, GUEST_CHANNEL, "Failed on setting non-blocking mode for "
- "file %s", fd_path);
- goto error;
- }
- /* QEMU needs a delay after connection */
- sleep(1);
-
- /* Send a test packet, this command is ignored by the host, but a successful
- * send indicates that the host endpoint is monitoring.
- */
- pkt.command = CPU_POWER_CONNECT;
- global_fds[lcore_id] = fd;
- ret = guest_channel_send_msg(&pkt, lcore_id);
- if (ret != 0) {
- RTE_LOG(ERR, GUEST_CHANNEL, "Error on channel '%s' communications "
- "test: %s\n", fd_path, strerror(ret));
- goto error;
- }
- RTE_LOG(INFO, GUEST_CHANNEL, "Channel '%s' is now connected\n", fd_path);
- return 0;
-error:
- close(fd);
- global_fds[lcore_id] = 0;
- return -1;
-}
-
-int
-guest_channel_send_msg(struct channel_packet *pkt, unsigned lcore_id)
-{
- int ret, buffer_len = sizeof(*pkt);
- void *buffer = pkt;
-
- if (lcore_id >= RTE_MAX_LCORE) {
- RTE_LOG(ERR, GUEST_CHANNEL, "Channel(%u) is out of range 0...%d\n",
- lcore_id, RTE_MAX_LCORE-1);
- return -1;
- }
-
- if (global_fds[lcore_id] == 0) {
- RTE_LOG(ERR, GUEST_CHANNEL, "Channel is not connected\n");
- return -1;
- }
- while (buffer_len > 0) {
- ret = write(global_fds[lcore_id], buffer, buffer_len);
- if (ret == buffer_len)
- return 0;
- if (ret == -1) {
- if (errno == EINTR)
- continue;
- return errno;
- }
- buffer = (char *)buffer + ret;
- buffer_len -= ret;
- }
- return 0;
-}
-
-void
-guest_channel_host_disconnect(unsigned lcore_id)
-{
- if (lcore_id >= RTE_MAX_LCORE) {
- RTE_LOG(ERR, GUEST_CHANNEL, "Channel(%u) is out of range 0...%d\n",
- lcore_id, RTE_MAX_LCORE-1);
- return;
- }
- if (global_fds[lcore_id] == 0)
- return;
- close(global_fds[lcore_id]);
- global_fds[lcore_id] = 0;
-}
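
A guest drives the channel as connect, send, disconnect; the connect path itself sends CPU_POWER_CONNECT as a liveness probe before reporting success. A hedged sketch of a scale-up request: the virtio-serial device path is an assumption taken from the usual QEMU chardev setup (guest_channel_host_connect() appends ".<lcore_id>" itself), and the packet fields come from channel_commands.h.

    #include "guest_channel.h"
    #include "channel_commands.h"

    int request_scale_up(unsigned lcore_id)
    {
        struct channel_packet pkt = {
            .resource_id = lcore_id,
            .unit = CPU_POWER_SCALE_UP,
            .command = CPU_POWER,
        };
        int ret;

        if (guest_channel_host_connect("/dev/virtio-ports/virtio.serial.port.poweragent",
                                       lcore_id) < 0)
            return -1;

        ret = guest_channel_send_msg(&pkt, lcore_id);
        guest_channel_host_disconnect(lcore_id);
        return ret;
    }
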
diff --git a/src/dpdk_lib18/librte_power/rte_power.c b/src/dpdk_lib18/librte_power/rte_power.c
deleted file mode 100755
index 998ed1c9..00000000
--- a/src/dpdk_lib18/librte_power/rte_power.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <rte_atomic.h>
-
-#include "rte_power.h"
-#include "rte_power_acpi_cpufreq.h"
-#include "rte_power_kvm_vm.h"
-#include "rte_power_common.h"
-
-enum power_management_env global_default_env = PM_ENV_NOT_SET;
-
-volatile uint32_t global_env_cfg_status = 0;
-
-/* function pointers */
-rte_power_freqs_t rte_power_freqs = NULL;
-rte_power_get_freq_t rte_power_get_freq = NULL;
-rte_power_set_freq_t rte_power_set_freq = NULL;
-rte_power_freq_change_t rte_power_freq_up = NULL;
-rte_power_freq_change_t rte_power_freq_down = NULL;
-rte_power_freq_change_t rte_power_freq_max = NULL;
-rte_power_freq_change_t rte_power_freq_min = NULL;
-
-int
-rte_power_set_env(enum power_management_env env)
-{
- if (rte_atomic32_cmpset(&global_env_cfg_status, 0, 1) == 0) {
- return 0;
- }
- if (env == PM_ENV_ACPI_CPUFREQ) {
- rte_power_freqs = rte_power_acpi_cpufreq_freqs;
- rte_power_get_freq = rte_power_acpi_cpufreq_get_freq;
- rte_power_set_freq = rte_power_acpi_cpufreq_set_freq;
- rte_power_freq_up = rte_power_acpi_cpufreq_freq_up;
- rte_power_freq_down = rte_power_acpi_cpufreq_freq_down;
- rte_power_freq_min = rte_power_acpi_cpufreq_freq_min;
- rte_power_freq_max = rte_power_acpi_cpufreq_freq_max;
- } else if (env == PM_ENV_KVM_VM) {
- rte_power_freqs = rte_power_kvm_vm_freqs;
- rte_power_get_freq = rte_power_kvm_vm_get_freq;
- rte_power_set_freq = rte_power_kvm_vm_set_freq;
- rte_power_freq_up = rte_power_kvm_vm_freq_up;
- rte_power_freq_down = rte_power_kvm_vm_freq_down;
- rte_power_freq_min = rte_power_kvm_vm_freq_min;
- rte_power_freq_max = rte_power_kvm_vm_freq_max;
- } else {
- RTE_LOG(ERR, POWER, "Invalid Power Management Environment(%d) set\n",
- env);
- rte_power_unset_env();
- return -1;
- }
- global_default_env = env;
- return 0;
-
-}
-void
-rte_power_unset_env(void)
-{
- if (rte_atomic32_cmpset(&global_env_cfg_status, 1, 0) != 0)
- global_default_env = PM_ENV_NOT_SET;
-}
-
-enum power_management_env
-rte_power_get_env(void)
-{
- return global_default_env;
-}
-
-int
-rte_power_init(unsigned lcore_id)
-{
- int ret = -1;
-
- if (global_default_env == PM_ENV_ACPI_CPUFREQ) {
- return rte_power_acpi_cpufreq_init(lcore_id);
- }
- if (global_default_env == PM_ENV_KVM_VM) {
- return rte_power_kvm_vm_init(lcore_id);
- }
- /* Auto detect Environment */
- RTE_LOG(INFO, POWER, "Attempting to initialise ACPI cpufreq power "
- "management...\n");
- ret = rte_power_acpi_cpufreq_init(lcore_id);
- if (ret == 0) {
- rte_power_set_env(PM_ENV_ACPI_CPUFREQ);
- goto out;
- }
-
- RTE_LOG(INFO, POWER, "Attempting to initialise VM power management...\n");
- ret = rte_power_kvm_vm_init(lcore_id);
- if (ret == 0) {
- rte_power_set_env(PM_ENV_KVM_VM);
- goto out;
- }
- RTE_LOG(ERR, POWER, "Unable to set Power Management Environment for lcore "
- "%u\n", lcore_id);
-out:
- return ret;
-}
-
-int
-rte_power_exit(unsigned lcore_id)
-{
- if (global_default_env == PM_ENV_ACPI_CPUFREQ)
- return rte_power_acpi_cpufreq_exit(lcore_id);
- if (global_default_env == PM_ENV_KVM_VM)
- return rte_power_kvm_vm_exit(lcore_id);
-
- RTE_LOG(ERR, POWER, "Environment has not been set, unable to exit "
- "gracefully\n");
- return -1;
-
-}
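
For reference, a minimal usage sketch of the generic API removed above. This is a hypothetical standalone program, not part of the patch; it assumes a DPDK 1.8 build with librte_power on the include and link paths, and the lcore id is an arbitrary example value.

#include <stdio.h>
#include "rte_power.h"

int main(void)
{
	unsigned lcore_id = 1;	/* example value; pick a core from your core mask */

	/* Pin the ACPI cpufreq backend instead of relying on auto-detection. */
	if (rte_power_set_env(PM_ENV_ACPI_CPUFREQ) < 0)
		return 1;
	if (rte_power_init(lcore_id) < 0)
		return 1;

	/* rte_power_freq_max is one of the function pointers bound by
	 * rte_power_set_env(); the call dispatches to the chosen backend. */
	if (rte_power_freq_max(lcore_id) < 0)
		printf("frequency change failed\n");

	return rte_power_exit(lcore_id);
}
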
diff --git a/src/dpdk_lib18/librte_power/rte_power.h b/src/dpdk_lib18/librte_power/rte_power.h
deleted file mode 100755
index 93380693..00000000
--- a/src/dpdk_lib18/librte_power/rte_power.h
+++ /dev/null
@@ -1,251 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_POWER_H
-#define _RTE_POWER_H
-
-/**
- * @file
- * RTE Power Management
- */
-
-#include <rte_common.h>
-#include <rte_byteorder.h>
-#include <rte_log.h>
-#include <rte_string_fns.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Power Management Environment State */
-enum power_management_env {PM_ENV_NOT_SET, PM_ENV_ACPI_CPUFREQ, PM_ENV_KVM_VM};
-
-/**
- * Set the default power management implementation. If this is not called prior
- * to rte_power_init(), auto-detection of the environment will take place.
- * It is not thread safe.
- *
- * @param env
- *  The environment in which to initialise Power Management.
- *
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int rte_power_set_env(enum power_management_env env);
-
-/**
- * Unset the global environment configuration.
- * This can only be called after all threads have completed.
- *
- * @param None.
- *
- * @return
- * None.
- */
-void rte_power_unset_env(void);
-
-/**
- * Get the default power management implementation.
- *
- * @param None.
- *
- * @return
- * power_management_env The configured environment.
- */
-enum power_management_env rte_power_get_env(void);
-
-/**
- * Initialize power management for a specific lcore. If rte_power_set_env() has
- * not been called then auto-detection of the environment will start and
- * initialise the corresponding resources.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int rte_power_init(unsigned lcore_id);
-
-/**
- * Exit power management on a specific lcore. This will call the environment
- * dependent exit function.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int rte_power_exit(unsigned lcore_id);
-
-/**
- * Get the available frequencies of a specific lcore.
- * Function pointer definition. Review each environment's
- * specific documentation for usage.
- *
- * @param lcore_id
- * lcore id.
- * @param freqs
- * The buffer array to save the frequencies.
- * @param num
- * The number of frequencies to get.
- *
- * @return
- * The number of available frequencies.
- */
-typedef uint32_t (*rte_power_freqs_t)(unsigned lcore_id, uint32_t *freqs,
- uint32_t num);
-
-extern rte_power_freqs_t rte_power_freqs;
-
-/**
- * Return the current index of available frequencies of a specific lcore.
- * Function pointer definition. Review each environment's
- * specific documentation for usage.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * The current index of available frequencies.
- */
-typedef uint32_t (*rte_power_get_freq_t)(unsigned lcore_id);
-
-extern rte_power_get_freq_t rte_power_get_freq;
-
-/**
- * Set the new frequency for a specific lcore by indicating the index of
- * available frequencies.
- * Function pointer definition. Review each environment's
- * specific documentation for usage.
- *
- * @param lcore_id
- * lcore id.
- * @param index
- * The index of available frequencies.
- *
- * @return
- * - 1 on success with frequency changed.
- * - 0 on success without frequency changed.
- * - Negative on error.
- */
-typedef int (*rte_power_set_freq_t)(unsigned lcore_id, uint32_t index);
-
-extern rte_power_set_freq_t rte_power_set_freq;
-
-/**
- * Function pointer definition for generic frequency change functions. Review
- * each environment's specific documentation for usage.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 1 on success with frequency changed.
- * - 0 on success without frequency changed.
- * - Negative on error.
- */
-typedef int (*rte_power_freq_change_t)(unsigned lcore_id);
-
-/**
- * Scale up the frequency of a specific lcore according to the available
- * frequencies.
- * Review each environment's specific documentation for usage.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 1 on success with frequency changed.
- * - 0 on success without frequency changed.
- * - Negative on error.
- */
-extern rte_power_freq_change_t rte_power_freq_up;
-
-/**
- * Scale down the frequency of a specific lcore according to the available
- * frequencies.
- * Review each environment's specific documentation for usage.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 1 on success with frequency changed.
- * - 0 on success without frequency changed.
- * - Negative on error.
- */
-extern rte_power_freq_change_t rte_power_freq_down;
-
-/**
- * Scale up the frequency of a specific lcore to the highest according to the
- * available frequencies.
- * Review each environment's specific documentation for usage.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 1 on success with frequency changed.
- * - 0 on success without frequency changed.
- * - Negative on error.
- */
-extern rte_power_freq_change_t rte_power_freq_max;
-
-/**
- * Scale down the frequency of a specific lcore to the lowest according to the
- * available frequencies.
- * Review each environment's specific documentation for usage.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 1 on success with frequency changed.
- * - 0 on success without frequency changed.
- * - Negative on error.
- */
-extern rte_power_freq_change_t rte_power_freq_min;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
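
Because both backends implement these same signatures, callers can stay backend-agnostic. A hedged sketch of reading the frequency table through the generic pointer; dump_freqs is a hypothetical helper, and RTE_MAX_LCORE_FREQS comes from the DPDK build configuration.

#include <stdio.h>
#include <rte_config.h>
#include "rte_power.h"

static void dump_freqs(unsigned lcore_id)
{
	uint32_t freqs[RTE_MAX_LCORE_FREQS];
	uint32_t i, n;

	/* rte_power_freqs is NULL until rte_power_set_env() or
	 * rte_power_init() binds it to a backend, so call those first. */
	n = rte_power_freqs(lcore_id, freqs, RTE_MAX_LCORE_FREQS);
	for (i = 0; i < n; i++)
		printf("index %u -> %u kHz\n", i, freqs[i]);
}
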
diff --git a/src/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.c b/src/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.c
deleted file mode 100755
index a56c9b59..00000000
--- a/src/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.c
+++ /dev/null
@@ -1,545 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <signal.h>
-#include <limits.h>
-
-#include <rte_memcpy.h>
-#include <rte_atomic.h>
-
-#include "rte_power_acpi_cpufreq.h"
-#include "rte_power_common.h"
-
-#ifdef RTE_LIBRTE_POWER_DEBUG
-#define POWER_DEBUG_TRACE(fmt, args...) do { \
- RTE_LOG(ERR, POWER, "%s: " fmt, __func__, ## args); \
-} while (0)
-#else
-#define POWER_DEBUG_TRACE(fmt, args...)
-#endif
-
-#define FOPEN_OR_ERR_RET(f, retval) do { \
- if ((f) == NULL) { \
-			RTE_LOG(ERR, POWER, "File not opened\n"); \
- return retval; \
- } \
-} while (0)
-
-#define FOPS_OR_NULL_GOTO(ret, label) do { \
- if ((ret) == NULL) { \
- RTE_LOG(ERR, POWER, "fgets returns nothing\n"); \
- goto label; \
- } \
-} while (0)
-
-#define FOPS_OR_ERR_GOTO(ret, label) do { \
- if ((ret) < 0) { \
- RTE_LOG(ERR, POWER, "File operations failed\n"); \
- goto label; \
- } \
-} while (0)
-
-#define STR_SIZE 1024
-#define POWER_CONVERT_TO_DECIMAL 10
-
-#define POWER_GOVERNOR_USERSPACE "userspace"
-#define POWER_SYSFILE_GOVERNOR \
- "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_governor"
-#define POWER_SYSFILE_AVAIL_FREQ \
- "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_available_frequencies"
-#define POWER_SYSFILE_SETSPEED \
- "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_setspeed"
-
-enum power_state {
- POWER_IDLE = 0,
- POWER_ONGOING,
- POWER_USED,
- POWER_UNKNOWN
-};
-
-/**
- * Power info per lcore.
- */
-struct rte_power_info {
- unsigned lcore_id; /**< Logical core id */
- uint32_t freqs[RTE_MAX_LCORE_FREQS]; /**< Frequency array */
- uint32_t nb_freqs; /**< number of available freqs */
- FILE *f; /**< FD of scaling_setspeed */
- char governor_ori[32]; /**< Original governor name */
- uint32_t curr_idx; /**< Freq index in freqs array */
- volatile uint32_t state; /**< Power in use state */
-} __rte_cache_aligned;
-
-static struct rte_power_info lcore_power_info[RTE_MAX_LCORE];
-
-/**
- * Set a specific frequency for a specific logical core, according to the
- * index into its table of supported frequencies.
- */
-static int
-set_freq_internal(struct rte_power_info *pi, uint32_t idx)
-{
- if (idx >= RTE_MAX_LCORE_FREQS || idx >= pi->nb_freqs) {
- RTE_LOG(ERR, POWER, "Invalid frequency index %u, which "
- "should be less than %u\n", idx, pi->nb_freqs);
- return -1;
- }
-
- /* Check if it is the same as current */
- if (idx == pi->curr_idx)
- return 0;
-
-	POWER_DEBUG_TRACE("Frequency[%u] %u to be set for lcore %u\n",
- idx, pi->freqs[idx], pi->lcore_id);
- if (fseek(pi->f, 0, SEEK_SET) < 0) {
- RTE_LOG(ERR, POWER, "Fail to set file position indicator to 0 "
- "for setting frequency for lcore %u\n", pi->lcore_id);
- return -1;
- }
- if (fprintf(pi->f, "%u", pi->freqs[idx]) < 0) {
- RTE_LOG(ERR, POWER, "Fail to write new frequency for "
- "lcore %u\n", pi->lcore_id);
- return -1;
- }
- fflush(pi->f);
- pi->curr_idx = idx;
-
- return 1;
-}
-
-/**
- * Check the current scaling governor by reading its sysfs file and, if it is
- * not already 'userspace', switch it by writing the file. The original
- * governor is saved so it can be rolled back later.
- */
-static int
-power_set_governor_userspace(struct rte_power_info *pi)
-{
- FILE *f;
- int ret = -1;
- char buf[BUFSIZ];
- char fullpath[PATH_MAX];
- char *s;
- int val;
-
- snprintf(fullpath, sizeof(fullpath), POWER_SYSFILE_GOVERNOR,
- pi->lcore_id);
- f = fopen(fullpath, "rw+");
- FOPEN_OR_ERR_RET(f, ret);
-
- s = fgets(buf, sizeof(buf), f);
- FOPS_OR_NULL_GOTO(s, out);
-
- /* Check if current governor is userspace */
- if (strncmp(buf, POWER_GOVERNOR_USERSPACE,
- sizeof(POWER_GOVERNOR_USERSPACE)) == 0) {
- ret = 0;
- POWER_DEBUG_TRACE("Power management governor of lcore %u is "
- "already userspace\n", pi->lcore_id);
- goto out;
- }
- /* Save the original governor */
- snprintf(pi->governor_ori, sizeof(pi->governor_ori), "%s", buf);
-
- /* Write 'userspace' to the governor */
- val = fseek(f, 0, SEEK_SET);
- FOPS_OR_ERR_GOTO(val, out);
-
- val = fputs(POWER_GOVERNOR_USERSPACE, f);
- FOPS_OR_ERR_GOTO(val, out);
-
- ret = 0;
- RTE_LOG(INFO, POWER, "Power management governor of lcore %u has been "
- "set to user space successfully\n", pi->lcore_id);
-out:
- fclose(f);
-
- return ret;
-}
-
-/**
- * Get the available frequencies of the specified lcore by reading its sysfs
- * file.
- */
-static int
-power_get_available_freqs(struct rte_power_info *pi)
-{
- FILE *f;
- int ret = -1, i, count;
- char *p;
- char buf[BUFSIZ];
- char fullpath[PATH_MAX];
- char *freqs[RTE_MAX_LCORE_FREQS];
- char *s;
-
- snprintf(fullpath, sizeof(fullpath), POWER_SYSFILE_AVAIL_FREQ,
- pi->lcore_id);
- f = fopen(fullpath, "r");
- FOPEN_OR_ERR_RET(f, ret);
-
- s = fgets(buf, sizeof(buf), f);
- FOPS_OR_NULL_GOTO(s, out);
-
- /* Strip the line break if there is */
- p = strchr(buf, '\n');
- if (p != NULL)
- *p = 0;
-
- /* Split string into at most RTE_MAX_LCORE_FREQS frequencies */
- count = rte_strsplit(buf, sizeof(buf), freqs,
- RTE_MAX_LCORE_FREQS, ' ');
- if (count <= 0) {
- RTE_LOG(ERR, POWER, "No available frequency in "
- ""POWER_SYSFILE_AVAIL_FREQ"\n", pi->lcore_id);
- goto out;
- }
- if (count >= RTE_MAX_LCORE_FREQS) {
- RTE_LOG(ERR, POWER, "Too many available frequencies : %d\n",
- count);
- goto out;
- }
-
-	/* Store the available frequencies into power context */
- for (i = 0, pi->nb_freqs = 0; i < count; i++) {
- POWER_DEBUG_TRACE("Lcore %u frequency[%d]: %s\n", pi->lcore_id,
- i, freqs[i]);
- pi->freqs[pi->nb_freqs++] = strtoul(freqs[i], &p,
- POWER_CONVERT_TO_DECIMAL);
- }
-
- ret = 0;
-	POWER_DEBUG_TRACE("%d frequency(ies) of lcore %u are available\n",
- count, pi->lcore_id);
-out:
- fclose(f);
-
- return ret;
-}
-
-/**
- * Open the sysfs file that will later be used for setting the lcore frequency.
- */
-static int
-power_init_for_setting_freq(struct rte_power_info *pi)
-{
- FILE *f;
- char fullpath[PATH_MAX];
- char buf[BUFSIZ];
- uint32_t i, freq;
- char *s;
-
- snprintf(fullpath, sizeof(fullpath), POWER_SYSFILE_SETSPEED,
- pi->lcore_id);
- f = fopen(fullpath, "rw+");
- FOPEN_OR_ERR_RET(f, -1);
-
- s = fgets(buf, sizeof(buf), f);
- FOPS_OR_NULL_GOTO(s, out);
-
- freq = strtoul(buf, NULL, POWER_CONVERT_TO_DECIMAL);
- for (i = 0; i < pi->nb_freqs; i++) {
- if (freq == pi->freqs[i]) {
- pi->curr_idx = i;
- pi->f = f;
- return 0;
- }
- }
-
-out:
- fclose(f);
-
- return -1;
-}
-
-int
-rte_power_acpi_cpufreq_init(unsigned lcore_id)
-{
- struct rte_power_info *pi;
-
- if (lcore_id >= RTE_MAX_LCORE) {
-		RTE_LOG(ERR, POWER, "Lcore id %u can not exceed %u\n",
- lcore_id, RTE_MAX_LCORE - 1U);
- return -1;
- }
-
- pi = &lcore_power_info[lcore_id];
- if (rte_atomic32_cmpset(&(pi->state), POWER_IDLE, POWER_ONGOING)
- == 0) {
- RTE_LOG(INFO, POWER, "Power management of lcore %u is "
- "in use\n", lcore_id);
- return -1;
- }
-
- pi->lcore_id = lcore_id;
- /* Check and set the governor */
- if (power_set_governor_userspace(pi) < 0) {
- RTE_LOG(ERR, POWER, "Cannot set governor of lcore %u to "
- "userspace\n", lcore_id);
- goto fail;
- }
-
- /* Get the available frequencies */
- if (power_get_available_freqs(pi) < 0) {
- RTE_LOG(ERR, POWER, "Cannot get available frequencies of "
- "lcore %u\n", lcore_id);
- goto fail;
- }
-
- /* Init for setting lcore frequency */
- if (power_init_for_setting_freq(pi) < 0) {
- RTE_LOG(ERR, POWER, "Cannot init for setting frequency for "
- "lcore %u\n", lcore_id);
- goto fail;
- }
-
- /* Set freq to max by default */
- if (rte_power_acpi_cpufreq_freq_max(lcore_id) < 0) {
- RTE_LOG(ERR, POWER, "Cannot set frequency of lcore %u "
- "to max\n", lcore_id);
- goto fail;
- }
-
- RTE_LOG(INFO, POWER, "Initialized successfully for lcore %u "
-			"power management\n", lcore_id);
- rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_USED);
-
- return 0;
-
-fail:
- rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_UNKNOWN);
-
- return -1;
-}
-
-/**
- * Check the governor and then set the original governor back if needed by
- * writing the sysfs file.
- */
-static int
-power_set_governor_original(struct rte_power_info *pi)
-{
- FILE *f;
- int ret = -1;
- char buf[BUFSIZ];
- char fullpath[PATH_MAX];
- char *s;
- int val;
-
- snprintf(fullpath, sizeof(fullpath), POWER_SYSFILE_GOVERNOR,
- pi->lcore_id);
- f = fopen(fullpath, "rw+");
- FOPEN_OR_ERR_RET(f, ret);
-
- s = fgets(buf, sizeof(buf), f);
- FOPS_OR_NULL_GOTO(s, out);
-
- /* Check if the governor to be set is the same as current */
- if (strncmp(buf, pi->governor_ori, sizeof(pi->governor_ori)) == 0) {
- ret = 0;
- POWER_DEBUG_TRACE("Power management governor of lcore %u "
- "has already been set to %s\n",
- pi->lcore_id, pi->governor_ori);
- goto out;
- }
-
- /* Write back the original governor */
- val = fseek(f, 0, SEEK_SET);
- FOPS_OR_ERR_GOTO(val, out);
-
- val = fputs(pi->governor_ori, f);
- FOPS_OR_ERR_GOTO(val, out);
-
- ret = 0;
- RTE_LOG(INFO, POWER, "Power management governor of lcore %u "
- "has been set back to %s successfully\n",
- pi->lcore_id, pi->governor_ori);
-out:
- fclose(f);
-
- return ret;
-}
-
-int
-rte_power_acpi_cpufreq_exit(unsigned lcore_id)
-{
- struct rte_power_info *pi;
-
- if (lcore_id >= RTE_MAX_LCORE) {
-		RTE_LOG(ERR, POWER, "Lcore id %u can not exceed %u\n",
- lcore_id, RTE_MAX_LCORE - 1U);
- return -1;
- }
- pi = &lcore_power_info[lcore_id];
- if (rte_atomic32_cmpset(&(pi->state), POWER_USED, POWER_ONGOING)
- == 0) {
- RTE_LOG(INFO, POWER, "Power management of lcore %u is "
- "not used\n", lcore_id);
- return -1;
- }
-
- /* Close FD of setting freq */
- fclose(pi->f);
- pi->f = NULL;
-
- /* Set the governor back to the original */
- if (power_set_governor_original(pi) < 0) {
- RTE_LOG(ERR, POWER, "Cannot set the governor of %u back "
- "to the original\n", lcore_id);
- goto fail;
- }
-
- RTE_LOG(INFO, POWER, "Power management of lcore %u has exited from "
- "'userspace' mode and been set back to the "
- "original\n", lcore_id);
- rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_IDLE);
-
- return 0;
-
-fail:
- rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_UNKNOWN);
-
- return -1;
-}
-
-uint32_t
-rte_power_acpi_cpufreq_freqs(unsigned lcore_id, uint32_t *freqs, uint32_t num)
-{
- struct rte_power_info *pi;
-
- if (lcore_id >= RTE_MAX_LCORE || !freqs) {
- RTE_LOG(ERR, POWER, "Invalid input parameter\n");
- return 0;
- }
-
- pi = &lcore_power_info[lcore_id];
- if (num < pi->nb_freqs) {
- RTE_LOG(ERR, POWER, "Buffer size is not enough\n");
- return 0;
- }
- rte_memcpy(freqs, pi->freqs, pi->nb_freqs * sizeof(uint32_t));
-
- return pi->nb_freqs;
-}
-
-uint32_t
-rte_power_acpi_cpufreq_get_freq(unsigned lcore_id)
-{
- if (lcore_id >= RTE_MAX_LCORE) {
- RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
- return RTE_POWER_INVALID_FREQ_INDEX;
- }
-
- return lcore_power_info[lcore_id].curr_idx;
-}
-
-int
-rte_power_acpi_cpufreq_set_freq(unsigned lcore_id, uint32_t index)
-{
- if (lcore_id >= RTE_MAX_LCORE) {
- RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
- return -1;
- }
-
- return set_freq_internal(&(lcore_power_info[lcore_id]), index);
-}
-
-int
-rte_power_acpi_cpufreq_freq_down(unsigned lcore_id)
-{
- struct rte_power_info *pi;
-
- if (lcore_id >= RTE_MAX_LCORE) {
- RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
- return -1;
- }
-
- pi = &lcore_power_info[lcore_id];
- if (pi->curr_idx + 1 == pi->nb_freqs)
- return 0;
-
- /* Frequencies in the array are from high to low. */
- return set_freq_internal(pi, pi->curr_idx + 1);
-}
-
-int
-rte_power_acpi_cpufreq_freq_up(unsigned lcore_id)
-{
- struct rte_power_info *pi;
-
- if (lcore_id >= RTE_MAX_LCORE) {
- RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
- return -1;
- }
-
- pi = &lcore_power_info[lcore_id];
- if (pi->curr_idx == 0)
- return 0;
-
- /* Frequencies in the array are from high to low. */
- return set_freq_internal(pi, pi->curr_idx - 1);
-}
-
-int
-rte_power_acpi_cpufreq_freq_max(unsigned lcore_id)
-{
- if (lcore_id >= RTE_MAX_LCORE) {
- RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
- return -1;
- }
-
- /* Frequencies in the array are from high to low. */
- return set_freq_internal(&lcore_power_info[lcore_id], 0);
-}
-
-int
-rte_power_acpi_cpufreq_freq_min(unsigned lcore_id)
-{
- struct rte_power_info *pi;
-
- if (lcore_id >= RTE_MAX_LCORE) {
- RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
- return -1;
- }
-
- pi = &lcore_power_info[lcore_id];
-
- /* Frequencies in the array are from high to low. */
- return set_freq_internal(pi, pi->nb_freqs - 1);
-}
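
The backend above is essentially a wrapper around the Linux cpufreq sysfs interface. As a standalone illustration of the parsing done by power_get_available_freqs(), the following sketch reads the same file the POWER_SYSFILE_AVAIL_FREQ macro expands to, with no DPDK dependency; the core id is an arbitrary example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	unsigned core = 0;	/* example core id */
	char path[256], buf[1024];
	char *tok;
	FILE *f;

	snprintf(path, sizeof(path),
		"/sys/devices/system/cpu/cpu%u/cpufreq/scaling_available_frequencies",
		core);
	f = fopen(path, "r");
	if (f == NULL || fgets(buf, sizeof(buf), f) == NULL) {
		perror(path);
		return 1;
	}
	fclose(f);

	/* The kernel lists frequencies from highest to lowest; the
	 * freq_up/freq_down index arithmetic above relies on that order. */
	for (tok = strtok(buf, " \n"); tok != NULL; tok = strtok(NULL, " \n"))
		printf("%lu kHz\n", strtoul(tok, NULL, 10));
	return 0;
}
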
diff --git a/src/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.h b/src/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.h
deleted file mode 100755
index 68578e9b..00000000
--- a/src/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_POWER_ACPI_CPUFREQ_H
-#define _RTE_POWER_ACPI_CPUFREQ_H
-
-/**
- * @file
- * RTE Power Management via userspace ACPI cpufreq
- */
-
-#include <rte_common.h>
-#include <rte_byteorder.h>
-#include <rte_log.h>
-#include <rte_string_fns.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * Initialize power management for a specific lcore. It will check and set the
- * governor to userspace for the lcore, get the available frequencies, and
- * prepare to set new lcore frequency.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int rte_power_acpi_cpufreq_init(unsigned lcore_id);
-
-/**
- * Exit power management on a specific lcore. It will set the governor back to
- * the one in use before initialization.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int rte_power_acpi_cpufreq_exit(unsigned lcore_id);
-
-/**
- * Get the available frequencies of a specific lcore. The return value is the
- * lesser of the total number of available frequencies and the size of the
- * supplied buffer. The frequency index used in other interfaces should be in
- * the range of 0 to this return value.
- * It should be protected outside of this function for thread safety.
- *
- * @param lcore_id
- * lcore id.
- * @param freqs
- * The buffer array to save the frequencies.
- * @param num
- * The number of frequencies to get.
- *
- * @return
- * The number of available frequencies.
- */
-uint32_t rte_power_acpi_cpufreq_freqs(unsigned lcore_id, uint32_t *freqs,
- uint32_t num);
-
-/**
- * Return the current index of available frequencies of a specific lcore. It
- * will return 'RTE_POWER_INVALID_FREQ_INDEX = (~0)' on error.
- * It should be protected outside of this function for thread safety.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * The current index of available frequencies.
- */
-uint32_t rte_power_acpi_cpufreq_get_freq(unsigned lcore_id);
-
-/**
- * Set the new frequency for a specific lcore by indicating the index of
- * available frequencies.
- * It should be protected outside of this function for thread safety.
- *
- * @param lcore_id
- * lcore id.
- * @param index
- * The index of available frequencies.
- *
- * @return
- * - 1 on success with frequency changed.
- * - 0 on success without frequency changed.
- * - Negative on error.
- */
-int rte_power_acpi_cpufreq_set_freq(unsigned lcore_id, uint32_t index);
-
-/**
- * Scale up the frequency of a specific lcore according to the available
- * frequencies.
- * It should be protected outside of this function for thread safety.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 1 on success with frequency changed.
- * - 0 on success without frequency changed.
- * - Negative on error.
- */
-int rte_power_acpi_cpufreq_freq_up(unsigned lcore_id);
-
-/**
- * Scale down the frequency of a specific lcore according to the available
- * frequencies.
- * It should be protected outside of this function for thread safety.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 1 on success with frequency changed.
- * - 0 on success without frequency changed.
- * - Negative on error.
- */
-int rte_power_acpi_cpufreq_freq_down(unsigned lcore_id);
-
-/**
- * Scale up the frequency of a specific lcore to the highest according to the
- * available frequencies.
- * It should be protected outside of this function for thread safety.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 1 on success with frequency changed.
- * - 0 on success without frequency changed.
- * - Negative on error.
- */
-int rte_power_acpi_cpufreq_freq_max(unsigned lcore_id);
-
-/**
- * Scale down the frequency of a specific lcore to the lowest according to the
- * available frequencies.
- * It should be protected outside of this function for thread safety.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 1 on success with frequency changed.
- *  - 0 on success without frequency changed.
- * - Negative on error.
- */
-int rte_power_acpi_cpufreq_freq_min(unsigned lcore_id);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
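
One convention this header leaves implicit: index 0 is the highest available frequency and nb_freqs - 1 the lowest, which is why freq_up decrements the index and freq_down increments it. A small illustrative sketch; sweep_down is a hypothetical helper and the buffer size of 64 is an assumption, not an API constant.

#include <stdint.h>
#include "rte_power_acpi_cpufreq.h"

/* Step one core from its fastest to its slowest available frequency. */
static void sweep_down(unsigned lcore_id)
{
	uint32_t freqs[64];	/* assumed large enough; the call returns 0 if not */
	uint32_t i, n;

	n = rte_power_acpi_cpufreq_freqs(lcore_id, freqs, 64);

	/* Index 0 is the highest frequency, n - 1 the lowest. */
	for (i = 0; i < n; i++)
		rte_power_acpi_cpufreq_set_freq(lcore_id, i);
}
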
diff --git a/src/dpdk_lib18/librte_power/rte_power_kvm_vm.c b/src/dpdk_lib18/librte_power/rte_power_kvm_vm.c
deleted file mode 100755
index 11596c39..00000000
--- a/src/dpdk_lib18/librte_power/rte_power_kvm_vm.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <errno.h>
-#include <string.h>
-
-#include <rte_log.h>
-#include <rte_config.h>
-
-#include "guest_channel.h"
-#include "channel_commands.h"
-#include "rte_power_kvm_vm.h"
-#include "rte_power_common.h"
-
-#define FD_PATH "/dev/virtio-ports/virtio.serial.port.poweragent"
-
-static struct channel_packet pkt[CHANNEL_CMDS_MAX_VM_CHANNELS];
-
-
-int
-rte_power_kvm_vm_init(unsigned lcore_id)
-{
- if (lcore_id >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
- RTE_LOG(ERR, POWER, "Core(%u) is out of range 0...%d\n",
- lcore_id, CHANNEL_CMDS_MAX_VM_CHANNELS-1);
- return -1;
- }
- pkt[lcore_id].command = CPU_POWER;
- pkt[lcore_id].resource_id = lcore_id;
- return guest_channel_host_connect(FD_PATH, lcore_id);
-}
-
-int
-rte_power_kvm_vm_exit(unsigned lcore_id)
-{
- guest_channel_host_disconnect(lcore_id);
- return 0;
-}
-
-uint32_t
-rte_power_kvm_vm_freqs(__attribute__((unused)) unsigned lcore_id,
- __attribute__((unused)) uint32_t *freqs,
- __attribute__((unused)) uint32_t num)
-{
- RTE_LOG(ERR, POWER, "rte_power_freqs is not implemented "
- "for Virtual Machine Power Management\n");
- return -ENOTSUP;
-}
-
-uint32_t
-rte_power_kvm_vm_get_freq(__attribute__((unused)) unsigned lcore_id)
-{
- RTE_LOG(ERR, POWER, "rte_power_get_freq is not implemented "
- "for Virtual Machine Power Management\n");
- return -ENOTSUP;
-}
-
-int
-rte_power_kvm_vm_set_freq(__attribute__((unused)) unsigned lcore_id,
- __attribute__((unused)) uint32_t index)
-{
- RTE_LOG(ERR, POWER, "rte_power_set_freq is not implemented "
- "for Virtual Machine Power Management\n");
- return -ENOTSUP;
-}
-
-static inline int
-send_msg(unsigned lcore_id, uint32_t scale_direction)
-{
- int ret;
-
- if (lcore_id >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
- RTE_LOG(ERR, POWER, "Core(%u) is out of range 0...%d\n",
- lcore_id, CHANNEL_CMDS_MAX_VM_CHANNELS-1);
- return -1;
- }
- pkt[lcore_id].unit = scale_direction;
- ret = guest_channel_send_msg(&pkt[lcore_id], lcore_id);
- if (ret == 0)
- return 1;
- RTE_LOG(DEBUG, POWER, "Error sending message: %s\n", strerror(ret));
- return -1;
-}
-
-int
-rte_power_kvm_vm_freq_up(unsigned lcore_id)
-{
- return send_msg(lcore_id, CPU_POWER_SCALE_UP);
-}
-
-int
-rte_power_kvm_vm_freq_down(unsigned lcore_id)
-{
- return send_msg(lcore_id, CPU_POWER_SCALE_DOWN);
-}
-
-int
-rte_power_kvm_vm_freq_max(unsigned lcore_id)
-{
- return send_msg(lcore_id, CPU_POWER_SCALE_MAX);
-}
-
-int
-rte_power_kvm_vm_freq_min(unsigned lcore_id)
-{
- return send_msg(lcore_id, CPU_POWER_SCALE_MIN);
-}
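
Unlike the ACPI backend, this one never touches cpufreq itself: each request is packed into a channel_packet and written to the virtio serial port named by FD_PATH, and the host-side VM power manager applies the change. A hedged usage sketch; request_max_freq is a hypothetical helper, and it assumes the guest actually has that virtio-serial port provisioned.

#include "rte_power_kvm_vm.h"

static int request_max_freq(unsigned lcore_id)
{
	/* Fails unless /dev/virtio-ports/virtio.serial.port.poweragent
	 * exists and the host power manager is listening on it. */
	if (rte_power_kvm_vm_init(lcore_id) < 0)
		return -1;

	/* Returns 1 once the scale-to-max message is sent; the host
	 * performs the actual frequency change asynchronously. */
	if (rte_power_kvm_vm_freq_max(lcore_id) < 0)
		return -1;

	return rte_power_kvm_vm_exit(lcore_id);
}
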
diff --git a/src/dpdk_lib18/librte_power/rte_power_kvm_vm.h b/src/dpdk_lib18/librte_power/rte_power_kvm_vm.h
deleted file mode 100755
index dcbc878a..00000000
--- a/src/dpdk_lib18/librte_power/rte_power_kvm_vm.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_POWER_KVM_VM_H
-#define _RTE_POWER_KVM_VM_H
-
-/**
- * @file
- * RTE Power Management KVM VM
- */
-
-#include <rte_common.h>
-#include <rte_byteorder.h>
-#include <rte_log.h>
-#include <rte_string_fns.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * Initialize power management for a specific lcore.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int rte_power_kvm_vm_init(unsigned lcore_id);
-
-/**
- * Exit power management on a specific lcore.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int rte_power_kvm_vm_exit(unsigned lcore_id);
-
-/**
- * Get the available frequencies of a specific lcore.
- * It is not currently supported for VM Power Management.
- *
- * @param lcore_id
- * lcore id.
- * @param freqs
- * The buffer array to save the frequencies.
- * @param num
- * The number of frequencies to get.
- *
- * @return
- * -ENOTSUP
- */
-uint32_t rte_power_kvm_vm_freqs(unsigned lcore_id, uint32_t *freqs,
- uint32_t num);
-
-/**
- * Return the current index of available frequencies of a specific lcore.
- * It is not currently supported for VM Power Management.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * -ENOTSUP
- */
-uint32_t rte_power_kvm_vm_get_freq(unsigned lcore_id);
-
-/**
- * Set the new frequency for a specific lcore by indicating the index of
- * available frequencies.
- * It is not currently supported for VM Power Management.
- *
- * @param lcore_id
- * lcore id.
- * @param index
- * The index of available frequencies.
- *
- * @return
- * -ENOTSUP
- */
-int rte_power_kvm_vm_set_freq(unsigned lcore_id, uint32_t index);
-
-/**
- * Scale up the frequency of a specific lcore. This request is forwarded to the
- * host monitor.
- * It should be protected outside of this function for thread safety.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 1 on success.
- * - Negative on error.
- */
-int rte_power_kvm_vm_freq_up(unsigned lcore_id);
-
-/**
- * Scale down the frequency of a specific lcore according to the available
- * frequencies.
- * It should be protected outside of this function for thread safety.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 1 on success.
- * - Negative on error.
- */
-int rte_power_kvm_vm_freq_down(unsigned lcore_id);
-
-/**
- * Scale up the frequency of a specific lcore to the highest according to the
- * available frequencies.
- * It should be protected outside of this function for thread safety.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 1 on success.
- * - Negative on error.
- */
-int rte_power_kvm_vm_freq_max(unsigned lcore_id);
-
-/**
- * Scale down the frequency of a specific lcore to the lowest according to the
- * available frequencies.
- * It should be protected outside of this function for thread safety.
- *
- * @param lcore_id
- * lcore id.
- *
- * @return
- * - 1 on success.
- * - Negative on error.
- */
-int rte_power_kvm_vm_freq_min(unsigned lcore_id);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/src/dpdk_lib18/librte_ring/Makefile b/src/dpdk_lib18/librte_ring/Makefile
deleted file mode 100755
index 2380a43c..00000000
--- a/src/dpdk_lib18/librte_ring/Makefile
+++ /dev/null
@@ -1,48 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_ring.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-# all source files are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_RING) := rte_ring.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_RING)-include := rte_ring.h
-
-# this lib needs eal and rte_malloc
-DEPDIRS-$(CONFIG_RTE_LIBRTE_RING) += lib/librte_eal lib/librte_malloc
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_sched/Makefile b/src/dpdk_lib18/librte_sched/Makefile
deleted file mode 100755
index 1a25b211..00000000
--- a/src/dpdk_lib18/librte_sched/Makefile
+++ /dev/null
@@ -1,56 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_sched.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-CFLAGS_rte_red.o := -D_GNU_SOURCE
-
-#
-# all source files are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_SCHED) += rte_sched.c rte_red.c rte_approx.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_SCHED)-include := rte_sched.h rte_bitmap.h rte_sched_common.h rte_red.h rte_approx.h
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_SCHED) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_SCHED) += lib/librte_net lib/librte_timer
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_sched/rte_approx.c b/src/dpdk_lib18/librte_sched/rte_approx.c
deleted file mode 100755
index 771c9518..00000000
--- a/src/dpdk_lib18/librte_sched/rte_approx.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdlib.h>
-
-#include "rte_approx.h"
-
-/*
- * Based on paper "Approximating Rational Numbers by Fractions" by Michal
- * Forisek forisek@dcs.fmph.uniba.sk
- *
- * Given a rational number alpha with 0 < alpha < 1 and a precision d, the goal
- * is to find positive integers p, q such that alpha - d < p/q < alpha + d, and
- * q is minimal.
- *
- * http://people.ksp.sk/~misof/publications/2007approx.pdf
- */
-
-/* fraction comparison: compare (a/b) and (c/d) */
-static inline uint32_t
-less(uint32_t a, uint32_t b, uint32_t c, uint32_t d)
-{
- return (a*d < b*c);
-}
-
-static inline uint32_t
-less_or_equal(uint32_t a, uint32_t b, uint32_t c, uint32_t d)
-{
- return (a*d <= b*c);
-}
-
-/* check whether a/b is a valid approximation */
-static inline uint32_t
-matches(uint32_t a, uint32_t b,
- uint32_t alpha_num, uint32_t d_num, uint32_t denum)
-{
- if (less_or_equal(a, b, alpha_num - d_num, denum))
- return 0;
-
-	if (less(a, b, alpha_num + d_num, denum))
- return 1;
-
- return 0;
-}
-
-static inline void
-find_exact_solution_left(uint32_t p_a, uint32_t q_a, uint32_t p_b, uint32_t q_b,
- uint32_t alpha_num, uint32_t d_num, uint32_t denum, uint32_t *p, uint32_t *q)
-{
- uint32_t k_num = denum * p_b - (alpha_num + d_num) * q_b;
- uint32_t k_denum = (alpha_num + d_num) * q_a - denum * p_a;
- uint32_t k = (k_num / k_denum) + 1;
-
- *p = p_b + k * p_a;
- *q = q_b + k * q_a;
-}
-
-static inline void
-find_exact_solution_right(uint32_t p_a, uint32_t q_a, uint32_t p_b, uint32_t q_b,
- uint32_t alpha_num, uint32_t d_num, uint32_t denum, uint32_t *p, uint32_t *q)
-{
- uint32_t k_num = - denum * p_b + (alpha_num - d_num) * q_b;
- uint32_t k_denum = - (alpha_num - d_num) * q_a + denum * p_a;
- uint32_t k = (k_num / k_denum) + 1;
-
- *p = p_b + k * p_a;
- *q = q_b + k * q_a;
-}
-
-static int
-find_best_rational_approximation(uint32_t alpha_num, uint32_t d_num, uint32_t denum, uint32_t *p, uint32_t *q)
-{
- uint32_t p_a, q_a, p_b, q_b;
-
- /* check assumptions on the inputs */
- if (!((0 < d_num) && (d_num < alpha_num) && (alpha_num < denum) && (d_num + alpha_num < denum))) {
- return -1;
- }
-
- /* set initial bounds for the search */
- p_a = 0;
- q_a = 1;
- p_b = 1;
- q_b = 1;
-
- while (1) {
- uint32_t new_p_a, new_q_a, new_p_b, new_q_b;
- uint32_t x_num, x_denum, x;
- int aa, bb;
-
- /* compute the number of steps to the left */
- x_num = denum * p_b - alpha_num * q_b;
- x_denum = - denum * p_a + alpha_num * q_a;
- x = (x_num + x_denum - 1) / x_denum; /* x = ceil(x_num / x_denum) */
-
- /* check whether we have a valid approximation */
- aa = matches(p_b + x * p_a, q_b + x * q_a, alpha_num, d_num, denum);
- bb = matches(p_b + (x-1) * p_a, q_b + (x - 1) * q_a, alpha_num, d_num, denum);
- if (aa || bb) {
- find_exact_solution_left(p_a, q_a, p_b, q_b, alpha_num, d_num, denum, p, q);
- return 0;
- }
-
- /* update the interval */
-		new_p_a = p_b + (x - 1) * p_a;
-		new_q_a = q_b + (x - 1) * q_a;
-		new_p_b = p_b + x * p_a;
-		new_q_b = q_b + x * q_a;
-
-		p_a = new_p_a;
-		q_a = new_q_a;
-		p_b = new_p_b;
-		q_b = new_q_b;
-
- /* compute the number of steps to the right */
- x_num = alpha_num * q_b - denum * p_b;
- x_denum = - alpha_num * q_a + denum * p_a;
- x = (x_num + x_denum - 1) / x_denum; /* x = ceil(x_num / x_denum) */
-
- /* check whether we have a valid approximation */
- aa = matches(p_b + x * p_a, q_b + x * q_a, alpha_num, d_num, denum);
- bb = matches(p_b + (x - 1) * p_a, q_b + (x - 1) * q_a, alpha_num, d_num, denum);
- if (aa || bb) {
- find_exact_solution_right(p_a, q_a, p_b, q_b, alpha_num, d_num, denum, p, q);
- return 0;
- }
-
- /* update the interval */
- new_p_a = p_b + (x - 1) * p_a;
- new_q_a = q_b + (x - 1) * q_a;
- new_p_b = p_b + x * p_a;
- new_q_b = q_b + x * q_a;
-
- p_a = new_p_a;
- q_a = new_q_a;
- p_b = new_p_b;
- q_b = new_q_b;
- }
-}
-
-int rte_approx(double alpha, double d, uint32_t *p, uint32_t *q)
-{
- uint32_t alpha_num, d_num, denum;
-
- /* Check input arguments */
- if (!((0.0 < d) && (d < alpha) && (alpha < 1.0))) {
- return -1;
- }
-
- if ((p == NULL) || (q == NULL)) {
- return -2;
- }
-
- /* Compute alpha_num, d_num and denum */
- denum = 1;
- while (d < 1) {
- alpha *= 10;
- d *= 10;
- denum *= 10;
- }
- alpha_num = (uint32_t) alpha;
- d_num = (uint32_t) d;
-
- /* Perform approximation */
- return find_best_rational_approximation(alpha_num, d_num, denum, p, q);
-}
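
A worked instance of the search above: with alpha = 0.333 and d = 0.001, the scaling loop yields alpha_num = 333, d_num = 1, denum = 1000, and the fraction with the smallest denominator strictly inside (332/1000, 334/1000) is 1/3. A minimal sketch, assuming rte_approx.h is on the include path:

#include <stdio.h>
#include <stdint.h>
#include "rte_approx.h"

int main(void)
{
	uint32_t p, q;

	if (rte_approx(0.333, 0.001, &p, &q) == 0)
		printf("%u/%u\n", p, q);	/* expected: 1/3 */
	return 0;
}
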
diff --git a/src/dpdk_lib18/librte_sched/rte_bitmap.h b/src/dpdk_lib18/librte_sched/rte_bitmap.h
deleted file mode 100755
index 95f3c0d3..00000000
--- a/src/dpdk_lib18/librte_sched/rte_bitmap.h
+++ /dev/null
@@ -1,563 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __INCLUDE_RTE_BITMAP_H__
-#define __INCLUDE_RTE_BITMAP_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * @file
- * RTE Bitmap
- *
- * The bitmap component provides a mechanism to manage large arrays of bits
- * through bit get/set/clear and bit array scan operations.
- *
- * The bitmap scan operation is optimized for 64-bit CPUs using 64-byte cache
- * lines. The bitmap is hierarchically organized using two arrays (array1 and
- * array2), with each bit in array1 being associated with a full cache line
- * (512 bits) of bitmap bits, which are stored in array2: the bit in array1 is
- * set only when there is at least one bit set within its associated array2
- * bits, otherwise the bit in array1 is cleared. The read and write operations
- * for array1 and array2 are always done in slabs of 64 bits.
- *
- * This bitmap is not thread safe. For lock free operation on a specific bitmap
- * instance, a single writer thread performing bit set/clear operations is
- * allowed, only the writer thread can do bitmap scan operations, while there
- * can be several reader threads performing bit get operations in parallel with
- * the writer thread. When the use of locking primitives is acceptable, the
- * serialization of the bit set/clear and bitmap scan operations needs to be
- * enforced by the caller, while the bit get operation does not require locking
- * the bitmap.
- *
- */
-
-#include <rte_common.h>
-#include <rte_debug.h>
-#include <rte_memory.h>
-#include <rte_branch_prediction.h>
-#include <rte_prefetch.h>
-
-#ifndef RTE_BITMAP_OPTIMIZATIONS
-#define RTE_BITMAP_OPTIMIZATIONS 1
-#endif
-#if RTE_BITMAP_OPTIMIZATIONS
-#include <tmmintrin.h>
-#endif
-
-/* Slab */
-#define RTE_BITMAP_SLAB_BIT_SIZE 64
-#define RTE_BITMAP_SLAB_BIT_SIZE_LOG2 6
-#define RTE_BITMAP_SLAB_BIT_MASK (RTE_BITMAP_SLAB_BIT_SIZE - 1)
-
-/* Cache line (CL) */
-#define RTE_BITMAP_CL_BIT_SIZE (RTE_CACHE_LINE_SIZE * 8)
-#define RTE_BITMAP_CL_BIT_SIZE_LOG2 9
-#define RTE_BITMAP_CL_BIT_MASK (RTE_BITMAP_CL_BIT_SIZE - 1)
-
-#define RTE_BITMAP_CL_SLAB_SIZE (RTE_BITMAP_CL_BIT_SIZE / RTE_BITMAP_SLAB_BIT_SIZE)
-#define RTE_BITMAP_CL_SLAB_SIZE_LOG2 3
-#define RTE_BITMAP_CL_SLAB_MASK (RTE_BITMAP_CL_SLAB_SIZE - 1)
-
-/** Bitmap data structure */
-struct rte_bitmap {
- /* Context for array1 and array2 */
- uint64_t *array1; /**< Bitmap array1 */
- uint64_t *array2; /**< Bitmap array2 */
- uint32_t array1_size; /**< Number of 64-bit slabs in array1 that are actually used */
- uint32_t array2_size; /**< Number of 64-bit slabs in array2 */
-
- /* Context for the "scan next" operation */
- uint32_t index1; /**< Bitmap scan: Index of current array1 slab */
- uint32_t offset1; /**< Bitmap scan: Offset of current bit within current array1 slab */
- uint32_t index2; /**< Bitmap scan: Index of current array2 slab */
- uint32_t go2; /**< Bitmap scan: Go/stop condition for current array2 cache line */
-
- /* Storage space for array1 and array2 */
- uint8_t memory[0];
-};
-
-static inline void
-__rte_bitmap_index1_inc(struct rte_bitmap *bmp)
-{
- bmp->index1 = (bmp->index1 + 1) & (bmp->array1_size - 1);
-}
-
-static inline uint64_t
-__rte_bitmap_mask1_get(struct rte_bitmap *bmp)
-{
- return ((~1lu) << bmp->offset1);
-}
-
-static inline void
-__rte_bitmap_index2_set(struct rte_bitmap *bmp)
-{
- bmp->index2 = (((bmp->index1 << RTE_BITMAP_SLAB_BIT_SIZE_LOG2) + bmp->offset1) << RTE_BITMAP_CL_SLAB_SIZE_LOG2);
-}
-
-#if RTE_BITMAP_OPTIMIZATIONS
-
-static inline int
-rte_bsf64(uint64_t slab, uint32_t *pos)
-{
- if (likely(slab == 0)) {
- return 0;
- }
-
- *pos = __builtin_ctzll(slab);
- return 1;
-}
-
-#else
-
-static inline int
-rte_bsf64(uint64_t slab, uint32_t *pos)
-{
- uint64_t mask;
- uint32_t i;
-
- if (likely(slab == 0)) {
- return 0;
- }
-
- for (i = 0, mask = 1; i < RTE_BITMAP_SLAB_BIT_SIZE; i ++, mask <<= 1) {
- if (unlikely(slab & mask)) {
- *pos = i;
- return 1;
- }
- }
-
- return 0;
-}
-
-#endif
-
-static inline uint32_t
-__rte_bitmap_get_memory_footprint(uint32_t n_bits,
- uint32_t *array1_byte_offset, uint32_t *array1_slabs,
- uint32_t *array2_byte_offset, uint32_t *array2_slabs)
-{
- uint32_t n_slabs_context, n_slabs_array1, n_cache_lines_context_and_array1;
- uint32_t n_cache_lines_array2;
- uint32_t n_bytes_total;
-
- n_cache_lines_array2 = (n_bits + RTE_BITMAP_CL_BIT_SIZE - 1) / RTE_BITMAP_CL_BIT_SIZE;
- n_slabs_array1 = (n_cache_lines_array2 + RTE_BITMAP_SLAB_BIT_SIZE - 1) / RTE_BITMAP_SLAB_BIT_SIZE;
- n_slabs_array1 = rte_align32pow2(n_slabs_array1);
- n_slabs_context = (sizeof(struct rte_bitmap) + (RTE_BITMAP_SLAB_BIT_SIZE / 8) - 1) / (RTE_BITMAP_SLAB_BIT_SIZE / 8);
- n_cache_lines_context_and_array1 = (n_slabs_context + n_slabs_array1 + RTE_BITMAP_CL_SLAB_SIZE - 1) / RTE_BITMAP_CL_SLAB_SIZE;
- n_bytes_total = (n_cache_lines_context_and_array1 + n_cache_lines_array2) * RTE_CACHE_LINE_SIZE;
-
- if (array1_byte_offset) {
- *array1_byte_offset = n_slabs_context * (RTE_BITMAP_SLAB_BIT_SIZE / 8);
- }
- if (array1_slabs) {
- *array1_slabs = n_slabs_array1;
- }
- if (array2_byte_offset) {
- *array2_byte_offset = n_cache_lines_context_and_array1 * RTE_CACHE_LINE_SIZE;
- }
- if (array2_slabs) {
- *array2_slabs = n_cache_lines_array2 * RTE_BITMAP_CL_SLAB_SIZE;
- }
-
- return n_bytes_total;
-}
-
-static inline void
-__rte_bitmap_scan_init(struct rte_bitmap *bmp)
-{
- bmp->index1 = bmp->array1_size - 1;
- bmp->offset1 = RTE_BITMAP_SLAB_BIT_SIZE - 1;
- __rte_bitmap_index2_set(bmp);
- bmp->index2 += RTE_BITMAP_CL_SLAB_SIZE;
-
- bmp->go2 = 0;
-}
-
-/**
- * Bitmap memory footprint calculation
- *
- * @param n_bits
- * Number of bits in the bitmap
- * @return
- * Bitmap memory footprint measured in bytes on success, 0 on error
- */
-static inline uint32_t
-rte_bitmap_get_memory_footprint(uint32_t n_bits)
-{
- /* Check input arguments */
- if (n_bits == 0) {
- return 0;
- }
-
- return __rte_bitmap_get_memory_footprint(n_bits, NULL, NULL, NULL, NULL);
-}
-
-/**
- * Bitmap initialization
- *
- * @param n_bits
- *   Number of pre-allocated bits in the bitmap. Must be non-zero and a
- *   multiple of 512.
- * @param mem
- *   Base address of the pre-allocated, cache line aligned memory area
- * @param mem_size
- *   Size of the pre-allocated memory area, in bytes
- * @return
- *   Handle to the bitmap instance upon success, NULL otherwise
- */
-static inline struct rte_bitmap *
-rte_bitmap_init(uint32_t n_bits, uint8_t *mem, uint32_t mem_size)
-{
- struct rte_bitmap *bmp;
- uint32_t array1_byte_offset, array1_slabs, array2_byte_offset, array2_slabs;
- uint32_t size;
-
- /* Check input arguments */
- if (n_bits == 0) {
- return NULL;
- }
-
- if ((mem == NULL) || (((uintptr_t) mem) & RTE_CACHE_LINE_MASK)) {
- return NULL;
- }
-
- size = __rte_bitmap_get_memory_footprint(n_bits,
- &array1_byte_offset, &array1_slabs,
- &array2_byte_offset, &array2_slabs);
- if (mem_size < size) {
- return NULL;
- }
-
- /* Setup bitmap */
- memset(mem, 0, size);
- bmp = (struct rte_bitmap *) mem;
-
- bmp->array1 = (uint64_t *) &mem[array1_byte_offset];
- bmp->array1_size = array1_slabs;
- bmp->array2 = (uint64_t *) &mem[array2_byte_offset];
- bmp->array2_size = array2_slabs;
-
- __rte_bitmap_scan_init(bmp);
-
- return bmp;
-}
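A minimal creation sketch, assuming DPDK's rte_zmalloc() (any allocator returning zeroed, RTE_CACHE_LINE_SIZE-aligned memory works):

    static struct rte_bitmap *
    bitmap_create(uint32_t n_bits)
    {
        uint32_t mem_size = rte_bitmap_get_memory_footprint(n_bits);
        uint8_t *mem = rte_zmalloc("bitmap", mem_size, RTE_CACHE_LINE_SIZE);

        if (mem == NULL)
            return NULL;

        /* lays out the context, array1 and array2 inside mem */
        return rte_bitmap_init(n_bits, mem, mem_size);
    }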
-
-/**
- * Bitmap free
- *
- * @param bmp
- * Handle to bitmap instance
- * @return
- * 0 upon success, error code otherwise
- */
-static inline int
-rte_bitmap_free(struct rte_bitmap *bmp)
-{
- /* Check input arguments */
- if (bmp == NULL) {
- return -1;
- }
-
- return 0;
-}
-
-/**
- * Bitmap reset
- *
- * @param bmp
- * Handle to bitmap instance
- */
-static inline void
-rte_bitmap_reset(struct rte_bitmap *bmp)
-{
- memset(bmp->array1, 0, bmp->array1_size * sizeof(uint64_t));
- memset(bmp->array2, 0, bmp->array2_size * sizeof(uint64_t));
- __rte_bitmap_scan_init(bmp);
-}
-
-/**
- * Bitmap location prefetch into CPU L1 cache
- *
- * @param bmp
- * Handle to bitmap instance
- * @param pos
- * Bit position
- */
-static inline void
-rte_bitmap_prefetch0(struct rte_bitmap *bmp, uint32_t pos)
-{
- uint64_t *slab2;
- uint32_t index2;
-
- index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
- slab2 = bmp->array2 + index2;
- rte_prefetch0((void *) slab2);
-}
-
-/**
- * Bitmap bit get
- *
- * @param bmp
- * Handle to bitmap instance
- * @param pos
- * Bit position
- * @return
- * 0 when bit is cleared, non-zero when bit is set
- */
-static inline uint64_t
-rte_bitmap_get(struct rte_bitmap *bmp, uint32_t pos)
-{
- uint64_t *slab2;
- uint32_t index2, offset2;
-
- index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
- offset2 = pos & RTE_BITMAP_SLAB_BIT_MASK;
- slab2 = bmp->array2 + index2;
- return ((*slab2) & (1lu << offset2));
-}
-
-/**
- * Bitmap bit set
- *
- * @param bmp
- * Handle to bitmap instance
- * @param pos
- * Bit position
- */
-static inline void
-rte_bitmap_set(struct rte_bitmap *bmp, uint32_t pos)
-{
- uint64_t *slab1, *slab2;
- uint32_t index1, index2, offset1, offset2;
-
- /* Set bit in array2 slab and set bit in array1 slab */
- index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
- offset2 = pos & RTE_BITMAP_SLAB_BIT_MASK;
- index1 = pos >> (RTE_BITMAP_SLAB_BIT_SIZE_LOG2 + RTE_BITMAP_CL_BIT_SIZE_LOG2);
- offset1 = (pos >> RTE_BITMAP_CL_BIT_SIZE_LOG2) & RTE_BITMAP_SLAB_BIT_MASK;
- slab2 = bmp->array2 + index2;
- slab1 = bmp->array1 + index1;
-
- *slab2 |= 1lu << offset2;
- *slab1 |= 1lu << offset1;
-}
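A worked index example, assuming the 64-bit slab and 512-bit cache line constants defined earlier in this header (RTE_BITMAP_SLAB_BIT_SIZE_LOG2 = 6, RTE_BITMAP_CL_BIT_SIZE_LOG2 = 9). For pos = 1000:

    index2  = 1000 >> 6        = 15  -> array2 slab 15
    offset2 = 1000 & 63        = 40  -> bit 40 within that slab
    index1  = 1000 >> (6 + 9)  = 0   -> array1 slab 0
    offset1 = (1000 >> 9) & 63 = 1   -> summary bit for array2 cache line 1

so rte_bitmap_set(bmp, 1000) sets bit 40 of array2[15] and marks cache line 1 of array2 as non-empty in array1[0].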
-
-/**
- * Bitmap slab set
- *
- * @param bmp
- * Handle to bitmap instance
- * @param pos
- * Bit position identifying the array2 slab
- * @param slab
- * Value to be assigned to the 64-bit slab in array2
- */
-static inline void
-rte_bitmap_set_slab(struct rte_bitmap *bmp, uint32_t pos, uint64_t slab)
-{
- uint64_t *slab1, *slab2;
- uint32_t index1, index2, offset1;
-
- /* Set bits in array2 slab and set bit in array1 slab */
- index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
- index1 = pos >> (RTE_BITMAP_SLAB_BIT_SIZE_LOG2 + RTE_BITMAP_CL_BIT_SIZE_LOG2);
- offset1 = (pos >> RTE_BITMAP_CL_BIT_SIZE_LOG2) & RTE_BITMAP_SLAB_BIT_MASK;
- slab2 = bmp->array2 + index2;
- slab1 = bmp->array1 + index1;
-
- *slab2 |= slab;
- *slab1 |= 1lu << offset1;
-}
-
-static inline uint64_t
-__rte_bitmap_line_not_empty(uint64_t *slab2)
-{
- uint64_t v1, v2, v3, v4;
-
- v1 = slab2[0] | slab2[1];
- v2 = slab2[2] | slab2[3];
- v3 = slab2[4] | slab2[5];
- v4 = slab2[6] | slab2[7];
- v1 |= v2;
- v3 |= v4;
-
- return (v1 | v3);
-}
-
-/**
- * Bitmap bit clear
- *
- * @param bmp
- * Handle to bitmap instance
- * @param pos
- * Bit position
- */
-static inline void
-rte_bitmap_clear(struct rte_bitmap *bmp, uint32_t pos)
-{
- uint64_t *slab1, *slab2;
- uint32_t index1, index2, offset1, offset2;
-
- /* Clear bit in array2 slab */
- index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
- offset2 = pos & RTE_BITMAP_SLAB_BIT_MASK;
- slab2 = bmp->array2 + index2;
-
- /* Return if array2 slab is not all-zeros */
- *slab2 &= ~(1lu << offset2);
- if (*slab2){
- return;
- }
-
- /* Check the entire cache line of array2 for all-zeros */
- index2 &= ~ RTE_BITMAP_CL_SLAB_MASK;
- slab2 = bmp->array2 + index2;
- if (__rte_bitmap_line_not_empty(slab2)) {
- return;
- }
-
- /* The array2 cache line is all-zeros, so clear bit in array1 slab */
- index1 = pos >> (RTE_BITMAP_SLAB_BIT_SIZE_LOG2 + RTE_BITMAP_CL_BIT_SIZE_LOG2);
- offset1 = (pos >> RTE_BITMAP_CL_BIT_SIZE_LOG2) & RTE_BITMAP_SLAB_BIT_MASK;
- slab1 = bmp->array1 + index1;
- *slab1 &= ~(1lu << offset1);
-
- return;
-}
-
-static inline int
-__rte_bitmap_scan_search(struct rte_bitmap *bmp)
-{
- uint64_t value1;
- uint32_t i;
-
- /* Check current array1 slab */
- value1 = bmp->array1[bmp->index1];
- value1 &= __rte_bitmap_mask1_get(bmp);
-
- if (rte_bsf64(value1, &bmp->offset1)) {
- return 1;
- }
-
- __rte_bitmap_index1_inc(bmp);
- bmp->offset1 = 0;
-
- /* Look for another array1 slab */
- for (i = 0; i < bmp->array1_size; i ++, __rte_bitmap_index1_inc(bmp)) {
- value1 = bmp->array1[bmp->index1];
-
- if (rte_bsf64(value1, &bmp->offset1)) {
- return 1;
- }
- }
-
- return 0;
-}
-
-static inline void
-__rte_bitmap_scan_read_init(struct rte_bitmap *bmp)
-{
- __rte_bitmap_index2_set(bmp);
- bmp->go2 = 1;
- rte_prefetch1((void *)(bmp->array2 + bmp->index2 + 8));
-}
-
-static inline int
-__rte_bitmap_scan_read(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab)
-{
- uint64_t *slab2;
-
- slab2 = bmp->array2 + bmp->index2;
- for ( ; bmp->go2 ; bmp->index2 ++, slab2 ++, bmp->go2 = bmp->index2 & RTE_BITMAP_CL_SLAB_MASK) {
- if (*slab2) {
- *pos = bmp->index2 << RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
- *slab = *slab2;
-
- bmp->index2 ++;
- slab2 ++;
- bmp->go2 = bmp->index2 & RTE_BITMAP_CL_SLAB_MASK;
- return 1;
- }
- }
-
- return 0;
-}
-
-/**
- * Bitmap scan (with automatic wrap-around)
- *
- * @param bmp
- * Handle to bitmap instance
- * @param pos
- * When function call returns 1, pos contains the position of the next set
- * bit, otherwise not modified
- * @param slab
- * When function call returns 1, slab contains the value of the entire 64-bit
- * slab where the bit indicated by pos is located. Slabs are always 64-bit
- * aligned, so the position of the first bit of the slab (this bit is not
- * necessarily set) is pos / 64. Once a slab has been returned by the bitmap
- * scan operation, the internal pointers of the bitmap are updated to point
- * after this slab, so the same slab will not be returned again if it
- * contains more than one bit which is set. When function call returns 0,
- * slab is not modified.
- * @return
- * 0 if there is no bit set in the bitmap, 1 otherwise
- */
-static inline int
-rte_bitmap_scan(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab)
-{
- /* Return data from current array2 line if available */
- if (__rte_bitmap_scan_read(bmp, pos, slab)) {
- return 1;
- }
-
- /* Look for non-empty array2 line */
- if (__rte_bitmap_scan_search(bmp)) {
- __rte_bitmap_scan_read_init(bmp);
- __rte_bitmap_scan_read(bmp, pos, slab);
- return 1;
- }
-
- /* Empty bitmap */
- return 0;
-}
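A drain-loop sketch combining the scan, bsf and clear primitives; pos is the base position of the returned slab, so individual bit positions are pos + offset:

    static void
    bitmap_drain(struct rte_bitmap *bmp)
    {
        uint32_t pos, offset;
        uint64_t slab;

        while (rte_bitmap_scan(bmp, &pos, &slab)) {
            /* visit every set bit of the returned slab */
            while (rte_bsf64(slab, &offset)) {
                rte_bitmap_clear(bmp, pos + offset);
                slab &= slab - 1; /* drop the bit just handled */
            }
        }
    }

Clearing each bit as it is visited guarantees the outer loop terminates once the bitmap is empty.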
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __INCLUDE_RTE_BITMAP_H__ */
diff --git a/src/dpdk_lib18/librte_sched/rte_red.c b/src/dpdk_lib18/librte_sched/rte_red.c
deleted file mode 100755
index fdf40576..00000000
--- a/src/dpdk_lib18/librte_sched/rte_red.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <math.h>
-#include "rte_red.h"
-#include <rte_random.h>
-#include <rte_common.h>
-
-#ifdef __INTEL_COMPILER
-#pragma warning(disable:2259) /* conversion may lose significant bits */
-#endif
-
-static int rte_red_init_done = 0; /**< Flag to indicate that global initialisation is done */
-uint32_t rte_red_rand_val = 0; /**< Random value cache */
-uint32_t rte_red_rand_seed = 0; /**< Seed for random number generation */
-
-/**
- * table[i] = log2(1-Wq) * Scale * -1
- * Wq = 1/(2^i)
- */
-uint16_t rte_red_log2_1_minus_Wq[RTE_RED_WQ_LOG2_NUM];
-
-/**
- * table[i] = 2^(i/16) * Scale
- */
-uint16_t rte_red_pow2_frac_inv[16];
-
-/**
- * @brief Initialize tables used to compute average
- * queue size when queue is empty.
- */
-static void
-__rte_red_init_tables(void)
-{
- uint32_t i = 0;
- double scale = 0.0;
- double table_size = 0.0;
-
- scale = (double)(1 << RTE_RED_SCALING);
- table_size = (double)(RTE_DIM(rte_red_pow2_frac_inv));
-
- for (i = 0; i < RTE_DIM(rte_red_pow2_frac_inv); i++) {
- double m = (double)i;
-
- rte_red_pow2_frac_inv[i] = (uint16_t) round(scale / pow(2, m / table_size));
- }
-
- scale = 1024.0;
-
- RTE_RED_ASSERT(RTE_RED_WQ_LOG2_NUM == RTE_DIM(rte_red_log2_1_minus_Wq));
-
- for (i = RTE_RED_WQ_LOG2_MIN; i <= RTE_RED_WQ_LOG2_MAX; i++) {
- double n = (double)i;
- double Wq = pow(2, -n);
- uint32_t index = i - RTE_RED_WQ_LOG2_MIN;
-
- rte_red_log2_1_minus_Wq[index] = (uint16_t) round(-1.0 * scale * log2(1.0 - Wq));
- /**
- * Table entry of zero, corresponds to a Wq of zero
- * which is not valid (avg would remain constant no
- * matter how long the queue is empty). So we have
- * to check for zero and round up to one.
- */
- if (rte_red_log2_1_minus_Wq[index] == 0) {
- rte_red_log2_1_minus_Wq[index] = 1;
- }
- }
-}
-
-int
-rte_red_rt_data_init(struct rte_red *red)
-{
- if (red == NULL)
- return -1;
-
- red->avg = 0;
- red->count = 0;
- red->q_time = 0;
- return 0;
-}
-
-int
-rte_red_config_init(struct rte_red_config *red_cfg,
- const uint16_t wq_log2,
- const uint16_t min_th,
- const uint16_t max_th,
- const uint16_t maxp_inv)
-{
- if (red_cfg == NULL) {
- return -1;
- }
- if (max_th > RTE_RED_MAX_TH_MAX) {
- return -2;
- }
- if (min_th >= max_th) {
- return -3;
- }
- if (wq_log2 > RTE_RED_WQ_LOG2_MAX) {
- return -4;
- }
- if (wq_log2 < RTE_RED_WQ_LOG2_MIN) {
- return -5;
- }
- if (maxp_inv < RTE_RED_MAXP_INV_MIN) {
- return -6;
- }
- if (maxp_inv > RTE_RED_MAXP_INV_MAX) {
- return -7;
- }
-
- /**
- * Initialize the RED module if not already done
- */
- if (!rte_red_init_done) {
- rte_red_rand_seed = rte_rand();
- rte_red_rand_val = rte_fast_rand();
- __rte_red_init_tables();
- rte_red_init_done = 1;
- }
-
- red_cfg->min_th = ((uint32_t) min_th) << (wq_log2 + RTE_RED_SCALING);
- red_cfg->max_th = ((uint32_t) max_th) << (wq_log2 + RTE_RED_SCALING);
- red_cfg->pa_const = (2 * (max_th - min_th) * maxp_inv) << RTE_RED_SCALING;
- red_cfg->maxp_inv = maxp_inv;
- red_cfg->wq_log2 = wq_log2;
-
- return 0;
-}
diff --git a/src/dpdk_lib18/librte_sched/rte_red.h b/src/dpdk_lib18/librte_sched/rte_red.h
deleted file mode 100755
index 0d8412ff..00000000
--- a/src/dpdk_lib18/librte_sched/rte_red.h
+++ /dev/null
@@ -1,453 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __RTE_RED_H_INCLUDED__
-#define __RTE_RED_H_INCLUDED__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * @file
- * RTE Random Early Detection (RED)
- */
-
-#include <stdint.h>
-#include <limits.h>
-#include <rte_common.h>
-#include <rte_debug.h>
-#include <rte_cycles.h>
-#include <rte_branch_prediction.h>
-
-#define RTE_RED_SCALING 10 /**< Fraction size for fixed-point */
-#define RTE_RED_S (1 << 22) /**< Packet size multiplied by number of leaf queues */
-#define RTE_RED_MAX_TH_MAX 1023 /**< Max threshold limit in fixed point format */
-#define RTE_RED_WQ_LOG2_MIN 1 /**< Min inverse filter weight value */
-#define RTE_RED_WQ_LOG2_MAX 12 /**< Max inverse filter weight value */
-#define RTE_RED_MAXP_INV_MIN 1 /**< Min inverse mark probability value */
-#define RTE_RED_MAXP_INV_MAX 255 /**< Max inverse mark probability value */
-#define RTE_RED_2POW16 (1<<16) /**< 2 power 16 */
-#define RTE_RED_INT16_NBITS (sizeof(uint16_t) * CHAR_BIT)
-#define RTE_RED_WQ_LOG2_NUM (RTE_RED_WQ_LOG2_MAX - RTE_RED_WQ_LOG2_MIN + 1)
-
-#ifdef RTE_RED_DEBUG
-
-#define RTE_RED_ASSERT(exp) \
-if (!(exp)) { \
- rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-
-#else
-
-#define RTE_RED_ASSERT(exp) do { } while(0)
-
-#endif /* RTE_RED_DEBUG */
-
-/**
- * Externs
- *
- */
-extern uint32_t rte_red_rand_val;
-extern uint32_t rte_red_rand_seed;
-extern uint16_t rte_red_log2_1_minus_Wq[RTE_RED_WQ_LOG2_NUM];
-extern uint16_t rte_red_pow2_frac_inv[16];
-
-/**
- * RED configuration parameters passed by user
- *
- */
-struct rte_red_params {
- uint16_t min_th;   /**< Minimum threshold for queue (min_th) */
- uint16_t max_th; /**< Maximum threshold for queue (max_th) */
- uint16_t maxp_inv; /**< Inverse of packet marking probability maximum value (maxp = 1 / maxp_inv) */
- uint16_t wq_log2; /**< Negated log2 of queue weight (wq = 1 / (2 ^ wq_log2)) */
-};
-
-/**
- * RED configuration parameters
- */
-struct rte_red_config {
- uint32_t min_th; /**< min_th scaled in fixed-point format */
- uint32_t max_th; /**< max_th scaled in fixed-point format */
- uint32_t pa_const; /**< Precomputed constant value used for pa calculation (scaled in fixed-point format) */
- uint8_t maxp_inv; /**< maxp_inv */
- uint8_t wq_log2; /**< wq_log2 */
-};
-
-/**
- * RED run-time data
- */
-struct rte_red {
- uint32_t avg; /**< Average queue size (avg), scaled in fixed-point format */
- uint32_t count; /**< Number of packets since last marked packet (count) */
- uint64_t q_time; /**< Start of the queue idle time (q_time) */
-};
-
-/**
- * @brief Initialises run-time data
- *
- * @param [in,out] data pointer to RED runtime data
- *
- * @return Operation status
- * @retval 0 success
- * @retval !0 error
- */
-int
-rte_red_rt_data_init(struct rte_red *red);
-
-/**
- * @brief Configures a single RED configuration parameter structure.
- *
- * @param [in,out] config pointer to a RED configuration parameter structure
- * @param [in] wq_log2 log2 of the filter weight, valid range is:
- * RTE_RED_WQ_LOG2_MIN <= wq_log2 <= RTE_RED_WQ_LOG2_MAX
- * @param [in] min_th queue minimum threshold in number of packets
- * @param [in] max_th queue maximum threshold in number of packets
- * @param [in] maxp_inv inverse maximum mark probability
- *
- * @return Operation status
- * @retval 0 success
- * @retval !0 error
- */
-int
-rte_red_config_init(struct rte_red_config *red_cfg,
- const uint16_t wq_log2,
- const uint16_t min_th,
- const uint16_t max_th,
- const uint16_t maxp_inv);
-
-/**
- * @brief Generate random number for RED
- *
- * Implementation based on:
- * http://software.intel.com/en-us/articles/fast-random-number-generator-on-the-intel-pentiumr-4-processor/
- *
- * 10 bit shift has been found through empirical tests (was 16).
- *
- * @return Random number between 0 and (2^22 - 1)
- */
-static inline uint32_t
-rte_fast_rand(void)
-{
- rte_red_rand_seed = (214013 * rte_red_rand_seed) + 2531011;
- return (rte_red_rand_seed >> 10);
-}
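The constants 214013 and 2531011 form a 32-bit linear congruential generator; since the low-order bits of an LCG have short cycles, only the upper 22 bits are exposed, which matches the documented 0..(2^22 - 1) range. A seeding sketch:

    rte_red_rand_seed = (uint32_t) rte_rand(); /* seed once at init */
    uint32_t r = rte_fast_rand();              /* always < (1u << 22) */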
-
-/**
- * @brief calculate factor to scale average queue size when queue
- * becomes empty
- *
- * @param [in] wq_log2 EWMA filter weight exponent, where wq = 1/(2 ^ wq_log2)
- * @param [in] m exponent in the computed value (1 - wq) ^ m
- *
- * @return computed value
- * @retval ((1 - wq) ^ m) scaled in fixed-point format
- */
-static inline uint16_t
-__rte_red_calc_qempty_factor(uint8_t wq_log2, uint16_t m)
-{
- uint32_t n = 0;
- uint32_t f = 0;
-
- /**
- * Basic math tells us that:
- * a^b = 2^(b * log2(a) )
- *
- * in our case:
- * a = (1-Wq)
- * b = m
- * Wq = 1/ (2^log2n)
- *
- * So we are computing this equation:
- * factor = 2 ^ ( m * log2(1-Wq))
- *
- * First we are computing:
- * n = m * log2(1-Wq)
- *
- * To avoid dealing with signed numbers, the log2 table stores positive
- * values, even though log2(1-Wq) is negative since (1-Wq) is always < 1.
- * The log2 table values are also scaled for precision.
- */
-
- n = m * rte_red_log2_1_minus_Wq[wq_log2 - RTE_RED_WQ_LOG2_MIN];
-
- /**
- * The tricky part is computing 2^n. To do this, n is split into an
- * integer part and a fraction part:
- * f - fraction part of n
- * n - integer part of the original n
- *
- * Then the power is computed using basic math:
- * 2^(f+n) = 2^f * 2^n
- * 2^f - obtained from the lookup table
- * 2^n - replaced by bit shift right operations
- */
-
- f = (n >> 6) & 0xf;
- n >>= 10;
-
- if (n < RTE_RED_SCALING)
- return (uint16_t) ((rte_red_pow2_frac_inv[f] + (1 << (n - 1))) >> n);
-
- return 0;
-}
-
-/**
- * @brief Updates queue average in condition when queue is empty
- *
- * Note: packet is never dropped in this particular case.
- *
- * @param [in] config pointer to a RED configuration parameter structure
- * @param [in,out] data pointer to RED runtime data
- * @param [in] time current time stamp
- *
- * @return Operation status
- * @retval 0 enqueue the packet (the packet is never dropped in this case)
- */
-static inline int
-rte_red_enqueue_empty(const struct rte_red_config *red_cfg,
- struct rte_red *red,
- const uint64_t time)
-{
- uint64_t time_diff = 0, m = 0;
-
- RTE_RED_ASSERT(red_cfg != NULL);
- RTE_RED_ASSERT(red != NULL);
-
- red->count ++;
-
- /**
- * We compute avg but we don't compare avg against
- * min_th or max_th, nor calculate drop probability
- */
- time_diff = time - red->q_time;
-
- /**
- * m is the number of packets that might have arrived while the queue was empty.
- * In this case the time stamps are provided by the scheduler in byte units
- * (bytes transmitted on the network port). Since the port speed is fixed,
- * such a time stamp translates directly into time units, and this approach
- * keeps the code simple.
- */
- m = time_diff / RTE_RED_S;
-
- /**
- * Check that m will fit into 16-bit unsigned integer
- */
- if (m >= RTE_RED_2POW16) {
- red->avg = 0;
- } else {
- red->avg = (red->avg >> RTE_RED_SCALING) * __rte_red_calc_qempty_factor(red_cfg->wq_log2, (uint16_t) m);
- }
-
- return 0;
-}
-
-/**
- * Drop probability (Sally Floyd and Van Jacobson):
- *
- * pb = (1 / maxp_inv) * (avg - min_th) / (max_th - min_th)
- * pa = pb / (2 - count * pb)
- *
- *
- * (1 / maxp_inv) * (avg - min_th)
- * ---------------------------------
- * max_th - min_th
- * pa = -----------------------------------------------
- * count * (1 / maxp_inv) * (avg - min_th)
- * 2 - -----------------------------------------
- * max_th - min_th
- *
- *
- * avg - min_th
- * pa = -----------------------------------------------------------
- * 2 * (max_th - min_th) * maxp_inv - count * (avg - min_th)
- *
- *
- * We define pa_const as: pa_const = 2 * (max_th - min_th) * maxp_inv. Then:
- *
- *
- * avg - min_th
- * pa = -----------------------------------
- * pa_const - count * (avg - min_th)
- */
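A worked numeric check of the rearrangement above (unscaled, hypothetical values):

    min_th = 10, max_th = 20, maxp_inv = 10
    pa_const = 2 * (20 - 10) * 10 = 200

    avg = 15, count = 5:
      pb = (1/10) * (15 - 10) / (20 - 10)                  = 0.05
      pa = pb / (2 - count * pb) = 0.05 / 1.75             ~= 0.0286
      pa = (avg - min_th) / (pa_const - count * (avg - min_th))
         = 5 / (200 - 25) = 5 / 175                        ~= 0.0286

Both forms agree, confirming the pa_const substitution.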
-
-/**
- * @brief make a decision to drop or enqueue a packet based on mark probability
- * criteria
- *
- * @param [in] config pointer to structure defining RED parameters
- * @param [in,out] data pointer to RED runtime data
- *
- * @return operation status
- * @retval 0 enqueue the packet
- * @retval 1 drop the packet
- */
-static inline int
-__rte_red_drop(const struct rte_red_config *red_cfg, struct rte_red *red)
-{
- uint32_t pa_num = 0; /* numerator of drop-probability */
- uint32_t pa_den = 0; /* denominator of drop-probability */
- uint32_t pa_num_count = 0;
-
- pa_num = (red->avg - red_cfg->min_th) >> (red_cfg->wq_log2);
-
- pa_num_count = red->count * pa_num;
-
- if (red_cfg->pa_const <= pa_num_count)
- return 1;
-
- pa_den = red_cfg->pa_const - pa_num_count;
-
- /* If drop, generate and save random number to be used next time */
- if (unlikely((rte_red_rand_val % pa_den) < pa_num)) {
- rte_red_rand_val = rte_fast_rand();
-
- return 1;
- }
-
- /* No drop */
- return 0;
-}
-
-/**
- * @brief Decides if a new packet should be enqueued or dropped in the queue non-empty case
- *
- * @param [in] config pointer to a RED configuration parameter structure
- * @param [in,out] data pointer to RED runtime data
- * @param [in] q current queue size (measured in packets)
- *
- * @return Operation status
- * @retval 0 enqueue the packet
- * @retval 1 drop the packet based on max threshold criterion
- * @retval 2 drop the packet based on mark probability criterion
- */
-static inline int
-rte_red_enqueue_nonempty(const struct rte_red_config *red_cfg,
- struct rte_red *red,
- const unsigned q)
-{
- RTE_RED_ASSERT(red_cfg != NULL);
- RTE_RED_ASSERT(red != NULL);
-
- /**
- * EWMA filter (Sally Floyd and Van Jacobson):
- * avg = (1 - wq) * avg + wq * q
- * avg = avg + q * wq - avg * wq
- *
- * We select: wq = 2^(-n). Let scaled version of avg be: avg_s = avg * 2^(N+n). We get:
- * avg_s = avg_s + q * 2^N - avg_s * 2^(-n)
- *
- * By using shift left/right operations, we get:
- * avg_s = avg_s + (q << N) - (avg_s >> n)
- * avg_s += (q << N) - (avg_s >> n)
- */
-
- /* avg update */
- red->avg += (q << RTE_RED_SCALING) - (red->avg >> red_cfg->wq_log2);
-
- /* avg < min_th: do not mark the packet */
- if (red->avg < red_cfg->min_th) {
- red->count ++;
- return 0;
- }
-
- /* min_th <= avg < max_th: mark the packet with pa probability */
- if (red->avg < red_cfg->max_th) {
- if (!__rte_red_drop(red_cfg, red)) {
- red->count ++;
- return 0;
- }
-
- red->count = 0;
- return 2;
- }
-
- /* max_th <= avg: always mark the packet */
- red->count = 0;
- return 1;
-}
-
-/**
- * @brief Decides if a new packet should be enqueued or dropped.
- * Updates the run-time data based on the new queue size value.
- * Based on the new queue average and the RED configuration parameters,
- * gives a verdict on whether to enqueue or drop the packet.
- *
- * @param [in] config pointer to a RED configuration parameter structure
- * @param [in,out] data pointer to RED runtime data
- * @param [in] q updated queue size in packets
- * @param [in] time current time stamp
- *
- * @return Operation status
- * @retval 0 enqueue the packet
- * @retval 1 drop the packet based on max threshold criteria
- * @retval 2 drop the packet based on mark probability criteria
- */
-static inline int
-rte_red_enqueue(const struct rte_red_config *red_cfg,
- struct rte_red *red,
- const unsigned q,
- const uint64_t time)
-{
- RTE_RED_ASSERT(red_cfg != NULL);
- RTE_RED_ASSERT(red != NULL);
-
- if (q != 0) {
- return rte_red_enqueue_nonempty(red_cfg, red, q);
- } else {
- return rte_red_enqueue_empty(red_cfg, red, time);
- }
-}
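A minimal per-queue usage sketch with hypothetical thresholds (all within the bounds enforced by rte_red_config_init()); the queue length q and the byte-unit time stamp are assumed to be maintained by the caller:

    struct rte_red_config cfg;
    struct rte_red red;

    /* configuration time: thresholds in packets, wq = 1/2^9 */
    if (rte_red_config_init(&cfg, 9 /* wq_log2 */, 32 /* min_th */,
            128 /* max_th */, 10 /* maxp_inv */) != 0) {
        /* one of the parameter bounds was violated */
    }
    rte_red_rt_data_init(&red);

    /* per packet: q is the current queue length, time the scheduler clock */
    int verdict = rte_red_enqueue(&cfg, &red, q, time);
    /* 0: enqueue, 1: drop (max threshold), 2: drop (mark probability) */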
-
-/**
- * @brief Callback to record the time at which the queue became empty
- *
- * @param [in,out] data pointer to RED runtime data
- * @param [in] time current time stamp
- */
-static inline void
-rte_red_mark_queue_empty(struct rte_red *red, const uint64_t time)
-{
- red->q_time = time;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __RTE_RED_H_INCLUDED__ */
diff --git a/src/dpdk_lib18/librte_sched/rte_sched.c b/src/dpdk_lib18/librte_sched/rte_sched.c
deleted file mode 100755
index 95dee273..00000000
--- a/src/dpdk_lib18/librte_sched/rte_sched.c
+++ /dev/null
@@ -1,2150 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <string.h>
-
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_memory.h>
-#include <rte_malloc.h>
-#include <rte_cycles.h>
-#include <rte_prefetch.h>
-#include <rte_branch_prediction.h>
-#include <rte_mbuf.h>
-
-#include "rte_sched.h"
-#include "rte_bitmap.h"
-#include "rte_sched_common.h"
-#include "rte_approx.h"
-
-#ifdef __INTEL_COMPILER
-#pragma warning(disable:2259) /* conversion may lose significant bits */
-#endif
-
-#ifndef RTE_SCHED_DEBUG
-#define RTE_SCHED_DEBUG 0
-#endif
-
-#ifndef RTE_SCHED_OPTIMIZATIONS
-#define RTE_SCHED_OPTIMIZATIONS 0
-#endif
-
-#if RTE_SCHED_OPTIMIZATIONS
-#include <immintrin.h>
-#endif
-
-#define RTE_SCHED_ENQUEUE 1
-
-#define RTE_SCHED_TS 1
-
-#if RTE_SCHED_TS == 0 /* Infinite credits. Traffic shaping disabled. */
-#define RTE_SCHED_TS_CREDITS_UPDATE 0
-#define RTE_SCHED_TS_CREDITS_CHECK 0
-#else /* Real Credits. Full traffic shaping implemented. */
-#define RTE_SCHED_TS_CREDITS_UPDATE 1
-#define RTE_SCHED_TS_CREDITS_CHECK 1
-#endif
-
-#ifndef RTE_SCHED_TB_RATE_CONFIG_ERR
-#define RTE_SCHED_TB_RATE_CONFIG_ERR (1e-7)
-#endif
-
-#define RTE_SCHED_WRR 1
-
-#ifndef RTE_SCHED_WRR_SHIFT
-#define RTE_SCHED_WRR_SHIFT 3
-#endif
-
-#ifndef RTE_SCHED_PORT_N_GRINDERS
-#define RTE_SCHED_PORT_N_GRINDERS 8
-#endif
-#if (RTE_SCHED_PORT_N_GRINDERS == 0) || (RTE_SCHED_PORT_N_GRINDERS & (RTE_SCHED_PORT_N_GRINDERS - 1))
-#error Number of grinders must be non-zero and a power of 2
-#endif
-#if (RTE_SCHED_OPTIMIZATIONS && (RTE_SCHED_PORT_N_GRINDERS != 8))
-#error Number of grinders must be 8 when RTE_SCHED_OPTIMIZATIONS is set
-#endif
-
-#define RTE_SCHED_GRINDER_PCACHE_SIZE (64 / RTE_SCHED_QUEUES_PER_PIPE)
-
-#define RTE_SCHED_PIPE_INVALID UINT32_MAX
-
-#define RTE_SCHED_BMP_POS_INVALID UINT32_MAX
-
-struct rte_sched_subport {
- /* Token bucket (TB) */
- uint64_t tb_time; /* time of last update */
- uint32_t tb_period;
- uint32_t tb_credits_per_period;
- uint32_t tb_size;
- uint32_t tb_credits;
-
- /* Traffic classes (TCs) */
- uint64_t tc_time; /* time of next update */
- uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
- uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
- uint32_t tc_period;
-
- /* TC oversubscription */
- uint32_t tc_ov_wm;
- uint32_t tc_ov_wm_min;
- uint32_t tc_ov_wm_max;
- uint8_t tc_ov_period_id;
- uint8_t tc_ov;
- uint32_t tc_ov_n;
- double tc_ov_rate;
-
- /* Statistics */
- struct rte_sched_subport_stats stats;
-};
-
-struct rte_sched_pipe_profile {
- /* Token bucket (TB) */
- uint32_t tb_period;
- uint32_t tb_credits_per_period;
- uint32_t tb_size;
-
- /* Pipe traffic classes */
- uint32_t tc_period;
- uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
- uint8_t tc_ov_weight;
-
- /* Pipe queues */
- uint8_t wrr_cost[RTE_SCHED_QUEUES_PER_PIPE];
-};
-
-struct rte_sched_pipe {
- /* Token bucket (TB) */
- uint64_t tb_time; /* time of last update */
- uint32_t tb_credits;
-
- /* Pipe profile and flags */
- uint32_t profile;
-
- /* Traffic classes (TCs) */
- uint64_t tc_time; /* time of next update */
- uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
-
- /* Weighted Round Robin (WRR) */
- uint8_t wrr_tokens[RTE_SCHED_QUEUES_PER_PIPE];
-
- /* TC oversubscription */
- uint32_t tc_ov_credits;
- uint8_t tc_ov_period_id;
- uint8_t reserved[3];
-} __rte_cache_aligned;
-
-struct rte_sched_queue {
- uint16_t qw;
- uint16_t qr;
-};
-
-struct rte_sched_queue_extra {
- struct rte_sched_queue_stats stats;
-#ifdef RTE_SCHED_RED
- struct rte_red red;
-#endif
-};
-
-enum grinder_state {
- e_GRINDER_PREFETCH_PIPE = 0,
- e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
- e_GRINDER_PREFETCH_MBUF,
- e_GRINDER_READ_MBUF
-};
-
-struct rte_sched_grinder {
- /* Pipe cache */
- uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
- uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
- uint32_t pcache_w;
- uint32_t pcache_r;
-
- /* Current pipe */
- enum grinder_state state;
- uint32_t productive;
- uint32_t pindex;
- struct rte_sched_subport *subport;
- struct rte_sched_pipe *pipe;
- struct rte_sched_pipe_profile *pipe_params;
-
- /* TC cache */
- uint8_t tccache_qmask[4];
- uint32_t tccache_qindex[4];
- uint32_t tccache_w;
- uint32_t tccache_r;
-
- /* Current TC */
- uint32_t tc_index;
- struct rte_sched_queue *queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
- struct rte_mbuf **qbase[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
- uint32_t qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
- uint16_t qsize;
- uint32_t qmask;
- uint32_t qpos;
- struct rte_mbuf *pkt;
-
- /* WRR */
- uint16_t wrr_tokens[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
- uint16_t wrr_mask[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
- uint8_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
-};
-
-struct rte_sched_port {
- /* User parameters */
- uint32_t n_subports_per_port;
- uint32_t n_pipes_per_subport;
- uint32_t rate;
- uint32_t mtu;
- uint32_t frame_overhead;
- uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
- uint32_t n_pipe_profiles;
- uint32_t pipe_tc3_rate_max;
-#ifdef RTE_SCHED_RED
- struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][e_RTE_METER_COLORS];
-#endif
-
- /* Timing */
- uint64_t time_cpu_cycles; /* Current CPU time measured in CPU cycles */
- uint64_t time_cpu_bytes; /* Current CPU time measured in bytes */
- uint64_t time; /* Current NIC TX time measured in bytes */
- double cycles_per_byte; /* CPU cycles per byte */
-
- /* Scheduling loop detection */
- uint32_t pipe_loop;
- uint32_t pipe_exhaustion;
-
- /* Bitmap */
- struct rte_bitmap *bmp;
- uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;
-
- /* Grinders */
- struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
- uint32_t busy_grinders;
- struct rte_mbuf **pkts_out;
- uint32_t n_pkts_out;
-
- /* Queue base calculation */
- uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
- uint32_t qsize_sum;
-
- /* Large data structures */
- struct rte_sched_subport *subport;
- struct rte_sched_pipe *pipe;
- struct rte_sched_queue *queue;
- struct rte_sched_queue_extra *queue_extra;
- struct rte_sched_pipe_profile *pipe_profiles;
- uint8_t *bmp_array;
- struct rte_mbuf **queue_array;
- uint8_t memory[0] __rte_cache_aligned;
-} __rte_cache_aligned;
-
-enum rte_sched_port_array {
- e_RTE_SCHED_PORT_ARRAY_SUBPORT = 0,
- e_RTE_SCHED_PORT_ARRAY_PIPE,
- e_RTE_SCHED_PORT_ARRAY_QUEUE,
- e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA,
- e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES,
- e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY,
- e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY,
- e_RTE_SCHED_PORT_ARRAY_TOTAL,
-};
-
-#ifdef RTE_SCHED_COLLECT_STATS
-
-static inline uint32_t
-rte_sched_port_queues_per_subport(struct rte_sched_port *port)
-{
- return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport;
-}
-
-#endif
-
-static inline uint32_t
-rte_sched_port_queues_per_port(struct rte_sched_port *port)
-{
- return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport * port->n_subports_per_port;
-}
-
-static int
-rte_sched_port_check_params(struct rte_sched_port_params *params)
-{
- uint32_t i, j;
-
- if (params == NULL) {
- return -1;
- }
-
- /* socket */
- if ((params->socket < 0) || (params->socket >= RTE_MAX_NUMA_NODES)) {
- return -3;
- }
-
- /* rate */
- if (params->rate == 0) {
- return -4;
- }
-
- /* mtu */
- if (params->mtu == 0) {
- return -5;
- }
-
- /* n_subports_per_port: non-zero, power of 2 */
- if ((params->n_subports_per_port == 0) || (!rte_is_power_of_2(params->n_subports_per_port))) {
- return -6;
- }
-
- /* n_pipes_per_subport: non-zero, power of 2 */
- if ((params->n_pipes_per_subport == 0) || (!rte_is_power_of_2(params->n_pipes_per_subport))) {
- return -7;
- }
-
- /* qsize: non-zero, power of 2, no bigger than 32K (due to 16-bit read/write pointers) */
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
- uint16_t qsize = params->qsize[i];
-
- if ((qsize == 0) || (!rte_is_power_of_2(qsize))) {
- return -8;
- }
- }
-
- /* pipe_profiles and n_pipe_profiles */
- if ((params->pipe_profiles == NULL) ||
- (params->n_pipe_profiles == 0) ||
- (params->n_pipe_profiles > RTE_SCHED_PIPE_PROFILES_PER_PORT)) {
- return -9;
- }
-
- for (i = 0; i < params->n_pipe_profiles; i ++) {
- struct rte_sched_pipe_params *p = params->pipe_profiles + i;
-
- /* TB rate: non-zero, not greater than port rate */
- if ((p->tb_rate == 0) || (p->tb_rate > params->rate)) {
- return -10;
- }
-
- /* TB size: non-zero */
- if (p->tb_size == 0) {
- return -11;
- }
-
- /* TC rate: non-zero, less than pipe rate */
- for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
- if ((p->tc_rate[j] == 0) || (p->tc_rate[j] > p->tb_rate)) {
- return -12;
- }
- }
-
- /* TC period: non-zero */
- if (p->tc_period == 0) {
- return -13;
- }
-
-#ifdef RTE_SCHED_SUBPORT_TC_OV
- /* TC3 oversubscription weight: non-zero */
- if (p->tc_ov_weight == 0) {
- return -14;
- }
-#endif
-
- /* Queue WRR weights: non-zero */
- for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j ++) {
- if (p->wrr_weights[j] == 0) {
- return -15;
- }
- }
- }
-
- return 0;
-}
-
-static uint32_t
-rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sched_port_array array)
-{
- uint32_t n_subports_per_port = params->n_subports_per_port;
- uint32_t n_pipes_per_subport = params->n_pipes_per_subport;
- uint32_t n_pipes_per_port = n_pipes_per_subport * n_subports_per_port;
- uint32_t n_queues_per_port = RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport * n_subports_per_port;
-
- uint32_t size_subport = n_subports_per_port * sizeof(struct rte_sched_subport);
- uint32_t size_pipe = n_pipes_per_port * sizeof(struct rte_sched_pipe);
- uint32_t size_queue = n_queues_per_port * sizeof(struct rte_sched_queue);
- uint32_t size_queue_extra = n_queues_per_port * sizeof(struct rte_sched_queue_extra);
- uint32_t size_pipe_profiles = RTE_SCHED_PIPE_PROFILES_PER_PORT * sizeof(struct rte_sched_pipe_profile);
- uint32_t size_bmp_array = rte_bitmap_get_memory_footprint(n_queues_per_port);
- uint32_t size_per_pipe_queue_array, size_queue_array;
-
- uint32_t base, i;
-
- size_per_pipe_queue_array = 0;
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
- size_per_pipe_queue_array += RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * params->qsize[i] * sizeof(struct rte_mbuf *);
- }
- size_queue_array = n_pipes_per_port * size_per_pipe_queue_array;
-
- base = 0;
-
- if (array == e_RTE_SCHED_PORT_ARRAY_SUBPORT) return base;
- base += RTE_CACHE_LINE_ROUNDUP(size_subport);
-
- if (array == e_RTE_SCHED_PORT_ARRAY_PIPE) return base;
- base += RTE_CACHE_LINE_ROUNDUP(size_pipe);
-
- if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE) return base;
- base += RTE_CACHE_LINE_ROUNDUP(size_queue);
-
- if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA) return base;
- base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);
-
- if (array == e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES) return base;
- base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);
-
- if (array == e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY) return base;
- base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);
-
- if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY) return base;
- base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);
-
- return base;
-}
-
-uint32_t
-rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
-{
- uint32_t size0, size1;
- int status;
-
- status = rte_sched_port_check_params(params);
- if (status != 0) {
- RTE_LOG(INFO, SCHED, "Port scheduler params check failed (%d)\n", status);
-
- return 0;
- }
-
- size0 = sizeof(struct rte_sched_port);
- size1 = rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_TOTAL);
-
- return (size0 + size1);
-}
-
-static void
-rte_sched_port_config_qsize(struct rte_sched_port *port)
-{
- /* TC 0 */
- port->qsize_add[0] = 0;
- port->qsize_add[1] = port->qsize_add[0] + port->qsize[0];
- port->qsize_add[2] = port->qsize_add[1] + port->qsize[0];
- port->qsize_add[3] = port->qsize_add[2] + port->qsize[0];
-
- /* TC 1 */
- port->qsize_add[4] = port->qsize_add[3] + port->qsize[0];
- port->qsize_add[5] = port->qsize_add[4] + port->qsize[1];
- port->qsize_add[6] = port->qsize_add[5] + port->qsize[1];
- port->qsize_add[7] = port->qsize_add[6] + port->qsize[1];
-
- /* TC 2 */
- port->qsize_add[8] = port->qsize_add[7] + port->qsize[1];
- port->qsize_add[9] = port->qsize_add[8] + port->qsize[2];
- port->qsize_add[10] = port->qsize_add[9] + port->qsize[2];
- port->qsize_add[11] = port->qsize_add[10] + port->qsize[2];
-
- /* TC 3 */
- port->qsize_add[12] = port->qsize_add[11] + port->qsize[2];
- port->qsize_add[13] = port->qsize_add[12] + port->qsize[3];
- port->qsize_add[14] = port->qsize_add[13] + port->qsize[3];
- port->qsize_add[15] = port->qsize_add[14] + port->qsize[3];
-
- port->qsize_sum = port->qsize_add[15] + port->qsize[3];
-}
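The unrolled assignments above compute a prefix sum over the 16 per-pipe queues, where queues 4i..4i+3 share size qsize[i]. An equivalent loop form (a sketch; the unrolled version avoids the division):

    uint32_t q, sum = 0;

    for (q = 0; q < 16; q++) {
        port->qsize_add[q] = sum;   /* offset of queue q in mbuf slots */
        sum += port->qsize[q / 4];  /* four queues per traffic class */
    }
    port->qsize_sum = sum;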
-
-static void
-rte_sched_port_log_pipe_profile(struct rte_sched_port *port, uint32_t i)
-{
- struct rte_sched_pipe_profile *p = port->pipe_profiles + i;
-
- RTE_LOG(INFO, SCHED, "Low level config for pipe profile %u:\n"
- "\tToken bucket: period = %u, credits per period = %u, size = %u\n"
- "\tTraffic classes: period = %u, credits per period = [%u, %u, %u, %u]\n"
- "\tTraffic class 3 oversubscription: weight = %hhu\n"
- "\tWRR cost: [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu]\n",
- i,
-
- /* Token bucket */
- p->tb_period,
- p->tb_credits_per_period,
- p->tb_size,
-
- /* Traffic classes */
- p->tc_period,
- p->tc_credits_per_period[0],
- p->tc_credits_per_period[1],
- p->tc_credits_per_period[2],
- p->tc_credits_per_period[3],
-
- /* Traffic class 3 oversubscription */
- p->tc_ov_weight,
-
- /* WRR */
- p->wrr_cost[ 0], p->wrr_cost[ 1], p->wrr_cost[ 2], p->wrr_cost[ 3],
- p->wrr_cost[ 4], p->wrr_cost[ 5], p->wrr_cost[ 6], p->wrr_cost[ 7],
- p->wrr_cost[ 8], p->wrr_cost[ 9], p->wrr_cost[10], p->wrr_cost[11],
- p->wrr_cost[12], p->wrr_cost[13], p->wrr_cost[14], p->wrr_cost[15]);
-}
-
-static inline uint64_t
-rte_sched_time_ms_to_bytes(uint32_t time_ms, uint32_t rate)
-{
- uint64_t time = time_ms;
- time = (time * rate) / 1000;
-
- return time;
-}
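The scheduler clock runs in bytes, so millisecond periods are converted through the port rate; for example, at a hypothetical rate of 1,250,000,000 bytes/s (10 GbE) a 10 ms tc_period becomes (10 * 1250000000) / 1000 = 12,500,000 bytes.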
-
-static void
-rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port, struct rte_sched_port_params *params)
-{
- uint32_t i, j;
-
- for (i = 0; i < port->n_pipe_profiles; i ++) {
- struct rte_sched_pipe_params *src = params->pipe_profiles + i;
- struct rte_sched_pipe_profile *dst = port->pipe_profiles + i;
-
- /* Token Bucket */
- if (src->tb_rate == params->rate) {
- dst->tb_credits_per_period = 1;
- dst->tb_period = 1;
- } else {
- double tb_rate = ((double) src->tb_rate) / ((double) params->rate);
- double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
-
- rte_approx(tb_rate, d, &dst->tb_credits_per_period, &dst->tb_period);
- }
- dst->tb_size = src->tb_size;
-
- /* Traffic Classes */
- dst->tc_period = (uint32_t) rte_sched_time_ms_to_bytes(src->tc_period, params->rate);
- for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
- dst->tc_credits_per_period[j] = (uint32_t) rte_sched_time_ms_to_bytes(src->tc_period, src->tc_rate[j]);
- }
-#ifdef RTE_SCHED_SUBPORT_TC_OV
- dst->tc_ov_weight = src->tc_ov_weight;
-#endif
-
- /* WRR */
- for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
- uint32_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
- uint32_t lcd, lcd1, lcd2;
- uint32_t qindex;
-
- qindex = j * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
-
- wrr_cost[0] = src->wrr_weights[qindex];
- wrr_cost[1] = src->wrr_weights[qindex + 1];
- wrr_cost[2] = src->wrr_weights[qindex + 2];
- wrr_cost[3] = src->wrr_weights[qindex + 3];
-
- lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
- lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
- lcd = rte_get_lcd(lcd1, lcd2);
-
- wrr_cost[0] = lcd / wrr_cost[0];
- wrr_cost[1] = lcd / wrr_cost[1];
- wrr_cost[2] = lcd / wrr_cost[2];
- wrr_cost[3] = lcd / wrr_cost[3];
-
- dst->wrr_cost[qindex] = (uint8_t) wrr_cost[0];
- dst->wrr_cost[qindex + 1] = (uint8_t) wrr_cost[1];
- dst->wrr_cost[qindex + 2] = (uint8_t) wrr_cost[2];
- dst->wrr_cost[qindex + 3] = (uint8_t) wrr_cost[3];
- }
-
- rte_sched_port_log_pipe_profile(port, i);
- }
-
- port->pipe_tc3_rate_max = 0;
- for (i = 0; i < port->n_pipe_profiles; i ++) {
- struct rte_sched_pipe_params *src = params->pipe_profiles + i;
- uint32_t pipe_tc3_rate = src->tc_rate[3];
-
- if (port->pipe_tc3_rate_max < pipe_tc3_rate) {
- port->pipe_tc3_rate_max = pipe_tc3_rate;
- }
- }
-}
-
-struct rte_sched_port *
-rte_sched_port_config(struct rte_sched_port_params *params)
-{
- struct rte_sched_port *port = NULL;
- uint32_t mem_size, bmp_mem_size, n_queues_per_port, i;
-
- /* Check user parameters. Determine the amount of memory to allocate */
- mem_size = rte_sched_port_get_memory_footprint(params);
- if (mem_size == 0) {
- return NULL;
- }
-
- /* Allocate memory to store the data structures */
- port = rte_zmalloc("qos_params", mem_size, RTE_CACHE_LINE_SIZE);
- if (port == NULL) {
- return NULL;
- }
-
- /* User parameters */
- port->n_subports_per_port = params->n_subports_per_port;
- port->n_pipes_per_subport = params->n_pipes_per_subport;
- port->rate = params->rate;
- port->mtu = params->mtu + params->frame_overhead;
- port->frame_overhead = params->frame_overhead;
- memcpy(port->qsize, params->qsize, sizeof(params->qsize));
- port->n_pipe_profiles = params->n_pipe_profiles;
-
-#ifdef RTE_SCHED_RED
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
- uint32_t j;
-
- for (j = 0; j < e_RTE_METER_COLORS; j++) {
- if (rte_red_config_init(&port->red_config[i][j],
- params->red_params[i][j].wq_log2,
- params->red_params[i][j].min_th,
- params->red_params[i][j].max_th,
- params->red_params[i][j].maxp_inv) != 0) {
- return NULL;
- }
- }
- }
-#endif
-
- /* Timing */
- port->time_cpu_cycles = rte_get_tsc_cycles();
- port->time_cpu_bytes = 0;
- port->time = 0;
- port->cycles_per_byte = ((double) rte_get_tsc_hz()) / ((double) params->rate);
-
- /* Scheduling loop detection */
- port->pipe_loop = RTE_SCHED_PIPE_INVALID;
- port->pipe_exhaustion = 0;
-
- /* Grinders */
- port->busy_grinders = 0;
- port->pkts_out = NULL;
- port->n_pkts_out = 0;
-
- /* Queue base calculation */
- rte_sched_port_config_qsize(port);
-
- /* Large data structures */
- port->subport = (struct rte_sched_subport *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_SUBPORT));
- port->pipe = (struct rte_sched_pipe *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_PIPE));
- port->queue = (struct rte_sched_queue *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE));
- port->queue_extra = (struct rte_sched_queue_extra *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA));
- port->pipe_profiles = (struct rte_sched_pipe_profile *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES));
- port->bmp_array = port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY);
- port->queue_array = (struct rte_mbuf **) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY));
-
- /* Pipe profile table */
- rte_sched_port_config_pipe_profile_table(port, params);
-
- /* Bitmap */
- n_queues_per_port = rte_sched_port_queues_per_port(port);
- bmp_mem_size = rte_bitmap_get_memory_footprint(n_queues_per_port);
- port->bmp = rte_bitmap_init(n_queues_per_port, port->bmp_array, bmp_mem_size);
- if (port->bmp == NULL) {
- RTE_LOG(INFO, SCHED, "Bitmap init error\n");
- return NULL;
- }
- for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i ++) {
- port->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
- }
-
- return port;
-}
-
-void
-rte_sched_port_free(struct rte_sched_port *port)
-{
- /* Check user parameters */
- if (port == NULL){
- return;
- }
-
- rte_bitmap_free(port->bmp);
- rte_free(port);
-}
-
-static void
-rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
-{
- struct rte_sched_subport *s = port->subport + i;
-
- RTE_LOG(INFO, SCHED, "Low level config for subport %u:\n"
- "\tToken bucket: period = %u, credits per period = %u, size = %u\n"
- "\tTraffic classes: period = %u, credits per period = [%u, %u, %u, %u]\n"
- "\tTraffic class 3 oversubscription: wm min = %u, wm max = %u\n",
- i,
-
- /* Token bucket */
- s->tb_period,
- s->tb_credits_per_period,
- s->tb_size,
-
- /* Traffic classes */
- s->tc_period,
- s->tc_credits_per_period[0],
- s->tc_credits_per_period[1],
- s->tc_credits_per_period[2],
- s->tc_credits_per_period[3],
-
- /* Traffic class 3 oversubscription */
- s->tc_ov_wm_min,
- s->tc_ov_wm_max);
-}
-
-int
-rte_sched_subport_config(struct rte_sched_port *port,
- uint32_t subport_id,
- struct rte_sched_subport_params *params)
-{
- struct rte_sched_subport *s;
- uint32_t i;
-
- /* Check user parameters */
- if ((port == NULL) ||
- (subport_id >= port->n_subports_per_port) ||
- (params == NULL)) {
- return -1;
- }
-
- if ((params->tb_rate == 0) || (params->tb_rate > port->rate)) {
- return -2;
- }
-
- if (params->tb_size == 0) {
- return -3;
- }
-
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
- if ((params->tc_rate[i] == 0) || (params->tc_rate[i] > params->tb_rate)) {
- return -4;
- }
- }
-
- if (params->tc_period == 0) {
- return -5;
- }
-
- s = port->subport + subport_id;
-
- /* Token Bucket (TB) */
- if (params->tb_rate == port->rate) {
- s->tb_credits_per_period = 1;
- s->tb_period = 1;
- } else {
- double tb_rate = ((double) params->tb_rate) / ((double) port->rate);
- double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
-
- rte_approx(tb_rate, d, &s->tb_credits_per_period, &s->tb_period);
- }
- s->tb_size = params->tb_size;
- s->tb_time = port->time;
- s->tb_credits = s->tb_size / 2;
-
- /* Traffic Classes (TCs) */
- s->tc_period = (uint32_t) rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
- s->tc_credits_per_period[i] = (uint32_t) rte_sched_time_ms_to_bytes(params->tc_period, params->tc_rate[i]);
- }
- s->tc_time = port->time + s->tc_period;
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
- s->tc_credits[i] = s->tc_credits_per_period[i];
- }
-
-#ifdef RTE_SCHED_SUBPORT_TC_OV
- /* TC oversubscription */
- s->tc_ov_wm_min = port->mtu;
- s->tc_ov_wm_max = (uint32_t) rte_sched_time_ms_to_bytes(params->tc_period, port->pipe_tc3_rate_max);
- s->tc_ov_wm = s->tc_ov_wm_max;
- s->tc_ov_period_id = 0;
- s->tc_ov = 0;
- s->tc_ov_n = 0;
- s->tc_ov_rate = 0;
-#endif
-
- rte_sched_port_log_subport_config(port, subport_id);
-
- return 0;
-}
-
-int
-rte_sched_pipe_config(struct rte_sched_port *port,
- uint32_t subport_id,
- uint32_t pipe_id,
- int32_t pipe_profile)
-{
- struct rte_sched_subport *s;
- struct rte_sched_pipe *p;
- struct rte_sched_pipe_profile *params;
- uint32_t deactivate, profile, i;
-
- /* Check user parameters */
- profile = (uint32_t) pipe_profile;
- deactivate = (pipe_profile < 0);
- if ((port == NULL) ||
- (subport_id >= port->n_subports_per_port) ||
- (pipe_id >= port->n_pipes_per_subport) ||
- ((!deactivate) && (profile >= port->n_pipe_profiles))) {
- return -1;
- }
-
- /* Check that subport configuration is valid */
- s = port->subport + subport_id;
- if (s->tb_period == 0) {
- return -2;
- }
-
- p = port->pipe + (subport_id * port->n_pipes_per_subport + pipe_id);
-
- /* Handle the case when pipe already has a valid configuration */
- if (p->tb_time) {
- params = port->pipe_profiles + p->profile;
-
-#ifdef RTE_SCHED_SUBPORT_TC_OV
- double subport_tc3_rate = ((double) s->tc_credits_per_period[3]) / ((double) s->tc_period);
- double pipe_tc3_rate = ((double) params->tc_credits_per_period[3]) / ((double) params->tc_period);
- uint32_t tc3_ov = s->tc_ov;
-
- /* Unplug pipe from its subport */
- s->tc_ov_n -= params->tc_ov_weight;
- s->tc_ov_rate -= pipe_tc3_rate;
- s->tc_ov = s->tc_ov_rate > subport_tc3_rate;
-
- if (s->tc_ov != tc3_ov) {
- RTE_LOG(INFO, SCHED, "Subport %u TC3 oversubscription is OFF (%.4lf >= %.4lf)\n",
- subport_id, subport_tc3_rate, s->tc_ov_rate);
- }
-#endif
-
- /* Reset the pipe */
- memset(p, 0, sizeof(struct rte_sched_pipe));
- }
-
- if (deactivate) {
- return 0;
- }
-
- /* Apply the new pipe configuration */
- p->profile = profile;
- params = port->pipe_profiles + p->profile;
-
- /* Token Bucket (TB) */
- p->tb_time = port->time;
- p->tb_credits = params->tb_size / 2;
-
- /* Traffic Classes (TCs) */
- p->tc_time = port->time + params->tc_period;
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
- p->tc_credits[i] = params->tc_credits_per_period[i];
- }
-
-#ifdef RTE_SCHED_SUBPORT_TC_OV
- {
- /* Subport TC3 oversubscription */
- double subport_tc3_rate = ((double) s->tc_credits_per_period[3]) / ((double) s->tc_period);
- double pipe_tc3_rate = ((double) params->tc_credits_per_period[3]) / ((double) params->tc_period);
- uint32_t tc3_ov = s->tc_ov;
-
- s->tc_ov_n += params->tc_ov_weight;
- s->tc_ov_rate += pipe_tc3_rate;
- s->tc_ov = s->tc_ov_rate > subport_tc3_rate;
-
- if (s->tc_ov != tc3_ov) {
- RTE_LOG(INFO, SCHED, "Subport %u TC3 oversubscription is ON (%.4lf < %.4lf)\n",
- subport_id, subport_tc3_rate, s->tc_ov_rate);
- }
- p->tc_ov_period_id = s->tc_ov_period_id;
- p->tc_ov_credits = s->tc_ov_wm;
- }
-#endif
-
- return 0;
-}
-
-int
-rte_sched_subport_read_stats(struct rte_sched_port *port,
- uint32_t subport_id,
- struct rte_sched_subport_stats *stats,
- uint32_t *tc_ov)
-{
- struct rte_sched_subport *s;
-
- /* Check user parameters */
- if ((port == NULL) ||
- (subport_id >= port->n_subports_per_port) ||
- (stats == NULL) ||
- (tc_ov == NULL)) {
- return -1;
- }
- s = port->subport + subport_id;
-
- /* Copy subport stats and clear */
- memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
- memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));
-
- /* Subport TC oversubscription status */
- *tc_ov = s->tc_ov;
-
- return 0;
-}
-
-int
-rte_sched_queue_read_stats(struct rte_sched_port *port,
- uint32_t queue_id,
- struct rte_sched_queue_stats *stats,
- uint16_t *qlen)
-{
- struct rte_sched_queue *q;
- struct rte_sched_queue_extra *qe;
-
- /* Check user parameters */
- if ((port == NULL) ||
- (queue_id >= rte_sched_port_queues_per_port(port)) ||
- (stats == NULL) ||
- (qlen == NULL)) {
- return -1;
- }
- q = port->queue + queue_id;
- qe = port->queue_extra + queue_id;
-
- /* Copy queue stats and clear */
- memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats));
- memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats));
-
- /* Queue length */
- *qlen = q->qw - q->qr;
-
- return 0;
-}
-
-static inline uint32_t
-rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue)
-{
- uint32_t result;
-
- result = subport * port->n_pipes_per_subport + pipe;
- result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + traffic_class;
- result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
-
- return result;
-}
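A worked example of the resulting bit-packed index, using the 4 traffic classes x 4 queues per TC layout this file assumes and a hypothetical n_pipes_per_subport of 4096:

    qindex = ((subport * 4096 + pipe) * 4 + traffic_class) * 4 + queue

    subport 0, pipe 3, TC 2, queue 1:
      ((0 * 4096 + 3) * 4 + 2) * 4 + 1 = 57

    inverse, as used by the helpers below:
      pipe  = qindex >> 4       = 3
      tc    = (qindex >> 2) & 3 = 2
      queue = qindex & 3        = 1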
-
-static inline struct rte_mbuf **
-rte_sched_port_qbase(struct rte_sched_port *port, uint32_t qindex)
-{
- uint32_t pindex = qindex >> 4;
- uint32_t qpos = qindex & 0xF;
-
- return (port->queue_array + pindex * port->qsize_sum + port->qsize_add[qpos]);
-}
-
-static inline uint16_t
-rte_sched_port_qsize(struct rte_sched_port *port, uint32_t qindex)
-{
- uint32_t tc = (qindex >> 2) & 0x3;
-
- return port->qsize[tc];
-}
-
-#if RTE_SCHED_DEBUG
-
-static inline int
-rte_sched_port_queue_is_empty(struct rte_sched_port *port, uint32_t qindex)
-{
- struct rte_sched_queue *queue = port->queue + qindex;
-
- return (queue->qr == queue->qw);
-}
-
-static inline int
-rte_sched_port_queue_is_full(struct rte_sched_port *port, uint32_t qindex)
-{
- struct rte_sched_queue *queue = port->queue + qindex;
- uint16_t qsize = rte_sched_port_qsize(port, qindex);
- uint16_t qlen = queue->qw - queue->qr;
-
- return (qlen >= qsize);
-}
-
-#endif /* RTE_SCHED_DEBUG */
-
-#ifdef RTE_SCHED_COLLECT_STATS
-
-static inline void
-rte_sched_port_update_subport_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
-{
- struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
- uint32_t tc_index = (qindex >> 2) & 0x3;
- uint32_t pkt_len = pkt->pkt_len;
-
- s->stats.n_pkts_tc[tc_index] += 1;
- s->stats.n_bytes_tc[tc_index] += pkt_len;
-}
-
-static inline void
-rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
-{
- struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
- uint32_t tc_index = (qindex >> 2) & 0x3;
- uint32_t pkt_len = pkt->pkt_len;
-
- s->stats.n_pkts_tc_dropped[tc_index] += 1;
- s->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
-}
-
-static inline void
-rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
-{
- struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
- uint32_t pkt_len = pkt->pkt_len;
-
- qe->stats.n_pkts += 1;
- qe->stats.n_bytes += pkt_len;
-}
-
-static inline void
-rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
-{
- struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
- uint32_t pkt_len = pkt->pkt_len;
-
- qe->stats.n_pkts_dropped += 1;
- qe->stats.n_bytes_dropped += pkt_len;
-}
-
-#endif /* RTE_SCHED_COLLECT_STATS */
-
-#ifdef RTE_SCHED_RED
-
-static inline int
-rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt, uint32_t qindex, uint16_t qlen)
-{
- struct rte_sched_queue_extra *qe;
- struct rte_red_config *red_cfg;
- struct rte_red *red;
- uint32_t tc_index;
- enum rte_meter_color color;
-
- tc_index = (qindex >> 2) & 0x3;
- color = rte_sched_port_pkt_read_color(pkt);
- red_cfg = &port->red_config[tc_index][color];
-
- qe = port->queue_extra + qindex;
- red = &qe->red;
-
- return rte_red_enqueue(red_cfg, red, qlen, port->time);
-}
-
-static inline void
-rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port *port, uint32_t qindex)
-{
- struct rte_sched_queue_extra *qe;
- struct rte_red *red;
-
- qe = port->queue_extra + qindex;
- red = &qe->red;
-
- rte_red_mark_queue_empty(red, port->time);
-}
-
-#else
-
-#define rte_sched_port_red_drop(port, pkt, qindex, qlen) 0
-
-#define rte_sched_port_set_queue_empty_timestamp(port, qindex)
-
-#endif /* RTE_SCHED_RED */
-
-#if RTE_SCHED_DEBUG
-
-static inline int
-debug_pipe_is_empty(struct rte_sched_port *port, uint32_t pindex)
-{
- uint32_t qindex, i;
-
- qindex = pindex << 4;
-
- for (i = 0; i < 16; i ++){
- uint32_t queue_empty = rte_sched_port_queue_is_empty(port, qindex + i);
- uint32_t bmp_bit_clear = (rte_bitmap_get(port->bmp, qindex + i) == 0);
-
- if (queue_empty != bmp_bit_clear){
- rte_panic("Queue status mismatch for queue %u of pipe %u\n", i, pindex);
- }
-
- if (!queue_empty){
- return 0;
- }
- }
-
- return 1;
-}
-
-static inline void
-debug_check_queue_slab(struct rte_sched_port *port, uint32_t bmp_pos, uint64_t bmp_slab)
-{
- uint64_t mask;
- uint32_t i, panic;
-
- if (bmp_slab == 0){
- rte_panic("Empty slab at position %u\n", bmp_pos);
- }
-
- panic = 0;
- for (i = 0, mask = 1; i < 64; i ++, mask <<= 1) {
- if (mask & bmp_slab){
- if (rte_sched_port_queue_is_empty(port, bmp_pos + i)) {
- printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i);
- panic = 1;
- }
- }
- }
-
- if (panic){
-		rte_panic("Empty queues in slab 0x%" PRIx64 " starting at position %u\n",
-			bmp_slab, bmp_pos);
- }
-}
-
-#endif /* RTE_SCHED_DEBUG */
-
-static inline uint32_t
-rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_port *port, struct rte_mbuf *pkt)
-{
- struct rte_sched_queue *q;
-#ifdef RTE_SCHED_COLLECT_STATS
- struct rte_sched_queue_extra *qe;
-#endif
- uint32_t subport, pipe, traffic_class, queue, qindex;
-
- rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe, &traffic_class, &queue);
-
- qindex = rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
- q = port->queue + qindex;
- rte_prefetch0(q);
-#ifdef RTE_SCHED_COLLECT_STATS
- qe = port->queue_extra + qindex;
- rte_prefetch0(qe);
-#endif
-
- return qindex;
-}
-
-static inline void
-rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf **qbase)
-{
- struct rte_sched_queue *q;
- struct rte_mbuf **q_qw;
- uint16_t qsize;
-
- q = port->queue + qindex;
- qsize = rte_sched_port_qsize(port, qindex);
- q_qw = qbase + (q->qw & (qsize - 1));
-
- rte_prefetch0(q_qw);
- rte_bitmap_prefetch0(port->bmp, qindex);
-}
-
-static inline int
-rte_sched_port_enqueue_qwa(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf **qbase, struct rte_mbuf *pkt)
-{
- struct rte_sched_queue *q;
- uint16_t qsize;
- uint16_t qlen;
-
- q = port->queue + qindex;
- qsize = rte_sched_port_qsize(port, qindex);
- qlen = q->qw - q->qr;
-
-	/* Drop the packet (and update drop stats) when queue is full */
-	if (unlikely(rte_sched_port_red_drop(port, pkt, qindex, qlen) || (qlen >= qsize))) {
-#ifdef RTE_SCHED_COLLECT_STATS
-		/* Update drop stats while pkt is still valid, then free it */
-		rte_sched_port_update_subport_stats_on_drop(port, qindex, pkt);
-		rte_sched_port_update_queue_stats_on_drop(port, qindex, pkt);
-#endif
-		rte_pktmbuf_free(pkt);
-		return 0;
-	}
-
- /* Enqueue packet */
- qbase[q->qw & (qsize - 1)] = pkt;
- q->qw ++;
-
- /* Activate queue in the port bitmap */
- rte_bitmap_set(port->bmp, qindex);
-
- /* Statistics */
-#ifdef RTE_SCHED_COLLECT_STATS
- rte_sched_port_update_subport_stats(port, qindex, pkt);
- rte_sched_port_update_queue_stats(port, qindex, pkt);
-#endif
-
- return 1;
-}
-
-#if RTE_SCHED_ENQUEUE == 0
-
-int
-rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
-{
- uint32_t result, i;
-
- result = 0;
-
- for (i = 0; i < n_pkts; i ++) {
- struct rte_mbuf *pkt;
- struct rte_mbuf **q_base;
- uint32_t subport, pipe, traffic_class, queue, qindex;
-
- pkt = pkts[i];
-
- rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe, &traffic_class, &queue);
-
- qindex = rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
-
- q_base = rte_sched_port_qbase(port, qindex);
-
- result += rte_sched_port_enqueue_qwa(port, qindex, q_base, pkt);
- }
-
- return result;
-}
-
-#else
-
-/* The enqueue function implements a 4-level pipeline with each stage processing
- * two different packets. The purpose of using a pipeline is to hide the latency
- * of prefetching the data structures. The naming convention is presented in the
- * diagram below:
- *
- * p00 _______ p10 _______ p20 _______ p30 _______
- * ----->| |----->| |----->| |----->| |----->
- * | 0 | | 1 | | 2 | | 3 |
- * ----->|_______|----->|_______|----->|_______|----->|_______|----->
- * p01 p11 p21 p31
- *
- ***/
-int
-rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
-{
- struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21, *pkt30, *pkt31, *pkt_last;
- struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base, **q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
- uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
- uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
- uint32_t result, i;
-
- result = 0;
-
-	/* Fewer than 6 input packets available, which is not enough to feed the pipeline */
- if (unlikely(n_pkts < 6)) {
- struct rte_mbuf **q_base[5];
- uint32_t q[5];
-
- /* Prefetch the mbuf structure of each packet */
- for (i = 0; i < n_pkts; i ++) {
- rte_prefetch0(pkts[i]);
- }
-
- /* Prefetch the queue structure for each queue */
- for (i = 0; i < n_pkts; i ++) {
- q[i] = rte_sched_port_enqueue_qptrs_prefetch0(port, pkts[i]);
- }
-
- /* Prefetch the write pointer location of each queue */
- for (i = 0; i < n_pkts; i ++) {
- q_base[i] = rte_sched_port_qbase(port, q[i]);
- rte_sched_port_enqueue_qwa_prefetch0(port, q[i], q_base[i]);
- }
-
- /* Write each packet to its queue */
- for (i = 0; i < n_pkts; i ++) {
- result += rte_sched_port_enqueue_qwa(port, q[i], q_base[i], pkts[i]);
- }
-
- return result;
- }
-
- /* Feed the first 3 stages of the pipeline (6 packets needed) */
- pkt20 = pkts[0];
- pkt21 = pkts[1];
- rte_prefetch0(pkt20);
- rte_prefetch0(pkt21);
-
- pkt10 = pkts[2];
- pkt11 = pkts[3];
- rte_prefetch0(pkt10);
- rte_prefetch0(pkt11);
-
- q20 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt20);
- q21 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt21);
-
- pkt00 = pkts[4];
- pkt01 = pkts[5];
- rte_prefetch0(pkt00);
- rte_prefetch0(pkt01);
-
- q10 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt10);
- q11 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt11);
-
- q20_base = rte_sched_port_qbase(port, q20);
- q21_base = rte_sched_port_qbase(port, q21);
- rte_sched_port_enqueue_qwa_prefetch0(port, q20, q20_base);
- rte_sched_port_enqueue_qwa_prefetch0(port, q21, q21_base);
-
- /* Run the pipeline */
- for (i = 6; i < (n_pkts & (~1)); i += 2) {
- /* Propagate stage inputs */
- pkt30 = pkt20;
- pkt31 = pkt21;
- pkt20 = pkt10;
- pkt21 = pkt11;
- pkt10 = pkt00;
- pkt11 = pkt01;
- q30 = q20;
- q31 = q21;
- q20 = q10;
- q21 = q11;
- q30_base = q20_base;
- q31_base = q21_base;
-
- /* Stage 0: Get packets in */
- pkt00 = pkts[i];
- pkt01 = pkts[i + 1];
- rte_prefetch0(pkt00);
- rte_prefetch0(pkt01);
-
- /* Stage 1: Prefetch queue structure storing queue pointers */
- q10 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt10);
- q11 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt11);
-
- /* Stage 2: Prefetch queue write location */
- q20_base = rte_sched_port_qbase(port, q20);
- q21_base = rte_sched_port_qbase(port, q21);
- rte_sched_port_enqueue_qwa_prefetch0(port, q20, q20_base);
- rte_sched_port_enqueue_qwa_prefetch0(port, q21, q21_base);
-
- /* Stage 3: Write packet to queue and activate queue */
- r30 = rte_sched_port_enqueue_qwa(port, q30, q30_base, pkt30);
- r31 = rte_sched_port_enqueue_qwa(port, q31, q31_base, pkt31);
- result += r30 + r31;
- }
-
- /* Drain the pipeline (exactly 6 packets). Handle the last packet in the case
- of an odd number of input packets. */
- pkt_last = pkts[n_pkts - 1];
- rte_prefetch0(pkt_last);
-
- q00 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt00);
- q01 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt01);
-
- q10_base = rte_sched_port_qbase(port, q10);
- q11_base = rte_sched_port_qbase(port, q11);
- rte_sched_port_enqueue_qwa_prefetch0(port, q10, q10_base);
- rte_sched_port_enqueue_qwa_prefetch0(port, q11, q11_base);
-
- r20 = rte_sched_port_enqueue_qwa(port, q20, q20_base, pkt20);
- r21 = rte_sched_port_enqueue_qwa(port, q21, q21_base, pkt21);
- result += r20 + r21;
-
- q_last = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt_last);
-
- q00_base = rte_sched_port_qbase(port, q00);
- q01_base = rte_sched_port_qbase(port, q01);
- rte_sched_port_enqueue_qwa_prefetch0(port, q00, q00_base);
- rte_sched_port_enqueue_qwa_prefetch0(port, q01, q01_base);
-
- r10 = rte_sched_port_enqueue_qwa(port, q10, q10_base, pkt10);
- r11 = rte_sched_port_enqueue_qwa(port, q11, q11_base, pkt11);
- result += r10 + r11;
-
- q_last_base = rte_sched_port_qbase(port, q_last);
- rte_sched_port_enqueue_qwa_prefetch0(port, q_last, q_last_base);
-
- r00 = rte_sched_port_enqueue_qwa(port, q00, q00_base, pkt00);
- r01 = rte_sched_port_enqueue_qwa(port, q01, q01_base, pkt01);
- result += r00 + r01;
-
- if (n_pkts & 1) {
- r_last = rte_sched_port_enqueue_qwa(port, q_last, q_last_base, pkt_last);
- result += r_last;
- }
-
- return result;
-}
-
-#endif /* RTE_SCHED_ENQUEUE */
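
To make the staging above concrete: with 8 input packets, the priming code loads pairs {0,1}, {2,3} and {4,5} into stages 2, 1 and 0; the steady-state loop runs once at i = 6; the drain then flushes the remaining stage pairs. A compact trace (illustrative):

    /* Illustrative trace of rte_sched_port_enqueue() for n_pkts = 8:
     *
     *   prime : stage2 <- {0,1}   stage1 <- {2,3}   stage0 <- {4,5}
     *   i = 6 : stage0 <- {6,7}, stage3 writes {0,1} to their queues
     *   drain : writes {2,3}, then {4,5}, then {6,7}
     */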
-
-#if RTE_SCHED_TS_CREDITS_UPDATE == 0
-
-#define grinder_credits_update(port, pos)
-
-#elif !defined(RTE_SCHED_SUBPORT_TC_OV)
-
-static inline void
-grinder_credits_update(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- struct rte_sched_subport *subport = grinder->subport;
- struct rte_sched_pipe *pipe = grinder->pipe;
- struct rte_sched_pipe_profile *params = grinder->pipe_params;
- uint64_t n_periods;
-
- /* Subport TB */
- n_periods = (port->time - subport->tb_time) / subport->tb_period;
- subport->tb_credits += n_periods * subport->tb_credits_per_period;
- subport->tb_credits = rte_sched_min_val_2_u32(subport->tb_credits, subport->tb_size);
- subport->tb_time += n_periods * subport->tb_period;
-
- /* Pipe TB */
- n_periods = (port->time - pipe->tb_time) / params->tb_period;
- pipe->tb_credits += n_periods * params->tb_credits_per_period;
- pipe->tb_credits = rte_sched_min_val_2_u32(pipe->tb_credits, params->tb_size);
- pipe->tb_time += n_periods * params->tb_period;
-
- /* Subport TCs */
- if (unlikely(port->time >= subport->tc_time)) {
- subport->tc_credits[0] = subport->tc_credits_per_period[0];
- subport->tc_credits[1] = subport->tc_credits_per_period[1];
- subport->tc_credits[2] = subport->tc_credits_per_period[2];
- subport->tc_credits[3] = subport->tc_credits_per_period[3];
- subport->tc_time = port->time + subport->tc_period;
- }
-
- /* Pipe TCs */
- if (unlikely(port->time >= pipe->tc_time)) {
- pipe->tc_credits[0] = params->tc_credits_per_period[0];
- pipe->tc_credits[1] = params->tc_credits_per_period[1];
- pipe->tc_credits[2] = params->tc_credits_per_period[2];
- pipe->tc_credits[3] = params->tc_credits_per_period[3];
- pipe->tc_time = port->time + params->tc_period;
- }
-}
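
The token bucket refill above works in whole periods, so any remainder of port time carries over to the next update. A worked example with illustrative numbers:

    /* Illustrative only: whole-period refill as in grinder_credits_update() */
    uint64_t time = 137, tb_time = 100, tb_period = 10;
    uint32_t tb_credits = 40, tb_credits_per_period = 5, tb_size = 1000;

    uint64_t n_periods = (time - tb_time) / tb_period;   /* 37 / 10 = 3        */
    tb_credits += n_periods * tb_credits_per_period;     /* 40 + 15 = 55       */
    if (tb_credits > tb_size)
        tb_credits = tb_size;                            /* cap at bucket size */
    tb_time += n_periods * tb_period;                    /* 130; 7 bytes carry */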
-
-#else
-
-static inline uint32_t
-grinder_tc_ov_credits_update(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- struct rte_sched_subport *subport = grinder->subport;
- uint32_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
- uint32_t tc_ov_consumption_max;
- uint32_t tc_ov_wm = subport->tc_ov_wm;
-
- if (subport->tc_ov == 0) {
- return subport->tc_ov_wm_max;
- }
-
- tc_ov_consumption[0] = subport->tc_credits_per_period[0] - subport->tc_credits[0];
- tc_ov_consumption[1] = subport->tc_credits_per_period[1] - subport->tc_credits[1];
- tc_ov_consumption[2] = subport->tc_credits_per_period[2] - subport->tc_credits[2];
- tc_ov_consumption[3] = subport->tc_credits_per_period[3] - subport->tc_credits[3];
-
- tc_ov_consumption_max = subport->tc_credits_per_period[3] -
- (tc_ov_consumption[0] + tc_ov_consumption[1] + tc_ov_consumption[2]);
-
- if (tc_ov_consumption[3] > (tc_ov_consumption_max - port->mtu)) {
- tc_ov_wm -= tc_ov_wm >> 7;
- if (tc_ov_wm < subport->tc_ov_wm_min) {
- tc_ov_wm = subport->tc_ov_wm_min;
- }
- return tc_ov_wm;
- }
-
- tc_ov_wm += (tc_ov_wm >> 7) + 1;
- if (tc_ov_wm > subport->tc_ov_wm_max) {
- tc_ov_wm = subport->tc_ov_wm_max;
- }
- return tc_ov_wm;
-}
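
The watermark therefore adapts in steps of roughly 1/128 of its current value: it backs off while best-effort (TC 3) consumption leaves less than an MTU of headroom, and grows again otherwise. For example (illustrative):

    /* Illustrative only: the ~1/128 adaptive step used above */
    uint32_t tc_ov_wm = 1024;
    uint32_t backed_off = tc_ov_wm - (tc_ov_wm >> 7);      /* 1024 - 8 = 1016 */
    uint32_t grown      = tc_ov_wm + (tc_ov_wm >> 7) + 1;  /* 1024 + 9 = 1033 */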
-
-static inline void
-grinder_credits_update(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- struct rte_sched_subport *subport = grinder->subport;
- struct rte_sched_pipe *pipe = grinder->pipe;
- struct rte_sched_pipe_profile *params = grinder->pipe_params;
- uint64_t n_periods;
-
- /* Subport TB */
- n_periods = (port->time - subport->tb_time) / subport->tb_period;
- subport->tb_credits += n_periods * subport->tb_credits_per_period;
- subport->tb_credits = rte_sched_min_val_2_u32(subport->tb_credits, subport->tb_size);
- subport->tb_time += n_periods * subport->tb_period;
-
- /* Pipe TB */
- n_periods = (port->time - pipe->tb_time) / params->tb_period;
- pipe->tb_credits += n_periods * params->tb_credits_per_period;
- pipe->tb_credits = rte_sched_min_val_2_u32(pipe->tb_credits, params->tb_size);
- pipe->tb_time += n_periods * params->tb_period;
-
- /* Subport TCs */
- if (unlikely(port->time >= subport->tc_time)) {
- subport->tc_ov_wm = grinder_tc_ov_credits_update(port, pos);
-
- subport->tc_credits[0] = subport->tc_credits_per_period[0];
- subport->tc_credits[1] = subport->tc_credits_per_period[1];
- subport->tc_credits[2] = subport->tc_credits_per_period[2];
- subport->tc_credits[3] = subport->tc_credits_per_period[3];
-
- subport->tc_time = port->time + subport->tc_period;
- subport->tc_ov_period_id ++;
- }
-
- /* Pipe TCs */
- if (unlikely(port->time >= pipe->tc_time)) {
- pipe->tc_credits[0] = params->tc_credits_per_period[0];
- pipe->tc_credits[1] = params->tc_credits_per_period[1];
- pipe->tc_credits[2] = params->tc_credits_per_period[2];
- pipe->tc_credits[3] = params->tc_credits_per_period[3];
- pipe->tc_time = port->time + params->tc_period;
- }
-
- /* Pipe TCs - Oversubscription */
- if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) {
- pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight;
-
- pipe->tc_ov_period_id = subport->tc_ov_period_id;
- }
-}
-
-#endif /* RTE_SCHED_TS_CREDITS_UPDATE, RTE_SCHED_SUBPORT_TC_OV */
-
-#if RTE_SCHED_TS_CREDITS_CHECK
-
-#ifndef RTE_SCHED_SUBPORT_TC_OV
-
-static inline int
-grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- struct rte_sched_subport *subport = grinder->subport;
- struct rte_sched_pipe *pipe = grinder->pipe;
- struct rte_mbuf *pkt = grinder->pkt;
- uint32_t tc_index = grinder->tc_index;
- uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
- uint32_t subport_tb_credits = subport->tb_credits;
- uint32_t subport_tc_credits = subport->tc_credits[tc_index];
- uint32_t pipe_tb_credits = pipe->tb_credits;
- uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
- int enough_credits;
-
- /* Check queue credits */
- enough_credits = (pkt_len <= subport_tb_credits) &&
- (pkt_len <= subport_tc_credits) &&
- (pkt_len <= pipe_tb_credits) &&
- (pkt_len <= pipe_tc_credits);
-
- if (!enough_credits) {
- return 0;
- }
-
- /* Update port credits */
- subport->tb_credits -= pkt_len;
- subport->tc_credits[tc_index] -= pkt_len;
- pipe->tb_credits -= pkt_len;
- pipe->tc_credits[tc_index] -= pkt_len;
-
- return 1;
-}
-
-#else
-
-static inline int
-grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- struct rte_sched_subport *subport = grinder->subport;
- struct rte_sched_pipe *pipe = grinder->pipe;
- struct rte_mbuf *pkt = grinder->pkt;
- uint32_t tc_index = grinder->tc_index;
- uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
- uint32_t subport_tb_credits = subport->tb_credits;
- uint32_t subport_tc_credits = subport->tc_credits[tc_index];
- uint32_t pipe_tb_credits = pipe->tb_credits;
- uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
- uint32_t pipe_tc_ov_mask1[] = {UINT32_MAX, UINT32_MAX, UINT32_MAX, pipe->tc_ov_credits};
- uint32_t pipe_tc_ov_mask2[] = {0, 0, 0, UINT32_MAX};
- uint32_t pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
- int enough_credits;
-
- /* Check pipe and subport credits */
- enough_credits = (pkt_len <= subport_tb_credits) &&
- (pkt_len <= subport_tc_credits) &&
- (pkt_len <= pipe_tb_credits) &&
- (pkt_len <= pipe_tc_credits) &&
- (pkt_len <= pipe_tc_ov_credits);
-
- if (!enough_credits) {
- return 0;
- }
-
- /* Update pipe and subport credits */
- subport->tb_credits -= pkt_len;
- subport->tc_credits[tc_index] -= pkt_len;
- pipe->tb_credits -= pkt_len;
- pipe->tc_credits[tc_index] -= pkt_len;
- pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len;
-
- return 1;
-}
-
-#endif /* RTE_SCHED_SUBPORT_TC_OV */
-
-#endif /* RTE_SCHED_TS_CREDITS_CHECK */
-
-static inline int
-grinder_schedule(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
- struct rte_mbuf *pkt = grinder->pkt;
- uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
-
-#if RTE_SCHED_TS_CREDITS_CHECK
- if (!grinder_credits_check(port, pos)) {
- return 0;
- }
-#endif
-
- /* Advance port time */
- port->time += pkt_len;
-
- /* Send packet */
- port->pkts_out[port->n_pkts_out ++] = pkt;
- queue->qr ++;
- grinder->wrr_tokens[grinder->qpos] += pkt_len * grinder->wrr_cost[grinder->qpos];
- if (queue->qr == queue->qw) {
- uint32_t qindex = grinder->qindex[grinder->qpos];
-
- rte_bitmap_clear(port->bmp, qindex);
- grinder->qmask &= ~(1 << grinder->qpos);
- grinder->wrr_mask[grinder->qpos] = 0;
- rte_sched_port_set_queue_empty_timestamp(port, qindex);
- }
-
- /* Reset pipe loop detection */
- port->pipe_loop = RTE_SCHED_PIPE_INVALID;
- grinder->productive = 1;
-
- return 1;
-}
-
-#if RTE_SCHED_OPTIMIZATIONS
-
-static inline int
-grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
-{
- __m128i index = _mm_set1_epi32 (base_pipe);
- __m128i pipes = _mm_load_si128((__m128i *)port->grinder_base_bmp_pos);
- __m128i res = _mm_cmpeq_epi32(pipes, index);
- pipes = _mm_load_si128((__m128i *)(port->grinder_base_bmp_pos + 4));
- pipes = _mm_cmpeq_epi32(pipes, index);
- res = _mm_or_si128(res, pipes);
-
- if (_mm_testz_si128(res, res))
- return 0;
-
- return 1;
-}
-
-#else
-
-static inline int
-grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
-{
- uint32_t i;
-
- for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i ++) {
- if (port->grinder_base_bmp_pos[i] == base_pipe) {
- return 1;
- }
- }
-
- return 0;
-}
-
-#endif /* RTE_SCHED_OPTIMIZATIONS */
-
-static inline void
-grinder_pcache_populate(struct rte_sched_port *port, uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- uint16_t w[4];
-
- grinder->pcache_w = 0;
- grinder->pcache_r = 0;
-
- w[0] = (uint16_t) bmp_slab;
- w[1] = (uint16_t) (bmp_slab >> 16);
- w[2] = (uint16_t) (bmp_slab >> 32);
- w[3] = (uint16_t) (bmp_slab >> 48);
-
- grinder->pcache_qmask[grinder->pcache_w] = w[0];
- grinder->pcache_qindex[grinder->pcache_w] = bmp_pos;
- grinder->pcache_w += (w[0] != 0);
-
- grinder->pcache_qmask[grinder->pcache_w] = w[1];
- grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16;
- grinder->pcache_w += (w[1] != 0);
-
- grinder->pcache_qmask[grinder->pcache_w] = w[2];
- grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32;
- grinder->pcache_w += (w[2] != 0);
-
- grinder->pcache_qmask[grinder->pcache_w] = w[3];
- grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48;
- grinder->pcache_w += (w[3] != 0);
-}
-
-static inline void
-grinder_tccache_populate(struct rte_sched_port *port, uint32_t pos, uint32_t qindex, uint16_t qmask)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- uint8_t b[4];
-
- grinder->tccache_w = 0;
- grinder->tccache_r = 0;
-
- b[0] = (uint8_t) (qmask & 0xF);
- b[1] = (uint8_t) ((qmask >> 4) & 0xF);
- b[2] = (uint8_t) ((qmask >> 8) & 0xF);
- b[3] = (uint8_t) ((qmask >> 12) & 0xF);
-
- grinder->tccache_qmask[grinder->tccache_w] = b[0];
- grinder->tccache_qindex[grinder->tccache_w] = qindex;
- grinder->tccache_w += (b[0] != 0);
-
- grinder->tccache_qmask[grinder->tccache_w] = b[1];
- grinder->tccache_qindex[grinder->tccache_w] = qindex + 4;
- grinder->tccache_w += (b[1] != 0);
-
- grinder->tccache_qmask[grinder->tccache_w] = b[2];
- grinder->tccache_qindex[grinder->tccache_w] = qindex + 8;
- grinder->tccache_w += (b[2] != 0);
-
- grinder->tccache_qmask[grinder->tccache_w] = b[3];
- grinder->tccache_qindex[grinder->tccache_w] = qindex + 12;
- grinder->tccache_w += (b[3] != 0);
-}
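
Both populate helpers use the same branch-free compaction trick: the write pointer advances only when the extracted word or nibble is non-zero, so non-empty entries end up stored back to back. A sketch with an illustrative slab:

    /* Illustrative only: compaction as in grinder_pcache_populate() */
    uint64_t bmp_slab = 0x00FF000000030000ULL;
    uint16_t w0 = (uint16_t) bmp_slab;          /* 0x0000: w += 0, skipped */
    uint16_t w1 = (uint16_t)(bmp_slab >> 16);   /* 0x0003: kept at slot 0  */
    uint16_t w2 = (uint16_t)(bmp_slab >> 32);   /* 0x0000: skipped         */
    uint16_t w3 = (uint16_t)(bmp_slab >> 48);   /* 0x00FF: kept at slot 1  */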
-
-static inline int
-grinder_next_tc(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- struct rte_mbuf **qbase;
- uint32_t qindex;
- uint16_t qsize;
-
- if (grinder->tccache_r == grinder->tccache_w) {
- return 0;
- }
-
- qindex = grinder->tccache_qindex[grinder->tccache_r];
- qbase = rte_sched_port_qbase(port, qindex);
- qsize = rte_sched_port_qsize(port, qindex);
-
- grinder->tc_index = (qindex >> 2) & 0x3;
- grinder->qmask = grinder->tccache_qmask[grinder->tccache_r];
- grinder->qsize = qsize;
-
- grinder->qindex[0] = qindex;
- grinder->qindex[1] = qindex + 1;
- grinder->qindex[2] = qindex + 2;
- grinder->qindex[3] = qindex + 3;
-
- grinder->queue[0] = port->queue + qindex;
- grinder->queue[1] = port->queue + qindex + 1;
- grinder->queue[2] = port->queue + qindex + 2;
- grinder->queue[3] = port->queue + qindex + 3;
-
- grinder->qbase[0] = qbase;
- grinder->qbase[1] = qbase + qsize;
- grinder->qbase[2] = qbase + 2 * qsize;
- grinder->qbase[3] = qbase + 3 * qsize;
-
- grinder->tccache_r ++;
- return 1;
-}
-
-static inline int
-grinder_next_pipe(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- uint32_t pipe_qindex;
- uint16_t pipe_qmask;
-
- if (grinder->pcache_r < grinder->pcache_w) {
- pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
- pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
- grinder->pcache_r ++;
- } else {
- uint64_t bmp_slab = 0;
- uint32_t bmp_pos = 0;
-
- /* Get another non-empty pipe group */
- if (unlikely(rte_bitmap_scan(port->bmp, &bmp_pos, &bmp_slab) <= 0)) {
- return 0;
- }
-
-#if RTE_SCHED_DEBUG
- debug_check_queue_slab(port, bmp_pos, bmp_slab);
-#endif
-
- /* Return if pipe group already in one of the other grinders */
- port->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
- if (unlikely(grinder_pipe_exists(port, bmp_pos))) {
- return 0;
- }
- port->grinder_base_bmp_pos[pos] = bmp_pos;
-
- /* Install new pipe group into grinder's pipe cache */
- grinder_pcache_populate(port, pos, bmp_pos, bmp_slab);
-
- pipe_qmask = grinder->pcache_qmask[0];
- pipe_qindex = grinder->pcache_qindex[0];
- grinder->pcache_r = 1;
- }
-
- /* Install new pipe in the grinder */
- grinder->pindex = pipe_qindex >> 4;
- grinder->subport = port->subport + (grinder->pindex / port->n_pipes_per_subport);
- grinder->pipe = port->pipe + grinder->pindex;
- grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */
- grinder->productive = 0;
-
- grinder_tccache_populate(port, pos, pipe_qindex, pipe_qmask);
- grinder_next_tc(port, pos);
-
- /* Check for pipe exhaustion */
- if (grinder->pindex == port->pipe_loop) {
- port->pipe_exhaustion = 1;
- port->pipe_loop = RTE_SCHED_PIPE_INVALID;
- }
-
- return 1;
-}
-
-#if RTE_SCHED_WRR == 0
-
-#define grinder_wrr_load(a,b)
-
-#define grinder_wrr_store(a,b)
-
-static inline void
-grinder_wrr(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- uint64_t slab = grinder->qmask;
-
- if (rte_bsf64(slab, &grinder->qpos) == 0) {
- rte_panic("grinder wrr\n");
- }
-}
-
-#elif RTE_SCHED_WRR == 1
-
-static inline void
-grinder_wrr_load(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- struct rte_sched_pipe *pipe = grinder->pipe;
- struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params;
- uint32_t tc_index = grinder->tc_index;
- uint32_t qmask = grinder->qmask;
- uint32_t qindex;
-
- qindex = tc_index * 4;
-
- grinder->wrr_tokens[0] = ((uint16_t) pipe->wrr_tokens[qindex]) << RTE_SCHED_WRR_SHIFT;
- grinder->wrr_tokens[1] = ((uint16_t) pipe->wrr_tokens[qindex + 1]) << RTE_SCHED_WRR_SHIFT;
- grinder->wrr_tokens[2] = ((uint16_t) pipe->wrr_tokens[qindex + 2]) << RTE_SCHED_WRR_SHIFT;
- grinder->wrr_tokens[3] = ((uint16_t) pipe->wrr_tokens[qindex + 3]) << RTE_SCHED_WRR_SHIFT;
-
- grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF;
- grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF;
- grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF;
- grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF;
-
- grinder->wrr_cost[0] = pipe_params->wrr_cost[qindex];
- grinder->wrr_cost[1] = pipe_params->wrr_cost[qindex + 1];
- grinder->wrr_cost[2] = pipe_params->wrr_cost[qindex + 2];
- grinder->wrr_cost[3] = pipe_params->wrr_cost[qindex + 3];
-}
-
-static inline void
-grinder_wrr_store(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- struct rte_sched_pipe *pipe = grinder->pipe;
- uint32_t tc_index = grinder->tc_index;
- uint32_t qindex;
-
- qindex = tc_index * 4;
-
- pipe->wrr_tokens[qindex] = (uint8_t) ((grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >> RTE_SCHED_WRR_SHIFT);
- pipe->wrr_tokens[qindex + 1] = (uint8_t) ((grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >> RTE_SCHED_WRR_SHIFT);
- pipe->wrr_tokens[qindex + 2] = (uint8_t) ((grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >> RTE_SCHED_WRR_SHIFT);
- pipe->wrr_tokens[qindex + 3] = (uint8_t) ((grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >> RTE_SCHED_WRR_SHIFT);
-}
-
-static inline void
-grinder_wrr(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- uint16_t wrr_tokens_min;
-
- grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0];
- grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1];
- grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2];
- grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3];
-
- grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens);
- wrr_tokens_min = grinder->wrr_tokens[grinder->qpos];
-
- grinder->wrr_tokens[0] -= wrr_tokens_min;
- grinder->wrr_tokens[1] -= wrr_tokens_min;
- grinder->wrr_tokens[2] -= wrr_tokens_min;
- grinder->wrr_tokens[3] -= wrr_tokens_min;
-}
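
ORing the tokens with the inverted mask forces inactive queues to 0xFFFF, so the minimum search can only ever select an active queue. For example (illustrative):

    /* Illustrative only: masked min selection as in grinder_wrr() */
    uint16_t wrr_tokens[4] = { 30, 10, 50, 20 };
    uint16_t wrr_mask[4]   = { 0xFFFF, 0x0000, 0xFFFF, 0xFFFF };  /* queue 1 inactive */
    int i;

    for (i = 0; i < 4; i++)
        wrr_tokens[i] |= ~wrr_mask[i];   /* -> { 30, 0xFFFF, 50, 20 } */
    /* rte_min_pos_4_u16(wrr_tokens) == 3: the inactive queue 1 can never win */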
-
-#else
-
-#error Invalid value for RTE_SCHED_WRR
-
-#endif /* RTE_SCHED_WRR */
-
-#define grinder_evict(port, pos)
-
-static inline void
-grinder_prefetch_pipe(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
-
- rte_prefetch0(grinder->pipe);
- rte_prefetch0(grinder->queue[0]);
-}
-
-static inline void
-grinder_prefetch_tc_queue_arrays(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- uint16_t qsize, qr[4];
-
- qsize = grinder->qsize;
- qr[0] = grinder->queue[0]->qr & (qsize - 1);
- qr[1] = grinder->queue[1]->qr & (qsize - 1);
- qr[2] = grinder->queue[2]->qr & (qsize - 1);
- qr[3] = grinder->queue[3]->qr & (qsize - 1);
-
- rte_prefetch0(grinder->qbase[0] + qr[0]);
- rte_prefetch0(grinder->qbase[1] + qr[1]);
-
- grinder_wrr_load(port, pos);
- grinder_wrr(port, pos);
-
- rte_prefetch0(grinder->qbase[2] + qr[2]);
- rte_prefetch0(grinder->qbase[3] + qr[3]);
-}
-
-static inline void
-grinder_prefetch_mbuf(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
- uint32_t qpos = grinder->qpos;
- struct rte_mbuf **qbase = grinder->qbase[qpos];
- uint16_t qsize = grinder->qsize;
- uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1);
-
- grinder->pkt = qbase[qr];
- rte_prefetch0(grinder->pkt);
-
- if (unlikely((qr & 0x7) == 7)) {
- uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1);
-
- rte_prefetch0(qbase + qr_next);
- }
-}
-
-static inline uint32_t
-grinder_handle(struct rte_sched_port *port, uint32_t pos)
-{
- struct rte_sched_grinder *grinder = port->grinder + pos;
-
- switch (grinder->state) {
- case e_GRINDER_PREFETCH_PIPE:
- {
- if (grinder_next_pipe(port, pos)) {
- grinder_prefetch_pipe(port, pos);
- port->busy_grinders ++;
-
- grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
- return 0;
- }
-
- return 0;
- }
-
- case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
- {
- struct rte_sched_pipe *pipe = grinder->pipe;
-
- grinder->pipe_params = port->pipe_profiles + pipe->profile;
- grinder_prefetch_tc_queue_arrays(port, pos);
- grinder_credits_update(port, pos);
-
- grinder->state = e_GRINDER_PREFETCH_MBUF;
- return 0;
- }
-
- case e_GRINDER_PREFETCH_MBUF:
- {
- grinder_prefetch_mbuf(port, pos);
-
- grinder->state = e_GRINDER_READ_MBUF;
- return 0;
- }
-
- case e_GRINDER_READ_MBUF:
- {
- uint32_t result = 0;
-
- result = grinder_schedule(port, pos);
-
- /* Look for next packet within the same TC */
- if (result && grinder->qmask) {
- grinder_wrr(port, pos);
- grinder_prefetch_mbuf(port, pos);
-
- return 1;
- }
- grinder_wrr_store(port, pos);
-
- /* Look for another active TC within same pipe */
- if (grinder_next_tc(port, pos)) {
- grinder_prefetch_tc_queue_arrays(port, pos);
-
- grinder->state = e_GRINDER_PREFETCH_MBUF;
- return result;
- }
- if ((grinder->productive == 0) && (port->pipe_loop == RTE_SCHED_PIPE_INVALID)) {
- port->pipe_loop = grinder->pindex;
- }
- grinder_evict(port, pos);
-
- /* Look for another active pipe */
- if (grinder_next_pipe(port, pos)) {
- grinder_prefetch_pipe(port, pos);
-
- grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
- return result;
- }
-
- /* No active pipe found */
- port->busy_grinders --;
-
- grinder->state = e_GRINDER_PREFETCH_PIPE;
- return result;
- }
-
- default:
- rte_panic("Algorithmic error (invalid state)\n");
- return 0;
- }
-}
-
-static inline void
-rte_sched_port_time_resync(struct rte_sched_port *port)
-{
- uint64_t cycles = rte_get_tsc_cycles();
- uint64_t cycles_diff = cycles - port->time_cpu_cycles;
- double bytes_diff = ((double) cycles_diff) / port->cycles_per_byte;
-
- /* Advance port time */
- port->time_cpu_cycles = cycles;
- port->time_cpu_bytes += (uint64_t) bytes_diff;
- if (port->time < port->time_cpu_bytes) {
- port->time = port->time_cpu_bytes;
- }
-
- /* Reset pipe loop detection */
- port->pipe_loop = RTE_SCHED_PIPE_INVALID;
-}
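
cycles_per_byte converts TSC cycles into bytes of port time. For a 2.0 GHz CPU driving a 10 Gbps port it would be 2e9 / (10e9 / 8) = 1.6 cycles per byte, so (illustrative):

    /* Illustrative only: cycles -> bytes of port time, as in the resync above */
    uint64_t cycles_diff = 1600;
    double cycles_per_byte = 1.6;   /* assumed 2 GHz CPU, 10 Gbps port */
    uint64_t bytes = (uint64_t)(cycles_diff / cycles_per_byte);   /* 1000 bytes */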
-
-static inline int
-rte_sched_port_exceptions(struct rte_sched_port *port, int second_pass)
-{
- int exceptions;
-
- /* Check if any exception flag is set */
- exceptions = (second_pass && port->busy_grinders == 0) ||
- (port->pipe_exhaustion == 1);
-
- /* Clear exception flags */
- port->pipe_exhaustion = 0;
-
- return exceptions;
-}
-
-int
-rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
-{
- uint32_t i, count;
-
- port->pkts_out = pkts;
- port->n_pkts_out = 0;
-
- rte_sched_port_time_resync(port);
-
- /* Take each queue in the grinder one step further */
- for (i = 0, count = 0; ; i ++) {
- count += grinder_handle(port, i & (RTE_SCHED_PORT_N_GRINDERS - 1));
- if ((count == n_pkts) ||
- rte_sched_port_exceptions(port, i >= RTE_SCHED_PORT_N_GRINDERS)) {
- break;
- }
- }
-
- return count;
-}
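
For reference, a minimal run-to-completion caller of the enqueue/dequeue API removed here might look as follows (sketch only; the scheduler handle, Ethernet port/queue IDs, burst size and all-zero path are assumptions):

    /* Hypothetical caller (sketch): classify, enqueue, then dequeue a burst.
     * "port" is a previously configured struct rte_sched_port handle. */
    enum { BURST = 32 };
    struct rte_mbuf *rx[BURST], *tx[BURST];
    uint16_t i, n_rx, n_tx;

    n_rx = rte_eth_rx_burst(0, 0, rx, BURST);
    for (i = 0; i < n_rx; i++)   /* classification stage writes the path */
        rte_sched_port_pkt_write(rx[i], 0, 0, 0, 0, e_RTE_METER_GREEN);

    rte_sched_port_enqueue(port, rx, n_rx);
    n_tx = (uint16_t) rte_sched_port_dequeue(port, tx, BURST);
    rte_eth_tx_burst(0, 0, tx, n_tx);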
diff --git a/src/dpdk_lib18/librte_sched/rte_sched.h b/src/dpdk_lib18/librte_sched/rte_sched.h
deleted file mode 100755
index e6bba22e..00000000
--- a/src/dpdk_lib18/librte_sched/rte_sched.h
+++ /dev/null
@@ -1,442 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __INCLUDE_RTE_SCHED_H__
-#define __INCLUDE_RTE_SCHED_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * @file
- * RTE Hierarchical Scheduler
- *
- * The hierarchical scheduler prioritizes the transmission of packets from different
- * users and traffic classes according to the Service Level Agreements (SLAs) defined
- * for the current network node.
- *
- * The scheduler supports thousands of packet queues grouped under a 5-level hierarchy:
- * 1. Port:
- * - Typical usage: output Ethernet port;
- * - Multiple ports are scheduled in round robin order with equal priority;
- * 2. Subport:
- * - Typical usage: group of users;
- * - Traffic shaping using the token bucket algorithm (one bucket per subport);
- * - Upper limit enforced per traffic class at subport level;
- * - Lower priority traffic classes able to reuse subport bandwidth currently
- * unused by higher priority traffic classes of the same subport;
- *           - When any subport traffic class is oversubscribed (configuration time
- *             event), the usage of subport member pipes with high demand for that
- *             traffic class is truncated to a dynamically adjusted value with no
- *             impact to low demand pipes;
- * 3. Pipe:
- * - Typical usage: individual user/subscriber;
- * - Traffic shaping using the token bucket algorithm (one bucket per pipe);
- * 4. Traffic class:
- * - Traffic classes of the same pipe handled in strict priority order;
- * - Upper limit enforced per traffic class at the pipe level;
- * - Lower priority traffic classes able to reuse pipe bandwidth currently
- * unused by higher priority traffic classes of the same pipe;
- * 5. Queue:
- * - Typical usage: queue hosting packets from one or multiple connections
- * of same traffic class belonging to the same user;
- * - Weighted Round Robin (WRR) is used to service the queues within same
- * pipe traffic class.
- *
- ***/
-
-#include <sys/types.h>
-#include <rte_mbuf.h>
-#include <rte_meter.h>
-
-/** Random Early Detection (RED) */
-#ifdef RTE_SCHED_RED
-#include "rte_red.h"
-#endif
-
-/** Number of traffic classes per pipe (as well as subport). Cannot be changed. */
-#define RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE 4
-
-/** Number of queues per pipe traffic class. Cannot be changed. */
-#define RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS 4
-
-/** Number of queues per pipe. */
-#define RTE_SCHED_QUEUES_PER_PIPE \
- (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * \
- RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
-
-/** Maximum number of pipe profiles that can be defined per port. Compile-time configurable.*/
-#ifndef RTE_SCHED_PIPE_PROFILES_PER_PORT
-#define RTE_SCHED_PIPE_PROFILES_PER_PORT 256
-#endif
-
-/** Ethernet framing overhead. Overhead fields per Ethernet frame:
- 1. Preamble: 7 bytes;
- 2. Start of Frame Delimiter (SFD): 1 byte;
- 3. Frame Check Sequence (FCS): 4 bytes;
- 4. Inter Frame Gap (IFG): 12 bytes.
-The FCS is considered overhead only if not included in the packet length (field pkt_len
-of struct rte_mbuf). */
-#ifndef RTE_SCHED_FRAME_OVERHEAD_DEFAULT
-#define RTE_SCHED_FRAME_OVERHEAD_DEFAULT 24
-#endif
-
-/** Subport configuration parameters. The period and credits_per_period parameters are measured
-in bytes, with one byte meaning the time duration associated with the transmission of one byte
-on the physical medium of the output port, with subport or subport traffic class rate (measured
-as percentage of output port rate) determined as credits_per_period divided by period. One
-credit represents one byte. */
-struct rte_sched_subport_params {
- /* Subport token bucket */
- uint32_t tb_rate; /**< Subport token bucket rate (measured in bytes per second) */
- uint32_t tb_size; /**< Subport token bucket size (measured in credits) */
-
- /* Subport traffic classes */
- uint32_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Subport traffic class rates (measured in bytes per second) */
- uint32_t tc_period; /**< Enforcement period for traffic class rates (measured in milliseconds) */
-};
-
-/** Subport statistics */
-struct rte_sched_subport_stats {
- /* Packets */
- uint32_t n_pkts_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of packets successfully written to current
- subport for each traffic class */
- uint32_t n_pkts_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of packets dropped by the current
-		subport for each traffic class due to subport queues being full or congested */
-
- /* Bytes */
- uint32_t n_bytes_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of bytes successfully written to current
-		subport for each traffic class */
- uint32_t n_bytes_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of bytes dropped by the current
- subport for each traffic class due to subport queues being full or congested */
-};
-
-/** Pipe configuration parameters. The period and credits_per_period parameters are measured
-in bytes, with one byte meaning the time duration associated with the transmission of one byte
-on the physical medium of the output port, with pipe or pipe traffic class rate (measured as
-percentage of output port rate) determined as credits_per_period divided by period. One credit
-represents one byte. */
-struct rte_sched_pipe_params {
- /* Pipe token bucket */
- uint32_t tb_rate; /**< Pipe token bucket rate (measured in bytes per second) */
- uint32_t tb_size; /**< Pipe token bucket size (measured in credits) */
-
- /* Pipe traffic classes */
- uint32_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Pipe traffic class rates (measured in bytes per second) */
- uint32_t tc_period; /**< Enforcement period for pipe traffic class rates (measured in milliseconds) */
-#ifdef RTE_SCHED_SUBPORT_TC_OV
- uint8_t tc_ov_weight; /**< Weight for the current pipe in the event of subport traffic class 3 oversubscription */
-#endif
-
- /* Pipe queues */
- uint8_t wrr_weights[RTE_SCHED_QUEUES_PER_PIPE]; /**< WRR weights for the queues of the current pipe */
-};
-
-/** Queue statistics */
-struct rte_sched_queue_stats {
- /* Packets */
- uint32_t n_pkts; /**< Number of packets successfully written to current queue */
- uint32_t n_pkts_dropped; /**< Number of packets dropped due to current queue being full or congested */
-
- /* Bytes */
- uint32_t n_bytes; /**< Number of bytes successfully written to current queue */
- uint32_t n_bytes_dropped; /**< Number of bytes dropped due to current queue being full or congested */
-};
-
-/** Port configuration parameters. */
-struct rte_sched_port_params {
- const char *name; /**< Literal string to be associated to the current port scheduler instance */
- int socket; /**< CPU socket ID where the memory for port scheduler should be allocated */
- uint32_t rate; /**< Output port rate (measured in bytes per second) */
- uint32_t mtu; /**< Maximum Ethernet frame size (measured in bytes). Should not include the framing overhead. */
- uint32_t frame_overhead; /**< Framing overhead per packet (measured in bytes) */
- uint32_t n_subports_per_port; /**< Number of subports for the current port scheduler instance*/
- uint32_t n_pipes_per_subport; /**< Number of pipes for each port scheduler subport */
- uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Packet queue size for each traffic class. All queues
- within the same pipe traffic class have the same size. Queues from
- different pipes serving the same traffic class have the same size. */
- struct rte_sched_pipe_params *pipe_profiles; /**< Pipe profile table defined for current port scheduler instance.
- Every pipe of the current port scheduler is configured using one of the
- profiles from this table. */
- uint32_t n_pipe_profiles; /**< Number of profiles in the pipe profile table */
-#ifdef RTE_SCHED_RED
- struct rte_red_params red_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][e_RTE_METER_COLORS]; /**< RED parameters */
-#endif
-};
-
-/** Path through the scheduler hierarchy used by the scheduler enqueue operation to
-identify the destination queue for the current packet. Stored in the field hash.sched
-of struct rte_mbuf of each packet, typically written by the classification stage and read by
-scheduler enqueue.*/
-struct rte_sched_port_hierarchy {
- uint32_t queue:2; /**< Queue ID (0 .. 3) */
- uint32_t traffic_class:2; /**< Traffic class ID (0 .. 3)*/
- uint32_t pipe:20; /**< Pipe ID */
- uint32_t subport:6; /**< Subport ID */
- uint32_t color:2; /**< Color */
-};
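
The five bit-fields pack exactly into the 32-bit hash.sched word (2 + 2 + 20 + 6 + 2 = 32 bits), which bounds the hierarchy at 64 subports per port and about 1M pipes per subport. A sketch (values illustrative):

    /* Illustrative only: the packed path occupies one 32-bit word */
    struct rte_sched_port_hierarchy h = {
        .queue = 3,           /* 0 .. 3         */
        .traffic_class = 1,   /* 0 .. 3         */
        .pipe = 4097,         /* 0 .. (1<<20)-1 */
        .subport = 5,         /* 0 .. 63        */
        .color = 0,
    };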
-
-/*
- * Configuration
- *
- ***/
-
-/**
- * Hierarchical scheduler port configuration
- *
- * @param params
- * Port scheduler configuration parameter structure
- * @return
- * Handle to port scheduler instance upon success or NULL otherwise.
- */
-struct rte_sched_port *
-rte_sched_port_config(struct rte_sched_port_params *params);
-
-/**
- * Hierarchical scheduler port free
- *
- * @param port
- * Handle to port scheduler instance
- */
-void
-rte_sched_port_free(struct rte_sched_port *port);
-
-/**
- * Hierarchical scheduler subport configuration
- *
- * @param port
- * Handle to port scheduler instance
- * @param subport_id
- * Subport ID
- * @param params
- * Subport configuration parameters
- * @return
- * 0 upon success, error code otherwise
- */
-int
-rte_sched_subport_config(struct rte_sched_port *port,
- uint32_t subport_id,
- struct rte_sched_subport_params *params);
-
-/**
- * Hierarchical scheduler pipe configuration
- *
- * @param port
- * Handle to port scheduler instance
- * @param subport_id
- * Subport ID
- * @param pipe_id
- * Pipe ID within subport
- * @param pipe_profile
- * ID of port-level pre-configured pipe profile
- * @return
- * 0 upon success, error code otherwise
- */
-int
-rte_sched_pipe_config(struct rte_sched_port *port,
- uint32_t subport_id,
- uint32_t pipe_id,
- int32_t pipe_profile);
-
-/**
- * Hierarchical scheduler memory footprint size per port
- *
- * @param params
- * Port scheduler configuration parameter structure
- * @return
- * Memory footprint size in bytes upon success, 0 otherwise
- */
-uint32_t
-rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params);
-
-/*
- * Statistics
- *
- ***/
-
-/**
- * Hierarchical scheduler subport statistics read
- *
- * @param port
- * Handle to port scheduler instance
- * @param subport_id
- * Subport ID
- * @param stats
- * Pointer to pre-allocated subport statistics structure where the statistics
- * counters should be stored
- * @param tc_ov
- * Pointer to pre-allocated 4-entry array where the oversubscription status for
- * each of the 4 subport traffic classes should be stored.
- * @return
- * 0 upon success, error code otherwise
- */
-int
-rte_sched_subport_read_stats(struct rte_sched_port *port,
- uint32_t subport_id,
- struct rte_sched_subport_stats *stats,
- uint32_t *tc_ov);
-
-/**
- * Hierarchical scheduler queue statistics read
- *
- * @param port
- * Handle to port scheduler instance
- * @param queue_id
- * Queue ID within port scheduler
- *   Pointer to pre-allocated queue statistics structure where the statistics
- *   counters should be stored
- * counters should be stored
- * @param qlen
- * Pointer to pre-allocated variable where the current queue length should be stored.
- * @return
- * 0 upon success, error code otherwise
- */
-int
-rte_sched_queue_read_stats(struct rte_sched_port *port,
- uint32_t queue_id,
- struct rte_sched_queue_stats *stats,
- uint16_t *qlen);
-
-/*
- * Run-time
- *
- ***/
-
-/**
- * Scheduler hierarchy path write to packet descriptor. Typically called by the
- * packet classification stage.
- *
- * @param pkt
- * Packet descriptor handle
- * @param subport
- * Subport ID
- * @param pipe
- * Pipe ID within subport
- * @param traffic_class
- * Traffic class ID within pipe (0 .. 3)
- * @param queue
- * Queue ID within pipe traffic class (0 .. 3)
- * @param color
- *   Packet color (read back by rte_sched_port_pkt_read_color() and used by
- *   the RED congestion logic when RTE_SCHED_RED is enabled)
- */
-static inline void
-rte_sched_port_pkt_write(struct rte_mbuf *pkt,
- uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue, enum rte_meter_color color)
-{
- struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
-
- sched->color = (uint32_t) color;
- sched->subport = subport;
- sched->pipe = pipe;
- sched->traffic_class = traffic_class;
- sched->queue = queue;
-}
-
-/**
- * Scheduler hierarchy path read from packet descriptor (struct rte_mbuf). Typically
- * called as part of the hierarchical scheduler enqueue operation. The subport,
- * pipe, traffic class and queue parameters need to be pre-allocated by the caller.
- *
- * @param pkt
- * Packet descriptor handle
- * @param subport
- * Subport ID
- * @param pipe
- * Pipe ID within subport
- * @param traffic_class
- * Traffic class ID within pipe (0 .. 3)
- * @param queue
- * Queue ID within pipe traffic class (0 .. 3)
- *
- */
-static inline void
-rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, uint32_t *pipe, uint32_t *traffic_class, uint32_t *queue)
-{
- struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
-
- *subport = sched->subport;
- *pipe = sched->pipe;
- *traffic_class = sched->traffic_class;
- *queue = sched->queue;
-}
-
-static inline enum rte_meter_color
-rte_sched_port_pkt_read_color(struct rte_mbuf *pkt)
-{
- struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
-
- return (enum rte_meter_color) sched->color;
-}
-
-/**
- * Hierarchical scheduler port enqueue. Writes up to n_pkts to port scheduler and
- * returns the number of packets actually written. For each packet, the port scheduler
- * queue to write the packet to is identified by reading the hierarchy path from the
- * packet descriptor; if the queue is full or congested and the packet is not written
- * to the queue, then the packet is automatically dropped without any action required
- * from the caller.
- *
- * @param port
- * Handle to port scheduler instance
- * @param pkts
- * Array storing the packet descriptor handles
- * @param n_pkts
- * Number of packets to enqueue from the pkts array into the port scheduler
- * @return
- * Number of packets successfully enqueued
- */
-int
-rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts);
-
-/**
- * Hierarchical scheduler port dequeue. Reads up to n_pkts from the port scheduler
- * and stores them in the pkts array and returns the number of packets actually read.
- * The pkts array needs to be pre-allocated by the caller with at least n_pkts entries.
- *
- * @param port
- * Handle to port scheduler instance
- * @param pkts
- * Pre-allocated packet descriptor array where the packets dequeued from the port
- * scheduler should be stored
- * @param n_pkts
- * Number of packets to dequeue from the port scheduler
- * @return
- * Number of packets successfully dequeued and placed in the pkts array
- */
-int
-rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __INCLUDE_RTE_SCHED_H__ */
diff --git a/src/dpdk_lib18/librte_sched/rte_sched_common.h b/src/dpdk_lib18/librte_sched/rte_sched_common.h
deleted file mode 100755
index 8920adec..00000000
--- a/src/dpdk_lib18/librte_sched/rte_sched_common.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __INCLUDE_RTE_SCHED_COMMON_H__
-#define __INCLUDE_RTE_SCHED_COMMON_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <sys/types.h>
-
-#define __rte_aligned_16 __attribute__((__aligned__(16)))
-
-static inline uint32_t
-rte_sched_min_val_2_u32(uint32_t x, uint32_t y)
-{
- return (x < y)? x : y;
-}
-
-#if 0
-static inline uint32_t
-rte_min_pos_4_u16(uint16_t *x)
-{
- uint32_t pos0, pos1;
-
- pos0 = (x[0] <= x[1])? 0 : 1;
- pos1 = (x[2] <= x[3])? 2 : 3;
-
- return (x[pos0] <= x[pos1])? pos0 : pos1;
-}
-
-#else
-
-/* simplified version to remove branches with CMOV instruction */
-static inline uint32_t
-rte_min_pos_4_u16(uint16_t *x)
-{
- uint32_t pos0 = 0;
- uint32_t pos1 = 2;
-
- if (x[1] <= x[0]) pos0 = 1;
- if (x[3] <= x[2]) pos1 = 3;
- if (x[pos1] <= x[pos0]) pos0 = pos1;
-
- return pos0;
-}
-
-#endif
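
A quick sanity check of the branch-free minimum search above (illustrative):

    /* Illustrative only */
    uint16_t x[4] = { 30, 10, 50, 20 };
    uint32_t pos = rte_min_pos_4_u16(x);   /* pos == 1, since x[1] == 10 */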
-
-/*
- * Compute the Greatest Common Divisor (GCD) of two numbers.
- * This implementation uses Euclid's algorithm:
- * gcd(a, 0) = a
- * gcd(a, b) = gcd(b, a mod b)
- *
- */
-static inline uint32_t
-rte_get_gcd(uint32_t a, uint32_t b)
-{
- uint32_t c;
-
- if (a == 0)
- return b;
- if (b == 0)
- return a;
-
- if (a < b) {
- c = a;
- a = b;
- b = c;
- }
-
- while (b != 0) {
- c = a % b;
- a = b;
- b = c;
- }
-
- return a;
-}
-
-/*
- * Compute the Least Common Multiple (LCM) of two numbers; this API names it
- * the Lowest Common Denominator (LCD). The implementation computes GCD first:
- *     LCM(a, b) = (a * b) / GCD(a, b)
- *
- */
-static inline uint32_t
-rte_get_lcd(uint32_t a, uint32_t b)
-{
- return (a * b) / rte_get_gcd(a, b);
-}
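
A worked example of the two helpers (illustrative):

    /* Illustrative only */
    uint32_t g = rte_get_gcd(12, 18);   /* 18,12 -> 12,6 -> 6,0 => 6 */
    uint32_t l = rte_get_lcd(12, 18);   /* (12 * 18) / 6 = 36        */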
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __INCLUDE_RTE_SCHED_COMMON_H__ */
diff --git a/src/dpdk_lib18/librte_table/Makefile b/src/dpdk_lib18/librte_table/Makefile
deleted file mode 100755
index dd684ccb..00000000
--- a/src/dpdk_lib18/librte_table/Makefile
+++ /dev/null
@@ -1,82 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_table.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-#
-# all source are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_lpm.c
-SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_lpm_ipv6.c
-ifeq ($(CONFIG_RTE_LIBRTE_ACL),y)
-SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_acl.c
-endif
-SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_hash_key8.c
-SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_hash_key16.c
-SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_hash_key32.c
-SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_hash_ext.c
-SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_hash_lru.c
-SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_array.c
-SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_stub.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table.h
-SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_lpm.h
-SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_lpm_ipv6.h
-ifeq ($(CONFIG_RTE_LIBRTE_ACL),y)
-SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_acl.h
-endif
-SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_hash.h
-SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_lru.h
-SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_array.h
-SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_stub.h
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) := lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_malloc
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_port
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_lpm
-ifeq ($(CONFIG_RTE_LIBRTE_ACL),y)
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_acl
-endif
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_hash
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_table/rte_table_acl.c b/src/dpdk_lib18/librte_table/rte_table_acl.c
deleted file mode 100755
index 4416311b..00000000
--- a/src/dpdk_lib18/librte_table/rte_table_acl.c
+++ /dev/null
@@ -1,491 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <stdio.h>
-
-#include <rte_common.h>
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_malloc.h>
-#include <rte_log.h>
-
-#include "rte_table_acl.h"
-#include <rte_ether.h>
-
-struct rte_table_acl {
- /* Low-level ACL table */
- char name[2][RTE_ACL_NAMESIZE];
- struct rte_acl_param acl_params; /* for creating low-level ACL table */
- struct rte_acl_config cfg; /* Holds the field definitions (metadata) */
- struct rte_acl_ctx *ctx;
- uint32_t name_id;
-
- /* Input parameters */
- uint32_t n_rules;
- uint32_t entry_size;
-
- /* Internal tables */
- uint8_t *action_table;
- struct rte_acl_rule **acl_rule_list; /* Array of pointers to rules */
- uint8_t *acl_rule_memory; /* Memory to store the rules */
-
- /* Memory to store the action table and stack of free entries */
- uint8_t memory[0] __rte_cache_aligned;
-};
-
-
-static void *
-rte_table_acl_create(
- void *params,
- int socket_id,
- uint32_t entry_size)
-{
- struct rte_table_acl_params *p = (struct rte_table_acl_params *) params;
- struct rte_table_acl *acl;
- uint32_t action_table_size, acl_rule_list_size, acl_rule_memory_size;
- uint32_t total_size;
-
- RTE_BUILD_BUG_ON(((sizeof(struct rte_table_acl) % RTE_CACHE_LINE_SIZE)
- != 0));
-
- /* Check input parameters */
- if (p == NULL) {
- RTE_LOG(ERR, TABLE, "%s: Invalid value for params\n", __func__);
- return NULL;
- }
- if (p->name == NULL) {
- RTE_LOG(ERR, TABLE, "%s: Invalid value for name\n", __func__);
- return NULL;
- }
- if (p->n_rules == 0) {
- RTE_LOG(ERR, TABLE, "%s: Invalid value for n_rules\n",
- __func__);
- return NULL;
- }
- if ((p->n_rule_fields == 0) ||
- (p->n_rule_fields > RTE_ACL_MAX_FIELDS)) {
- RTE_LOG(ERR, TABLE, "%s: Invalid value for n_rule_fields\n",
- __func__);
- return NULL;
- }
-
- entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));
-
- /* Memory allocation */
- action_table_size = RTE_CACHE_LINE_ROUNDUP(p->n_rules * entry_size);
- acl_rule_list_size =
- RTE_CACHE_LINE_ROUNDUP(p->n_rules * sizeof(struct rte_acl_rule *));
- acl_rule_memory_size = RTE_CACHE_LINE_ROUNDUP(p->n_rules *
- RTE_ACL_RULE_SZ(p->n_rule_fields));
- total_size = sizeof(struct rte_table_acl) + action_table_size +
- acl_rule_list_size + acl_rule_memory_size;
-
- acl = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
- socket_id);
- if (acl == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for ACL table\n",
- __func__, total_size);
- return NULL;
- }
-
- acl->action_table = &acl->memory[0];
- acl->acl_rule_list =
- (struct rte_acl_rule **) &acl->memory[action_table_size];
- acl->acl_rule_memory = (uint8_t *)
- &acl->memory[action_table_size + acl_rule_list_size];
-
- /* Initialization of internal fields */
- snprintf(acl->name[0], RTE_ACL_NAMESIZE, "%s_a", p->name);
- snprintf(acl->name[1], RTE_ACL_NAMESIZE, "%s_b", p->name);
- acl->name_id = 1;
-
- acl->acl_params.name = acl->name[acl->name_id];
- acl->acl_params.socket_id = socket_id;
- acl->acl_params.rule_size = RTE_ACL_RULE_SZ(p->n_rule_fields);
- acl->acl_params.max_rule_num = p->n_rules;
-
- acl->cfg.num_categories = 1;
- acl->cfg.num_fields = p->n_rule_fields;
- memcpy(&acl->cfg.defs[0], &p->field_format[0],
- p->n_rule_fields * sizeof(struct rte_acl_field_def));
-
- acl->ctx = NULL;
-
- acl->n_rules = p->n_rules;
- acl->entry_size = entry_size;
-
- return acl;
-}
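
The create function above carves a single cache-aligned allocation into consecutive sub-tables (action table, rule pointer array, rule memory) hung off the zero-length memory[] member, so one rte_free() releases everything. A minimal standalone sketch of that single-allocation technique, using plain calloc() and invented sizes purely for illustration:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

struct table {
	uint32_t n_rules;
	uint32_t entry_size;
	uint8_t *action_table;   /* points into memory[] */
	uint8_t *rule_memory;    /* points into memory[] */
	uint8_t memory[];        /* single trailing region, carved below */
};

static struct table *
table_create(uint32_t n_rules, uint32_t entry_size, uint32_t rule_size)
{
	size_t action_sz = (size_t)n_rules * entry_size;
	size_t rules_sz = (size_t)n_rules * rule_size;
	struct table *t = calloc(1, sizeof(*t) + action_sz + rules_sz);

	if (t == NULL)
		return NULL;

	t->n_rules = n_rules;
	t->entry_size = entry_size;
	/* Carve the one allocation into consecutive sub-tables */
	t->action_table = &t->memory[0];
	t->rule_memory = &t->memory[action_sz];
	return t;
}

int main(void)
{
	struct table *t = table_create(16, 8, 64);

	if (t == NULL)
		return 1;
	printf("action table at +%zu, rules at +%zu\n",
	       (size_t)(t->action_table - t->memory),
	       (size_t)(t->rule_memory - t->memory));
	free(t);
	return 0;
}

Besides making teardown a single free, the layout keeps the hot sub-tables adjacent in memory, which is friendlier to the cache than separate allocations.
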
-
-static int
-rte_table_acl_free(void *table)
-{
- struct rte_table_acl *acl = (struct rte_table_acl *) table;
-
- /* Check input parameters */
- if (table == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
- /* Free previously allocated resources */
- if (acl->ctx != NULL)
- rte_acl_free(acl->ctx);
-
- rte_free(acl);
-
- return 0;
-}
-
-RTE_ACL_RULE_DEF(rte_pipeline_acl_rule, RTE_ACL_MAX_FIELDS);
-
-static int
-rte_table_acl_build(struct rte_table_acl *acl, struct rte_acl_ctx **acl_ctx)
-{
- struct rte_acl_ctx *ctx = NULL;
- uint32_t n_rules, i;
- int status;
-
- /* Create low level ACL table */
- ctx = rte_acl_create(&acl->acl_params);
- if (ctx == NULL) {
- RTE_LOG(ERR, TABLE, "%s: Cannot create low level ACL table\n",
- __func__);
- return -1;
- }
-
- /* Add rules to low level ACL table */
- n_rules = 0;
- for (i = 1; i < acl->n_rules; i++) {
- if (acl->acl_rule_list[i] != NULL) {
- status = rte_acl_add_rules(ctx, acl->acl_rule_list[i],
- 1);
- if (status != 0) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot add rule to low level ACL table\n",
- __func__);
- rte_acl_free(ctx);
- return -1;
- }
-
- n_rules++;
- }
- }
-
- if (n_rules == 0) {
- rte_acl_free(ctx);
- *acl_ctx = NULL;
- return 0;
- }
-
- /* Build low level ACL table */
- status = rte_acl_build(ctx, &acl->cfg);
- if (status != 0) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot build the low level ACL table\n",
- __func__);
- rte_acl_free(ctx);
- return -1;
- }
-
- rte_acl_dump(ctx);
-
- *acl_ctx = ctx;
- return 0;
-}
-
-static int
-rte_table_acl_entry_add(
- void *table,
- void *key,
- void *entry,
- int *key_found,
- void **entry_ptr)
-{
- struct rte_table_acl *acl = (struct rte_table_acl *) table;
- struct rte_table_acl_rule_add_params *rule =
- (struct rte_table_acl_rule_add_params *) key;
- struct rte_pipeline_acl_rule acl_rule;
- struct rte_acl_rule *rule_location;
- struct rte_acl_ctx *ctx;
- uint32_t free_pos, free_pos_valid, i;
- int status;
-
- /* Check input parameters */
- if (table == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
- if (key == NULL) {
- RTE_LOG(ERR, TABLE, "%s: key parameter is NULL\n", __func__);
- return -EINVAL;
- }
- if (entry == NULL) {
- RTE_LOG(ERR, TABLE, "%s: entry parameter is NULL\n", __func__);
- return -EINVAL;
- }
- if (key_found == NULL) {
- RTE_LOG(ERR, TABLE, "%s: key_found parameter is NULL\n",
- __func__);
- return -EINVAL;
- }
- if (entry_ptr == NULL) {
- RTE_LOG(ERR, TABLE, "%s: entry_ptr parameter is NULL\n",
- __func__);
- return -EINVAL;
- }
- if (rule->priority > RTE_ACL_MAX_PRIORITY) {
- RTE_LOG(ERR, TABLE, "%s: Priority is too high\n", __func__);
- return -EINVAL;
- }
-
- /* Setup rule data structure */
- memset(&acl_rule, 0, sizeof(acl_rule));
- acl_rule.data.category_mask = 1;
- acl_rule.data.priority = RTE_ACL_MAX_PRIORITY - rule->priority;
- acl_rule.data.userdata = 0; /* To be set up later */
- memcpy(&acl_rule.field[0],
- &rule->field_value[0],
- acl->cfg.num_fields * sizeof(struct rte_acl_field));
-
- /* Look to see if the rule exists already in the table */
- free_pos = 0;
- free_pos_valid = 0;
- for (i = 1; i < acl->n_rules; i++) {
- if (acl->acl_rule_list[i] == NULL) {
- if (free_pos_valid == 0) {
- free_pos = i;
- free_pos_valid = 1;
- }
-
- continue;
- }
-
- /* Compare the key fields */
- status = memcmp(&acl->acl_rule_list[i]->field[0],
- &rule->field_value[0],
- acl->cfg.num_fields * sizeof(struct rte_acl_field));
-
- /* Rule found: update data associated with the rule */
- if (status == 0) {
- *key_found = 1;
- *entry_ptr = &acl->memory[i * acl->entry_size];
- memcpy(*entry_ptr, entry, acl->entry_size);
-
- return 0;
- }
- }
-
- /* Return if max rules */
- if (free_pos_valid == 0) {
- RTE_LOG(ERR, TABLE, "%s: Max number of rules reached\n",
- __func__);
- return -ENOSPC;
- }
-
- /* Add the new rule to the rule set */
- acl_rule.data.userdata = free_pos;
- rule_location = (struct rte_acl_rule *)
- &acl->acl_rule_memory[free_pos * acl->acl_params.rule_size];
- memcpy(rule_location, &acl_rule, acl->acl_params.rule_size);
- acl->acl_rule_list[free_pos] = rule_location;
-
- /* Build low level ACL table */
- acl->name_id ^= 1;
- acl->acl_params.name = acl->name[acl->name_id];
- status = rte_table_acl_build(acl, &ctx);
- if (status != 0) {
- /* Roll back changes */
- acl->acl_rule_list[free_pos] = NULL;
- acl->name_id ^= 1;
-
- return -EINVAL;
- }
-
- /* Commit changes */
- if (acl->ctx != NULL)
- rte_acl_free(acl->ctx);
- acl->ctx = ctx;
- *key_found = 0;
- *entry_ptr = &acl->memory[free_pos * acl->entry_size];
- memcpy(*entry_ptr, entry, acl->entry_size);
-
- return 0;
-}
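
The add path above also shows the update discipline: toggle name_id to the spare _a/_b name, build a fresh context, and only on success free the old one and publish the new; on failure the rule list and name_id are rolled back so the old context keeps serving. A hedged sketch of that build-then-swap idiom, with toy stand-ins for the rte_acl calls:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for rte_acl_create() + rte_acl_build() */
struct ctx { char name[32]; };

static struct ctx *build_new_ctx(const char *name, int should_fail)
{
	struct ctx *c;

	if (should_fail)
		return NULL;
	c = malloc(sizeof(*c));
	if (c != NULL)
		snprintf(c->name, sizeof(c->name), "%s", name);
	return c;
}

struct table {
	char name[2][32];
	unsigned int name_id; /* which of the two names the live ctx uses */
	struct ctx *ctx;      /* currently published context */
};

/* Build into the spare name; publish only on success. */
static int table_commit(struct table *t, int should_fail)
{
	struct ctx *fresh;

	t->name_id ^= 1;
	fresh = build_new_ctx(t->name[t->name_id], should_fail);
	if (fresh == NULL) {
		t->name_id ^= 1; /* roll back; old ctx keeps serving */
		return -1;
	}
	free(t->ctx);        /* free(NULL) is a no-op */
	t->ctx = fresh;
	return 0;
}

int main(void)
{
	struct table t = { .name = { "tbl_a", "tbl_b" }, .name_id = 1 };

	table_commit(&t, 0);
	printf("live ctx: %s\n", t.ctx->name);
	table_commit(&t, 1); /* failed rebuild leaves tbl_a published */
	printf("live ctx: %s\n", t.ctx->name);
	free(t.ctx);
	return 0;
}

The two alternating names exist because the underlying library registers contexts by name, so the replacement must be created under a different name than the one still live.
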
-
-static int
-rte_table_acl_entry_delete(
- void *table,
- void *key,
- int *key_found,
- void *entry)
-{
- struct rte_table_acl *acl = (struct rte_table_acl *) table;
- struct rte_table_acl_rule_delete_params *rule =
- (struct rte_table_acl_rule_delete_params *) key;
- struct rte_acl_rule *deleted_rule = NULL;
- struct rte_acl_ctx *ctx;
- uint32_t pos, pos_valid, i;
- int status;
-
- /* Check input parameters */
- if (table == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
- if (key == NULL) {
- RTE_LOG(ERR, TABLE, "%s: key parameter is NULL\n", __func__);
- return -EINVAL;
- }
- if (key_found == NULL) {
- RTE_LOG(ERR, TABLE, "%s: key_found parameter is NULL\n",
- __func__);
- return -EINVAL;
- }
-
- /* Look for the rule in the table */
- pos = 0;
- pos_valid = 0;
- for (i = 1; i < acl->n_rules; i++) {
- if (acl->acl_rule_list[i] != NULL) {
- /* Compare the key fields */
- status = memcmp(&acl->acl_rule_list[i]->field[0],
- &rule->field_value[0], acl->cfg.num_fields *
- sizeof(struct rte_acl_field));
-
- /* Rule found: remove from table */
- if (status == 0) {
- pos = i;
- pos_valid = 1;
-
- deleted_rule = acl->acl_rule_list[i];
- acl->acl_rule_list[i] = NULL;
- }
- }
- }
-
- /* Return if rule not found */
- if (pos_valid == 0) {
- *key_found = 0;
- return 0;
- }
-
- /* Build low level ACL table */
- acl->name_id ^= 1;
- acl->acl_params.name = acl->name[acl->name_id];
- status = rte_table_acl_build(acl, &ctx);
- if (status != 0) {
- /* Roll back changes */
- acl->acl_rule_list[pos] = deleted_rule;
- acl->name_id ^= 1;
-
- return -EINVAL;
- }
-
- /* Commit changes */
- if (acl->ctx != NULL)
- rte_acl_free(acl->ctx);
-
- acl->ctx = ctx;
- *key_found = 1;
- if (entry != NULL)
- memcpy(entry, &acl->memory[pos * acl->entry_size],
- acl->entry_size);
-
- return 0;
-}
-
-static int
-rte_table_acl_lookup(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_acl *acl = (struct rte_table_acl *) table;
- const uint8_t *pkts_data[RTE_PORT_IN_BURST_SIZE_MAX];
- uint32_t results[RTE_PORT_IN_BURST_SIZE_MAX];
- uint64_t pkts_out_mask;
- uint32_t n_pkts, i, j;
-
- /* Input conversion */
- for (i = 0, j = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX -
- __builtin_clzll(pkts_mask)); i++) {
- uint64_t pkt_mask = 1LLU << i;
-
- if (pkt_mask & pkts_mask) {
- pkts_data[j] = rte_pktmbuf_mtod(pkts[i], uint8_t *);
- j++;
- }
- }
- n_pkts = j;
-
- /* Low-level ACL table lookup */
- if (acl->ctx != NULL)
- rte_acl_classify(acl->ctx, pkts_data, results, n_pkts, 1);
- else
- n_pkts = 0;
-
- /* Output conversion */
- pkts_out_mask = 0;
- for (i = 0; i < n_pkts; i++) {
- uint32_t action_table_pos = results[i];
- uint32_t pkt_pos = __builtin_ctzll(pkts_mask);
- uint64_t pkt_mask = 1LLU << pkt_pos;
-
- pkts_mask &= ~pkt_mask;
-
- if (action_table_pos != RTE_ACL_INVALID_USERDATA) {
- pkts_out_mask |= pkt_mask;
- entries[pkt_pos] = (void *)
- &acl->memory[action_table_pos *
- acl->entry_size];
- rte_prefetch0(entries[pkt_pos]);
- }
- }
-
- *lookup_hit_mask = pkts_out_mask;
-
- return 0;
-}
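
Both conversion loops above walk the 64-bit pkts_mask: the input loop scans every position up to the highest set bit (64 minus __builtin_clzll), while the output loop repeatedly peels the lowest set bit with __builtin_ctzll. A small self-contained sketch of the peel-lowest-bit idiom:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = 0x15; /* packets 0, 2 and 4 are valid */

	while (mask) {
		uint32_t pos = __builtin_ctzll(mask); /* lowest set bit */
		uint64_t bit = 1ULL << pos;

		mask &= ~bit; /* clear it so the loop terminates */
		printf("process packet %u\n", pos);
	}
	return 0;
}
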
-
-struct rte_table_ops rte_table_acl_ops = {
- .f_create = rte_table_acl_create,
- .f_free = rte_table_acl_free,
- .f_add = rte_table_acl_entry_add,
- .f_delete = rte_table_acl_entry_delete,
- .f_lookup = rte_table_acl_lookup,
-};
diff --git a/src/dpdk_lib18/librte_table/rte_table_array.c b/src/dpdk_lib18/librte_table/rte_table_array.c
deleted file mode 100755
index c0310700..00000000
--- a/src/dpdk_lib18/librte_table/rte_table_array.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <stdio.h>
-
-#include <rte_common.h>
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_malloc.h>
-#include <rte_log.h>
-
-#include "rte_table_array.h"
-
-struct rte_table_array {
- /* Input parameters */
- uint32_t entry_size;
- uint32_t n_entries;
- uint32_t offset;
-
- /* Internal fields */
- uint32_t entry_pos_mask;
-
- /* Internal table */
- uint8_t array[0] __rte_cache_aligned;
-} __rte_cache_aligned;
-
-static void *
-rte_table_array_create(void *params, int socket_id, uint32_t entry_size)
-{
- struct rte_table_array_params *p =
- (struct rte_table_array_params *) params;
- struct rte_table_array *t;
- uint32_t total_cl_size, total_size;
-
- /* Check input parameters */
- if ((p == NULL) ||
- (p->n_entries == 0) ||
- (!rte_is_power_of_2(p->n_entries)) ||
- ((p->offset & 0x3) != 0)) {
- return NULL;
- }
-
- /* Memory allocation */
- total_cl_size = (sizeof(struct rte_table_array) +
- RTE_CACHE_LINE_SIZE) / RTE_CACHE_LINE_SIZE;
- total_cl_size += (p->n_entries * entry_size +
- RTE_CACHE_LINE_SIZE) / RTE_CACHE_LINE_SIZE;
- total_size = total_cl_size * RTE_CACHE_LINE_SIZE;
- t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
- if (t == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for array table\n",
- __func__, total_size);
- return NULL;
- }
-
- /* Memory initialization */
- t->entry_size = entry_size;
- t->n_entries = p->n_entries;
- t->offset = p->offset;
- t->entry_pos_mask = t->n_entries - 1;
-
- return t;
-}
-
-static int
-rte_table_array_free(void *table)
-{
- struct rte_table_array *t = (struct rte_table_array *) table;
-
- /* Check input parameters */
- if (t == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
- /* Free previously allocated resources */
- rte_free(t);
-
- return 0;
-}
-
-static int
-rte_table_array_entry_add(
- void *table,
- void *key,
- void *entry,
- int *key_found,
- void **entry_ptr)
-{
- struct rte_table_array *t = (struct rte_table_array *) table;
- struct rte_table_array_key *k = (struct rte_table_array_key *) key;
- uint8_t *table_entry;
-
- /* Check input parameters */
- if (table == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
- if (key == NULL) {
- RTE_LOG(ERR, TABLE, "%s: key parameter is NULL\n", __func__);
- return -EINVAL;
- }
- if (entry == NULL) {
- RTE_LOG(ERR, TABLE, "%s: entry parameter is NULL\n", __func__);
- return -EINVAL;
- }
- if (key_found == NULL) {
- RTE_LOG(ERR, TABLE, "%s: key_found parameter is NULL\n",
- __func__);
- return -EINVAL;
- }
- if (entry_ptr == NULL) {
- RTE_LOG(ERR, TABLE, "%s: entry_ptr parameter is NULL\n",
- __func__);
- return -EINVAL;
- }
-
- table_entry = &t->array[k->pos * t->entry_size];
- memcpy(table_entry, entry, t->entry_size);
- *key_found = 1;
- *entry_ptr = (void *) table_entry;
-
- return 0;
-}
-
-static int
-rte_table_array_lookup(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_array *t = (struct rte_table_array *) table;
-
- *lookup_hit_mask = pkts_mask;
-
- if ((pkts_mask & (pkts_mask + 1)) == 0) {
- uint64_t n_pkts = __builtin_popcountll(pkts_mask);
- uint32_t i;
-
- for (i = 0; i < n_pkts; i++) {
- struct rte_mbuf *pkt = pkts[i];
- uint32_t entry_pos = RTE_MBUF_METADATA_UINT32(pkt,
- t->offset) & t->entry_pos_mask;
-
- entries[i] = (void *) &t->array[entry_pos *
- t->entry_size];
- }
- } else {
- for ( ; pkts_mask; ) {
- uint32_t pkt_index = __builtin_ctzll(pkts_mask);
- uint64_t pkt_mask = 1LLU << pkt_index;
- struct rte_mbuf *pkt = pkts[pkt_index];
- uint32_t entry_pos = RTE_MBUF_METADATA_UINT32(pkt,
- t->offset) & t->entry_pos_mask;
-
- entries[pkt_index] = (void *) &t->array[entry_pos *
- t->entry_size];
- pkts_mask &= ~pkt_mask;
- }
- }
-
- return 0;
-}
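
Two bit tricks carry this lookup. Because n_entries must be a power of two at create time, index & (n_entries - 1) replaces a modulo, and (m & (m + 1)) == 0 holds exactly when the set bits of m form a contiguous run starting at bit 0, which allows the simpler counted loop. A quick standalone demonstration of both:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t n_entries = 8;               /* must be a power of two */
	uint32_t mask = n_entries - 1;        /* 0x7 */
	uint64_t contiguous = 0x0F;           /* 0b1111: bits 0..3 set */
	uint64_t sparse = 0x15;               /* 0b10101: has holes */

	printf("13 mod 8 = %u\n", 13 & mask); /* prints 5 */
	printf("0x0F contiguous? %d\n", (contiguous & (contiguous + 1)) == 0);
	printf("0x15 contiguous? %d\n", (sparse & (sparse + 1)) == 0);
	return 0;
}
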
-
-struct rte_table_ops rte_table_array_ops = {
- .f_create = rte_table_array_create,
- .f_free = rte_table_array_free,
- .f_add = rte_table_array_entry_add,
- .f_delete = NULL,
- .f_lookup = rte_table_array_lookup,
-};
diff --git a/src/dpdk_lib18/librte_table/rte_table_hash_ext.c b/src/dpdk_lib18/librte_table/rte_table_hash_ext.c
deleted file mode 100755
index 66e416bb..00000000
--- a/src/dpdk_lib18/librte_table/rte_table_hash_ext.c
+++ /dev/null
@@ -1,1122 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <stdio.h>
-
-#include <rte_common.h>
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_malloc.h>
-#include <rte_log.h>
-
-#include "rte_table_hash.h"
-
-#define KEYS_PER_BUCKET 4
-
-struct bucket {
- union {
- uintptr_t next;
- uint64_t lru_list;
- };
- uint16_t sig[KEYS_PER_BUCKET];
- uint32_t key_pos[KEYS_PER_BUCKET];
-};
-
-#define BUCKET_NEXT(bucket) \
- ((void *) ((bucket)->next & (~1LU)))
-
-#define BUCKET_NEXT_VALID(bucket) \
- ((bucket)->next & 1LU)
-
-#define BUCKET_NEXT_SET(bucket, bucket_next) \
-do \
- (bucket)->next = (((uintptr_t) ((void *) (bucket_next))) | 1LU);\
-while (0)
-
-#define BUCKET_NEXT_SET_NULL(bucket) \
-do \
- (bucket)->next = 0; \
-while (0)
-
-#define BUCKET_NEXT_COPY(bucket, bucket2) \
-do \
- (bucket)->next = (bucket2)->next; \
-while (0)
-
-struct grinder {
- struct bucket *bkt;
- uint64_t sig;
- uint64_t match;
- uint32_t key_index;
-};
-
-struct rte_table_hash {
- /* Input parameters */
- uint32_t key_size;
- uint32_t entry_size;
- uint32_t n_keys;
- uint32_t n_buckets;
- uint32_t n_buckets_ext;
- rte_table_hash_op_hash f_hash;
- uint64_t seed;
- uint32_t signature_offset;
- uint32_t key_offset;
-
- /* Internal */
- uint64_t bucket_mask;
- uint32_t key_size_shl;
- uint32_t data_size_shl;
- uint32_t key_stack_tos;
- uint32_t bkt_ext_stack_tos;
-
- /* Grinder */
- struct grinder grinders[RTE_PORT_IN_BURST_SIZE_MAX];
-
- /* Tables */
- struct bucket *buckets;
- struct bucket *buckets_ext;
- uint8_t *key_mem;
- uint8_t *data_mem;
- uint32_t *key_stack;
- uint32_t *bkt_ext_stack;
-
- /* Table memory */
- uint8_t memory[0] __rte_cache_aligned;
-};
-
-static int
-check_params_create(struct rte_table_hash_ext_params *params)
-{
- uint32_t n_buckets_min;
-
- /* key_size */
- if ((params->key_size == 0) ||
- (!rte_is_power_of_2(params->key_size))) {
- RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
- return -EINVAL;
- }
-
- /* n_keys */
- if ((params->n_keys == 0) ||
- (!rte_is_power_of_2(params->n_keys))) {
- RTE_LOG(ERR, TABLE, "%s: n_keys invalid value\n", __func__);
- return -EINVAL;
- }
-
- /* n_buckets */
- n_buckets_min = (params->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET;
- if ((params->n_buckets == 0) ||
- (!rte_is_power_of_2(params->n_buckets)) ||
- (params->n_buckets < n_buckets_min)) {
- RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
- return -EINVAL;
- }
-
- /* f_hash */
- if (params->f_hash == NULL) {
- RTE_LOG(ERR, TABLE, "%s: f_hash invalid value\n", __func__);
- return -EINVAL;
- }
-
- /* signature offset */
- if ((params->signature_offset & 0x3) != 0) {
- RTE_LOG(ERR, TABLE, "%s: signature_offset invalid value\n",
- __func__);
- return -EINVAL;
- }
-
- /* key offset */
- if ((params->key_offset & 0x7) != 0) {
- RTE_LOG(ERR, TABLE, "%s: key_offset invalid value\n", __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void *
-rte_table_hash_ext_create(void *params, int socket_id, uint32_t entry_size)
-{
- struct rte_table_hash_ext_params *p =
- (struct rte_table_hash_ext_params *) params;
- struct rte_table_hash *t;
- uint32_t total_size, table_meta_sz;
- uint32_t bucket_sz, bucket_ext_sz, key_sz;
- uint32_t key_stack_sz, bkt_ext_stack_sz, data_sz;
- uint32_t bucket_offset, bucket_ext_offset, key_offset;
- uint32_t key_stack_offset, bkt_ext_stack_offset, data_offset;
- uint32_t i;
-
- /* Check input parameters */
- if ((check_params_create(p) != 0) ||
- (!rte_is_power_of_2(entry_size)) ||
- ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
- (sizeof(struct bucket) != (RTE_CACHE_LINE_SIZE / 2)))
- return NULL;
-
- /* Memory allocation */
- table_meta_sz = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
- bucket_sz = RTE_CACHE_LINE_ROUNDUP(p->n_buckets * sizeof(struct bucket));
- bucket_ext_sz =
- RTE_CACHE_LINE_ROUNDUP(p->n_buckets_ext * sizeof(struct bucket));
- key_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
- key_stack_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
- bkt_ext_stack_sz =
- RTE_CACHE_LINE_ROUNDUP(p->n_buckets_ext * sizeof(uint32_t));
- data_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
- total_size = table_meta_sz + bucket_sz + bucket_ext_sz + key_sz +
- key_stack_sz + bkt_ext_stack_sz + data_sz;
-
- t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
- if (t == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
- return NULL;
- }
- RTE_LOG(INFO, TABLE, "%s (%u-byte key): Hash table memory footprint is "
- "%u bytes\n", __func__, p->key_size, total_size);
-
- /* Memory initialization */
- t->key_size = p->key_size;
- t->entry_size = entry_size;
- t->n_keys = p->n_keys;
- t->n_buckets = p->n_buckets;
- t->n_buckets_ext = p->n_buckets_ext;
- t->f_hash = p->f_hash;
- t->seed = p->seed;
- t->signature_offset = p->signature_offset;
- t->key_offset = p->key_offset;
-
- /* Internal */
- t->bucket_mask = t->n_buckets - 1;
- t->key_size_shl = __builtin_ctzl(p->key_size);
- t->data_size_shl = __builtin_ctzl(entry_size);
-
- /* Tables */
- bucket_offset = 0;
- bucket_ext_offset = bucket_offset + bucket_sz;
- key_offset = bucket_ext_offset + bucket_ext_sz;
- key_stack_offset = key_offset + key_sz;
- bkt_ext_stack_offset = key_stack_offset + key_stack_sz;
- data_offset = bkt_ext_stack_offset + bkt_ext_stack_sz;
-
- t->buckets = (struct bucket *) &t->memory[bucket_offset];
- t->buckets_ext = (struct bucket *) &t->memory[bucket_ext_offset];
- t->key_mem = &t->memory[key_offset];
- t->key_stack = (uint32_t *) &t->memory[key_stack_offset];
- t->bkt_ext_stack = (uint32_t *) &t->memory[bkt_ext_stack_offset];
- t->data_mem = &t->memory[data_offset];
-
- /* Key stack */
- for (i = 0; i < t->n_keys; i++)
- t->key_stack[i] = t->n_keys - 1 - i;
- t->key_stack_tos = t->n_keys;
-
- /* Bucket ext stack */
- for (i = 0; i < t->n_buckets_ext; i++)
- t->bkt_ext_stack[i] = t->n_buckets_ext - 1 - i;
- t->bkt_ext_stack_tos = t->n_buckets_ext;
-
- return t;
-}
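
The two stacks seeded at the end of the create function are plain LIFO free lists of array indices: key_stack hands out key slots and bkt_ext_stack hands out extension buckets, popped on add and pushed back on delete. A minimal sketch of this index-stack allocator pattern:

#include <stdint.h>
#include <stdio.h>

#define N_SLOTS 4

static uint32_t stack[N_SLOTS];
static uint32_t tos; /* number of free indices currently on the stack */

static void stack_init(void)
{
	uint32_t i;

	/* Seed with every index so index 0 ends up on top */
	for (i = 0; i < N_SLOTS; i++)
		stack[i] = N_SLOTS - 1 - i;
	tos = N_SLOTS;
}

static int alloc_slot(uint32_t *slot)
{
	if (tos == 0)
		return -1;        /* table full */
	*slot = stack[--tos];
	return 0;
}

static void free_slot(uint32_t slot)
{
	stack[tos++] = slot;
}

int main(void)
{
	uint32_t a, b;

	stack_init();
	alloc_slot(&a);           /* 0 */
	alloc_slot(&b);           /* 1 */
	printf("got %u and %u\n", a, b);
	free_slot(a);
	alloc_slot(&b);           /* 0 again: LIFO reuse */
	printf("reused %u\n", b);
	return 0;
}

LIFO reuse is deliberate: a just-freed index is the most likely to still be cache-resident when it is handed out again.
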
-
-static int
-rte_table_hash_ext_free(void *table)
-{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
-
- /* Check input parameters */
- if (t == NULL)
- return -EINVAL;
-
- rte_free(t);
- return 0;
-}
-
-static int
-rte_table_hash_ext_entry_add(void *table, void *key, void *entry,
- int *key_found, void **entry_ptr)
-{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
- struct bucket *bkt0, *bkt, *bkt_prev;
- uint64_t sig;
- uint32_t bkt_index, i;
-
- sig = t->f_hash(key, t->key_size, t->seed);
- bkt_index = sig & t->bucket_mask;
- bkt0 = &t->buckets[bkt_index];
- sig = (sig >> 16) | 1LLU;
-
- /* Key is present in the bucket */
- for (bkt = bkt0; bkt != NULL; bkt = BUCKET_NEXT(bkt))
- for (i = 0; i < KEYS_PER_BUCKET; i++) {
- uint64_t bkt_sig = (uint64_t) bkt->sig[i];
- uint32_t bkt_key_index = bkt->key_pos[i];
- uint8_t *bkt_key =
- &t->key_mem[bkt_key_index << t->key_size_shl];
-
- if ((sig == bkt_sig) && (memcmp(key, bkt_key,
- t->key_size) == 0)) {
- uint8_t *data = &t->data_mem[bkt_key_index <<
- t->data_size_shl];
-
- memcpy(data, entry, t->entry_size);
- *key_found = 1;
- *entry_ptr = (void *) data;
- return 0;
- }
- }
-
- /* Key is not present in the bucket */
- for (bkt_prev = NULL, bkt = bkt0; bkt != NULL; bkt_prev = bkt,
- bkt = BUCKET_NEXT(bkt))
- for (i = 0; i < KEYS_PER_BUCKET; i++) {
- uint64_t bkt_sig = (uint64_t) bkt->sig[i];
-
- if (bkt_sig == 0) {
- uint32_t bkt_key_index;
- uint8_t *bkt_key, *data;
-
- /* Allocate new key */
- if (t->key_stack_tos == 0) /* No free keys */
- return -ENOSPC;
-
- bkt_key_index = t->key_stack[
- --t->key_stack_tos];
-
- /* Install new key */
- bkt_key = &t->key_mem[bkt_key_index <<
- t->key_size_shl];
- data = &t->data_mem[bkt_key_index <<
- t->data_size_shl];
-
- bkt->sig[i] = (uint16_t) sig;
- bkt->key_pos[i] = bkt_key_index;
- memcpy(bkt_key, key, t->key_size);
- memcpy(data, entry, t->entry_size);
-
- *key_found = 0;
- *entry_ptr = (void *) data;
- return 0;
- }
- }
-
- /* Bucket full: extend bucket */
- if ((t->bkt_ext_stack_tos > 0) && (t->key_stack_tos > 0)) {
- uint32_t bkt_key_index;
- uint8_t *bkt_key, *data;
-
- /* Allocate new bucket ext */
- bkt_index = t->bkt_ext_stack[--t->bkt_ext_stack_tos];
- bkt = &t->buckets_ext[bkt_index];
-
- /* Chain the new bucket ext */
- BUCKET_NEXT_SET(bkt_prev, bkt);
- BUCKET_NEXT_SET_NULL(bkt);
-
- /* Allocate new key */
- bkt_key_index = t->key_stack[--t->key_stack_tos];
- bkt_key = &t->key_mem[bkt_key_index << t->key_size_shl];
-
- data = &t->data_mem[bkt_key_index << t->data_size_shl];
-
- /* Install new key into bucket */
- bkt->sig[0] = (uint16_t) sig;
- bkt->key_pos[0] = bkt_key_index;
- memcpy(bkt_key, key, t->key_size);
- memcpy(data, entry, t->entry_size);
-
- *key_found = 0;
- *entry_ptr = (void *) data;
- return 0;
- }
-
- return -ENOSPC;
-}
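
The signature handling above deserves a note: the low hash bits pick the bucket, then the hash is shifted right by 16 and OR'd with 1 before being truncated to 16 bits for storage, so a stored signature is always odd and never 0 — which is why bkt_sig == 0 reliably marks an empty slot. A worked illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t hash = 0xDEADBEEFCAFE0000ULL; /* pretend f_hash() output */
	uint64_t bucket_mask = 0xFF;           /* 256 buckets */

	uint64_t bkt_index = hash & bucket_mask;
	/* Keep bits 16 and up, force the low bit: the stored value is
	 * always odd, so 0 can safely mean "slot empty". */
	uint16_t stored = (uint16_t)((hash >> 16) | 1ULL);

	printf("bucket %llu, stored signature 0x%04x\n",
	       (unsigned long long)bkt_index, (unsigned)stored);
	return 0;
}
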
-
-static int
-rte_table_hash_ext_entry_delete(void *table, void *key, int *key_found,
-void *entry)
-{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
- struct bucket *bkt0, *bkt, *bkt_prev;
- uint64_t sig;
- uint32_t bkt_index, i;
-
- sig = t->f_hash(key, t->key_size, t->seed);
- bkt_index = sig & t->bucket_mask;
- bkt0 = &t->buckets[bkt_index];
- sig = (sig >> 16) | 1LLU;
-
- /* Key is present in the bucket */
- for (bkt_prev = NULL, bkt = bkt0; bkt != NULL; bkt_prev = bkt,
- bkt = BUCKET_NEXT(bkt))
- for (i = 0; i < KEYS_PER_BUCKET; i++) {
- uint64_t bkt_sig = (uint64_t) bkt->sig[i];
- uint32_t bkt_key_index = bkt->key_pos[i];
- uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
- t->key_size_shl];
-
- if ((sig == bkt_sig) && (memcmp(key, bkt_key,
- t->key_size) == 0)) {
- uint8_t *data = &t->data_mem[bkt_key_index <<
- t->data_size_shl];
-
- /* Uninstall key from bucket */
- bkt->sig[i] = 0;
- *key_found = 1;
- if (entry)
- memcpy(entry, data, t->entry_size);
-
- /* Free key */
- t->key_stack[t->key_stack_tos++] =
- bkt_key_index;
-
- /* Check if bucket is unused */
- if ((bkt_prev != NULL) &&
- (bkt->sig[0] == 0) && (bkt->sig[1] == 0) &&
- (bkt->sig[2] == 0) && (bkt->sig[3] == 0)) {
- /* Unchain bucket */
- BUCKET_NEXT_COPY(bkt_prev, bkt);
-
- /* Clear bucket */
- memset(bkt, 0, sizeof(struct bucket));
-
- /* Free bucket back to buckets ext */
- bkt_index = bkt - t->buckets_ext;
- t->bkt_ext_stack[t->bkt_ext_stack_tos++]
- = bkt_index;
- }
-
- return 0;
- }
- }
-
- /* Key is not present in the bucket */
- *key_found = 0;
- return 0;
-}
-
-static int rte_table_hash_ext_lookup_unoptimized(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries,
- int dosig)
-{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
- uint64_t pkts_mask_out = 0;
-
- for ( ; pkts_mask; ) {
- struct bucket *bkt0, *bkt;
- struct rte_mbuf *pkt;
- uint8_t *key;
- uint64_t pkt_mask, sig;
- uint32_t pkt_index, bkt_index, i;
-
- pkt_index = __builtin_ctzll(pkts_mask);
- pkt_mask = 1LLU << pkt_index;
- pkts_mask &= ~pkt_mask;
-
- pkt = pkts[pkt_index];
- key = RTE_MBUF_METADATA_UINT8_PTR(pkt, t->key_offset);
- if (dosig)
- sig = (uint64_t) t->f_hash(key, t->key_size, t->seed);
- else
- sig = RTE_MBUF_METADATA_UINT32(pkt,
- t->signature_offset);
-
- bkt_index = sig & t->bucket_mask;
- bkt0 = &t->buckets[bkt_index];
- sig = (sig >> 16) | 1LLU;
-
- /* Key is present in the bucket */
- for (bkt = bkt0; bkt != NULL; bkt = BUCKET_NEXT(bkt))
- for (i = 0; i < KEYS_PER_BUCKET; i++) {
- uint64_t bkt_sig = (uint64_t) bkt->sig[i];
- uint32_t bkt_key_index = bkt->key_pos[i];
- uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
- t->key_size_shl];
-
- if ((sig == bkt_sig) && (memcmp(key, bkt_key,
- t->key_size) == 0)) {
- uint8_t *data = &t->data_mem[
- bkt_key_index << t->data_size_shl];
-
- pkts_mask_out |= pkt_mask;
- entries[pkt_index] = (void *) data;
- break;
- }
- }
- }
-
- *lookup_hit_mask = pkts_mask_out;
- return 0;
-}
-
-/***
- *
- * mask = match bitmask
- * match = at least one match
- * match_many = more than one match
- * match_pos = position of first match
- *
- *----------------------------------------
- * mask match match_many match_pos
- *----------------------------------------
- * 0000 0 0 00
- * 0001 1 0 00
- * 0010 1 0 01
- * 0011 1 1 00
- *----------------------------------------
- * 0100 1 0 10
- * 0101 1 1 00
- * 0110 1 1 01
- * 0111 1 1 00
- *----------------------------------------
- * 1000 1 0 11
- * 1001 1 1 00
- * 1010 1 1 01
- * 1011 1 1 00
- *----------------------------------------
- * 1100 1 1 10
- * 1101 1 1 00
- * 1110 1 1 01
- * 1111 1 1 00
- *----------------------------------------
- *
- * match = 1111_1111_1111_1110
- * match_many = 1111_1110_1110_1000
- * match_pos = 0001_0010_0001_0011__0001_0010_0001_0000
- *
- * match = 0xFFFELLU
- * match_many = 0xFEE8LLU
- * match_pos = 0x12131210LLU
- *
- ***/
-
-#define LUT_MATCH 0xFFFELLU
-#define LUT_MATCH_MANY 0xFEE8LLU
-#define LUT_MATCH_POS 0x12131210LLU
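
These three constants pack the whole 16-row table above into bitmaps: one bit per 4-bit mask value for match and match_many, and two bits per value for match_pos, so lookup_cmp_sig below recovers all three outputs with shifts and masks alone. A tiny standalone check that decodes and prints the full table:

#include <stdint.h>
#include <stdio.h>

#define LUT_MATCH	0xFFFELLU
#define LUT_MATCH_MANY	0xFEE8LLU
#define LUT_MATCH_POS	0x12131210LLU

int main(void)
{
	uint64_t mask_all;

	for (mask_all = 0; mask_all < 16; mask_all++) {
		uint64_t match = (LUT_MATCH >> mask_all) & 1;
		uint64_t match_many = (LUT_MATCH_MANY >> mask_all) & 1;
		uint64_t match_pos = (LUT_MATCH_POS >> (mask_all << 1)) & 3;

		printf("mask %2llu: match=%llu many=%llu pos=%llu\n",
		       (unsigned long long)mask_all,
		       (unsigned long long)match,
		       (unsigned long long)match_many,
		       (unsigned long long)match_pos);
	}
	return 0;
}
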
-
-#define lookup_cmp_sig(mbuf_sig, bucket, match, match_many, match_pos) \
-{ \
- uint64_t bucket_sig[4], mask[4], mask_all; \
- \
- bucket_sig[0] = bucket->sig[0]; \
- bucket_sig[1] = bucket->sig[1]; \
- bucket_sig[2] = bucket->sig[2]; \
- bucket_sig[3] = bucket->sig[3]; \
- \
- bucket_sig[0] ^= mbuf_sig; \
- bucket_sig[1] ^= mbuf_sig; \
- bucket_sig[2] ^= mbuf_sig; \
- bucket_sig[3] ^= mbuf_sig; \
- \
- mask[0] = 0; \
- mask[1] = 0; \
- mask[2] = 0; \
- mask[3] = 0; \
- \
- if (bucket_sig[0] == 0) \
- mask[0] = 1; \
- if (bucket_sig[1] == 0) \
- mask[1] = 2; \
- if (bucket_sig[2] == 0) \
- mask[2] = 4; \
- if (bucket_sig[3] == 0) \
- mask[3] = 8; \
- \
- mask_all = (mask[0] | mask[1]) | (mask[2] | mask[3]); \
- \
- match = (LUT_MATCH >> mask_all) & 1; \
- match_many = (LUT_MATCH_MANY >> mask_all) & 1; \
- match_pos = (LUT_MATCH_POS >> (mask_all << 1)) & 3; \
-}
-
-#define lookup_cmp_key(mbuf, key, match_key, f) \
-{ \
- uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(mbuf, f->key_offset);\
- uint64_t *bkt_key = (uint64_t *) key; \
- \
- switch (f->key_size) { \
- case 8: \
- { \
- uint64_t xor = pkt_key[0] ^ bkt_key[0]; \
- match_key = 0; \
- if (xor == 0) \
- match_key = 1; \
- } \
- break; \
- \
- case 16: \
- { \
- uint64_t xor[2], or; \
- \
- xor[0] = pkt_key[0] ^ bkt_key[0]; \
- xor[1] = pkt_key[1] ^ bkt_key[1]; \
- or = xor[0] | xor[1]; \
- match_key = 0; \
- if (or == 0) \
- match_key = 1; \
- } \
- break; \
- \
- case 32: \
- { \
- uint64_t xor[4], or; \
- \
- xor[0] = pkt_key[0] ^ bkt_key[0]; \
- xor[1] = pkt_key[1] ^ bkt_key[1]; \
- xor[2] = pkt_key[2] ^ bkt_key[2]; \
- xor[3] = pkt_key[3] ^ bkt_key[3]; \
- or = xor[0] | xor[1] | xor[2] | xor[3]; \
- match_key = 0; \
- if (or == 0) \
- match_key = 1; \
- } \
- break; \
- \
- case 64: \
- { \
- uint64_t xor[8], or; \
- \
- xor[0] = pkt_key[0] ^ bkt_key[0]; \
- xor[1] = pkt_key[1] ^ bkt_key[1]; \
- xor[2] = pkt_key[2] ^ bkt_key[2]; \
- xor[3] = pkt_key[3] ^ bkt_key[3]; \
- xor[4] = pkt_key[4] ^ bkt_key[4]; \
- xor[5] = pkt_key[5] ^ bkt_key[5]; \
- xor[6] = pkt_key[6] ^ bkt_key[6]; \
- xor[7] = pkt_key[7] ^ bkt_key[7]; \
- or = xor[0] | xor[1] | xor[2] | xor[3] | \
- xor[4] | xor[5] | xor[6] | xor[7]; \
- match_key = 0; \
- if (or == 0) \
- match_key = 1; \
- } \
- break; \
- \
- default: \
- match_key = 0; \
- if (memcmp(pkt_key, bkt_key, f->key_size) == 0) \
- match_key = 1; \
- } \
-}
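
The macro above specializes key comparison by size: XOR the corresponding 64-bit words, OR the partial results, and test once for zero, turning a byte-wise memcmp into branch-free straight-line code for the common power-of-two key sizes. A reduced sketch for a 16-byte key:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Branch-free equality test for two 16-byte keys. */
static int key16_equal(const uint64_t *a, const uint64_t *b)
{
	uint64_t xor0 = a[0] ^ b[0]; /* zero iff the first words match */
	uint64_t xor1 = a[1] ^ b[1];

	return (xor0 | xor1) == 0;   /* zero iff both words match */
}

int main(void)
{
	uint64_t k1[2] = { 0x1111222233334444ULL, 0x5555666677778888ULL };
	uint64_t k2[2] = { 0x1111222233334444ULL, 0x5555666677778888ULL };
	uint64_t k3[2] = { 0x1111222233334444ULL, 0x0ULL };

	printf("k1 == k2: %d\n", key16_equal(k1, k2)); /* 1 */
	printf("k1 == k3: %d\n", key16_equal(k1, k3)); /* 0 */
	printf("memcmp agrees: %d\n",
	       (memcmp(k1, k2, 16) == 0) == key16_equal(k1, k2));
	return 0;
}
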
-
-#define lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index) \
-{ \
- uint64_t pkt00_mask, pkt01_mask; \
- struct rte_mbuf *mbuf00, *mbuf01; \
- \
- pkt00_index = __builtin_ctzll(pkts_mask); \
- pkt00_mask = 1LLU << pkt00_index; \
- pkts_mask &= ~pkt00_mask; \
- mbuf00 = pkts[pkt00_index]; \
- \
- pkt01_index = __builtin_ctzll(pkts_mask); \
- pkt01_mask = 1LLU << pkt01_index; \
- pkts_mask &= ~pkt01_mask; \
- mbuf01 = pkts[pkt01_index]; \
- \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
-}
-
-#define lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask, pkt00_index, \
- pkt01_index) \
-{ \
- uint64_t pkt00_mask, pkt01_mask; \
- struct rte_mbuf *mbuf00, *mbuf01; \
- \
- pkt00_index = __builtin_ctzll(pkts_mask); \
- pkt00_mask = 1LLU << pkt00_index; \
- pkts_mask &= ~pkt00_mask; \
- mbuf00 = pkts[pkt00_index]; \
- \
- pkt01_index = __builtin_ctzll(pkts_mask); \
- if (pkts_mask == 0) \
- pkt01_index = pkt00_index; \
- pkt01_mask = 1LLU << pkt01_index; \
- pkts_mask &= ~pkt01_mask; \
- mbuf01 = pkts[pkt01_index]; \
- \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
-}
-
-#define lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index) \
-{ \
- struct grinder *g10, *g11; \
- uint64_t sig10, sig11, bkt10_index, bkt11_index; \
- struct rte_mbuf *mbuf10, *mbuf11; \
- struct bucket *bkt10, *bkt11, *buckets = t->buckets; \
- uint64_t bucket_mask = t->bucket_mask; \
- uint32_t signature_offset = t->signature_offset; \
- \
- mbuf10 = pkts[pkt10_index]; \
- sig10 = (uint64_t) RTE_MBUF_METADATA_UINT32(mbuf10, signature_offset);\
- bkt10_index = sig10 & bucket_mask; \
- bkt10 = &buckets[bkt10_index]; \
- \
- mbuf11 = pkts[pkt11_index]; \
- sig11 = (uint64_t) RTE_MBUF_METADATA_UINT32(mbuf11, signature_offset);\
- bkt11_index = sig11 & bucket_mask; \
- bkt11 = &buckets[bkt11_index]; \
- \
- rte_prefetch0(bkt10); \
- rte_prefetch0(bkt11); \
- \
- g10 = &g[pkt10_index]; \
- g10->sig = sig10; \
- g10->bkt = bkt10; \
- \
- g11 = &g[pkt11_index]; \
- g11->sig = sig11; \
- g11->bkt = bkt11; \
-}
-
-#define lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index) \
-{ \
- struct grinder *g10, *g11; \
- uint64_t sig10, sig11, bkt10_index, bkt11_index; \
- struct rte_mbuf *mbuf10, *mbuf11; \
- struct bucket *bkt10, *bkt11, *buckets = t->buckets; \
- uint8_t *key10, *key11; \
- uint64_t bucket_mask = t->bucket_mask; \
- rte_table_hash_op_hash f_hash = t->f_hash; \
- uint64_t seed = t->seed; \
- uint32_t key_size = t->key_size; \
- uint32_t key_offset = t->key_offset; \
- \
- mbuf10 = pkts[pkt10_index]; \
- key10 = RTE_MBUF_METADATA_UINT8_PTR(mbuf10, key_offset); \
- sig10 = (uint64_t) f_hash(key10, key_size, seed); \
- bkt10_index = sig10 & bucket_mask; \
- bkt10 = &buckets[bkt10_index]; \
- \
- mbuf11 = pkts[pkt11_index]; \
- key11 = RTE_MBUF_METADATA_UINT8_PTR(mbuf11, key_offset); \
- sig11 = (uint64_t) f_hash(key11, key_size, seed); \
- bkt11_index = sig11 & bucket_mask; \
- bkt11 = &buckets[bkt11_index]; \
- \
- rte_prefetch0(bkt10); \
- rte_prefetch0(bkt11); \
- \
- g10 = &g[pkt10_index]; \
- g10->sig = sig10; \
- g10->bkt = bkt10; \
- \
- g11 = &g[pkt11_index]; \
- g11->sig = sig11; \
- g11->bkt = bkt11; \
-}
-
-#define lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many)\
-{ \
- struct grinder *g20, *g21; \
- uint64_t sig20, sig21; \
- struct bucket *bkt20, *bkt21; \
- uint8_t *key20, *key21, *key_mem = t->key_mem; \
- uint64_t match20, match21, match_many20, match_many21; \
- uint64_t match_pos20, match_pos21; \
- uint32_t key20_index, key21_index, key_size_shl = t->key_size_shl;\
- \
- g20 = &g[pkt20_index]; \
- sig20 = g20->sig; \
- bkt20 = g20->bkt; \
- sig20 = (sig20 >> 16) | 1LLU; \
- lookup_cmp_sig(sig20, bkt20, match20, match_many20, match_pos20);\
- match20 <<= pkt20_index; \
- match_many20 |= BUCKET_NEXT_VALID(bkt20); \
- match_many20 <<= pkt20_index; \
- key20_index = bkt20->key_pos[match_pos20]; \
- key20 = &key_mem[key20_index << key_size_shl]; \
- \
- g21 = &g[pkt21_index]; \
- sig21 = g21->sig; \
- bkt21 = g21->bkt; \
- sig21 = (sig21 >> 16) | 1LLU; \
- lookup_cmp_sig(sig21, bkt21, match21, match_many21, match_pos21);\
- match21 <<= pkt21_index; \
- match_many21 |= BUCKET_NEXT_VALID(bkt21); \
- match_many21 <<= pkt21_index; \
- key21_index = bkt21->key_pos[match_pos21]; \
- key21 = &key_mem[key21_index << key_size_shl]; \
- \
- rte_prefetch0(key20); \
- rte_prefetch0(key21); \
- \
- pkts_mask_match_many |= match_many20 | match_many21; \
- \
- g20->match = match20; \
- g20->key_index = key20_index; \
- \
- g21->match = match21; \
- g21->key_index = key21_index; \
-}
-
-#define lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out, \
- entries) \
-{ \
- struct grinder *g30, *g31; \
- struct rte_mbuf *mbuf30, *mbuf31; \
- uint8_t *key30, *key31, *key_mem = t->key_mem; \
- uint8_t *data30, *data31, *data_mem = t->data_mem; \
- uint64_t match30, match31, match_key30, match_key31, match_keys;\
- uint32_t key30_index, key31_index; \
- uint32_t key_size_shl = t->key_size_shl; \
- uint32_t data_size_shl = t->data_size_shl; \
- \
- mbuf30 = pkts[pkt30_index]; \
- g30 = &g[pkt30_index]; \
- match30 = g30->match; \
- key30_index = g30->key_index; \
- key30 = &key_mem[key30_index << key_size_shl]; \
- lookup_cmp_key(mbuf30, key30, match_key30, t); \
- match_key30 <<= pkt30_index; \
- match_key30 &= match30; \
- data30 = &data_mem[key30_index << data_size_shl]; \
- entries[pkt30_index] = data30; \
- \
- mbuf31 = pkts[pkt31_index]; \
- g31 = &g[pkt31_index]; \
- match31 = g31->match; \
- key31_index = g31->key_index; \
- key31 = &key_mem[key31_index << key_size_shl]; \
- lookup_cmp_key(mbuf31, key31, match_key31, t); \
- match_key31 <<= pkt31_index; \
- match_key31 &= match31; \
- data31 = &data_mem[key31_index << data_size_shl]; \
- entries[pkt31_index] = data31; \
- \
- rte_prefetch0(data30); \
- rte_prefetch0(data31); \
- \
- match_keys = match_key30 | match_key31; \
- pkts_mask_out |= match_keys; \
-}
-
-/***
-* The lookup function implements a 4-stage pipeline, with each stage processing
-* two different packets. The purpose of the pipelined implementation is to hide
-* the latency of prefetching the data structures and to loosen the data
-* dependency between instructions.
-*
-* p00 _______ p10 _______ p20 _______ p30 _______
-*----->| |----->| |----->| |----->| |----->
-* | 0 | | 1 | | 2 | | 3 |
-*----->|_______|----->|_______|----->|_______|----->|_______|----->
-* p01 p11 p21 p31
-*
-* The naming convention is:
-* pXY = packet Y of stage X, X = 0 .. 3, Y = 0 .. 1
-*
-***/
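
Stripped of the hashing detail, the function below follows the classic software-pipeline shape: a prologue that primes stages 0-2, a steady-state loop that advances all four stages each iteration, and an epilogue that drains the in-flight packets. A schematic sketch of that shape with one packet per stage and trivial stand-in stages:

#include <stdio.h>

#define N_PKTS 8

/* Trivial stand-ins: the real stages prefetch mbufs, buckets, keys, data. */
static void stage0(int p) { printf("stage0 pkt %d\n", p); }
static void stage1(int p) { printf("stage1 pkt %d\n", p); }
static void stage2(int p) { printf("stage2 pkt %d\n", p); }
static void stage3(int p) { printf("stage3 pkt %d\n", p); }

int main(void)
{
	int pkt0, pkt1, pkt2, pkt3; /* packet currently in each stage */
	int next = 0;

	/* Prologue: fill the pipeline one stage at a time */
	pkt0 = next++;
	stage0(pkt0);

	pkt1 = pkt0; pkt0 = next++;
	stage0(pkt0); stage1(pkt1);

	pkt2 = pkt1; pkt1 = pkt0; pkt0 = next++;
	stage0(pkt0); stage1(pkt1); stage2(pkt2);

	/* Steady state: every iteration advances all four stages */
	while (next < N_PKTS) {
		pkt3 = pkt2; pkt2 = pkt1; pkt1 = pkt0; pkt0 = next++;
		stage0(pkt0); stage1(pkt1); stage2(pkt2); stage3(pkt3);
	}

	/* Epilogue: drain the three packets still in flight */
	pkt3 = pkt2; pkt2 = pkt1; pkt1 = pkt0;
	stage1(pkt1); stage2(pkt2); stage3(pkt3);

	pkt3 = pkt2; pkt2 = pkt1;
	stage2(pkt2); stage3(pkt3);

	pkt3 = pkt2;
	stage3(pkt3);

	return 0;
}

The real implementation runs two packets per stage and handles odd packet counts specially; the skeleton only shows why the feed/stage sequencing is structured the way it is.
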
-static int rte_table_hash_ext_lookup(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
- struct grinder *g = t->grinders;
- uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
- uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
- uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
- int status = 0;
-
- /* Cannot run the pipeline with fewer than 7 packets */
- if (__builtin_popcountll(pkts_mask) < 7)
- return rte_table_hash_ext_lookup_unoptimized(table, pkts,
- pkts_mask, lookup_hit_mask, entries, 0);
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline feed */
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline feed */
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
- pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index,
- pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
- pkts_mask_out, entries);
- }
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Slow path */
- pkts_mask_match_many &= ~pkts_mask_out;
- if (pkts_mask_match_many) {
- uint64_t pkts_mask_out_slow = 0;
-
- status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
- pkts_mask_match_many, &pkts_mask_out_slow, entries, 0);
- pkts_mask_out |= pkts_mask_out_slow;
- }
-
- *lookup_hit_mask = pkts_mask_out;
- return status;
-}
-
-static int rte_table_hash_ext_lookup_dosig(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
- struct grinder *g = t->grinders;
- uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
- uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
- uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
- int status = 0;
-
- /* Cannot run the pipeline with fewer than 7 packets */
- if (__builtin_popcountll(pkts_mask) < 7)
- return rte_table_hash_ext_lookup_unoptimized(table, pkts,
- pkts_mask, lookup_hit_mask, entries, 1);
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline feed */
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline feed */
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
- pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index,
- pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
- pkts_mask_out, entries);
- }
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Slow path */
- pkts_mask_match_many &= ~pkts_mask_out;
- if (pkts_mask_match_many) {
- uint64_t pkts_mask_out_slow = 0;
-
- status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
- pkts_mask_match_many, &pkts_mask_out_slow, entries, 1);
- pkts_mask_out |= pkts_mask_out_slow;
- }
-
- *lookup_hit_mask = pkts_mask_out;
- return status;
-}
-
-struct rte_table_ops rte_table_hash_ext_ops = {
- .f_create = rte_table_hash_ext_create,
- .f_free = rte_table_hash_ext_free,
- .f_add = rte_table_hash_ext_entry_add,
- .f_delete = rte_table_hash_ext_entry_delete,
- .f_lookup = rte_table_hash_ext_lookup,
-};
-
-struct rte_table_ops rte_table_hash_ext_dosig_ops = {
- .f_create = rte_table_hash_ext_create,
- .f_free = rte_table_hash_ext_free,
- .f_add = rte_table_hash_ext_entry_add,
- .f_delete = rte_table_hash_ext_entry_delete,
- .f_lookup = rte_table_hash_ext_lookup_dosig,
-};
diff --git a/src/dpdk_lib18/librte_table/rte_table_hash_key16.c b/src/dpdk_lib18/librte_table/rte_table_hash_key16.c
deleted file mode 100755
index ee5f639b..00000000
--- a/src/dpdk_lib18/librte_table/rte_table_hash_key16.c
+++ /dev/null
@@ -1,1101 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <string.h>
-#include <stdio.h>
-
-#include <rte_common.h>
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_malloc.h>
-#include <rte_log.h>
-
-#include "rte_table_hash.h"
-#include "rte_lru.h"
-
-#define RTE_TABLE_HASH_KEY_SIZE 16
-
-#define RTE_BUCKET_ENTRY_VALID 0x1LLU
-
-struct rte_bucket_4_16 {
- /* Cache line 0 */
- uint64_t signature[4 + 1];
- uint64_t lru_list;
- struct rte_bucket_4_16 *next;
- uint64_t next_valid;
-
- /* Cache line 1 */
- uint64_t key[4][2];
-
- /* Cache line 2 */
- uint8_t data[0];
-};
-
-struct rte_table_hash {
- /* Input parameters */
- uint32_t n_buckets;
- uint32_t n_entries_per_bucket;
- uint32_t key_size;
- uint32_t entry_size;
- uint32_t bucket_size;
- uint32_t signature_offset;
- uint32_t key_offset;
- rte_table_hash_op_hash f_hash;
- uint64_t seed;
-
- /* Extendible buckets */
- uint32_t n_buckets_ext;
- uint32_t stack_pos;
- uint32_t *stack;
-
- /* Lookup table */
- uint8_t memory[0] __rte_cache_aligned;
-};
-
-static int
-check_params_create_lru(struct rte_table_hash_key16_lru_params *params)
-{
- /* n_entries */
- if (params->n_entries == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
- return -EINVAL;
- }
-
- /* signature offset */
- if ((params->signature_offset & 0x3) != 0) {
- RTE_LOG(ERR, TABLE, "%s: invalid signature_offset\n", __func__);
- return -EINVAL;
- }
-
- /* key offset */
- if ((params->key_offset & 0x7) != 0) {
- RTE_LOG(ERR, TABLE, "%s: invalid key_offset\n", __func__);
- return -EINVAL;
- }
-
- /* f_hash */
- if (params->f_hash == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: f_hash function pointer is NULL\n", __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void *
-rte_table_hash_create_key16_lru(void *params,
- int socket_id,
- uint32_t entry_size)
-{
- struct rte_table_hash_key16_lru_params *p =
- (struct rte_table_hash_key16_lru_params *) params;
- struct rte_table_hash *f;
- uint32_t n_buckets, n_entries_per_bucket,
- key_size, bucket_size_cl, total_size, i;
-
- /* Check input parameters */
- if ((check_params_create_lru(p) != 0) ||
- ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
- ((sizeof(struct rte_bucket_4_16) % RTE_CACHE_LINE_SIZE) != 0))
- return NULL;
- n_entries_per_bucket = 4;
- key_size = 16;
-
- /* Memory allocation */
- n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
- n_entries_per_bucket);
- bucket_size_cl = (sizeof(struct rte_bucket_4_16) + n_entries_per_bucket
- * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
- total_size = sizeof(struct rte_table_hash) + n_buckets *
- bucket_size_cl * RTE_CACHE_LINE_SIZE;
-
- f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
- if (f == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
- return NULL;
- }
- RTE_LOG(INFO, TABLE,
- "%s: Hash table memory footprint is %u bytes\n",
- __func__, total_size);
-
- /* Memory initialization */
- f->n_buckets = n_buckets;
- f->n_entries_per_bucket = n_entries_per_bucket;
- f->key_size = key_size;
- f->entry_size = entry_size;
- f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
- f->signature_offset = p->signature_offset;
- f->key_offset = p->key_offset;
- f->f_hash = p->f_hash;
- f->seed = p->seed;
-
- for (i = 0; i < n_buckets; i++) {
- struct rte_bucket_4_16 *bucket;
-
- bucket = (struct rte_bucket_4_16 *) &f->memory[i *
- f->bucket_size];
- lru_init(bucket);
- }
-
- return f;
-}
-
-static int
-rte_table_hash_free_key16_lru(void *table)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
-
- /* Check input parameters */
- if (f == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
- rte_free(f);
- return 0;
-}
-
-static int
-rte_table_hash_entry_add_key16_lru(
- void *table,
- void *key,
- void *entry,
- int *key_found,
- void **entry_ptr)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_16 *bucket;
- uint64_t signature, pos;
- uint32_t bucket_index, i;
-
- signature = f->f_hash(key, f->key_size, f->seed);
- bucket_index = signature & (f->n_buckets - 1);
- bucket = (struct rte_bucket_4_16 *)
- &f->memory[bucket_index * f->bucket_size];
- signature |= RTE_BUCKET_ENTRY_VALID;
-
- /* Key is present in the bucket */
- for (i = 0; i < 4; i++) {
- uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
-
- if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
- uint8_t *bucket_data = &bucket->data[i * f->entry_size];
-
- memcpy(bucket_data, entry, f->entry_size);
- lru_update(bucket, i);
- *key_found = 1;
- *entry_ptr = (void *) bucket_data;
- return 0;
- }
- }
-
- /* Key is not present in the bucket */
- for (i = 0; i < 4; i++) {
- uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
-
- if (bucket_signature == 0) {
- uint8_t *bucket_data = &bucket->data[i * f->entry_size];
-
- bucket->signature[i] = signature;
- memcpy(bucket_key, key, f->key_size);
- memcpy(bucket_data, entry, f->entry_size);
- lru_update(bucket, i);
- *key_found = 0;
- *entry_ptr = (void *) bucket_data;
-
- return 0;
- }
- }
-
- /* Bucket full: replace LRU entry */
- pos = lru_pos(bucket);
- bucket->signature[pos] = signature;
- memcpy(bucket->key[pos], key, f->key_size);
- memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
- lru_update(bucket, pos);
- *key_found = 0;
- *entry_ptr = (void *) &bucket->data[pos * f->entry_size];
-
- return 0;
-}
-
-static int
-rte_table_hash_entry_delete_key16_lru(
- void *table,
- void *key,
- int *key_found,
- void *entry)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_16 *bucket;
- uint64_t signature;
- uint32_t bucket_index, i;
-
- signature = f->f_hash(key, f->key_size, f->seed);
- bucket_index = signature & (f->n_buckets - 1);
- bucket = (struct rte_bucket_4_16 *)
- &f->memory[bucket_index * f->bucket_size];
- signature |= RTE_BUCKET_ENTRY_VALID;
-
- /* Key is present in the bucket */
- for (i = 0; i < 4; i++) {
- uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
-
- if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
- uint8_t *bucket_data = &bucket->data[i * f->entry_size];
-
- bucket->signature[i] = 0;
- *key_found = 1;
- if (entry)
- memcpy(entry, bucket_data, f->entry_size);
- return 0;
- }
- }
-
- /* Key is not present in the bucket */
- *key_found = 0;
- return 0;
-}
-
-static int
-check_params_create_ext(struct rte_table_hash_key16_ext_params *params) {
- /* n_entries */
- if (params->n_entries == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
- return -EINVAL;
- }
-
- /* n_entries_ext */
- if (params->n_entries_ext == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries_ext is zero\n", __func__);
- return -EINVAL;
- }
-
- /* signature offset */
- if ((params->signature_offset & 0x3) != 0) {
- RTE_LOG(ERR, TABLE, "%s: invalid signature offset\n", __func__);
- return -EINVAL;
- }
-
- /* key offset */
- if ((params->key_offset & 0x7) != 0) {
- RTE_LOG(ERR, TABLE, "%s: invalid key offset\n", __func__);
- return -EINVAL;
- }
-
- /* f_hash */
- if (params->f_hash == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: f_hash function pointer is NULL\n", __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void *
-rte_table_hash_create_key16_ext(void *params,
- int socket_id,
- uint32_t entry_size)
-{
- struct rte_table_hash_key16_ext_params *p =
- (struct rte_table_hash_key16_ext_params *) params;
- struct rte_table_hash *f;
- uint32_t n_buckets, n_buckets_ext, n_entries_per_bucket, key_size,
- bucket_size_cl, stack_size_cl, total_size, i;
-
- /* Check input parameters */
- if ((check_params_create_ext(p) != 0) ||
- ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
- ((sizeof(struct rte_bucket_4_16) % RTE_CACHE_LINE_SIZE) != 0))
- return NULL;
-
- n_entries_per_bucket = 4;
- key_size = 16;
-
- /* Memory allocation */
- n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
- n_entries_per_bucket);
- n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
- n_entries_per_bucket;
- bucket_size_cl = (sizeof(struct rte_bucket_4_16) + n_entries_per_bucket
- * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
- stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
- / RTE_CACHE_LINE_SIZE;
- total_size = sizeof(struct rte_table_hash) +
- ((n_buckets + n_buckets_ext) * bucket_size_cl + stack_size_cl) *
- RTE_CACHE_LINE_SIZE;
-
- f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
- if (f == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
- return NULL;
- }
- RTE_LOG(INFO, TABLE,
- "%s: Hash table memory footprint is %u bytes\n",
- __func__, total_size);
-
- /* Memory initialization */
- f->n_buckets = n_buckets;
- f->n_entries_per_bucket = n_entries_per_bucket;
- f->key_size = key_size;
- f->entry_size = entry_size;
- f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
- f->signature_offset = p->signature_offset;
- f->key_offset = p->key_offset;
- f->f_hash = p->f_hash;
- f->seed = p->seed;
-
- f->n_buckets_ext = n_buckets_ext;
- f->stack_pos = n_buckets_ext;
- f->stack = (uint32_t *)
- &f->memory[(n_buckets + n_buckets_ext) * f->bucket_size];
-
- for (i = 0; i < n_buckets_ext; i++)
- f->stack[i] = i;
-
- return f;
-}
-
-static int
-rte_table_hash_free_key16_ext(void *table)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
-
- /* Check input parameters */
- if (f == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
- rte_free(f);
- return 0;
-}
-
-static int
-rte_table_hash_entry_add_key16_ext(
- void *table,
- void *key,
- void *entry,
- int *key_found,
- void **entry_ptr)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_16 *bucket0, *bucket, *bucket_prev;
- uint64_t signature;
- uint32_t bucket_index, i;
-
- signature = f->f_hash(key, f->key_size, f->seed);
- bucket_index = signature & (f->n_buckets - 1);
- bucket0 = (struct rte_bucket_4_16 *)
- &f->memory[bucket_index * f->bucket_size];
- signature |= RTE_BUCKET_ENTRY_VALID;
-
- /* Key is present in the bucket */
- for (bucket = bucket0; bucket != NULL; bucket = bucket->next)
- for (i = 0; i < 4; i++) {
- uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
-
- if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
- uint8_t *bucket_data = &bucket->data[i *
- f->entry_size];
-
- memcpy(bucket_data, entry, f->entry_size);
- *key_found = 1;
- *entry_ptr = (void *) bucket_data;
- return 0;
- }
- }
-
- /* Key is not present in the bucket */
- for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
- bucket_prev = bucket, bucket = bucket->next)
- for (i = 0; i < 4; i++) {
- uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
-
- if (bucket_signature == 0) {
- uint8_t *bucket_data = &bucket->data[i *
- f->entry_size];
-
- bucket->signature[i] = signature;
- memcpy(bucket_key, key, f->key_size);
- memcpy(bucket_data, entry, f->entry_size);
- *key_found = 0;
- *entry_ptr = (void *) bucket_data;
-
- return 0;
- }
- }
-
- /* Bucket full: extend bucket */
- if (f->stack_pos > 0) {
- bucket_index = f->stack[--f->stack_pos];
-
- bucket = (struct rte_bucket_4_16 *) &f->memory[(f->n_buckets +
- bucket_index) * f->bucket_size];
- bucket_prev->next = bucket;
- bucket_prev->next_valid = 1;
-
- bucket->signature[0] = signature;
- memcpy(bucket->key[0], key, f->key_size);
- memcpy(&bucket->data[0], entry, f->entry_size);
- *key_found = 0;
- *entry_ptr = (void *) &bucket->data[0];
- return 0;
- }
-
- return -ENOSPC;
-}
-
-static int
-rte_table_hash_entry_delete_key16_ext(
- void *table,
- void *key,
- int *key_found,
- void *entry)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_16 *bucket0, *bucket, *bucket_prev;
- uint64_t signature;
- uint32_t bucket_index, i;
-
- signature = f->f_hash(key, f->key_size, f->seed);
- bucket_index = signature & (f->n_buckets - 1);
- bucket0 = (struct rte_bucket_4_16 *)
- &f->memory[bucket_index * f->bucket_size];
- signature |= RTE_BUCKET_ENTRY_VALID;
-
- /* Key is present in the bucket */
- for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
- bucket_prev = bucket, bucket = bucket->next)
- for (i = 0; i < 4; i++) {
- uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
-
- if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
- uint8_t *bucket_data = &bucket->data[i *
- f->entry_size];
-
- bucket->signature[i] = 0;
- *key_found = 1;
- if (entry)
- memcpy(entry, bucket_data,
- f->entry_size);
-
- if ((bucket->signature[0] == 0) &&
- (bucket->signature[1] == 0) &&
- (bucket->signature[2] == 0) &&
- (bucket->signature[3] == 0) &&
- (bucket_prev != NULL)) {
- bucket_prev->next = bucket->next;
- bucket_prev->next_valid =
- bucket->next_valid;
-
- memset(bucket, 0,
- sizeof(struct rte_bucket_4_16));
- bucket_index = (bucket -
- ((struct rte_bucket_4_16 *)
- f->memory)) - f->n_buckets;
- f->stack[f->stack_pos++] = bucket_index;
- }
-
- return 0;
- }
- }
-
- /* Key is not present in the bucket */
- *key_found = 0;
- return 0;
-}
-
-#define lookup_key16_cmp(key_in, bucket, pos) \
-{ \
- uint64_t xor[4][2], or[4], signature[4]; \
- \
- signature[0] = (~bucket->signature[0]) & 1; \
- signature[1] = (~bucket->signature[1]) & 1; \
- signature[2] = (~bucket->signature[2]) & 1; \
- signature[3] = (~bucket->signature[3]) & 1; \
- \
- xor[0][0] = key_in[0] ^ bucket->key[0][0]; \
- xor[0][1] = key_in[1] ^ bucket->key[0][1]; \
- \
- xor[1][0] = key_in[0] ^ bucket->key[1][0]; \
- xor[1][1] = key_in[1] ^ bucket->key[1][1]; \
- \
- xor[2][0] = key_in[0] ^ bucket->key[2][0]; \
- xor[2][1] = key_in[1] ^ bucket->key[2][1]; \
- \
- xor[3][0] = key_in[0] ^ bucket->key[3][0]; \
- xor[3][1] = key_in[1] ^ bucket->key[3][1]; \
- \
- or[0] = xor[0][0] | xor[0][1] | signature[0]; \
- or[1] = xor[1][0] | xor[1][1] | signature[1]; \
- or[2] = xor[2][0] | xor[2][1] | signature[2]; \
- or[3] = xor[3][0] | xor[3][1] | signature[3]; \
- \
- pos = 4; \
- if (or[0] == 0) \
- pos = 0; \
- if (or[1] == 0) \
- pos = 1; \
- if (or[2] == 0) \
- pos = 2; \
- if (or[3] == 0) \
- pos = 3; \
-}
-
-#define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask) \
-{ \
- uint64_t pkt_mask; \
- \
- pkt0_index = __builtin_ctzll(pkts_mask); \
- pkt_mask = 1LLU << pkt0_index; \
- pkts_mask &= ~pkt_mask; \
- \
- mbuf0 = pkts[pkt0_index]; \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, 0)); \
-}
-
-#define lookup1_stage1(mbuf1, bucket1, f) \
-{ \
- uint64_t signature; \
- uint32_t bucket_index; \
- \
- signature = RTE_MBUF_METADATA_UINT32(mbuf1, f->signature_offset);\
- bucket_index = signature & (f->n_buckets - 1); \
- bucket1 = (struct rte_bucket_4_16 *) \
- &f->memory[bucket_index * f->bucket_size]; \
- rte_prefetch0(bucket1); \
- rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE));\
-}
-
-#define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
- pkts_mask_out, entries, f) \
-{ \
- void *a; \
- uint64_t pkt_mask; \
- uint64_t *key; \
- uint32_t pos; \
- \
- key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
- \
- lookup_key16_cmp(key, bucket2, pos); \
- \
- pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
- pkts_mask_out |= pkt_mask; \
- \
- a = (void *) &bucket2->data[pos * f->entry_size]; \
- rte_prefetch0(a); \
- entries[pkt2_index] = a; \
- lru_update(bucket2, pos); \
-}
-
-#define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out, entries, \
- buckets_mask, buckets, keys, f) \
-{ \
- struct rte_bucket_4_16 *bucket_next; \
- void *a; \
- uint64_t pkt_mask, bucket_mask; \
- uint64_t *key; \
- uint32_t pos; \
- \
- key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
- \
- lookup_key16_cmp(key, bucket2, pos); \
- \
- pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
- pkts_mask_out |= pkt_mask; \
- \
- a = (void *) &bucket2->data[pos * f->entry_size]; \
- rte_prefetch0(a); \
- entries[pkt2_index] = a; \
- \
- bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index);\
- buckets_mask |= bucket_mask; \
- bucket_next = bucket2->next; \
- buckets[pkt2_index] = bucket_next; \
- keys[pkt2_index] = key; \
-}
-
-#define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, entries,\
- buckets_mask, f) \
-{ \
- struct rte_bucket_4_16 *bucket, *bucket_next; \
- void *a; \
- uint64_t pkt_mask, bucket_mask; \
- uint64_t *key; \
- uint32_t pos; \
- \
- bucket = buckets[pkt_index]; \
- key = keys[pkt_index]; \
- \
- lookup_key16_cmp(key, bucket, pos); \
- \
- pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index;\
- pkts_mask_out |= pkt_mask; \
- \
- a = (void *) &bucket->data[pos * f->entry_size]; \
- rte_prefetch0(a); \
- entries[pkt_index] = a; \
- \
- bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index);\
- buckets_mask |= bucket_mask; \
- bucket_next = bucket->next; \
- rte_prefetch0(bucket_next); \
- rte_prefetch0((void *)(((uintptr_t) bucket_next) + RTE_CACHE_LINE_SIZE));\
- buckets[pkt_index] = bucket_next; \
- keys[pkt_index] = key; \
-}
-
-#define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01,\
- pkts, pkts_mask) \
-{ \
- uint64_t pkt00_mask, pkt01_mask; \
- \
- pkt00_index = __builtin_ctzll(pkts_mask); \
- pkt00_mask = 1LLU << pkt00_index; \
- pkts_mask &= ~pkt00_mask; \
- \
- mbuf00 = pkts[pkt00_index]; \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
- \
- pkt01_index = __builtin_ctzll(pkts_mask); \
- pkt01_mask = 1LLU << pkt01_index; \
- pkts_mask &= ~pkt01_mask; \
- \
- mbuf01 = pkts[pkt01_index]; \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
-}
-
-#define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,\
- mbuf00, mbuf01, pkts, pkts_mask) \
-{ \
- uint64_t pkt00_mask, pkt01_mask; \
- \
- pkt00_index = __builtin_ctzll(pkts_mask); \
- pkt00_mask = 1LLU << pkt00_index; \
- pkts_mask &= ~pkt00_mask; \
- \
- mbuf00 = pkts[pkt00_index]; \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
- \
- pkt01_index = __builtin_ctzll(pkts_mask); \
- if (pkts_mask == 0) \
- pkt01_index = pkt00_index; \
- pkt01_mask = 1LLU << pkt01_index; \
- pkts_mask &= ~pkt01_mask; \
- \
- mbuf01 = pkts[pkt01_index]; \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
-}
-
-#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
-{ \
- uint64_t signature10, signature11; \
- uint32_t bucket10_index, bucket11_index; \
- \
- signature10 = RTE_MBUF_METADATA_UINT32(mbuf10, f->signature_offset);\
- bucket10_index = signature10 & (f->n_buckets - 1); \
- bucket10 = (struct rte_bucket_4_16 *) \
- &f->memory[bucket10_index * f->bucket_size]; \
- rte_prefetch0(bucket10); \
- rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
- \
- signature11 = RTE_MBUF_METADATA_UINT32(mbuf11, f->signature_offset);\
- bucket11_index = signature11 & (f->n_buckets - 1); \
- bucket11 = (struct rte_bucket_4_16 *) \
- &f->memory[bucket11_index * f->bucket_size]; \
- rte_prefetch0(bucket11); \
- rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE));\
-}
-
-#define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,\
- bucket20, bucket21, pkts_mask_out, entries, f) \
-{ \
- void *a20, *a21; \
- uint64_t pkt20_mask, pkt21_mask; \
- uint64_t *key20, *key21; \
- uint32_t pos20, pos21; \
- \
- key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
- key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
- \
- lookup_key16_cmp(key20, bucket20, pos20); \
- lookup_key16_cmp(key21, bucket21, pos21); \
- \
- pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
- pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
- pkts_mask_out |= pkt20_mask | pkt21_mask; \
- \
- a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
- a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
- rte_prefetch0(a20); \
- rte_prefetch0(a21); \
- entries[pkt20_index] = a20; \
- entries[pkt21_index] = a21; \
- lru_update(bucket20, pos20); \
- lru_update(bucket21, pos21); \
-}
-
-#define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
- bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f) \
-{ \
- struct rte_bucket_4_16 *bucket20_next, *bucket21_next; \
- void *a20, *a21; \
- uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
- uint64_t *key20, *key21; \
- uint32_t pos20, pos21; \
- \
- key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
- key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
- \
- lookup_key16_cmp(key20, bucket20, pos20); \
- lookup_key16_cmp(key21, bucket21, pos21); \
- \
- pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
- pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
- pkts_mask_out |= pkt20_mask | pkt21_mask; \
- \
- a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
- a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
- rte_prefetch0(a20); \
- rte_prefetch0(a21); \
- entries[pkt20_index] = a20; \
- entries[pkt21_index] = a21; \
- \
- bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index);\
- bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index);\
- buckets_mask |= bucket20_mask | bucket21_mask; \
- bucket20_next = bucket20->next; \
- bucket21_next = bucket21->next; \
- buckets[pkt20_index] = bucket20_next; \
- buckets[pkt21_index] = bucket21_next; \
- keys[pkt20_index] = key20; \
- keys[pkt21_index] = key21; \
-}
-
-static int
-rte_table_hash_lookup_key16_lru(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
- struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
- uint32_t pkt00_index, pkt01_index, pkt10_index;
- uint32_t pkt11_index, pkt20_index, pkt21_index;
- uint64_t pkts_mask_out = 0;
-
- /* Cannot run the pipeline with less than 5 packets */
- if (__builtin_popcountll(pkts_mask) < 5) {
- for ( ; pkts_mask; ) {
- struct rte_bucket_4_16 *bucket;
- struct rte_mbuf *mbuf;
- uint32_t pkt_index;
-
- lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
- lookup1_stage1(mbuf, bucket, f);
- lookup1_stage2_lru(pkt_index, mbuf, bucket,
- pkts_mask_out, entries, f);
- }
-
- *lookup_hit_mask = pkts_mask_out;
- return 0;
- }
-
- /*
- * Pipeline fill
- *
- */
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline feed */
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
- mbuf00, mbuf01, pkts, pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries, f);
- }
-
- /*
- * Pipeline flush
- *
- */
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries, f);
-
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries, f);
-
- *lookup_hit_mask = pkts_mask_out;
- return 0;
-} /* rte_table_hash_lookup_key16_lru() */
-
-static int
-rte_table_hash_lookup_key16_ext(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
- struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
- uint32_t pkt00_index, pkt01_index, pkt10_index;
- uint32_t pkt11_index, pkt20_index, pkt21_index;
- uint64_t pkts_mask_out = 0, buckets_mask = 0;
- struct rte_bucket_4_16 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
- uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];
-
- /* Cannot run the pipeline with less than 5 packets */
- if (__builtin_popcountll(pkts_mask) < 5) {
- for ( ; pkts_mask; ) {
- struct rte_bucket_4_16 *bucket;
- struct rte_mbuf *mbuf;
- uint32_t pkt_index;
-
- lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
- lookup1_stage1(mbuf, bucket, f);
- lookup1_stage2_ext(pkt_index, mbuf, bucket,
- pkts_mask_out, entries, buckets_mask,
- buckets, keys, f);
- }
-
- goto grind_next_buckets;
- }
-
- /*
- * Pipeline fill
- *
- */
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline feed */
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
- mbuf00, mbuf01, pkts, pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
- }
-
- /*
- * Pipeline flush
- *
- */
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
-
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
-
-grind_next_buckets:
- /* Grind next buckets */
- for ( ; buckets_mask; ) {
- uint64_t buckets_mask_next = 0;
-
- for ( ; buckets_mask; ) {
- uint64_t pkt_mask;
- uint32_t pkt_index;
-
- pkt_index = __builtin_ctzll(buckets_mask);
- pkt_mask = 1LLU << pkt_index;
- buckets_mask &= ~pkt_mask;
-
- lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
- entries, buckets_mask_next, f);
- }
-
- buckets_mask = buckets_mask_next;
- }
-
- *lookup_hit_mask = pkts_mask_out;
- return 0;
-} /* rte_table_hash_lookup_key16_ext() */
-
-struct rte_table_ops rte_table_hash_key16_lru_ops = {
- .f_create = rte_table_hash_create_key16_lru,
- .f_free = rte_table_hash_free_key16_lru,
- .f_add = rte_table_hash_entry_add_key16_lru,
- .f_delete = rte_table_hash_entry_delete_key16_lru,
- .f_lookup = rte_table_hash_lookup_key16_lru,
-};
-
-struct rte_table_ops rte_table_hash_key16_ext_ops = {
- .f_create = rte_table_hash_create_key16_ext,
- .f_free = rte_table_hash_free_key16_ext,
- .f_add = rte_table_hash_entry_add_key16_ext,
- .f_delete = rte_table_hash_entry_delete_key16_ext,
- .f_lookup = rte_table_hash_lookup_key16_ext,
-};
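
Note: the three librte_table hash variants deleted in this commit (key8, key16, key32) share one bucket layout: four entries per bucket, a packed 64-bit LRU list in the bucket header, and an optional chain of extension buckets. The lru_init()/lru_pos()/lru_update() macros come from rte_lru.h, which is not shown in this hunk; the constant 0x0000000100020003 stored by the key32 create() below implies four 16-bit slot indices packed into bucket->lru_list, with the most-recently-used slot in the top 16 bits and the eviction victim in the bottom 16. The following is a minimal standalone sketch consistent with that encoding -- an illustration under those assumptions, not the exact rte_lru.h code:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed encoding: four 16-bit fields holding slot indices 0..3,
 * MRU in bits 63:48, LRU (the eviction victim) in bits 15:0. */
static uint64_t lru_list_init(void)
{
	return 0x0000000100020003ULL; /* MRU=0, then 1, 2, LRU=3 */
}

static uint32_t lru_list_pos(uint64_t lru_list)
{
	return (uint32_t)(lru_list & 0xFFFFULL); /* victim slot */
}

static uint64_t lru_list_update(uint64_t lru_list, uint64_t slot)
{
	uint64_t out = 0;
	int out_shift = 0;
	int shift;

	/* Drop 'slot' wherever it sits, compacting the other three... */
	for (shift = 0; shift <= 48; shift += 16) {
		uint64_t v = (lru_list >> shift) & 0xFFFFULL;

		if (v != slot) {
			out |= v << out_shift;
			out_shift += 16;
		}
	}

	/* ...then re-insert it at the MRU end. */
	return out | (slot << 48);
}

int main(void)
{
	uint64_t l = lru_list_init();

	assert(lru_list_pos(l) == 3); /* slot 3 is evicted first */
	l = lru_list_update(l, 3);    /* touch slot 3 */
	assert(lru_list_pos(l) == 2); /* slot 2 is now LRU */
	printf("lru_list = 0x%016" PRIx64 "\n", l);
	return 0;
}

Under this encoding, lru_pos() is a single AND and lru_update() only touches one header word, which keeps the whole replacement policy inside the bucket's first cache line -- the same line the signatures and chaining fields already occupy.
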
diff --git a/src/dpdk_lib18/librte_table/rte_table_hash_key32.c b/src/dpdk_lib18/librte_table/rte_table_hash_key32.c
deleted file mode 100755
index da0ce6af..00000000
--- a/src/dpdk_lib18/librte_table/rte_table_hash_key32.c
+++ /dev/null
@@ -1,1121 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <string.h>
-#include <stdio.h>
-
-#include <rte_common.h>
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_malloc.h>
-#include <rte_log.h>
-
-#include "rte_table_hash.h"
-#include "rte_lru.h"
-
-#define RTE_TABLE_HASH_KEY_SIZE 32
-
-#define RTE_BUCKET_ENTRY_VALID 0x1LLU
-
-struct rte_bucket_4_32 {
- /* Cache line 0 */
- uint64_t signature[4 + 1];
- uint64_t lru_list;
- struct rte_bucket_4_32 *next;
- uint64_t next_valid;
-
- /* Cache lines 1 and 2 */
- uint64_t key[4][4];
-
- /* Cache line 3 */
- uint8_t data[0];
-};
-
-struct rte_table_hash {
- /* Input parameters */
- uint32_t n_buckets;
- uint32_t n_entries_per_bucket;
- uint32_t key_size;
- uint32_t entry_size;
- uint32_t bucket_size;
- uint32_t signature_offset;
- uint32_t key_offset;
- rte_table_hash_op_hash f_hash;
- uint64_t seed;
-
- /* Extendible buckets */
- uint32_t n_buckets_ext;
- uint32_t stack_pos;
- uint32_t *stack;
-
- /* Lookup table */
- uint8_t memory[0] __rte_cache_aligned;
-};
-
-static int
-check_params_create_lru(struct rte_table_hash_key32_lru_params *params) {
- /* n_entries */
- if (params->n_entries == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
- return -EINVAL;
- }
-
- /* signature offset */
- if ((params->signature_offset & 0x3) != 0) {
- RTE_LOG(ERR, TABLE, "%s: invalid signature offset\n", __func__);
- return -EINVAL;
- }
-
- /* key offset */
- if ((params->key_offset & 0x7) != 0) {
- RTE_LOG(ERR, TABLE, "%s: invalid key offset\n", __func__);
- return -EINVAL;
- }
-
- /* f_hash */
- if (params->f_hash == NULL) {
- RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
- __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void *
-rte_table_hash_create_key32_lru(void *params,
- int socket_id,
- uint32_t entry_size)
-{
- struct rte_table_hash_key32_lru_params *p =
- (struct rte_table_hash_key32_lru_params *) params;
- struct rte_table_hash *f;
- uint32_t n_buckets, n_entries_per_bucket, key_size, bucket_size_cl;
- uint32_t total_size, i;
-
- /* Check input parameters */
- if ((check_params_create_lru(p) != 0) ||
- ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
- ((sizeof(struct rte_bucket_4_32) % RTE_CACHE_LINE_SIZE) != 0)) {
- return NULL;
- }
- n_entries_per_bucket = 4;
- key_size = 32;
-
- /* Memory allocation */
- n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
- n_entries_per_bucket);
- bucket_size_cl = (sizeof(struct rte_bucket_4_32) + n_entries_per_bucket
- * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
- total_size = sizeof(struct rte_table_hash) + n_buckets *
- bucket_size_cl * RTE_CACHE_LINE_SIZE;
-
- f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
- if (f == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
- return NULL;
- }
- RTE_LOG(INFO, TABLE,
- "%s: Hash table memory footprint is %u bytes\n", __func__,
- total_size);
-
- /* Memory initialization */
- f->n_buckets = n_buckets;
- f->n_entries_per_bucket = n_entries_per_bucket;
- f->key_size = key_size;
- f->entry_size = entry_size;
- f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
- f->signature_offset = p->signature_offset;
- f->key_offset = p->key_offset;
- f->f_hash = p->f_hash;
- f->seed = p->seed;
-
- for (i = 0; i < n_buckets; i++) {
- struct rte_bucket_4_32 *bucket;
-
- bucket = (struct rte_bucket_4_32 *) &f->memory[i *
- f->bucket_size];
- bucket->lru_list = 0x0000000100020003LLU;
- }
-
- return f;
-}
-
-static int
-rte_table_hash_free_key32_lru(void *table)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
-
- /* Check input parameters */
- if (f == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
- rte_free(f);
- return 0;
-}
-
-static int
-rte_table_hash_entry_add_key32_lru(
- void *table,
- void *key,
- void *entry,
- int *key_found,
- void **entry_ptr)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_32 *bucket;
- uint64_t signature, pos;
- uint32_t bucket_index, i;
-
- signature = f->f_hash(key, f->key_size, f->seed);
- bucket_index = signature & (f->n_buckets - 1);
- bucket = (struct rte_bucket_4_32 *)
- &f->memory[bucket_index * f->bucket_size];
- signature |= RTE_BUCKET_ENTRY_VALID;
-
- /* Key is present in the bucket */
- for (i = 0; i < 4; i++) {
- uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
-
- if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
- uint8_t *bucket_data = &bucket->data[i * f->entry_size];
-
- memcpy(bucket_data, entry, f->entry_size);
- lru_update(bucket, i);
- *key_found = 1;
- *entry_ptr = (void *) bucket_data;
- return 0;
- }
- }
-
- /* Key is not present in the bucket */
- for (i = 0; i < 4; i++) {
- uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
-
- if (bucket_signature == 0) {
- uint8_t *bucket_data = &bucket->data[i * f->entry_size];
-
- bucket->signature[i] = signature;
- memcpy(bucket_key, key, f->key_size);
- memcpy(bucket_data, entry, f->entry_size);
- lru_update(bucket, i);
- *key_found = 0;
- *entry_ptr = (void *) bucket_data;
-
- return 0;
- }
- }
-
- /* Bucket full: replace LRU entry */
- pos = lru_pos(bucket);
- bucket->signature[pos] = signature;
- memcpy(bucket->key[pos], key, f->key_size);
- memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
- lru_update(bucket, pos);
- *key_found = 0;
- *entry_ptr = (void *) &bucket->data[pos * f->entry_size];
-
- return 0;
-}
-
-static int
-rte_table_hash_entry_delete_key32_lru(
- void *table,
- void *key,
- int *key_found,
- void *entry)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_32 *bucket;
- uint64_t signature;
- uint32_t bucket_index, i;
-
- signature = f->f_hash(key, f->key_size, f->seed);
- bucket_index = signature & (f->n_buckets - 1);
- bucket = (struct rte_bucket_4_32 *)
- &f->memory[bucket_index * f->bucket_size];
- signature |= RTE_BUCKET_ENTRY_VALID;
-
- /* Key is present in the bucket */
- for (i = 0; i < 4; i++) {
- uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
-
- if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
- uint8_t *bucket_data = &bucket->data[i * f->entry_size];
-
- bucket->signature[i] = 0;
- *key_found = 1;
- if (entry)
- memcpy(entry, bucket_data, f->entry_size);
-
- return 0;
- }
- }
-
- /* Key is not present in the bucket */
- *key_found = 0;
- return 0;
-}
-
-static int
-check_params_create_ext(struct rte_table_hash_key32_ext_params *params) {
- /* n_entries */
- if (params->n_entries == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
- return -EINVAL;
- }
-
- /* n_entries_ext */
- if (params->n_entries_ext == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries_ext is zero\n", __func__);
- return -EINVAL;
- }
-
- /* signature offset */
- if ((params->signature_offset & 0x3) != 0) {
- RTE_LOG(ERR, TABLE, "%s: invalid signature offset\n", __func__);
- return -EINVAL;
- }
-
- /* key offset */
- if ((params->key_offset & 0x7) != 0) {
- RTE_LOG(ERR, TABLE, "%s: invalid key offset\n", __func__);
- return -EINVAL;
- }
-
- /* f_hash */
- if (params->f_hash == NULL) {
- RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
- __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void *
-rte_table_hash_create_key32_ext(void *params,
- int socket_id,
- uint32_t entry_size)
-{
- struct rte_table_hash_key32_ext_params *p =
- (struct rte_table_hash_key32_ext_params *) params;
- struct rte_table_hash *f;
- uint32_t n_buckets, n_buckets_ext, n_entries_per_bucket;
- uint32_t key_size, bucket_size_cl, stack_size_cl, total_size, i;
-
- /* Check input parameters */
- if ((check_params_create_ext(p) != 0) ||
- ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
- ((sizeof(struct rte_bucket_4_32) % RTE_CACHE_LINE_SIZE) != 0))
- return NULL;
-
- n_entries_per_bucket = 4;
- key_size = 32;
-
- /* Memory allocation */
- n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
- n_entries_per_bucket);
- n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
- n_entries_per_bucket;
- bucket_size_cl = (sizeof(struct rte_bucket_4_32) + n_entries_per_bucket
- * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
- stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
- / RTE_CACHE_LINE_SIZE;
- total_size = sizeof(struct rte_table_hash) +
- ((n_buckets + n_buckets_ext) * bucket_size_cl + stack_size_cl) *
- RTE_CACHE_LINE_SIZE;
-
- f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
- if (f == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
- return NULL;
- }
- RTE_LOG(INFO, TABLE,
- "%s: Hash table memory footprint is %u bytes\n", __func__,
- total_size);
-
- /* Memory initialization */
- f->n_buckets = n_buckets;
- f->n_entries_per_bucket = n_entries_per_bucket;
- f->key_size = key_size;
- f->entry_size = entry_size;
- f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
- f->signature_offset = p->signature_offset;
- f->key_offset = p->key_offset;
- f->f_hash = p->f_hash;
- f->seed = p->seed;
-
- f->n_buckets_ext = n_buckets_ext;
- f->stack_pos = n_buckets_ext;
- f->stack = (uint32_t *)
- &f->memory[(n_buckets + n_buckets_ext) * f->bucket_size];
-
- for (i = 0; i < n_buckets_ext; i++)
- f->stack[i] = i;
-
- return f;
-}
-
-static int
-rte_table_hash_free_key32_ext(void *table)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
-
- /* Check input parameters */
- if (f == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
- rte_free(f);
- return 0;
-}
-
-static int
-rte_table_hash_entry_add_key32_ext(
- void *table,
- void *key,
- void *entry,
- int *key_found,
- void **entry_ptr)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
- uint64_t signature;
- uint32_t bucket_index, i;
-
- signature = f->f_hash(key, f->key_size, f->seed);
- bucket_index = signature & (f->n_buckets - 1);
- bucket0 = (struct rte_bucket_4_32 *)
- &f->memory[bucket_index * f->bucket_size];
- signature |= RTE_BUCKET_ENTRY_VALID;
-
- /* Key is present in the bucket */
- for (bucket = bucket0; bucket != NULL; bucket = bucket->next) {
- for (i = 0; i < 4; i++) {
- uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
-
- if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
- uint8_t *bucket_data = &bucket->data[i *
- f->entry_size];
-
- memcpy(bucket_data, entry, f->entry_size);
- *key_found = 1;
- *entry_ptr = (void *) bucket_data;
-
- return 0;
- }
- }
- }
-
- /* Key is not present in the bucket */
- for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
- bucket_prev = bucket, bucket = bucket->next)
- for (i = 0; i < 4; i++) {
- uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
-
- if (bucket_signature == 0) {
- uint8_t *bucket_data = &bucket->data[i *
- f->entry_size];
-
- bucket->signature[i] = signature;
- memcpy(bucket_key, key, f->key_size);
- memcpy(bucket_data, entry, f->entry_size);
- *key_found = 0;
- *entry_ptr = (void *) bucket_data;
-
- return 0;
- }
- }
-
- /* Bucket full: extend bucket */
- if (f->stack_pos > 0) {
- bucket_index = f->stack[--f->stack_pos];
-
- bucket = (struct rte_bucket_4_32 *)
- &f->memory[(f->n_buckets + bucket_index) *
- f->bucket_size];
- bucket_prev->next = bucket;
- bucket_prev->next_valid = 1;
-
- bucket->signature[0] = signature;
- memcpy(bucket->key[0], key, f->key_size);
- memcpy(&bucket->data[0], entry, f->entry_size);
- *key_found = 0;
- *entry_ptr = (void *) &bucket->data[0];
- return 0;
- }
-
- return -ENOSPC;
-}
-
-static int
-rte_table_hash_entry_delete_key32_ext(
- void *table,
- void *key,
- int *key_found,
- void *entry)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
- uint64_t signature;
- uint32_t bucket_index, i;
-
- signature = f->f_hash(key, f->key_size, f->seed);
- bucket_index = signature & (f->n_buckets - 1);
- bucket0 = (struct rte_bucket_4_32 *)
- &f->memory[bucket_index * f->bucket_size];
- signature |= RTE_BUCKET_ENTRY_VALID;
-
- /* Key is present in the bucket */
- for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
- bucket_prev = bucket, bucket = bucket->next)
- for (i = 0; i < 4; i++) {
- uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
-
- if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
- uint8_t *bucket_data = &bucket->data[i *
- f->entry_size];
-
- bucket->signature[i] = 0;
- *key_found = 1;
- if (entry)
- memcpy(entry, bucket_data,
- f->entry_size);
-
- if ((bucket->signature[0] == 0) &&
- (bucket->signature[1] == 0) &&
- (bucket->signature[2] == 0) &&
- (bucket->signature[3] == 0) &&
- (bucket_prev != NULL)) {
- bucket_prev->next = bucket->next;
- bucket_prev->next_valid =
- bucket->next_valid;
-
- memset(bucket, 0,
- sizeof(struct rte_bucket_4_32));
- bucket_index = (bucket -
- ((struct rte_bucket_4_32 *)
- f->memory)) - f->n_buckets;
- f->stack[f->stack_pos++] = bucket_index;
- }
-
- return 0;
- }
- }
-
- /* Key is not present in the bucket */
- *key_found = 0;
- return 0;
-}
-
-#define lookup_key32_cmp(key_in, bucket, pos) \
-{ \
- uint64_t xor[4][4], or[4], signature[4]; \
- \
- signature[0] = ((~bucket->signature[0]) & 1); \
- signature[1] = ((~bucket->signature[1]) & 1); \
- signature[2] = ((~bucket->signature[2]) & 1); \
- signature[3] = ((~bucket->signature[3]) & 1); \
- \
- xor[0][0] = key_in[0] ^ bucket->key[0][0]; \
- xor[0][1] = key_in[1] ^ bucket->key[0][1]; \
- xor[0][2] = key_in[2] ^ bucket->key[0][2]; \
- xor[0][3] = key_in[3] ^ bucket->key[0][3]; \
- \
- xor[1][0] = key_in[0] ^ bucket->key[1][0]; \
- xor[1][1] = key_in[1] ^ bucket->key[1][1]; \
- xor[1][2] = key_in[2] ^ bucket->key[1][2]; \
- xor[1][3] = key_in[3] ^ bucket->key[1][3]; \
- \
- xor[2][0] = key_in[0] ^ bucket->key[2][0]; \
- xor[2][1] = key_in[1] ^ bucket->key[2][1]; \
- xor[2][2] = key_in[2] ^ bucket->key[2][2]; \
- xor[2][3] = key_in[3] ^ bucket->key[2][3]; \
- \
- xor[3][0] = key_in[0] ^ bucket->key[3][0]; \
- xor[3][1] = key_in[1] ^ bucket->key[3][1]; \
- xor[3][2] = key_in[2] ^ bucket->key[3][2]; \
- xor[3][3] = key_in[3] ^ bucket->key[3][3]; \
- \
- or[0] = xor[0][0] | xor[0][1] | xor[0][2] | xor[0][3] | signature[0];\
- or[1] = xor[1][0] | xor[1][1] | xor[1][2] | xor[1][3] | signature[1];\
- or[2] = xor[2][0] | xor[2][1] | xor[2][2] | xor[2][3] | signature[2];\
- or[3] = xor[3][0] | xor[3][1] | xor[3][2] | xor[3][3] | signature[3];\
- \
- pos = 4; \
- if (or[0] == 0) \
- pos = 0; \
- if (or[1] == 0) \
- pos = 1; \
- if (or[2] == 0) \
- pos = 2; \
- if (or[3] == 0) \
- pos = 3; \
-}
-
-#define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask) \
-{ \
- uint64_t pkt_mask; \
- \
- pkt0_index = __builtin_ctzll(pkts_mask); \
- pkt_mask = 1LLU << pkt0_index; \
- pkts_mask &= ~pkt_mask; \
- \
- mbuf0 = pkts[pkt0_index]; \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, 0)); \
-}
-
-#define lookup1_stage1(mbuf1, bucket1, f) \
-{ \
- uint64_t signature; \
- uint32_t bucket_index; \
- \
- signature = RTE_MBUF_METADATA_UINT32(mbuf1, f->signature_offset);\
- bucket_index = signature & (f->n_buckets - 1); \
- bucket1 = (struct rte_bucket_4_32 *) \
- &f->memory[bucket_index * f->bucket_size]; \
- rte_prefetch0(bucket1); \
- rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE));\
- rte_prefetch0((void *)(((uintptr_t) bucket1) + 2 * RTE_CACHE_LINE_SIZE));\
-}
-
-#define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
- pkts_mask_out, entries, f) \
-{ \
- void *a; \
- uint64_t pkt_mask; \
- uint64_t *key; \
- uint32_t pos; \
- \
- key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
- \
- lookup_key32_cmp(key, bucket2, pos); \
- \
- pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
- pkts_mask_out |= pkt_mask; \
- \
- a = (void *) &bucket2->data[pos * f->entry_size]; \
- rte_prefetch0(a); \
- entries[pkt2_index] = a; \
- lru_update(bucket2, pos); \
-}
-
-#define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out,\
- entries, buckets_mask, buckets, keys, f) \
-{ \
- struct rte_bucket_4_32 *bucket_next; \
- void *a; \
- uint64_t pkt_mask, bucket_mask; \
- uint64_t *key; \
- uint32_t pos; \
- \
- key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
- \
- lookup_key32_cmp(key, bucket2, pos); \
- \
- pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
- pkts_mask_out |= pkt_mask; \
- \
- a = (void *) &bucket2->data[pos * f->entry_size]; \
- rte_prefetch0(a); \
- entries[pkt2_index] = a; \
- \
- bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index);\
- buckets_mask |= bucket_mask; \
- bucket_next = bucket2->next; \
- buckets[pkt2_index] = bucket_next; \
- keys[pkt2_index] = key; \
-}
-
-#define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, \
- entries, buckets_mask, f) \
-{ \
- struct rte_bucket_4_32 *bucket, *bucket_next; \
- void *a; \
- uint64_t pkt_mask, bucket_mask; \
- uint64_t *key; \
- uint32_t pos; \
- \
- bucket = buckets[pkt_index]; \
- key = keys[pkt_index]; \
- \
- lookup_key32_cmp(key, bucket, pos); \
- \
- pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index;\
- pkts_mask_out |= pkt_mask; \
- \
- a = (void *) &bucket->data[pos * f->entry_size]; \
- rte_prefetch0(a); \
- entries[pkt_index] = a; \
- \
- bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index);\
- buckets_mask |= bucket_mask; \
- bucket_next = bucket->next; \
- rte_prefetch0(bucket_next); \
- rte_prefetch0((void *)(((uintptr_t) bucket_next) + RTE_CACHE_LINE_SIZE));\
- rte_prefetch0((void *)(((uintptr_t) bucket_next) + \
- 2 * RTE_CACHE_LINE_SIZE)); \
- buckets[pkt_index] = bucket_next; \
- keys[pkt_index] = key; \
-}
-
-#define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01,\
- pkts, pkts_mask) \
-{ \
- uint64_t pkt00_mask, pkt01_mask; \
- \
- pkt00_index = __builtin_ctzll(pkts_mask); \
- pkt00_mask = 1LLU << pkt00_index; \
- pkts_mask &= ~pkt00_mask; \
- \
- mbuf00 = pkts[pkt00_index]; \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
- \
- pkt01_index = __builtin_ctzll(pkts_mask); \
- pkt01_mask = 1LLU << pkt01_index; \
- pkts_mask &= ~pkt01_mask; \
- \
- mbuf01 = pkts[pkt01_index]; \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
-}
-
-#define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,\
- mbuf00, mbuf01, pkts, pkts_mask) \
-{ \
- uint64_t pkt00_mask, pkt01_mask; \
- \
- pkt00_index = __builtin_ctzll(pkts_mask); \
- pkt00_mask = 1LLU << pkt00_index; \
- pkts_mask &= ~pkt00_mask; \
- \
- mbuf00 = pkts[pkt00_index]; \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
- \
- pkt01_index = __builtin_ctzll(pkts_mask); \
- if (pkts_mask == 0) \
- pkt01_index = pkt00_index; \
- \
- pkt01_mask = 1LLU << pkt01_index; \
- pkts_mask &= ~pkt01_mask; \
- \
- mbuf01 = pkts[pkt01_index]; \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
-}
-
-#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
-{ \
- uint64_t signature10, signature11; \
- uint32_t bucket10_index, bucket11_index; \
- \
- signature10 = RTE_MBUF_METADATA_UINT32(mbuf10, f->signature_offset);\
- bucket10_index = signature10 & (f->n_buckets - 1); \
- bucket10 = (struct rte_bucket_4_32 *) \
- &f->memory[bucket10_index * f->bucket_size]; \
- rte_prefetch0(bucket10); \
- rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
- rte_prefetch0((void *)(((uintptr_t) bucket10) + 2 * RTE_CACHE_LINE_SIZE));\
- \
- signature11 = RTE_MBUF_METADATA_UINT32(mbuf11, f->signature_offset);\
- bucket11_index = signature11 & (f->n_buckets - 1); \
- bucket11 = (struct rte_bucket_4_32 *) \
- &f->memory[bucket11_index * f->bucket_size]; \
- rte_prefetch0(bucket11); \
- rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE));\
- rte_prefetch0((void *)(((uintptr_t) bucket11) + 2 * RTE_CACHE_LINE_SIZE));\
-}
-
-#define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,\
- bucket20, bucket21, pkts_mask_out, entries, f) \
-{ \
- void *a20, *a21; \
- uint64_t pkt20_mask, pkt21_mask; \
- uint64_t *key20, *key21; \
- uint32_t pos20, pos21; \
- \
- key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
- key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
- \
- lookup_key32_cmp(key20, bucket20, pos20); \
- lookup_key32_cmp(key21, bucket21, pos21); \
- \
- pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
- pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
- pkts_mask_out |= pkt20_mask | pkt21_mask; \
- \
- a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
- a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
- rte_prefetch0(a20); \
- rte_prefetch0(a21); \
- entries[pkt20_index] = a20; \
- entries[pkt21_index] = a21; \
- lru_update(bucket20, pos20); \
- lru_update(bucket21, pos21); \
-}
-
-#define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
- bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f)\
-{ \
- struct rte_bucket_4_32 *bucket20_next, *bucket21_next; \
- void *a20, *a21; \
- uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
- uint64_t *key20, *key21; \
- uint32_t pos20, pos21; \
- \
- key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
- key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
- \
- lookup_key32_cmp(key20, bucket20, pos20); \
- lookup_key32_cmp(key21, bucket21, pos21); \
- \
- pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
- pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
- pkts_mask_out |= pkt20_mask | pkt21_mask; \
- \
- a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
- a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
- rte_prefetch0(a20); \
- rte_prefetch0(a21); \
- entries[pkt20_index] = a20; \
- entries[pkt21_index] = a21; \
- \
- bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index);\
- bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index);\
- buckets_mask |= bucket20_mask | bucket21_mask; \
- bucket20_next = bucket20->next; \
- bucket21_next = bucket21->next; \
- buckets[pkt20_index] = bucket20_next; \
- buckets[pkt21_index] = bucket21_next; \
- keys[pkt20_index] = key20; \
- keys[pkt21_index] = key21; \
-}
-
-static int
-rte_table_hash_lookup_key32_lru(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
- struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
- uint32_t pkt00_index, pkt01_index, pkt10_index;
- uint32_t pkt11_index, pkt20_index, pkt21_index;
- uint64_t pkts_mask_out = 0;
-
- /* Cannot run the pipeline with less than 5 packets */
- if (__builtin_popcountll(pkts_mask) < 5) {
- for ( ; pkts_mask; ) {
- struct rte_bucket_4_32 *bucket;
- struct rte_mbuf *mbuf;
- uint32_t pkt_index;
-
- lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
- lookup1_stage1(mbuf, bucket, f);
- lookup1_stage2_lru(pkt_index, mbuf, bucket,
- pkts_mask_out, entries, f);
- }
-
- *lookup_hit_mask = pkts_mask_out;
- return 0;
- }
-
- /*
- * Pipeline fill
- *
- */
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline feed */
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
- mbuf00, mbuf01, pkts, pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index,
- mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out,
- entries, f);
- }
-
- /*
- * Pipeline flush
- *
- */
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index,
- mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);
-
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index,
- mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);
-
- *lookup_hit_mask = pkts_mask_out;
- return 0;
-} /* rte_table_hash_lookup_key32_lru() */
-
-static int
-rte_table_hash_lookup_key32_ext(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
- struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
- uint32_t pkt00_index, pkt01_index, pkt10_index;
- uint32_t pkt11_index, pkt20_index, pkt21_index;
- uint64_t pkts_mask_out = 0, buckets_mask = 0;
- struct rte_bucket_4_32 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
- uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];
-
- /* Cannot run the pipeline with less than 5 packets */
- if (__builtin_popcountll(pkts_mask) < 5) {
- for ( ; pkts_mask; ) {
- struct rte_bucket_4_32 *bucket;
- struct rte_mbuf *mbuf;
- uint32_t pkt_index;
-
- lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
- lookup1_stage1(mbuf, bucket, f);
- lookup1_stage2_ext(pkt_index, mbuf, bucket,
- pkts_mask_out, entries, buckets_mask, buckets,
- keys, f);
- }
-
- goto grind_next_buckets;
- }
-
- /*
- * Pipeline fill
- *
- */
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline feed */
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
- mbuf00, mbuf01, pkts, pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
- }
-
- /*
- * Pipeline flush
- *
- */
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
-
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
-
-grind_next_buckets:
- /* Grind next buckets */
- for ( ; buckets_mask; ) {
- uint64_t buckets_mask_next = 0;
-
- for ( ; buckets_mask; ) {
- uint64_t pkt_mask;
- uint32_t pkt_index;
-
- pkt_index = __builtin_ctzll(buckets_mask);
- pkt_mask = 1LLU << pkt_index;
- buckets_mask &= ~pkt_mask;
-
- lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
- entries, buckets_mask_next, f);
- }
-
- buckets_mask = buckets_mask_next;
- }
-
- *lookup_hit_mask = pkts_mask_out;
- return 0;
-} /* rte_table_hash_lookup_key32_ext() */
-
-struct rte_table_ops rte_table_hash_key32_lru_ops = {
- .f_create = rte_table_hash_create_key32_lru,
- .f_free = rte_table_hash_free_key32_lru,
- .f_add = rte_table_hash_entry_add_key32_lru,
- .f_delete = rte_table_hash_entry_delete_key32_lru,
- .f_lookup = rte_table_hash_lookup_key32_lru,
-};
-
-struct rte_table_ops rte_table_hash_key32_ext_ops = {
- .f_create = rte_table_hash_create_key32_ext,
- .f_free = rte_table_hash_free_key32_ext,
- .f_add = rte_table_hash_entry_add_key32_ext,
- .f_delete = rte_table_hash_entry_delete_key32_ext,
- .f_lookup = rte_table_hash_lookup_key32_ext,
-};
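
Note: the unrolled lookup_key16_cmp/lookup_key32_cmp macros above implement a branchless probe of all four bucket slots: XOR each 64-bit word of the candidate key against each stored key, OR the words together with an "entry invalid" bit derived from the slot's signature, and a slot matches exactly when its OR result is zero. A standalone sketch of the same trick for a single-word key follows; it is an illustration only, though the slot numbering and the pos == 4 "miss" convention match the macros above:

#include <stdint.h>
#include <stdio.h>

struct bucket4 {
	uint64_t signature[4]; /* bit 0 set = entry valid */
	uint64_t key[4];
};

static uint32_t lookup_cmp(const struct bucket4 *b, uint64_t key_in)
{
	uint64_t or_[4];
	uint32_t pos = 4; /* 4 = no match */
	int i;

	/* For each slot: OR of (key XOR stored key) and an "invalid"
	 * bit. The result is zero only for a valid, matching slot. */
	for (i = 0; i < 4; i++)
		or_[i] = (key_in ^ b->key[i]) | ((~b->signature[i]) & 1);

	if (or_[0] == 0)
		pos = 0;
	if (or_[1] == 0)
		pos = 1;
	if (or_[2] == 0)
		pos = 2;
	if (or_[3] == 0)
		pos = 3;
	return pos;
}

int main(void)
{
	struct bucket4 b = {
		.signature = { 0, 1, 1, 0 }, /* slots 1 and 2 valid */
		.key       = { 7, 42, 99, 42 },
	};

	printf("lookup 42 -> slot %u\n", lookup_cmp(&b, 42)); /* 1 */
	printf("lookup  7 -> slot %u\n", lookup_cmp(&b, 7));  /* 4: slot 0 holds 7 but is invalid */
	return 0;
}

Because the four compares are independent, they can all sit in flight at once, and the only data-dependent control flow left is the final set of pos assignments, which the macros express as four cheap ifs rather than an early-exit loop over the slots.
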
diff --git a/src/dpdk_lib18/librte_table/rte_table_hash_key8.c b/src/dpdk_lib18/librte_table/rte_table_hash_key8.c
deleted file mode 100755
index 443ca7da..00000000
--- a/src/dpdk_lib18/librte_table/rte_table_hash_key8.c
+++ /dev/null
@@ -1,1399 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <string.h>
-#include <stdio.h>
-
-#include <rte_common.h>
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_malloc.h>
-#include <rte_log.h>
-
-#include "rte_table_hash.h"
-#include "rte_lru.h"
-
-#define RTE_TABLE_HASH_KEY_SIZE 8
-
-struct rte_bucket_4_8 {
- /* Cache line 0 */
- uint64_t signature;
- uint64_t lru_list;
- struct rte_bucket_4_8 *next;
- uint64_t next_valid;
-
- uint64_t key[4];
-
- /* Cache line 1 */
- uint8_t data[0];
-};
-
-struct rte_table_hash {
- /* Input parameters */
- uint32_t n_buckets;
- uint32_t n_entries_per_bucket;
- uint32_t key_size;
- uint32_t entry_size;
- uint32_t bucket_size;
- uint32_t signature_offset;
- uint32_t key_offset;
- rte_table_hash_op_hash f_hash;
- uint64_t seed;
-
- /* Extendible buckets */
- uint32_t n_buckets_ext;
- uint32_t stack_pos;
- uint32_t *stack;
-
- /* Lookup table */
- uint8_t memory[0] __rte_cache_aligned;
-};
-
-static int
-check_params_create_lru(struct rte_table_hash_key8_lru_params *params) {
- /* n_entries */
- if (params->n_entries == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
- return -EINVAL;
- }
-
- /* signature offset */
- if ((params->signature_offset & 0x3) != 0) {
- RTE_LOG(ERR, TABLE, "%s: invalid signature_offset\n", __func__);
- return -EINVAL;
- }
-
- /* key offset */
- if ((params->key_offset & 0x7) != 0) {
- RTE_LOG(ERR, TABLE, "%s: invalid key_offset\n", __func__);
- return -EINVAL;
- }
-
- /* f_hash */
- if (params->f_hash == NULL) {
- RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
- __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void *
-rte_table_hash_create_key8_lru(void *params, int socket_id, uint32_t entry_size)
-{
- struct rte_table_hash_key8_lru_params *p =
- (struct rte_table_hash_key8_lru_params *) params;
- struct rte_table_hash *f;
- uint32_t n_buckets, n_entries_per_bucket, key_size, bucket_size_cl;
- uint32_t total_size, i;
-
- /* Check input parameters */
- if ((check_params_create_lru(p) != 0) ||
- ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
- ((sizeof(struct rte_bucket_4_8) % RTE_CACHE_LINE_SIZE) != 0)) {
- return NULL;
- }
- n_entries_per_bucket = 4;
- key_size = 8;
-
- /* Memory allocation */
- n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
- n_entries_per_bucket);
- bucket_size_cl = (sizeof(struct rte_bucket_4_8) + n_entries_per_bucket *
- entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
- total_size = sizeof(struct rte_table_hash) + n_buckets *
- bucket_size_cl * RTE_CACHE_LINE_SIZE;
-
- f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
- if (f == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
- return NULL;
- }
- RTE_LOG(INFO, TABLE,
- "%s: Hash table memory footprint is %u bytes\n",
- __func__, total_size);
-
- /* Memory initialization */
- f->n_buckets = n_buckets;
- f->n_entries_per_bucket = n_entries_per_bucket;
- f->key_size = key_size;
- f->entry_size = entry_size;
- f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
- f->signature_offset = p->signature_offset;
- f->key_offset = p->key_offset;
- f->f_hash = p->f_hash;
- f->seed = p->seed;
-
- for (i = 0; i < n_buckets; i++) {
- struct rte_bucket_4_8 *bucket;
-
- bucket = (struct rte_bucket_4_8 *) &f->memory[i *
- f->bucket_size];
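-		/* 0x0000000100020003: slot indexes 0..3 packed as four
-		 * 16-bit fields, the initial LRU recency order */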
- bucket->lru_list = 0x0000000100020003LLU;
- }
-
- return f;
-}
-
-static int
-rte_table_hash_free_key8_lru(void *table)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
-
- /* Check input parameters */
- if (f == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
- rte_free(f);
- return 0;
-}
-
-static int
-rte_table_hash_entry_add_key8_lru(
- void *table,
- void *key,
- void *entry,
- int *key_found,
- void **entry_ptr)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_8 *bucket;
- uint64_t signature, mask, pos;
- uint32_t bucket_index, i;
-
- signature = f->f_hash(key, f->key_size, f->seed);
- bucket_index = signature & (f->n_buckets - 1);
- bucket = (struct rte_bucket_4_8 *)
- &f->memory[bucket_index * f->bucket_size];
-
- /* Key is present in the bucket */
- for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
- uint64_t bucket_signature = bucket->signature;
- uint64_t bucket_key = bucket->key[i];
-
- if ((bucket_signature & mask) &&
- (*((uint64_t *) key) == bucket_key)) {
- uint8_t *bucket_data = &bucket->data[i * f->entry_size];
-
- memcpy(bucket_data, entry, f->entry_size);
- lru_update(bucket, i);
- *key_found = 1;
- *entry_ptr = (void *) bucket_data;
- return 0;
- }
- }
-
- /* Key is not present in the bucket */
- for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
- uint64_t bucket_signature = bucket->signature;
-
- if ((bucket_signature & mask) == 0) {
- uint8_t *bucket_data = &bucket->data[i * f->entry_size];
-
- bucket->signature |= mask;
- bucket->key[i] = *((uint64_t *) key);
- memcpy(bucket_data, entry, f->entry_size);
- lru_update(bucket, i);
- *key_found = 0;
- *entry_ptr = (void *) bucket_data;
-
- return 0;
- }
- }
-
- /* Bucket full: replace LRU entry */
- pos = lru_pos(bucket);
- bucket->key[pos] = *((uint64_t *) key);
- memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
- lru_update(bucket, pos);
- *key_found = 0;
- *entry_ptr = (void *) &bucket->data[pos * f->entry_size];
-
- return 0;
-}
-
-static int
-rte_table_hash_entry_delete_key8_lru(
- void *table,
- void *key,
- int *key_found,
- void *entry)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_8 *bucket;
- uint64_t signature, mask;
- uint32_t bucket_index, i;
-
- signature = f->f_hash(key, f->key_size, f->seed);
- bucket_index = signature & (f->n_buckets - 1);
- bucket = (struct rte_bucket_4_8 *)
- &f->memory[bucket_index * f->bucket_size];
-
- /* Key is present in the bucket */
- for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
- uint64_t bucket_signature = bucket->signature;
- uint64_t bucket_key = bucket->key[i];
-
- if ((bucket_signature & mask) &&
- (*((uint64_t *) key) == bucket_key)) {
- uint8_t *bucket_data = &bucket->data[i * f->entry_size];
-
- bucket->signature &= ~mask;
- *key_found = 1;
- if (entry)
- memcpy(entry, bucket_data, f->entry_size);
-
- return 0;
- }
- }
-
- /* Key is not present in the bucket */
- *key_found = 0;
- return 0;
-}
-
-static int
-check_params_create_ext(struct rte_table_hash_key8_ext_params *params) {
- /* n_entries */
- if (params->n_entries == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
- return -EINVAL;
- }
-
- /* n_entries_ext */
- if (params->n_entries_ext == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries_ext is zero\n", __func__);
- return -EINVAL;
- }
-
- /* signature offset */
- if ((params->signature_offset & 0x3) != 0) {
- RTE_LOG(ERR, TABLE, "%s: invalid signature_offset\n", __func__);
- return -EINVAL;
- }
-
- /* key offset */
- if ((params->key_offset & 0x7) != 0) {
- RTE_LOG(ERR, TABLE, "%s: invalid key_offset\n", __func__);
- return -EINVAL;
- }
-
- /* f_hash */
- if (params->f_hash == NULL) {
- RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
- __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void *
-rte_table_hash_create_key8_ext(void *params, int socket_id, uint32_t entry_size)
-{
- struct rte_table_hash_key8_ext_params *p =
- (struct rte_table_hash_key8_ext_params *) params;
- struct rte_table_hash *f;
- uint32_t n_buckets, n_buckets_ext, n_entries_per_bucket, key_size;
- uint32_t bucket_size_cl, stack_size_cl, total_size, i;
-
- /* Check input parameters */
- if ((check_params_create_ext(p) != 0) ||
- ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
- ((sizeof(struct rte_bucket_4_8) % RTE_CACHE_LINE_SIZE) != 0))
- return NULL;
-
- n_entries_per_bucket = 4;
- key_size = 8;
-
- /* Memory allocation */
- n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
- n_entries_per_bucket);
- n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
- n_entries_per_bucket;
- bucket_size_cl = (sizeof(struct rte_bucket_4_8) + n_entries_per_bucket *
- entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
- stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
- / RTE_CACHE_LINE_SIZE;
- total_size = sizeof(struct rte_table_hash) + ((n_buckets +
- n_buckets_ext) * bucket_size_cl + stack_size_cl) *
- RTE_CACHE_LINE_SIZE;
-
- f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
- if (f == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
- return NULL;
- }
- RTE_LOG(INFO, TABLE,
- "%s: Hash table memory footprint is %u bytes\n",
- __func__, total_size);
-
- /* Memory initialization */
- f->n_buckets = n_buckets;
- f->n_entries_per_bucket = n_entries_per_bucket;
- f->key_size = key_size;
- f->entry_size = entry_size;
- f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
- f->signature_offset = p->signature_offset;
- f->key_offset = p->key_offset;
- f->f_hash = p->f_hash;
- f->seed = p->seed;
-
- f->n_buckets_ext = n_buckets_ext;
- f->stack_pos = n_buckets_ext;
- f->stack = (uint32_t *)
- &f->memory[(n_buckets + n_buckets_ext) * f->bucket_size];
-
- for (i = 0; i < n_buckets_ext; i++)
- f->stack[i] = i;
-
- return f;
-}
-
-static int
-rte_table_hash_free_key8_ext(void *table)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
-
- /* Check input parameters */
- if (f == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
- rte_free(f);
- return 0;
-}
-
-static int
-rte_table_hash_entry_add_key8_ext(
- void *table,
- void *key,
- void *entry,
- int *key_found,
- void **entry_ptr)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_8 *bucket0, *bucket, *bucket_prev;
- uint64_t signature;
- uint32_t bucket_index, i;
-
- signature = f->f_hash(key, f->key_size, f->seed);
- bucket_index = signature & (f->n_buckets - 1);
- bucket0 = (struct rte_bucket_4_8 *)
- &f->memory[bucket_index * f->bucket_size];
-
- /* Key is present in the bucket */
- for (bucket = bucket0; bucket != NULL; bucket = bucket->next) {
- uint64_t mask;
-
- for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
- uint64_t bucket_signature = bucket->signature;
- uint64_t bucket_key = bucket->key[i];
-
- if ((bucket_signature & mask) &&
- (*((uint64_t *) key) == bucket_key)) {
- uint8_t *bucket_data = &bucket->data[i *
- f->entry_size];
-
- memcpy(bucket_data, entry, f->entry_size);
- *key_found = 1;
- *entry_ptr = (void *) bucket_data;
- return 0;
- }
- }
- }
-
- /* Key is not present in the bucket */
- for (bucket_prev = NULL, bucket = bucket0;
- bucket != NULL; bucket_prev = bucket, bucket = bucket->next) {
- uint64_t mask;
-
- for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
- uint64_t bucket_signature = bucket->signature;
-
- if ((bucket_signature & mask) == 0) {
- uint8_t *bucket_data = &bucket->data[i *
- f->entry_size];
-
- bucket->signature |= mask;
- bucket->key[i] = *((uint64_t *) key);
- memcpy(bucket_data, entry, f->entry_size);
- *key_found = 0;
- *entry_ptr = (void *) bucket_data;
-
- return 0;
- }
- }
- }
-
- /* Bucket full: extend bucket */
- if (f->stack_pos > 0) {
- bucket_index = f->stack[--f->stack_pos];
-
- bucket = (struct rte_bucket_4_8 *) &f->memory[(f->n_buckets +
- bucket_index) * f->bucket_size];
- bucket_prev->next = bucket;
- bucket_prev->next_valid = 1;
-
- bucket->signature = 1;
- bucket->key[0] = *((uint64_t *) key);
- memcpy(&bucket->data[0], entry, f->entry_size);
- *key_found = 0;
- *entry_ptr = (void *) &bucket->data[0];
- return 0;
- }
-
- return -ENOSPC;
-}
-
-static int
-rte_table_hash_entry_delete_key8_ext(
- void *table,
- void *key,
- int *key_found,
- void *entry)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_8 *bucket0, *bucket, *bucket_prev;
- uint64_t signature;
- uint32_t bucket_index, i;
-
- signature = f->f_hash(key, f->key_size, f->seed);
- bucket_index = signature & (f->n_buckets - 1);
- bucket0 = (struct rte_bucket_4_8 *)
- &f->memory[bucket_index * f->bucket_size];
-
- /* Key is present in the bucket */
- for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
- bucket_prev = bucket, bucket = bucket->next) {
- uint64_t mask;
-
- for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
- uint64_t bucket_signature = bucket->signature;
- uint64_t bucket_key = bucket->key[i];
-
- if ((bucket_signature & mask) &&
- (*((uint64_t *) key) == bucket_key)) {
- uint8_t *bucket_data = &bucket->data[i *
- f->entry_size];
-
- bucket->signature &= ~mask;
- *key_found = 1;
- if (entry)
- memcpy(entry, bucket_data,
- f->entry_size);
-
- if ((bucket->signature == 0) &&
- (bucket_prev != NULL)) {
- bucket_prev->next = bucket->next;
- bucket_prev->next_valid =
- bucket->next_valid;
-
- memset(bucket, 0,
- sizeof(struct rte_bucket_4_8));
- bucket_index = (bucket -
- ((struct rte_bucket_4_8 *)
- f->memory)) - f->n_buckets;
- f->stack[f->stack_pos++] = bucket_index;
- }
-
- return 0;
- }
- }
- }
-
- /* Key is not present in the bucket */
- *key_found = 0;
- return 0;
-}
-
-#define lookup_key8_cmp(key_in, bucket, pos) \
-{ \
- uint64_t xor[4], signature; \
- \
- signature = ~bucket->signature; \
- \
- xor[0] = (key_in[0] ^ bucket->key[0]) | (signature & 1);\
- xor[1] = (key_in[0] ^ bucket->key[1]) | (signature & 2);\
- xor[2] = (key_in[0] ^ bucket->key[2]) | (signature & 4);\
- xor[3] = (key_in[0] ^ bucket->key[3]) | (signature & 8);\
- \
- pos = 4; \
- if (xor[0] == 0) \
- pos = 0; \
- if (xor[1] == 0) \
- pos = 1; \
- if (xor[2] == 0) \
- pos = 2; \
- if (xor[3] == 0) \
- pos = 3; \
-}
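
The lookup_key8_cmp macro above is a branchless four-way compare: inverting
the bucket signature ORs a non-zero bit into the XOR result of every empty
slot, so only a valid slot holding an equal key can produce zero. A minimal
standalone sketch of the same technique (the bucket4 type and demo values
are illustrative, not part of the library):

#include <stdint.h>
#include <stdio.h>

struct bucket4 {
	uint64_t signature;	/* bit i set => slot i holds a valid key */
	uint64_t key[4];
};

/* Return the matching slot (0..3), or 4 on a miss. */
static uint32_t key8_cmp(uint64_t key_in, const struct bucket4 *b)
{
	uint64_t inv = ~b->signature;
	uint64_t x[4];
	uint32_t pos = 4;

	/* A cleared valid bit forces the result non-zero, masking the slot */
	x[0] = (key_in ^ b->key[0]) | (inv & 1);
	x[1] = (key_in ^ b->key[1]) | (inv & 2);
	x[2] = (key_in ^ b->key[2]) | (inv & 4);
	x[3] = (key_in ^ b->key[3]) | (inv & 8);

	if (x[0] == 0)
		pos = 0;
	if (x[1] == 0)
		pos = 1;
	if (x[2] == 0)
		pos = 2;
	if (x[3] == 0)
		pos = 3;
	return pos;
}

int main(void)
{
	struct bucket4 b = { .signature = 0x5,	/* slots 0 and 2 valid */
			     .key = { 10, 10, 42, 42 } };

	printf("%u %u\n", key8_cmp(42, &b), key8_cmp(7, &b));	/* 2 4 */
	return 0;
}
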
-
-#define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask) \
-{ \
- uint64_t pkt_mask; \
- \
- pkt0_index = __builtin_ctzll(pkts_mask); \
- pkt_mask = 1LLU << pkt0_index; \
- pkts_mask &= ~pkt_mask; \
- \
- mbuf0 = pkts[pkt0_index]; \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, 0)); \
-}
-
-#define lookup1_stage1(mbuf1, bucket1, f) \
-{ \
- uint64_t signature; \
- uint32_t bucket_index; \
- \
- signature = RTE_MBUF_METADATA_UINT32(mbuf1, f->signature_offset);\
- bucket_index = signature & (f->n_buckets - 1); \
- bucket1 = (struct rte_bucket_4_8 *) \
- &f->memory[bucket_index * f->bucket_size]; \
- rte_prefetch0(bucket1); \
-}
-
-#define lookup1_stage1_dosig(mbuf1, bucket1, f) \
-{ \
- uint64_t *key; \
- uint64_t signature; \
- uint32_t bucket_index; \
- \
- key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset);\
- signature = f->f_hash(key, RTE_TABLE_HASH_KEY_SIZE, f->seed);\
- bucket_index = signature & (f->n_buckets - 1); \
- bucket1 = (struct rte_bucket_4_8 *) \
- &f->memory[bucket_index * f->bucket_size]; \
- rte_prefetch0(bucket1); \
-}
-
-#define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
- pkts_mask_out, entries, f) \
-{ \
- void *a; \
- uint64_t pkt_mask; \
- uint64_t *key; \
- uint32_t pos; \
- \
- key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
- \
- lookup_key8_cmp(key, bucket2, pos); \
- \
- pkt_mask = ((bucket2->signature >> pos) & 1LLU) << pkt2_index;\
- pkts_mask_out |= pkt_mask; \
- \
- a = (void *) &bucket2->data[pos * f->entry_size]; \
- rte_prefetch0(a); \
- entries[pkt2_index] = a; \
- lru_update(bucket2, pos); \
-}
-
-#define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out,\
- entries, buckets_mask, buckets, keys, f) \
-{ \
- struct rte_bucket_4_8 *bucket_next; \
- void *a; \
- uint64_t pkt_mask, bucket_mask; \
- uint64_t *key; \
- uint32_t pos; \
- \
- key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
- \
- lookup_key8_cmp(key, bucket2, pos); \
- \
- pkt_mask = ((bucket2->signature >> pos) & 1LLU) << pkt2_index;\
- pkts_mask_out |= pkt_mask; \
- \
- a = (void *) &bucket2->data[pos * f->entry_size]; \
- rte_prefetch0(a); \
- entries[pkt2_index] = a; \
- \
- bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index);\
- buckets_mask |= bucket_mask; \
- bucket_next = bucket2->next; \
- buckets[pkt2_index] = bucket_next; \
- keys[pkt2_index] = key; \
-}
-
-#define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, entries,\
- buckets_mask, f) \
-{ \
- struct rte_bucket_4_8 *bucket, *bucket_next; \
- void *a; \
- uint64_t pkt_mask, bucket_mask; \
- uint64_t *key; \
- uint32_t pos; \
- \
- bucket = buckets[pkt_index]; \
- key = keys[pkt_index]; \
- \
- lookup_key8_cmp(key, bucket, pos); \
- \
- pkt_mask = ((bucket->signature >> pos) & 1LLU) << pkt_index;\
- pkts_mask_out |= pkt_mask; \
- \
- a = (void *) &bucket->data[pos * f->entry_size]; \
- rte_prefetch0(a); \
- entries[pkt_index] = a; \
- \
- bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index);\
- buckets_mask |= bucket_mask; \
- bucket_next = bucket->next; \
- rte_prefetch0(bucket_next); \
- buckets[pkt_index] = bucket_next; \
- keys[pkt_index] = key; \
-}
-
-#define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01,\
- pkts, pkts_mask) \
-{ \
- uint64_t pkt00_mask, pkt01_mask; \
- \
- pkt00_index = __builtin_ctzll(pkts_mask); \
- pkt00_mask = 1LLU << pkt00_index; \
- pkts_mask &= ~pkt00_mask; \
- \
- mbuf00 = pkts[pkt00_index]; \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
- \
- pkt01_index = __builtin_ctzll(pkts_mask); \
- pkt01_mask = 1LLU << pkt01_index; \
- pkts_mask &= ~pkt01_mask; \
- \
- mbuf01 = pkts[pkt01_index]; \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
-}
-
-#define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,\
- mbuf00, mbuf01, pkts, pkts_mask) \
-{ \
- uint64_t pkt00_mask, pkt01_mask; \
- \
- pkt00_index = __builtin_ctzll(pkts_mask); \
- pkt00_mask = 1LLU << pkt00_index; \
- pkts_mask &= ~pkt00_mask; \
- \
- mbuf00 = pkts[pkt00_index]; \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
- \
- pkt01_index = __builtin_ctzll(pkts_mask); \
- if (pkts_mask == 0) \
- pkt01_index = pkt00_index; \
- \
- pkt01_mask = 1LLU << pkt01_index; \
- pkts_mask &= ~pkt01_mask; \
- \
- mbuf01 = pkts[pkt01_index]; \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
-}
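
lookup2_stage0_with_odd_support above handles bursts with an odd packet
count: when only one packet remains, its index is reused for the second
pipeline lane, which then harmlessly rewrites the same result slot. A small
sketch of that index-popping logic (pop_two is a hypothetical helper, not
part of the library):

#include <stdint.h>
#include <stdio.h>

/* Pop two packet indexes from the mask; if only one bit was left,
 * duplicate it so both pipeline lanes stay busy. */
static void pop_two(uint64_t *pkts_mask, uint32_t *i0, uint32_t *i1)
{
	*i0 = (uint32_t) __builtin_ctzll(*pkts_mask);
	*pkts_mask &= ~(1LLU << *i0);

	*i1 = *pkts_mask ? (uint32_t) __builtin_ctzll(*pkts_mask) : *i0;
	*pkts_mask &= ~(1LLU << *i1);
}

int main(void)
{
	uint64_t mask = 0x15;	/* packets 0, 2 and 4 */
	uint32_t a, b;

	pop_two(&mask, &a, &b);
	printf("%u %u\n", a, b);	/* 0 2 */
	pop_two(&mask, &a, &b);
	printf("%u %u\n", a, b);	/* 4 4: the odd packet is duplicated */
	return 0;
}
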
-
-#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
-{ \
- uint64_t signature10, signature11; \
- uint32_t bucket10_index, bucket11_index; \
- \
- signature10 = RTE_MBUF_METADATA_UINT32(mbuf10, f->signature_offset);\
- bucket10_index = signature10 & (f->n_buckets - 1); \
- bucket10 = (struct rte_bucket_4_8 *) \
- &f->memory[bucket10_index * f->bucket_size]; \
- rte_prefetch0(bucket10); \
- \
- signature11 = RTE_MBUF_METADATA_UINT32(mbuf11, f->signature_offset);\
- bucket11_index = signature11 & (f->n_buckets - 1); \
- bucket11 = (struct rte_bucket_4_8 *) \
- &f->memory[bucket11_index * f->bucket_size]; \
- rte_prefetch0(bucket11); \
-}
-
-#define lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f)\
-{ \
- uint64_t *key10, *key11; \
- uint64_t signature10, signature11; \
- uint32_t bucket10_index, bucket11_index; \
- rte_table_hash_op_hash f_hash = f->f_hash; \
- uint64_t seed = f->seed; \
- uint32_t key_offset = f->key_offset; \
- \
- key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, key_offset);\
- key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, key_offset);\
- \
- signature10 = f_hash(key10, RTE_TABLE_HASH_KEY_SIZE, seed);\
- bucket10_index = signature10 & (f->n_buckets - 1); \
- bucket10 = (struct rte_bucket_4_8 *) \
- &f->memory[bucket10_index * f->bucket_size]; \
- rte_prefetch0(bucket10); \
- \
- signature11 = f_hash(key11, RTE_TABLE_HASH_KEY_SIZE, seed);\
- bucket11_index = signature11 & (f->n_buckets - 1); \
- bucket11 = (struct rte_bucket_4_8 *) \
- &f->memory[bucket11_index * f->bucket_size]; \
- rte_prefetch0(bucket11); \
-}
-
-#define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,\
- bucket20, bucket21, pkts_mask_out, entries, f) \
-{ \
- void *a20, *a21; \
- uint64_t pkt20_mask, pkt21_mask; \
- uint64_t *key20, *key21; \
- uint32_t pos20, pos21; \
- \
- key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
- key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
- \
- lookup_key8_cmp(key20, bucket20, pos20); \
- lookup_key8_cmp(key21, bucket21, pos21); \
- \
- pkt20_mask = ((bucket20->signature >> pos20) & 1LLU) << pkt20_index;\
- pkt21_mask = ((bucket21->signature >> pos21) & 1LLU) << pkt21_index;\
- pkts_mask_out |= pkt20_mask | pkt21_mask; \
- \
- a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
- a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
- rte_prefetch0(a20); \
- rte_prefetch0(a21); \
- entries[pkt20_index] = a20; \
- entries[pkt21_index] = a21; \
- lru_update(bucket20, pos20); \
- lru_update(bucket21, pos21); \
-}
-
-#define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
- bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f)\
-{ \
- struct rte_bucket_4_8 *bucket20_next, *bucket21_next; \
- void *a20, *a21; \
- uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
- uint64_t *key20, *key21; \
- uint32_t pos20, pos21; \
- \
- key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
- key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
- \
- lookup_key8_cmp(key20, bucket20, pos20); \
- lookup_key8_cmp(key21, bucket21, pos21); \
- \
- pkt20_mask = ((bucket20->signature >> pos20) & 1LLU) << pkt20_index;\
- pkt21_mask = ((bucket21->signature >> pos21) & 1LLU) << pkt21_index;\
- pkts_mask_out |= pkt20_mask | pkt21_mask; \
- \
- a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
- a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
- rte_prefetch0(a20); \
- rte_prefetch0(a21); \
- entries[pkt20_index] = a20; \
- entries[pkt21_index] = a21; \
- \
- bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index);\
- bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index);\
- buckets_mask |= bucket20_mask | bucket21_mask; \
- bucket20_next = bucket20->next; \
- bucket21_next = bucket21->next; \
- buckets[pkt20_index] = bucket20_next; \
- buckets[pkt21_index] = bucket21_next; \
- keys[pkt20_index] = key20; \
- keys[pkt21_index] = key21; \
-}
-
-static int
-rte_table_hash_lookup_key8_lru(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_8 *bucket10, *bucket11, *bucket20, *bucket21;
- struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
- uint32_t pkt00_index, pkt01_index, pkt10_index,
- pkt11_index, pkt20_index, pkt21_index;
- uint64_t pkts_mask_out = 0;
-
- /* Cannot run the pipeline with less than 5 packets */
- if (__builtin_popcountll(pkts_mask) < 5) {
- for ( ; pkts_mask; ) {
- struct rte_bucket_4_8 *bucket;
- struct rte_mbuf *mbuf;
- uint32_t pkt_index;
-
- lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
- lookup1_stage1(mbuf, bucket, f);
- lookup1_stage2_lru(pkt_index, mbuf, bucket,
- pkts_mask_out, entries, f);
- }
-
- *lookup_hit_mask = pkts_mask_out;
- return 0;
- }
-
- /*
- * Pipeline fill
- *
- */
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline feed */
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
- mbuf00, mbuf01, pkts, pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries, f);
- }
-
- /*
- * Pipeline flush
- *
- */
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries, f);
-
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries, f);
-
- *lookup_hit_mask = pkts_mask_out;
- return 0;
-} /* rte_table_hash_lookup_key8_lru() */
-
-static int
-rte_table_hash_lookup_key8_lru_dosig(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_8 *bucket10, *bucket11, *bucket20, *bucket21;
- struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
- uint32_t pkt00_index, pkt01_index, pkt10_index;
- uint32_t pkt11_index, pkt20_index, pkt21_index;
- uint64_t pkts_mask_out = 0;
-
- /* Cannot run the pipeline with less than 5 packets */
- if (__builtin_popcountll(pkts_mask) < 5) {
- for ( ; pkts_mask; ) {
- struct rte_bucket_4_8 *bucket;
- struct rte_mbuf *mbuf;
- uint32_t pkt_index;
-
- lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
- lookup1_stage1_dosig(mbuf, bucket, f);
- lookup1_stage2_lru(pkt_index, mbuf, bucket,
- pkts_mask_out, entries, f);
- }
-
- *lookup_hit_mask = pkts_mask_out;
- return 0;
- }
-
- /*
- * Pipeline fill
- *
- */
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline feed */
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
- mbuf00, mbuf01, pkts, pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries, f);
- }
-
- /*
- * Pipeline flush
- *
- */
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries, f);
-
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries, f);
-
- *lookup_hit_mask = pkts_mask_out;
- return 0;
-} /* rte_table_hash_lookup_key8_lru_dosig() */
-
-static int
-rte_table_hash_lookup_key8_ext(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_8 *bucket10, *bucket11, *bucket20, *bucket21;
- struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
- uint32_t pkt00_index, pkt01_index, pkt10_index;
- uint32_t pkt11_index, pkt20_index, pkt21_index;
- uint64_t pkts_mask_out = 0, buckets_mask = 0;
- struct rte_bucket_4_8 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
- uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];
-
- /* Cannot run the pipeline with less than 5 packets */
- if (__builtin_popcountll(pkts_mask) < 5) {
- for ( ; pkts_mask; ) {
- struct rte_bucket_4_8 *bucket;
- struct rte_mbuf *mbuf;
- uint32_t pkt_index;
-
- lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
- lookup1_stage1(mbuf, bucket, f);
- lookup1_stage2_ext(pkt_index, mbuf, bucket,
- pkts_mask_out, entries, buckets_mask, buckets,
- keys, f);
- }
-
- goto grind_next_buckets;
- }
-
- /*
- * Pipeline fill
- *
- */
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline feed */
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
- mbuf00, mbuf01, pkts, pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
- }
-
- /*
- * Pipeline flush
- *
- */
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
-
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
-
-grind_next_buckets:
- /* Grind next buckets */
- for ( ; buckets_mask; ) {
- uint64_t buckets_mask_next = 0;
-
- for ( ; buckets_mask; ) {
- uint64_t pkt_mask;
- uint32_t pkt_index;
-
- pkt_index = __builtin_ctzll(buckets_mask);
- pkt_mask = 1LLU << pkt_index;
- buckets_mask &= ~pkt_mask;
-
- lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
- entries, buckets_mask_next, f);
- }
-
- buckets_mask = buckets_mask_next;
- }
-
- *lookup_hit_mask = pkts_mask_out;
- return 0;
-} /* rte_table_hash_lookup_key8_ext() */
-
-static int
-rte_table_hash_lookup_key8_ext_dosig(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_8 *bucket10, *bucket11, *bucket20, *bucket21;
- struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
- uint32_t pkt00_index, pkt01_index, pkt10_index;
- uint32_t pkt11_index, pkt20_index, pkt21_index;
- uint64_t pkts_mask_out = 0, buckets_mask = 0;
- struct rte_bucket_4_8 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
- uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];
-
- /* Cannot run the pipeline with less than 5 packets */
- if (__builtin_popcountll(pkts_mask) < 5) {
- for ( ; pkts_mask; ) {
- struct rte_bucket_4_8 *bucket;
- struct rte_mbuf *mbuf;
- uint32_t pkt_index;
-
- lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
- lookup1_stage1_dosig(mbuf, bucket, f);
- lookup1_stage2_ext(pkt_index, mbuf, bucket,
- pkts_mask_out, entries, buckets_mask,
- buckets, keys, f);
- }
-
- goto grind_next_buckets;
- }
-
- /*
- * Pipeline fill
- *
- */
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline feed */
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
- mbuf00, mbuf01, pkts, pkts_mask);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
- }
-
- /*
- * Pipeline flush
- *
- */
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
-
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
-
-grind_next_buckets:
- /* Grind next buckets */
- for ( ; buckets_mask; ) {
- uint64_t buckets_mask_next = 0;
-
- for ( ; buckets_mask; ) {
- uint64_t pkt_mask;
- uint32_t pkt_index;
-
- pkt_index = __builtin_ctzll(buckets_mask);
- pkt_mask = 1LLU << pkt_index;
- buckets_mask &= ~pkt_mask;
-
- lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
- entries, buckets_mask_next, f);
- }
-
- buckets_mask = buckets_mask_next;
- }
-
- *lookup_hit_mask = pkts_mask_out;
- return 0;
-} /* rte_table_hash_lookup_key8_ext_dosig() */
-
-struct rte_table_ops rte_table_hash_key8_lru_ops = {
- .f_create = rte_table_hash_create_key8_lru,
- .f_free = rte_table_hash_free_key8_lru,
- .f_add = rte_table_hash_entry_add_key8_lru,
- .f_delete = rte_table_hash_entry_delete_key8_lru,
- .f_lookup = rte_table_hash_lookup_key8_lru,
-};
-
-struct rte_table_ops rte_table_hash_key8_lru_dosig_ops = {
- .f_create = rte_table_hash_create_key8_lru,
- .f_free = rte_table_hash_free_key8_lru,
- .f_add = rte_table_hash_entry_add_key8_lru,
- .f_delete = rte_table_hash_entry_delete_key8_lru,
- .f_lookup = rte_table_hash_lookup_key8_lru_dosig,
-};
-
-struct rte_table_ops rte_table_hash_key8_ext_ops = {
- .f_create = rte_table_hash_create_key8_ext,
- .f_free = rte_table_hash_free_key8_ext,
- .f_add = rte_table_hash_entry_add_key8_ext,
- .f_delete = rte_table_hash_entry_delete_key8_ext,
- .f_lookup = rte_table_hash_lookup_key8_ext,
-};
-
-struct rte_table_ops rte_table_hash_key8_ext_dosig_ops = {
- .f_create = rte_table_hash_create_key8_ext,
- .f_free = rte_table_hash_free_key8_ext,
- .f_add = rte_table_hash_entry_add_key8_ext,
- .f_delete = rte_table_hash_entry_delete_key8_ext,
- .f_lookup = rte_table_hash_lookup_key8_ext_dosig,
-};
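
The four ops tables above publish the key8 handlers through the generic
rte_table_ops function-pointer interface, which is how the packet framework
drives any table type without knowing its internal layout. A minimal sketch
of that dispatch pattern with stand-in types (table_ops, demo_table and the
demo_* handlers are illustrative, not the DPDK API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Callers see only function pointers, never the concrete table type. */
struct table_ops {
	void *(*f_create)(uint32_t n_entries);
	int (*f_free)(void *table);
	int (*f_lookup)(void *table, uint64_t key, uint64_t *value);
};

struct demo_table {
	uint32_t n;
	uint64_t base;
};

static void *demo_create(uint32_t n_entries)
{
	struct demo_table *t = malloc(sizeof(*t));

	if (t != NULL) {
		t->n = n_entries;
		t->base = 100;
	}
	return t;
}

static int demo_free(void *table)
{
	free(table);
	return 0;
}

static int demo_lookup(void *table, uint64_t key, uint64_t *value)
{
	struct demo_table *t = table;

	if (key >= t->n)
		return -1;	/* lookup miss */
	*value = t->base + key;
	return 0;
}

static const struct table_ops demo_ops = {
	.f_create = demo_create,
	.f_free = demo_free,
	.f_lookup = demo_lookup,
};

int main(void)
{
	void *t = demo_ops.f_create(4);
	uint64_t v;

	if (demo_ops.f_lookup(t, 2, &v) == 0)
		printf("hit: %u\n", (unsigned) v);	/* hit: 102 */
	demo_ops.f_free(t);
	return 0;
}
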
diff --git a/src/dpdk_lib18/librte_table/rte_table_hash_lru.c b/src/dpdk_lib18/librte_table/rte_table_hash_lru.c
deleted file mode 100755
index c9a8afd7..00000000
--- a/src/dpdk_lib18/librte_table/rte_table_hash_lru.c
+++ /dev/null
@@ -1,1065 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <stdio.h>
-
-#include <rte_common.h>
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_malloc.h>
-#include <rte_log.h>
-
-#include "rte_table_hash.h"
-#include "rte_lru.h"
-
-#define KEYS_PER_BUCKET 4
-
-struct bucket {
- union {
- struct bucket *next;
- uint64_t lru_list;
- };
- uint16_t sig[KEYS_PER_BUCKET];
- uint32_t key_pos[KEYS_PER_BUCKET];
-};
-
-struct grinder {
- struct bucket *bkt;
- uint64_t sig;
- uint64_t match;
- uint64_t match_pos;
- uint32_t key_index;
-};
-
-struct rte_table_hash {
- /* Input parameters */
- uint32_t key_size;
- uint32_t entry_size;
- uint32_t n_keys;
- uint32_t n_buckets;
- rte_table_hash_op_hash f_hash;
- uint64_t seed;
- uint32_t signature_offset;
- uint32_t key_offset;
-
- /* Internal */
- uint64_t bucket_mask;
- uint32_t key_size_shl;
- uint32_t data_size_shl;
- uint32_t key_stack_tos;
-
- /* Grinder */
- struct grinder grinders[RTE_PORT_IN_BURST_SIZE_MAX];
-
- /* Tables */
- struct bucket *buckets;
- uint8_t *key_mem;
- uint8_t *data_mem;
- uint32_t *key_stack;
-
- /* Table memory */
- uint8_t memory[0] __rte_cache_aligned;
-};
-
-static int
-check_params_create(struct rte_table_hash_lru_params *params)
-{
- uint32_t n_buckets_min;
-
- /* key_size */
- if ((params->key_size == 0) ||
- (!rte_is_power_of_2(params->key_size))) {
- RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
- return -EINVAL;
- }
-
- /* n_keys */
- if ((params->n_keys == 0) ||
- (!rte_is_power_of_2(params->n_keys))) {
- RTE_LOG(ERR, TABLE, "%s: n_keys invalid value\n", __func__);
- return -EINVAL;
- }
-
- /* n_buckets */
-	n_buckets_min = (params->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET;
- if ((params->n_buckets == 0) ||
-		(!rte_is_power_of_2(params->n_buckets)) ||
- (params->n_buckets < n_buckets_min)) {
- RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
- return -EINVAL;
- }
-
- /* f_hash */
- if (params->f_hash == NULL) {
- RTE_LOG(ERR, TABLE, "%s: f_hash invalid value\n", __func__);
- return -EINVAL;
- }
-
- /* signature offset */
- if ((params->signature_offset & 0x3) != 0) {
- RTE_LOG(ERR, TABLE, "%s: signature_offset invalid value\n",
- __func__);
- return -EINVAL;
- }
-
- /* key offset */
- if ((params->key_offset & 0x7) != 0) {
- RTE_LOG(ERR, TABLE, "%s: key_offset invalid value\n", __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void *
-rte_table_hash_lru_create(void *params, int socket_id, uint32_t entry_size)
-{
- struct rte_table_hash_lru_params *p =
- (struct rte_table_hash_lru_params *) params;
- struct rte_table_hash *t;
- uint32_t total_size, table_meta_sz;
- uint32_t bucket_sz, key_sz, key_stack_sz, data_sz;
- uint32_t bucket_offset, key_offset, key_stack_offset, data_offset;
- uint32_t i;
-
- /* Check input parameters */
- if ((check_params_create(p) != 0) ||
- (!rte_is_power_of_2(entry_size)) ||
- ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
- (sizeof(struct bucket) != (RTE_CACHE_LINE_SIZE / 2))) {
- return NULL;
- }
-
- /* Memory allocation */
- table_meta_sz = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
- bucket_sz = RTE_CACHE_LINE_ROUNDUP(p->n_buckets * sizeof(struct bucket));
- key_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
- key_stack_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
- data_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
- total_size = table_meta_sz + bucket_sz + key_sz + key_stack_sz +
- data_sz;
-
- t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
- if (t == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
- return NULL;
- }
- RTE_LOG(INFO, TABLE, "%s (%u-byte key): Hash table memory footprint is "
- "%u bytes\n", __func__, p->key_size, total_size);
-
- /* Memory initialization */
- t->key_size = p->key_size;
- t->entry_size = entry_size;
- t->n_keys = p->n_keys;
- t->n_buckets = p->n_buckets;
- t->f_hash = p->f_hash;
- t->seed = p->seed;
- t->signature_offset = p->signature_offset;
- t->key_offset = p->key_offset;
-
- /* Internal */
- t->bucket_mask = t->n_buckets - 1;
- t->key_size_shl = __builtin_ctzl(p->key_size);
- t->data_size_shl = __builtin_ctzl(entry_size);
-
- /* Tables */
- bucket_offset = 0;
- key_offset = bucket_offset + bucket_sz;
- key_stack_offset = key_offset + key_sz;
- data_offset = key_stack_offset + key_stack_sz;
-
- t->buckets = (struct bucket *) &t->memory[bucket_offset];
- t->key_mem = &t->memory[key_offset];
- t->key_stack = (uint32_t *) &t->memory[key_stack_offset];
- t->data_mem = &t->memory[data_offset];
-
- /* Key stack */
- for (i = 0; i < t->n_keys; i++)
- t->key_stack[i] = t->n_keys - 1 - i;
- t->key_stack_tos = t->n_keys;
-
- /* LRU */
- for (i = 0; i < t->n_buckets; i++) {
- struct bucket *bkt = &t->buckets[i];
-
- lru_init(bkt);
- }
-
- return t;
-}
-
-static int
-rte_table_hash_lru_free(void *table)
-{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
-
- /* Check input parameters */
- if (t == NULL)
- return -EINVAL;
-
- rte_free(t);
- return 0;
-}
-
-static int
-rte_table_hash_lru_entry_add(void *table, void *key, void *entry,
- int *key_found, void **entry_ptr)
-{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
- struct bucket *bkt;
- uint64_t sig;
- uint32_t bkt_index, i;
-
- sig = t->f_hash(key, t->key_size, t->seed);
- bkt_index = sig & t->bucket_mask;
- bkt = &t->buckets[bkt_index];
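-	/* Keep the upper hash bits as the stored 16-bit signature; forcing
-	 * bit 0 keeps it non-zero, since sig == 0 marks a free slot */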
- sig = (sig >> 16) | 1LLU;
-
- /* Key is present in the bucket */
- for (i = 0; i < KEYS_PER_BUCKET; i++) {
- uint64_t bkt_sig = (uint64_t) bkt->sig[i];
- uint32_t bkt_key_index = bkt->key_pos[i];
- uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
- t->key_size_shl];
-
- if ((sig == bkt_sig) && (memcmp(key, bkt_key, t->key_size)
- == 0)) {
- uint8_t *data = &t->data_mem[bkt_key_index <<
- t->data_size_shl];
-
- memcpy(data, entry, t->entry_size);
- lru_update(bkt, i);
- *key_found = 1;
- *entry_ptr = (void *) data;
- return 0;
- }
- }
-
- /* Key is not present in the bucket */
- for (i = 0; i < KEYS_PER_BUCKET; i++) {
- uint64_t bkt_sig = (uint64_t) bkt->sig[i];
-
- if (bkt_sig == 0) {
- uint32_t bkt_key_index;
- uint8_t *bkt_key, *data;
-
- /* Allocate new key */
- if (t->key_stack_tos == 0) {
- /* No keys available */
- return -ENOSPC;
- }
- bkt_key_index = t->key_stack[--t->key_stack_tos];
-
- /* Install new key */
- bkt_key = &t->key_mem[bkt_key_index << t->key_size_shl];
- data = &t->data_mem[bkt_key_index << t->data_size_shl];
-
- bkt->sig[i] = (uint16_t) sig;
- bkt->key_pos[i] = bkt_key_index;
- memcpy(bkt_key, key, t->key_size);
- memcpy(data, entry, t->entry_size);
- lru_update(bkt, i);
-
- *key_found = 0;
- *entry_ptr = (void *) data;
- return 0;
- }
- }
-
- /* Bucket full */
- {
- uint64_t pos = lru_pos(bkt);
- uint32_t bkt_key_index = bkt->key_pos[pos];
- uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
- t->key_size_shl];
- uint8_t *data = &t->data_mem[bkt_key_index << t->data_size_shl];
-
- bkt->sig[pos] = (uint16_t) sig;
- memcpy(bkt_key, key, t->key_size);
- memcpy(data, entry, t->entry_size);
- lru_update(bkt, pos);
-
- *key_found = 0;
- *entry_ptr = (void *) data;
- return 0;
- }
-}
-
-static int
-rte_table_hash_lru_entry_delete(void *table, void *key, int *key_found,
- void *entry)
-{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
- struct bucket *bkt;
- uint64_t sig;
- uint32_t bkt_index, i;
-
- sig = t->f_hash(key, t->key_size, t->seed);
- bkt_index = sig & t->bucket_mask;
- bkt = &t->buckets[bkt_index];
- sig = (sig >> 16) | 1LLU;
-
- /* Key is present in the bucket */
- for (i = 0; i < KEYS_PER_BUCKET; i++) {
- uint64_t bkt_sig = (uint64_t) bkt->sig[i];
- uint32_t bkt_key_index = bkt->key_pos[i];
- uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
- t->key_size_shl];
-
- if ((sig == bkt_sig) &&
- (memcmp(key, bkt_key, t->key_size) == 0)) {
- uint8_t *data = &t->data_mem[bkt_key_index <<
- t->data_size_shl];
-
- bkt->sig[i] = 0;
- t->key_stack[t->key_stack_tos++] = bkt_key_index;
- *key_found = 1;
-			if (entry)
-				memcpy(entry, data, t->entry_size);
- return 0;
- }
- }
-
- /* Key is not present in the bucket */
- *key_found = 0;
- return 0;
-}
-
-static int rte_table_hash_lru_lookup_unoptimized(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries,
- int dosig)
-{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
- uint64_t pkts_mask_out = 0;
-
- for ( ; pkts_mask; ) {
- struct bucket *bkt;
- struct rte_mbuf *pkt;
- uint8_t *key;
- uint64_t pkt_mask, sig;
- uint32_t pkt_index, bkt_index, i;
-
- pkt_index = __builtin_ctzll(pkts_mask);
- pkt_mask = 1LLU << pkt_index;
- pkts_mask &= ~pkt_mask;
-
- pkt = pkts[pkt_index];
- key = RTE_MBUF_METADATA_UINT8_PTR(pkt, t->key_offset);
- if (dosig)
- sig = (uint64_t) t->f_hash(key, t->key_size, t->seed);
- else
- sig = RTE_MBUF_METADATA_UINT32(pkt,
- t->signature_offset);
-
- bkt_index = sig & t->bucket_mask;
- bkt = &t->buckets[bkt_index];
- sig = (sig >> 16) | 1LLU;
-
- /* Key is present in the bucket */
- for (i = 0; i < KEYS_PER_BUCKET; i++) {
- uint64_t bkt_sig = (uint64_t) bkt->sig[i];
- uint32_t bkt_key_index = bkt->key_pos[i];
- uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
- t->key_size_shl];
-
- if ((sig == bkt_sig) && (memcmp(key, bkt_key,
- t->key_size) == 0)) {
- uint8_t *data = &t->data_mem[bkt_key_index <<
- t->data_size_shl];
-
- lru_update(bkt, i);
- pkts_mask_out |= pkt_mask;
- entries[pkt_index] = (void *) data;
- break;
- }
- }
- }
-
- *lookup_hit_mask = pkts_mask_out;
- return 0;
-}
-
-/***
-*
-* mask = match bitmask
-* match = at least one match
-* match_many = more than one match
-* match_pos = position of first match
-*
-* ----------------------------------------
-* mask match match_many match_pos
-* ----------------------------------------
-* 0000 0 0 00
-* 0001 1 0 00
-* 0010 1 0 01
-* 0011 1 1 00
-* ----------------------------------------
-* 0100 1 0 10
-* 0101 1 1 00
-* 0110 1 1 01
-* 0111 1 1 00
-* ----------------------------------------
-* 1000 1 0 11
-* 1001 1 1 00
-* 1010 1 1 01
-* 1011 1 1 00
-* ----------------------------------------
-* 1100 1 1 10
-* 1101 1 1 00
-* 1110 1 1 01
-* 1111 1 1 00
-* ----------------------------------------
-*
-* match = 1111_1111_1111_1110
-* match_many = 1111_1110_1110_1000
-* match_pos = 0001_0010_0001_0011__0001_0010_0001_0000
-*
-* match = 0xFFFELLU
-* match_many = 0xFEE8LLU
-* match_pos = 0x12131210LLU
-*
-***/
-
-#define LUT_MATCH 0xFFFELLU
-#define LUT_MATCH_MANY 0xFEE8LLU
-#define LUT_MATCH_POS 0x12131210LLU
-
-#define lookup_cmp_sig(mbuf_sig, bucket, match, match_many, match_pos)\
-{ \
- uint64_t bucket_sig[4], mask[4], mask_all; \
- \
- bucket_sig[0] = bucket->sig[0]; \
- bucket_sig[1] = bucket->sig[1]; \
- bucket_sig[2] = bucket->sig[2]; \
- bucket_sig[3] = bucket->sig[3]; \
- \
- bucket_sig[0] ^= mbuf_sig; \
- bucket_sig[1] ^= mbuf_sig; \
- bucket_sig[2] ^= mbuf_sig; \
- bucket_sig[3] ^= mbuf_sig; \
- \
- mask[0] = 0; \
- mask[1] = 0; \
- mask[2] = 0; \
- mask[3] = 0; \
- \
- if (bucket_sig[0] == 0) \
- mask[0] = 1; \
- if (bucket_sig[1] == 0) \
- mask[1] = 2; \
- if (bucket_sig[2] == 0) \
- mask[2] = 4; \
- if (bucket_sig[3] == 0) \
- mask[3] = 8; \
- \
- mask_all = (mask[0] | mask[1]) | (mask[2] | mask[3]); \
- \
- match = (LUT_MATCH >> mask_all) & 1; \
- match_many = (LUT_MATCH_MANY >> mask_all) & 1; \
- match_pos = (LUT_MATCH_POS >> (mask_all << 1)) & 3; \
-}
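
The three LUT constants compress the truth table from the comment above: bit
mask_all of LUT_MATCH and LUT_MATCH_MANY holds the match and match_many
flags, and the 2-bit field at offset 2 * mask_all of LUT_MATCH_POS holds the
first matching slot. A self-contained check that decodes all 16 mask values
exactly as lookup_cmp_sig does:

#include <stdint.h>
#include <stdio.h>

#define LUT_MATCH	0xFFFELLU
#define LUT_MATCH_MANY	0xFEE8LLU
#define LUT_MATCH_POS	0x12131210LLU

int main(void)
{
	uint64_t mask_all;

	/* Reprints the table: mask, match, match_many, match_pos */
	for (mask_all = 0; mask_all < 16; mask_all++)
		printf("%2llu %llu %llu %llu\n",
			(unsigned long long) mask_all,
			(unsigned long long) ((LUT_MATCH >> mask_all) & 1),
			(unsigned long long) ((LUT_MATCH_MANY >> mask_all) & 1),
			(unsigned long long) ((LUT_MATCH_POS >>
				(mask_all << 1)) & 3));
	return 0;
}
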
-
-#define lookup_cmp_key(mbuf, key, match_key, f) \
-{ \
- uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(mbuf, f->key_offset);\
- uint64_t *bkt_key = (uint64_t *) key; \
- \
- switch (f->key_size) { \
- case 8: \
- { \
- uint64_t xor = pkt_key[0] ^ bkt_key[0]; \
- match_key = 0; \
- if (xor == 0) \
- match_key = 1; \
- } \
- break; \
- \
- case 16: \
- { \
- uint64_t xor[2], or; \
- \
- xor[0] = pkt_key[0] ^ bkt_key[0]; \
- xor[1] = pkt_key[1] ^ bkt_key[1]; \
- or = xor[0] | xor[1]; \
- match_key = 0; \
- if (or == 0) \
- match_key = 1; \
- } \
- break; \
- \
- case 32: \
- { \
- uint64_t xor[4], or; \
- \
- xor[0] = pkt_key[0] ^ bkt_key[0]; \
- xor[1] = pkt_key[1] ^ bkt_key[1]; \
- xor[2] = pkt_key[2] ^ bkt_key[2]; \
- xor[3] = pkt_key[3] ^ bkt_key[3]; \
- or = xor[0] | xor[1] | xor[2] | xor[3]; \
- match_key = 0; \
- if (or == 0) \
- match_key = 1; \
- } \
- break; \
- \
- case 64: \
- { \
- uint64_t xor[8], or; \
- \
- xor[0] = pkt_key[0] ^ bkt_key[0]; \
- xor[1] = pkt_key[1] ^ bkt_key[1]; \
- xor[2] = pkt_key[2] ^ bkt_key[2]; \
- xor[3] = pkt_key[3] ^ bkt_key[3]; \
- xor[4] = pkt_key[4] ^ bkt_key[4]; \
- xor[5] = pkt_key[5] ^ bkt_key[5]; \
- xor[6] = pkt_key[6] ^ bkt_key[6]; \
- xor[7] = pkt_key[7] ^ bkt_key[7]; \
- or = xor[0] | xor[1] | xor[2] | xor[3] | \
- xor[4] | xor[5] | xor[6] | xor[7]; \
- match_key = 0; \
- if (or == 0) \
- match_key = 1; \
- } \
- break; \
- \
- default: \
- match_key = 0; \
- if (memcmp(pkt_key, bkt_key, f->key_size) == 0) \
- match_key = 1; \
- } \
-}
-
-#define lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index)\
-{ \
- uint64_t pkt00_mask, pkt01_mask; \
- struct rte_mbuf *mbuf00, *mbuf01; \
- \
- pkt00_index = __builtin_ctzll(pkts_mask); \
- pkt00_mask = 1LLU << pkt00_index; \
- pkts_mask &= ~pkt00_mask; \
- mbuf00 = pkts[pkt00_index]; \
- \
- pkt01_index = __builtin_ctzll(pkts_mask); \
- pkt01_mask = 1LLU << pkt01_index; \
- pkts_mask &= ~pkt01_mask; \
- mbuf01 = pkts[pkt01_index]; \
- \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
-}
-
-#define lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask, pkt00_index, \
- pkt01_index) \
-{ \
- uint64_t pkt00_mask, pkt01_mask; \
- struct rte_mbuf *mbuf00, *mbuf01; \
- \
- pkt00_index = __builtin_ctzll(pkts_mask); \
- pkt00_mask = 1LLU << pkt00_index; \
- pkts_mask &= ~pkt00_mask; \
- mbuf00 = pkts[pkt00_index]; \
- \
- pkt01_index = __builtin_ctzll(pkts_mask); \
- if (pkts_mask == 0) \
- pkt01_index = pkt00_index; \
- \
- pkt01_mask = 1LLU << pkt01_index; \
- pkts_mask &= ~pkt01_mask; \
- mbuf01 = pkts[pkt01_index]; \
- \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
- rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
-}
-
-#define lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index) \
-{ \
- struct grinder *g10, *g11; \
- uint64_t sig10, sig11, bkt10_index, bkt11_index; \
- struct rte_mbuf *mbuf10, *mbuf11; \
- struct bucket *bkt10, *bkt11, *buckets = t->buckets; \
- uint64_t bucket_mask = t->bucket_mask; \
- uint32_t signature_offset = t->signature_offset; \
- \
- mbuf10 = pkts[pkt10_index]; \
- sig10 = (uint64_t) RTE_MBUF_METADATA_UINT32(mbuf10, signature_offset);\
- bkt10_index = sig10 & bucket_mask; \
- bkt10 = &buckets[bkt10_index]; \
- \
- mbuf11 = pkts[pkt11_index]; \
- sig11 = (uint64_t) RTE_MBUF_METADATA_UINT32(mbuf11, signature_offset);\
- bkt11_index = sig11 & bucket_mask; \
- bkt11 = &buckets[bkt11_index]; \
- \
- rte_prefetch0(bkt10); \
- rte_prefetch0(bkt11); \
- \
- g10 = &g[pkt10_index]; \
- g10->sig = sig10; \
- g10->bkt = bkt10; \
- \
- g11 = &g[pkt11_index]; \
- g11->sig = sig11; \
- g11->bkt = bkt11; \
-}
-
-#define lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index)\
-{ \
- struct grinder *g10, *g11; \
- uint64_t sig10, sig11, bkt10_index, bkt11_index; \
- struct rte_mbuf *mbuf10, *mbuf11; \
- struct bucket *bkt10, *bkt11, *buckets = t->buckets; \
- uint8_t *key10, *key11; \
- uint64_t bucket_mask = t->bucket_mask; \
- rte_table_hash_op_hash f_hash = t->f_hash; \
- uint64_t seed = t->seed; \
- uint32_t key_size = t->key_size; \
- uint32_t key_offset = t->key_offset; \
- \
- mbuf10 = pkts[pkt10_index]; \
- key10 = RTE_MBUF_METADATA_UINT8_PTR(mbuf10, key_offset);\
- sig10 = (uint64_t) f_hash(key10, key_size, seed); \
- bkt10_index = sig10 & bucket_mask; \
- bkt10 = &buckets[bkt10_index]; \
- \
- mbuf11 = pkts[pkt11_index]; \
- key11 = RTE_MBUF_METADATA_UINT8_PTR(mbuf11, key_offset);\
- sig11 = (uint64_t) f_hash(key11, key_size, seed); \
- bkt11_index = sig11 & bucket_mask; \
- bkt11 = &buckets[bkt11_index]; \
- \
- rte_prefetch0(bkt10); \
- rte_prefetch0(bkt11); \
- \
- g10 = &g[pkt10_index]; \
- g10->sig = sig10; \
- g10->bkt = bkt10; \
- \
- g11 = &g[pkt11_index]; \
- g11->sig = sig11; \
- g11->bkt = bkt11; \
-}
-
-#define lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many)\
-{ \
- struct grinder *g20, *g21; \
- uint64_t sig20, sig21; \
- struct bucket *bkt20, *bkt21; \
- uint8_t *key20, *key21, *key_mem = t->key_mem; \
- uint64_t match20, match21, match_many20, match_many21; \
- uint64_t match_pos20, match_pos21; \
- uint32_t key20_index, key21_index, key_size_shl = t->key_size_shl;\
- \
- g20 = &g[pkt20_index]; \
- sig20 = g20->sig; \
- bkt20 = g20->bkt; \
- sig20 = (sig20 >> 16) | 1LLU; \
- lookup_cmp_sig(sig20, bkt20, match20, match_many20, match_pos20);\
- match20 <<= pkt20_index; \
- match_many20 <<= pkt20_index; \
- key20_index = bkt20->key_pos[match_pos20]; \
- key20 = &key_mem[key20_index << key_size_shl]; \
- \
- g21 = &g[pkt21_index]; \
- sig21 = g21->sig; \
- bkt21 = g21->bkt; \
- sig21 = (sig21 >> 16) | 1LLU; \
- lookup_cmp_sig(sig21, bkt21, match21, match_many21, match_pos21);\
- match21 <<= pkt21_index; \
- match_many21 <<= pkt21_index; \
- key21_index = bkt21->key_pos[match_pos21]; \
- key21 = &key_mem[key21_index << key_size_shl]; \
- \
- rte_prefetch0(key20); \
- rte_prefetch0(key21); \
- \
- pkts_mask_match_many |= match_many20 | match_many21; \
- \
- g20->match = match20; \
- g20->match_pos = match_pos20; \
- g20->key_index = key20_index; \
- \
- g21->match = match21; \
- g21->match_pos = match_pos21; \
- g21->key_index = key21_index; \
-}
-
-#define lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out, \
- entries) \
-{ \
- struct grinder *g30, *g31; \
- struct rte_mbuf *mbuf30, *mbuf31; \
- struct bucket *bkt30, *bkt31; \
- uint8_t *key30, *key31, *key_mem = t->key_mem; \
- uint8_t *data30, *data31, *data_mem = t->data_mem; \
- uint64_t match30, match31, match_pos30, match_pos31; \
- uint64_t match_key30, match_key31, match_keys; \
- uint32_t key30_index, key31_index; \
- uint32_t key_size_shl = t->key_size_shl; \
- uint32_t data_size_shl = t->data_size_shl; \
- \
- mbuf30 = pkts[pkt30_index]; \
- g30 = &g[pkt30_index]; \
- bkt30 = g30->bkt; \
- match30 = g30->match; \
- match_pos30 = g30->match_pos; \
- key30_index = g30->key_index; \
- key30 = &key_mem[key30_index << key_size_shl]; \
- lookup_cmp_key(mbuf30, key30, match_key30, t); \
- match_key30 <<= pkt30_index; \
- match_key30 &= match30; \
- data30 = &data_mem[key30_index << data_size_shl]; \
- entries[pkt30_index] = data30; \
- \
- mbuf31 = pkts[pkt31_index]; \
- g31 = &g[pkt31_index]; \
- bkt31 = g31->bkt; \
- match31 = g31->match; \
- match_pos31 = g31->match_pos; \
- key31_index = g31->key_index; \
- key31 = &key_mem[key31_index << key_size_shl]; \
- lookup_cmp_key(mbuf31, key31, match_key31, t); \
- match_key31 <<= pkt31_index; \
- match_key31 &= match31; \
- data31 = &data_mem[key31_index << data_size_shl]; \
- entries[pkt31_index] = data31; \
- \
- rte_prefetch0(data30); \
- rte_prefetch0(data31); \
- \
- match_keys = match_key30 | match_key31; \
- pkts_mask_out |= match_keys; \
- \
- if (match_key30 == 0) \
- match_pos30 = 4; \
- lru_update(bkt30, match_pos30); \
- \
- if (match_key31 == 0) \
- match_pos31 = 4; \
- lru_update(bkt31, match_pos31); \
-}
-
-/***
-* The lookup function implements a 4-stage pipeline, with each stage processing
-* two different packets. The purpose of the pipelined implementation is to
-* hide the latency of prefetching the data structures and to loosen the data
-* dependencies between instructions.
-*
-* p00 _______ p10 _______ p20 _______ p30 _______
-* ----->| |----->| |----->| |----->| |----->
-* | 0 | | 1 | | 2 | | 3 |
-* ----->|_______|----->|_______|----->|_______|----->|_______|----->
-* p01 p11 p21 p31
-*
-* The naming convention is:
-* pXY = packet Y of stage X, X = 0 .. 3, Y = 0 .. 1
-*
-***/
-static int rte_table_hash_lru_lookup(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
- struct grinder *g = t->grinders;
- uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
- uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
- uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
- int status = 0;
-
- /* Cannot run the pipeline with fewer than 7 packets */
- if (__builtin_popcountll(pkts_mask) < 7)
- return rte_table_hash_lru_lookup_unoptimized(table, pkts,
- pkts_mask, lookup_hit_mask, entries, 0);
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline feed */
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline feed */
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /*
- * Pipeline run
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
- pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index,
- pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
- pkts_mask_out, entries);
- }
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Slow path */
- pkts_mask_match_many &= ~pkts_mask_out;
- if (pkts_mask_match_many) {
- uint64_t pkts_mask_out_slow = 0;
-
- status = rte_table_hash_lru_lookup_unoptimized(table, pkts,
- pkts_mask_match_many, &pkts_mask_out_slow, entries, 0);
- pkts_mask_out |= pkts_mask_out_slow;
- }
-
- *lookup_hit_mask = pkts_mask_out;
- return status;
-}
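
The feed/advance structure above is the generic software-pipelining idiom: each iteration moves every in-flight packet pair one stage forward, so a prefetch issued in an early stage has several iterations to complete before a later stage touches the data. A minimal self-contained sketch of the same skeleton, with hypothetical stage0()/stage1() helpers standing in for the lookup2_stage* macros:

#include <stdio.h>

/* Hypothetical two-stage pipeline over an int array. stage0() stands in
 * for "issue prefetch", stage1() for "consume data"; the feed/drain
 * structure mirrors the 4-stage lookup pipeline above. */
static void stage0(int v) { printf("stage0: %d\n", v); }
static void stage1(int v) { printf("stage1: %d\n", v); }

static void pipelined(const int *items, int n)
{
    int in_flight = -1;                  /* index currently in stage 1 */
    int i;

    for (i = 0; i < n; i++) {
        stage0(items[i]);                /* start work for item i */
        if (in_flight >= 0)
            stage1(items[in_flight]);    /* finish the previous item */
        in_flight = i;                   /* pipeline feed */
    }
    if (in_flight >= 0)
        stage1(items[in_flight]);        /* drain the pipeline */
}

The real code drains its deeper pipeline with the explicit feed/stage sequences that follow the main loop, one per remaining stage.
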
-
-static int rte_table_hash_lru_lookup_dosig(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
- struct grinder *g = t->grinders;
- uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
- uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
- uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
- int status = 0;
-
- /* Cannot run the pipeline with fewer than 7 packets */
- if (__builtin_popcountll(pkts_mask) < 7)
- return rte_table_hash_lru_lookup_unoptimized(table, pkts,
- pkts_mask, lookup_hit_mask, entries, 1);
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline feed */
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline feed */
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /*
- * Pipeline run
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
- pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index,
- pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
- pkts_mask_out, entries);
- }
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Slow path */
- pkts_mask_match_many &= ~pkts_mask_out;
- if (pkts_mask_match_many) {
- uint64_t pkts_mask_out_slow = 0;
-
- status = rte_table_hash_lru_lookup_unoptimized(table, pkts,
- pkts_mask_match_many, &pkts_mask_out_slow, entries, 1);
- pkts_mask_out |= pkts_mask_out_slow;
- }
-
- *lookup_hit_mask = pkts_mask_out;
- return status;
-}
-
-struct rte_table_ops rte_table_hash_lru_ops = {
- .f_create = rte_table_hash_lru_create,
- .f_free = rte_table_hash_lru_free,
- .f_add = rte_table_hash_lru_entry_add,
- .f_delete = rte_table_hash_lru_entry_delete,
- .f_lookup = rte_table_hash_lru_lookup,
-};
-
-struct rte_table_ops rte_table_hash_lru_dosig_ops = {
- .f_create = rte_table_hash_lru_create,
- .f_free = rte_table_hash_lru_free,
- .f_add = rte_table_hash_lru_entry_add,
- .f_delete = rte_table_hash_lru_entry_delete,
- .f_lookup = rte_table_hash_lru_lookup_dosig,
-};
diff --git a/src/dpdk_lib18/librte_table/rte_table_lpm.c b/src/dpdk_lib18/librte_table/rte_table_lpm.c
deleted file mode 100755
index 64c684d0..00000000
--- a/src/dpdk_lib18/librte_table/rte_table_lpm.c
+++ /dev/null
@@ -1,348 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <stdio.h>
-
-#include <rte_common.h>
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_malloc.h>
-#include <rte_byteorder.h>
-#include <rte_log.h>
-#include <rte_lpm.h>
-
-#include "rte_table_lpm.h"
-
-#define RTE_TABLE_LPM_MAX_NEXT_HOPS 256
-
-struct rte_table_lpm {
- /* Input parameters */
- uint32_t entry_size;
- uint32_t entry_unique_size;
- uint32_t n_rules;
- uint32_t offset;
-
- /* Handle to low-level LPM table */
- struct rte_lpm *lpm;
-
- /* Next Hop Table (NHT) */
- uint32_t nht_users[RTE_TABLE_LPM_MAX_NEXT_HOPS];
- uint8_t nht[0] __rte_cache_aligned;
-};
-
-static void *
-rte_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
-{
- struct rte_table_lpm_params *p = (struct rte_table_lpm_params *) params;
- struct rte_table_lpm *lpm;
- uint32_t total_size, nht_size;
-
- /* Check input parameters */
- if (p == NULL) {
- RTE_LOG(ERR, TABLE, "%s: NULL input parameters\n", __func__);
- return NULL;
- }
- if (p->n_rules == 0) {
- RTE_LOG(ERR, TABLE, "%s: Invalid n_rules\n", __func__);
- return NULL;
- }
- if (p->entry_unique_size == 0) {
- RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
- __func__);
- return NULL;
- }
- if (p->entry_unique_size > entry_size) {
- RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
- __func__);
- return NULL;
- }
- if ((p->offset & 0x3) != 0) {
- RTE_LOG(ERR, TABLE, "%s: Invalid offset\n", __func__);
- return NULL;
- }
-
- entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));
-
- /* Memory allocation */
- nht_size = RTE_TABLE_LPM_MAX_NEXT_HOPS * entry_size;
- total_size = sizeof(struct rte_table_lpm) + nht_size;
- lpm = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
- socket_id);
- if (lpm == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for LPM table\n",
- __func__, total_size);
- return NULL;
- }
-
- /* LPM low-level table creation */
- lpm->lpm = rte_lpm_create("LPM", socket_id, p->n_rules, 0);
- if (lpm->lpm == NULL) {
- rte_free(lpm);
- RTE_LOG(ERR, TABLE, "Unable to create low-level LPM table\n");
- return NULL;
- }
-
- /* Memory initialization */
- lpm->entry_size = entry_size;
- lpm->entry_unique_size = p->entry_unique_size;
- lpm->n_rules = p->n_rules;
- lpm->offset = p->offset;
-
- return lpm;
-}
-
-static int
-rte_table_lpm_free(void *table)
-{
- struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
-
- /* Check input parameters */
- if (lpm == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
- /* Free previously allocated resources */
- rte_lpm_free(lpm->lpm);
- rte_free(lpm);
-
- return 0;
-}
-
-static int
-nht_find_free(struct rte_table_lpm *lpm, uint32_t *pos)
-{
- uint32_t i;
-
- for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
- if (lpm->nht_users[i] == 0) {
- *pos = i;
- return 1;
- }
- }
-
- return 0;
-}
-
-static int
-nht_find_existing(struct rte_table_lpm *lpm, void *entry, uint32_t *pos)
-{
- uint32_t i;
-
- for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
- uint8_t *nht_entry = &lpm->nht[i * lpm->entry_size];
-
- if ((lpm->nht_users[i] > 0) && (memcmp(nht_entry, entry,
- lpm->entry_unique_size) == 0)) {
- *pos = i;
- return 1;
- }
- }
-
- return 0;
-}
-
-static int
-rte_table_lpm_entry_add(
- void *table,
- void *key,
- void *entry,
- int *key_found,
- void **entry_ptr)
-{
- struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
- struct rte_table_lpm_key *ip_prefix = (struct rte_table_lpm_key *) key;
- uint32_t nht_pos, nht_pos0_valid;
- int status;
- uint8_t nht_pos0 = 0;
-
- /* Check input parameters */
- if (lpm == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
- if (ip_prefix == NULL) {
- RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
- __func__);
- return -EINVAL;
- }
- if (entry == NULL) {
- RTE_LOG(ERR, TABLE, "%s: entry parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
- if ((ip_prefix->depth == 0) || (ip_prefix->depth > 32)) {
- RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n",
- __func__, ip_prefix->depth);
- return -EINVAL;
- }
-
- /* Check if rule is already present in the table */
- status = rte_lpm_is_rule_present(lpm->lpm, ip_prefix->ip,
- ip_prefix->depth, &nht_pos0);
- nht_pos0_valid = status > 0;
-
- /* Find existing or free NHT entry */
- if (nht_find_existing(lpm, entry, &nht_pos) == 0) {
- uint8_t *nht_entry;
-
- if (nht_find_free(lpm, &nht_pos) == 0) {
- RTE_LOG(ERR, TABLE, "%s: NHT full\n", __func__);
- return -1;
- }
-
- nht_entry = &lpm->nht[nht_pos * lpm->entry_size];
- memcpy(nht_entry, entry, lpm->entry_size);
- }
-
- /* Add rule to low level LPM table */
- if (rte_lpm_add(lpm->lpm, ip_prefix->ip, ip_prefix->depth,
- (uint8_t) nht_pos) < 0) {
- RTE_LOG(ERR, TABLE, "%s: LPM rule add failed\n", __func__);
- return -1;
- }
-
- /* Commit NHT changes */
- lpm->nht_users[nht_pos]++;
- lpm->nht_users[nht_pos0] -= nht_pos0_valid;
-
- *key_found = nht_pos0_valid;
- *entry_ptr = (void *) &lpm->nht[nht_pos * lpm->entry_size];
- return 0;
-}
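
The "Commit NHT changes" step is the core of the reference-counting scheme: rules whose entries compare equal over the first entry_unique_size bytes share a single NHT slot, nht_users[] counts how many LPM rules point at each slot, and replacing a rule decrements the slot the old rule used. A toy model of the same scheme (not the DPDK API; all names here are illustrative):

#include <stdint.h>

#define TOY_SLOTS 256

/* Toy next-hop table: identical entries share one slot, users[] counts
 * how many rules reference each slot. */
struct toy_nht {
    uint32_t users[TOY_SLOTS];
    uint64_t data[TOY_SLOTS];
};

/* Find a slot for entry: reuse a live slot holding the same value, or
 * claim a free one. Returns the slot index, or -1 if the table is full.
 * The caller increments users[pos] once the rule is committed. */
static int toy_nht_get(struct toy_nht *t, uint64_t entry)
{
    int i, free_pos = -1;

    for (i = 0; i < TOY_SLOTS; i++) {
        if (t->users[i] > 0 && t->data[i] == entry)
            return i;                    /* share the existing slot */
        if (t->users[i] == 0 && free_pos < 0)
            free_pos = i;                /* remember first free slot */
    }
    if (free_pos >= 0)
        t->data[free_pos] = entry;       /* claim a free slot */
    return free_pos;
}
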
-
-static int
-rte_table_lpm_entry_delete(
- void *table,
- void *key,
- int *key_found,
- void *entry)
-{
- struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
- struct rte_table_lpm_key *ip_prefix = (struct rte_table_lpm_key *) key;
- uint8_t nht_pos;
- int status;
-
- /* Check input parameters */
- if (lpm == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
- if (ip_prefix == NULL) {
- RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
- __func__);
- return -EINVAL;
- }
- if ((ip_prefix->depth == 0) || (ip_prefix->depth > 32)) {
- RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n", __func__,
- ip_prefix->depth);
- return -EINVAL;
- }
-
- /* Return if rule is not present in the table */
- status = rte_lpm_is_rule_present(lpm->lpm, ip_prefix->ip,
- ip_prefix->depth, &nht_pos);
- if (status < 0) {
- RTE_LOG(ERR, TABLE, "%s: LPM algorithmic error\n", __func__);
- return -1;
- }
- if (status == 0) {
- *key_found = 0;
- return 0;
- }
-
- /* Delete rule from the low-level LPM table */
- status = rte_lpm_delete(lpm->lpm, ip_prefix->ip, ip_prefix->depth);
- if (status) {
- RTE_LOG(ERR, TABLE, "%s: LPM rule delete failed\n", __func__);
- return -1;
- }
-
- /* Commit NHT changes */
- lpm->nht_users[nht_pos]--;
-
- *key_found = 1;
- if (entry)
- memcpy(entry, &lpm->nht[nht_pos * lpm->entry_size],
- lpm->entry_size);
-
- return 0;
-}
-
-static int
-rte_table_lpm_lookup(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
- uint64_t pkts_out_mask = 0;
- uint32_t i;
-
- pkts_out_mask = 0;
- for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX -
- __builtin_clzll(pkts_mask)); i++) {
- uint64_t pkt_mask = 1LLU << i;
-
- if (pkt_mask & pkts_mask) {
- struct rte_mbuf *pkt = pkts[i];
- uint32_t ip = rte_bswap32(
- RTE_MBUF_METADATA_UINT32(pkt, lpm->offset));
- int status;
- uint8_t nht_pos;
-
- status = rte_lpm_lookup(lpm->lpm, ip, &nht_pos);
- if (status == 0) {
- pkts_out_mask |= pkt_mask;
- entries[i] = (void *) &lpm->nht[nht_pos *
- lpm->entry_size];
- }
- }
- }
-
- *lookup_hit_mask = pkts_out_mask;
-
- return 0;
-}
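
The loop bound deserves a note: RTE_PORT_IN_BURST_SIZE_MAX - __builtin_clzll(pkts_mask) is the index of the highest set bit plus one (assuming a 64-packet burst), so the scan stops at the last packet actually present rather than walking all 64 slots. A standalone sketch of the idiom; note that __builtin_clzll(0) is undefined, so an empty mask must be guarded:

#include <stdint.h>
#include <stdio.h>

/* Visit only the set bits of a 64-bit packet mask, bounding the loop by
 * the highest set bit, as in the lookup function above. */
static void scan_mask(uint64_t pkts_mask)
{
    uint32_t i, n;

    if (pkts_mask == 0)
        return;                          /* clzll(0) is undefined */

    n = 64 - __builtin_clzll(pkts_mask); /* highest set bit + 1 */
    for (i = 0; i < n; i++)
        if (pkts_mask & (1LLU << i))
            printf("packet %u present\n", i);
}
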
-
-struct rte_table_ops rte_table_lpm_ops = {
- .f_create = rte_table_lpm_create,
- .f_free = rte_table_lpm_free,
- .f_add = rte_table_lpm_entry_add,
- .f_delete = rte_table_lpm_entry_delete,
- .f_lookup = rte_table_lpm_lookup,
-};
diff --git a/src/dpdk_lib18/librte_table/rte_table_lpm_ipv6.c b/src/dpdk_lib18/librte_table/rte_table_lpm_ipv6.c
deleted file mode 100755
index ce4ddc0b..00000000
--- a/src/dpdk_lib18/librte_table/rte_table_lpm_ipv6.c
+++ /dev/null
@@ -1,362 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <stdio.h>
-
-#include <rte_common.h>
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_malloc.h>
-#include <rte_byteorder.h>
-#include <rte_log.h>
-#include <rte_lpm6.h>
-
-#include "rte_table_lpm_ipv6.h"
-
-#define RTE_TABLE_LPM_MAX_NEXT_HOPS 256
-
-struct rte_table_lpm_ipv6 {
- /* Input parameters */
- uint32_t entry_size;
- uint32_t entry_unique_size;
- uint32_t n_rules;
- uint32_t offset;
-
- /* Handle to low-level LPM table */
- struct rte_lpm6 *lpm;
-
- /* Next Hop Table (NHT) */
- uint32_t nht_users[RTE_TABLE_LPM_MAX_NEXT_HOPS];
- uint8_t nht[0] __rte_cache_aligned;
-};
-
-static void *
-rte_table_lpm_ipv6_create(void *params, int socket_id, uint32_t entry_size)
-{
- struct rte_table_lpm_ipv6_params *p =
- (struct rte_table_lpm_ipv6_params *) params;
- struct rte_table_lpm_ipv6 *lpm;
- struct rte_lpm6_config lpm6_config;
- uint32_t total_size, nht_size;
-
- /* Check input parameters */
- if (p == NULL) {
- RTE_LOG(ERR, TABLE, "%s: NULL input parameters\n", __func__);
- return NULL;
- }
- if (p->n_rules == 0) {
- RTE_LOG(ERR, TABLE, "%s: Invalid n_rules\n", __func__);
- return NULL;
- }
- if (p->number_tbl8s == 0) {
- RTE_LOG(ERR, TABLE, "%s: Invalid number_tbl8s\n", __func__);
- return NULL;
- }
- if (p->entry_unique_size == 0) {
- RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
- __func__);
- return NULL;
- }
- if (p->entry_unique_size > entry_size) {
- RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
- __func__);
- return NULL;
- }
- if ((p->offset & 0x3) != 0) {
- RTE_LOG(ERR, TABLE, "%s: Invalid offset\n", __func__);
- return NULL;
- }
-
- entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));
-
- /* Memory allocation */
- nht_size = RTE_TABLE_LPM_MAX_NEXT_HOPS * entry_size;
- total_size = sizeof(struct rte_table_lpm_ipv6) + nht_size;
- lpm = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
- socket_id);
- if (lpm == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for LPM IPv6 table\n",
- __func__, total_size);
- return NULL;
- }
-
- /* LPM low-level table creation */
- lpm6_config.max_rules = p->n_rules;
- lpm6_config.number_tbl8s = p->number_tbl8s;
- lpm6_config.flags = 0;
- lpm->lpm = rte_lpm6_create("LPM IPv6", socket_id, &lpm6_config);
- if (lpm->lpm == NULL) {
- rte_free(lpm);
- RTE_LOG(ERR, TABLE,
- "Unable to create low-level LPM IPv6 table\n");
- return NULL;
- }
-
- /* Memory initialization */
- lpm->entry_size = entry_size;
- lpm->entry_unique_size = p->entry_unique_size;
- lpm->n_rules = p->n_rules;
- lpm->offset = p->offset;
-
- return lpm;
-}
-
-static int
-rte_table_lpm_ipv6_free(void *table)
-{
- struct rte_table_lpm_ipv6 *lpm = (struct rte_table_lpm_ipv6 *) table;
-
- /* Check input parameters */
- if (lpm == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
- /* Free previously allocated resources */
- rte_lpm6_free(lpm->lpm);
- rte_free(lpm);
-
- return 0;
-}
-
-static int
-nht_find_free(struct rte_table_lpm_ipv6 *lpm, uint32_t *pos)
-{
- uint32_t i;
-
- for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
- if (lpm->nht_users[i] == 0) {
- *pos = i;
- return 1;
- }
- }
-
- return 0;
-}
-
-static int
-nht_find_existing(struct rte_table_lpm_ipv6 *lpm, void *entry, uint32_t *pos)
-{
- uint32_t i;
-
- for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
- uint8_t *nht_entry = &lpm->nht[i * lpm->entry_size];
-
- if ((lpm->nht_users[i] > 0) && (memcmp(nht_entry, entry,
- lpm->entry_unique_size) == 0)) {
- *pos = i;
- return 1;
- }
- }
-
- return 0;
-}
-
-static int
-rte_table_lpm_ipv6_entry_add(
- void *table,
- void *key,
- void *entry,
- int *key_found,
- void **entry_ptr)
-{
- struct rte_table_lpm_ipv6 *lpm = (struct rte_table_lpm_ipv6 *) table;
- struct rte_table_lpm_ipv6_key *ip_prefix =
- (struct rte_table_lpm_ipv6_key *) key;
- uint32_t nht_pos, nht_pos0_valid;
- int status;
- uint8_t nht_pos0 = 0; /* initialized: indexes nht_users even on miss */
-
- /* Check input parameters */
- if (lpm == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
- if (ip_prefix == NULL) {
- RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
- __func__);
- return -EINVAL;
- }
- if (entry == NULL) {
- RTE_LOG(ERR, TABLE, "%s: entry parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
- if ((ip_prefix->depth == 0) || (ip_prefix->depth > 128)) {
- RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n", __func__,
- ip_prefix->depth);
- return -EINVAL;
- }
-
- /* Check if rule is already present in the table */
- status = rte_lpm6_is_rule_present(lpm->lpm, ip_prefix->ip,
- ip_prefix->depth, &nht_pos0);
- nht_pos0_valid = status > 0;
-
- /* Find existing or free NHT entry */
- if (nht_find_existing(lpm, entry, &nht_pos) == 0) {
- uint8_t *nht_entry;
-
- if (nht_find_free(lpm, &nht_pos) == 0) {
- RTE_LOG(ERR, TABLE, "%s: NHT full\n", __func__);
- return -1;
- }
-
- nht_entry = &lpm->nht[nht_pos * lpm->entry_size];
- memcpy(nht_entry, entry, lpm->entry_size);
- }
-
- /* Add rule to low level LPM table */
- if (rte_lpm6_add(lpm->lpm, ip_prefix->ip, ip_prefix->depth,
- (uint8_t) nht_pos) < 0) {
- RTE_LOG(ERR, TABLE, "%s: LPM IPv6 rule add failed\n", __func__);
- return -1;
- }
-
- /* Commit NHT changes */
- lpm->nht_users[nht_pos]++;
- lpm->nht_users[nht_pos0] -= nht_pos0_valid;
-
- *key_found = nht_pos0_valid;
- *entry_ptr = (void *) &lpm->nht[nht_pos * lpm->entry_size];
- return 0;
-}
-
-static int
-rte_table_lpm_ipv6_entry_delete(
- void *table,
- void *key,
- int *key_found,
- void *entry)
-{
- struct rte_table_lpm_ipv6 *lpm = (struct rte_table_lpm_ipv6 *) table;
- struct rte_table_lpm_ipv6_key *ip_prefix =
- (struct rte_table_lpm_ipv6_key *) key;
- uint8_t nht_pos;
- int status;
-
- /* Check input parameters */
- if (lpm == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
- if (ip_prefix == NULL) {
- RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
- __func__);
- return -EINVAL;
- }
- if ((ip_prefix->depth == 0) || (ip_prefix->depth > 128)) {
- RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n", __func__,
- ip_prefix->depth);
- return -EINVAL;
- }
-
- /* Return if rule is not present in the table */
- status = rte_lpm6_is_rule_present(lpm->lpm, ip_prefix->ip,
- ip_prefix->depth, &nht_pos);
- if (status < 0) {
- RTE_LOG(ERR, TABLE, "%s: LPM IPv6 algorithmic error\n",
- __func__);
- return -1;
- }
- if (status == 0) {
- *key_found = 0;
- return 0;
- }
-
- /* Delete rule from the low-level LPM table */
- status = rte_lpm6_delete(lpm->lpm, ip_prefix->ip, ip_prefix->depth);
- if (status) {
- RTE_LOG(ERR, TABLE, "%s: LPM IPv6 rule delete failed\n",
- __func__);
- return -1;
- }
-
- /* Commit NHT changes */
- lpm->nht_users[nht_pos]--;
-
- *key_found = 1;
- if (entry)
- memcpy(entry, &lpm->nht[nht_pos * lpm->entry_size],
- lpm->entry_size);
-
- return 0;
-}
-
-static int
-rte_table_lpm_ipv6_lookup(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_lpm_ipv6 *lpm = (struct rte_table_lpm_ipv6 *) table;
- uint64_t pkts_out_mask = 0;
- uint32_t i;
-
- pkts_out_mask = 0;
- for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX -
- __builtin_clzll(pkts_mask)); i++) {
- uint64_t pkt_mask = 1LLU << i;
-
- if (pkt_mask & pkts_mask) {
- struct rte_mbuf *pkt = pkts[i];
- uint8_t *ip = RTE_MBUF_METADATA_UINT8_PTR(pkt,
- lpm->offset);
- int status;
- uint8_t nht_pos;
-
- status = rte_lpm6_lookup(lpm->lpm, ip, &nht_pos);
- if (status == 0) {
- pkts_out_mask |= pkt_mask;
- entries[i] = (void *) &lpm->nht[nht_pos *
- lpm->entry_size];
- }
- }
- }
-
- *lookup_hit_mask = pkts_out_mask;
-
- return 0;
-}
-
-struct rte_table_ops rte_table_lpm_ipv6_ops = {
- .f_create = rte_table_lpm_ipv6_create,
- .f_free = rte_table_lpm_ipv6_free,
- .f_add = rte_table_lpm_ipv6_entry_add,
- .f_delete = rte_table_lpm_ipv6_entry_delete,
- .f_lookup = rte_table_lpm_ipv6_lookup,
-};
diff --git a/src/dpdk_lib18/librte_table/rte_table_stub.c b/src/dpdk_lib18/librte_table/rte_table_stub.c
deleted file mode 100755
index 876b7e49..00000000
--- a/src/dpdk_lib18/librte_table/rte_table_stub.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <rte_mbuf.h>
-
-#include "rte_table_stub.h"
-
-static void *
-rte_table_stub_create(__rte_unused void *params,
- __rte_unused int socket_id,
- __rte_unused uint32_t entry_size)
-{
- return (void *) 1;
-}
-
-static int
-rte_table_stub_lookup(
- __rte_unused void *table,
- __rte_unused struct rte_mbuf **pkts,
- __rte_unused uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- __rte_unused void **entries)
-{
- *lookup_hit_mask = 0;
-
- return 0;
-}
-
-struct rte_table_ops rte_table_stub_ops = {
- .f_create = rte_table_stub_create,
- .f_free = NULL,
- .f_add = NULL,
- .f_delete = NULL,
- .f_lookup = rte_table_stub_lookup,
-};
diff --git a/src/dpdk_lib18/librte_timer/Makefile b/src/dpdk_lib18/librte_timer/Makefile
deleted file mode 100755
index 07eb0c63..00000000
--- a/src/dpdk_lib18/librte_timer/Makefile
+++ /dev/null
@@ -1,48 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_timer.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_TIMER) := rte_timer.c
-
-# install this header file
-SYMLINK-$(CONFIG_RTE_LIBRTE_TIMER)-include := rte_timer.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TIMER) += lib/librte_eal
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_timer/rte_timer.c b/src/dpdk_lib18/librte_timer/rte_timer.c
deleted file mode 100755
index 269a992b..00000000
--- a/src/dpdk_lib18/librte_timer/rte_timer.c
+++ /dev/null
@@ -1,610 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <inttypes.h>
-
-#include <rte_atomic.h>
-#include <rte_common.h>
-#include <rte_cycles.h>
-#include <rte_per_lcore.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_launch.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_branch_prediction.h>
-#include <rte_spinlock.h>
-#include <rte_random.h>
-
-#include "rte_timer.h"
-
-LIST_HEAD(rte_timer_list, rte_timer);
-
-struct priv_timer {
- struct rte_timer pending_head; /**< dummy timer instance heading the list */
- rte_spinlock_t list_lock; /**< lock to protect list access */
-
- /** per-core variable that is true if a timer was updated on this
- * core since the last reset of the variable */
- int updated;
-
- /** track the current depth of the skiplist */
- unsigned curr_skiplist_depth;
-
- unsigned prev_lcore; /**< used for lcore round robin */
-
-#ifdef RTE_LIBRTE_TIMER_DEBUG
- /** per-lcore statistics */
- struct rte_timer_debug_stats stats;
-#endif
-} __rte_cache_aligned;
-
-/** per-lcore private info for timers */
-static struct priv_timer priv_timer[RTE_MAX_LCORE];
-
-/* when debug is enabled, store some statistics */
-#ifdef RTE_LIBRTE_TIMER_DEBUG
-#define __TIMER_STAT_ADD(name, n) do { \
- unsigned __lcore_id = rte_lcore_id(); \
- priv_timer[__lcore_id].stats.name += (n); \
- } while(0)
-#else
-#define __TIMER_STAT_ADD(name, n) do {} while(0)
-#endif
-
-/* Init the timer library. */
-void
-rte_timer_subsystem_init(void)
-{
- unsigned lcore_id;
-
- /* since priv_timer is static, it's zeroed by default, so only init some
- * fields.
- */
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- rte_spinlock_init(&priv_timer[lcore_id].list_lock);
- priv_timer[lcore_id].prev_lcore = lcore_id;
- }
-}
-
-/* Initialize the timer handle tim for use */
-void
-rte_timer_init(struct rte_timer *tim)
-{
- union rte_timer_status status;
-
- status.state = RTE_TIMER_STOP;
- status.owner = RTE_TIMER_NO_OWNER;
- tim->status.u32 = status.u32;
-}
-
-/*
- * if the timer is pending or stopped (or running on the same core as
- * us), mark the timer as being configured, and on success return the
- * previous status of the timer
- */
-static int
-timer_set_config_state(struct rte_timer *tim,
- union rte_timer_status *ret_prev_status)
-{
- union rte_timer_status prev_status, status;
- int success = 0;
- unsigned lcore_id;
-
- lcore_id = rte_lcore_id();
-
- /* wait until the timer is in a correct state before updating it,
- * and mark it as being configured */
- while (success == 0) {
- prev_status.u32 = tim->status.u32;
-
- /* timer is running on another core, exit */
- if (prev_status.state == RTE_TIMER_RUNNING &&
- (unsigned)prev_status.owner != lcore_id)
- return -1;
-
- /* timer is being configured on another core */
- if (prev_status.state == RTE_TIMER_CONFIG)
- return -1;
-
- /* here, we know that timer is stopped or pending,
- * mark it atomically as being configured */
- status.state = RTE_TIMER_CONFIG;
- status.owner = (int16_t)lcore_id;
- success = rte_atomic32_cmpset(&tim->status.u32,
- prev_status.u32,
- status.u32);
- }
-
- ret_prev_status->u32 = prev_status.u32;
- return 0;
-}
-
-/*
- * if timer is pending, mark timer as running
- */
-static int
-timer_set_running_state(struct rte_timer *tim)
-{
- union rte_timer_status prev_status, status;
- unsigned lcore_id = rte_lcore_id();
- int success = 0;
-
- /* wait until the timer is in a correct state before updating it,
- * and mark it as running */
- while (success == 0) {
- prev_status.u32 = tim->status.u32;
-
- /* timer is not pending anymore */
- if (prev_status.state != RTE_TIMER_PENDING)
- return -1;
-
- /* here, we know that the timer is pending,
- * mark it atomically as running */
- status.state = RTE_TIMER_RUNNING;
- status.owner = (int16_t)lcore_id;
- success = rte_atomic32_cmpset(&tim->status.u32,
- prev_status.u32,
- status.u32);
- }
-
- return 0;
-}
-
-/*
- * Return a skiplist level for a new entry.
- * This probabilistically gives a level such that an entry at level n
- * will also appear at level n+1 with probability p=1/4.
- */
-static uint32_t
-timer_get_skiplist_level(unsigned curr_depth)
-{
-#ifdef RTE_LIBRTE_TIMER_DEBUG
- static uint32_t i, count = 0;
- static uint32_t levels[MAX_SKIPLIST_DEPTH] = {0};
-#endif
-
- /* probability value is 1/4, i.e. all at level 0, 1 in 4 is at level 1,
- * 1 in 16 at level 2, 1 in 64 at level 3, etc. Calculated using lowest
- * bit position of a (pseudo)random number.
- */
- uint32_t rand = rte_rand() & (UINT32_MAX - 1);
- uint32_t level = rand == 0 ? MAX_SKIPLIST_DEPTH : (rte_bsf32(rand)-1) / 2;
-
- /* limit the levels used to one above our current level, so we don't,
- * for instance, have a level 0 and a level 7 without anything between
- */
- if (level > curr_depth)
- level = curr_depth;
- if (level >= MAX_SKIPLIST_DEPTH)
- level = MAX_SKIPLIST_DEPTH-1;
-#ifdef RTE_LIBRTE_TIMER_DEBUG
- count ++;
- levels[level]++;
- if (count % 10000 == 0)
- for (i = 0; i < MAX_SKIPLIST_DEPTH; i++)
- printf("Level %u: %u\n", (unsigned)i, (unsigned)levels[i]);
-#endif
- return level;
-}
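
The arithmetic works out as follows: the lowest set bit of a uniform random word sits at position b with probability 2^-(b+1), so mapping b to level b/2 gives P(level >= n) = 4^-n, the stated 1-in-4 promotion rate. A simplified standalone variant, with rand() standing in for rte_rand() and __builtin_ctz for rte_bsf32 (a sketch, not the exact DPDK computation):

#include <stdint.h>
#include <stdlib.h>

#define TOY_MAX_DEPTH 10

/* Draw a skiplist level with P(level >= n) = 4^-n, capped at the
 * maximum depth. */
static uint32_t toy_skiplist_level(void)
{
    uint32_t r = (uint32_t)rand() | 0x80000000u; /* guarantee a set bit */
    uint32_t b = (uint32_t)__builtin_ctz(r);     /* lowest set bit pos */
    uint32_t level = b / 2;

    return (level >= TOY_MAX_DEPTH) ? TOY_MAX_DEPTH - 1 : level;
}
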
-
-/*
- * For a given time value, get the entries at each level which
- * are <= that time value.
- */
-static void
-timer_get_prev_entries(uint64_t time_val, unsigned tim_lcore,
- struct rte_timer **prev)
-{
- unsigned lvl = priv_timer[tim_lcore].curr_skiplist_depth;
- prev[lvl] = &priv_timer[tim_lcore].pending_head;
- while (lvl != 0) {
- lvl--;
- prev[lvl] = prev[lvl+1];
- while (prev[lvl]->sl_next[lvl] &&
- prev[lvl]->sl_next[lvl]->expire <= time_val)
- prev[lvl] = prev[lvl]->sl_next[lvl];
- }
-}
-
-/*
- * Given a timer node in the skiplist, find the previous entries for it at
- * all skiplist levels.
- */
-static void
-timer_get_prev_entries_for_node(struct rte_timer *tim, unsigned tim_lcore,
- struct rte_timer **prev)
-{
- int i;
- /* to get a specific entry in the list, look for entries just below its
- * expiry time, and then advance on each level individually as necessary
- */
- timer_get_prev_entries(tim->expire - 1, tim_lcore, prev);
- for (i = priv_timer[tim_lcore].curr_skiplist_depth - 1; i >= 0; i--) {
- while (prev[i]->sl_next[i] != NULL &&
- prev[i]->sl_next[i] != tim &&
- prev[i]->sl_next[i]->expire <= tim->expire)
- prev[i] = prev[i]->sl_next[i];
- }
-}
-
-/*
- * add in list, lock if needed
- * timer must be in config state
- * timer must not be in a list
- */
-static void
-timer_add(struct rte_timer *tim, unsigned tim_lcore, int local_is_locked)
-{
- unsigned lcore_id = rte_lcore_id();
- unsigned lvl;
- struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];
-
- /* if the timer needs to be scheduled on another core, we need to
- * lock that core's list; if it is on the local core, we need to lock
- * only if we are not called from rte_timer_manage() */
- if (tim_lcore != lcore_id || !local_is_locked)
- rte_spinlock_lock(&priv_timer[tim_lcore].list_lock);
-
- /* find exactly where this element goes in the list of elements
- * at each depth. */
- timer_get_prev_entries(tim->expire, tim_lcore, prev);
-
- /* now assign it a new level and add at that level */
- const unsigned tim_level = timer_get_skiplist_level(
- priv_timer[tim_lcore].curr_skiplist_depth);
- if (tim_level == priv_timer[tim_lcore].curr_skiplist_depth)
- priv_timer[tim_lcore].curr_skiplist_depth++;
-
- lvl = tim_level;
- while (lvl > 0) {
- tim->sl_next[lvl] = prev[lvl]->sl_next[lvl];
- prev[lvl]->sl_next[lvl] = tim;
- lvl--;
- }
- tim->sl_next[0] = prev[0]->sl_next[0];
- prev[0]->sl_next[0] = tim;
-
- /* save the lowest list entry into the expire field of the dummy hdr.
- * NOTE: this is not atomic on 32-bit */
- priv_timer[tim_lcore].pending_head.expire = priv_timer[tim_lcore].\
- pending_head.sl_next[0]->expire;
-
- if (tim_lcore != lcore_id || !local_is_locked)
- rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock);
-}
-
-/*
- * del from list, lock if needed
- * timer must be in config state
- * timer must be in a list
- */
-static void
-timer_del(struct rte_timer *tim, union rte_timer_status prev_status,
- int local_is_locked)
-{
- unsigned lcore_id = rte_lcore_id();
- unsigned prev_owner = prev_status.owner;
- int i;
- struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];
-
- /* if the timer is pending on another core, we need to lock that
- * core's list; if it is on the local core, we need to lock only if
- * we are not called from rte_timer_manage() */
- if (prev_owner != lcore_id || !local_is_locked)
- rte_spinlock_lock(&priv_timer[prev_owner].list_lock);
-
- /* save the lowest list entry into the expire field of the dummy hdr.
- * NOTE: this is not atomic on 32-bit */
- if (tim == priv_timer[prev_owner].pending_head.sl_next[0])
- priv_timer[prev_owner].pending_head.expire =
- ((tim->sl_next[0] == NULL) ? 0 : tim->sl_next[0]->expire);
-
- /* adjust pointers from previous entries to point past this */
- timer_get_prev_entries_for_node(tim, prev_owner, prev);
- for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--) {
- if (prev[i]->sl_next[i] == tim)
- prev[i]->sl_next[i] = tim->sl_next[i];
- }
-
- /* in case we deleted the last entry at a level, adjust the max level down */
- for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--)
- if (priv_timer[prev_owner].pending_head.sl_next[i] == NULL)
- priv_timer[prev_owner].curr_skiplist_depth--;
- else
- break;
-
- if (prev_owner != lcore_id || !local_is_locked)
- rte_spinlock_unlock(&priv_timer[prev_owner].list_lock);
-}
-
-/* Reset and start the timer associated with the timer handle (private func) */
-static int
-__rte_timer_reset(struct rte_timer *tim, uint64_t expire,
- uint64_t period, unsigned tim_lcore,
- rte_timer_cb_t fct, void *arg,
- int local_is_locked)
-{
- union rte_timer_status prev_status, status;
- int ret;
- unsigned lcore_id = rte_lcore_id();
-
- /* round robin for tim_lcore */
- if (tim_lcore == (unsigned)LCORE_ID_ANY) {
- tim_lcore = rte_get_next_lcore(priv_timer[lcore_id].prev_lcore,
- 0, 1);
- priv_timer[lcore_id].prev_lcore = tim_lcore;
- }
-
- /* wait until the timer is in a correct state before updating it,
- * and mark it as being configured */
- ret = timer_set_config_state(tim, &prev_status);
- if (ret < 0)
- return -1;
-
- __TIMER_STAT_ADD(reset, 1);
- if (prev_status.state == RTE_TIMER_RUNNING) {
- priv_timer[lcore_id].updated = 1;
- }
-
- /* remove it from list */
- if (prev_status.state == RTE_TIMER_PENDING) {
- timer_del(tim, prev_status, local_is_locked);
- __TIMER_STAT_ADD(pending, -1);
- }
-
- tim->period = period;
- tim->expire = expire;
- tim->f = fct;
- tim->arg = arg;
-
- __TIMER_STAT_ADD(pending, 1);
- timer_add(tim, tim_lcore, local_is_locked);
-
- /* update state: as we are in CONFIG state, only we can modify
- * the state, so we don't need to use cmpset() here */
- rte_wmb();
- status.state = RTE_TIMER_PENDING;
- status.owner = (int16_t)tim_lcore;
- tim->status.u32 = status.u32;
-
- return 0;
-}
-
-/* Reset and start the timer associated with the timer handle tim */
-int
-rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
- enum rte_timer_type type, unsigned tim_lcore,
- rte_timer_cb_t fct, void *arg)
-{
- uint64_t cur_time = rte_get_timer_cycles();
- uint64_t period;
-
- if (unlikely((tim_lcore != (unsigned)LCORE_ID_ANY) &&
- !rte_lcore_is_enabled(tim_lcore)))
- return -1;
-
- if (type == PERIODICAL)
- period = ticks;
- else
- period = 0;
-
- __rte_timer_reset(tim, cur_time + ticks, period, tim_lcore,
- fct, arg, 0);
-
- return 0;
-}
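
For context, a typical arming sequence (a hedged usage sketch, assuming an initialized EAL; rte_get_timer_hz() is the standard cycles-per-second helper): initialize the subsystem once, initialize the handle, then reset the timer onto a target lcore.

/* Sketch: arm a periodic timer firing about once per second on lcore 1. */
static void example_cb(struct rte_timer *tim, void *arg)
{
    (void)tim;
    (void)arg;
    /* work performed on every expiry */
}

static struct rte_timer example_tim;

static void example_arm(void)
{
    uint64_t hz = rte_get_timer_hz();    /* timer cycles per second */

    rte_timer_subsystem_init();
    rte_timer_init(&example_tim);
    if (rte_timer_reset(&example_tim, hz, PERIODICAL, 1,
            example_cb, NULL) != 0) {
        /* timer was RUNNING or CONFIG on another core; retry, or use
         * rte_timer_reset_sync() */
    }
}
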
-
-/* loop until rte_timer_reset() succeeds */
-void
-rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
- enum rte_timer_type type, unsigned tim_lcore,
- rte_timer_cb_t fct, void *arg)
-{
- while (rte_timer_reset(tim, ticks, type, tim_lcore,
- fct, arg) != 0);
-}
-
-/* Stop the timer associated with the timer handle tim */
-int
-rte_timer_stop(struct rte_timer *tim)
-{
- union rte_timer_status prev_status, status;
- unsigned lcore_id = rte_lcore_id();
- int ret;
-
- /* wait until the timer is in a correct state before updating it,
- * and mark it as being configured */
- ret = timer_set_config_state(tim, &prev_status);
- if (ret < 0)
- return -1;
-
- __TIMER_STAT_ADD(stop, 1);
- if (prev_status.state == RTE_TIMER_RUNNING) {
- priv_timer[lcore_id].updated = 1;
- }
-
- /* remove it from list */
- if (prev_status.state == RTE_TIMER_PENDING) {
- timer_del(tim, prev_status, 0);
- __TIMER_STAT_ADD(pending, -1);
- }
-
- /* mark timer as stopped */
- rte_wmb();
- status.state = RTE_TIMER_STOP;
- status.owner = RTE_TIMER_NO_OWNER;
- tim->status.u32 = status.u32;
-
- return 0;
-}
-
-/* loop until rte_timer_stop() succeeds */
-void
-rte_timer_stop_sync(struct rte_timer *tim)
-{
- while (rte_timer_stop(tim) != 0)
- rte_pause();
-}
-
-/* Test the PENDING status of the timer handle tim */
-int
-rte_timer_pending(struct rte_timer *tim)
-{
- return tim->status.state == RTE_TIMER_PENDING;
-}
-
-/* must be called periodically; runs all timers that have expired */
-void rte_timer_manage(void)
-{
- union rte_timer_status status;
- struct rte_timer *tim, *next_tim;
- unsigned lcore_id = rte_lcore_id();
- struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
- uint64_t cur_time;
- int i, ret;
-
- __TIMER_STAT_ADD(manage, 1);
- /* optimize for the case where the per-lcore list is empty */
- if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL)
- return;
- cur_time = rte_get_timer_cycles();
-
-#ifdef RTE_ARCH_X86_64
- /* on 64-bit, the value cached in the pending_head.expire field is updated
- * atomically, so we can consult it for a quick check here outside the
- * lock */
- if (likely(priv_timer[lcore_id].pending_head.expire > cur_time))
- return;
-#endif
-
- /* browse ordered list, add expired timers in 'expired' list */
- rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
-
- /* if nothing to do just unlock and return */
- if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL ||
- priv_timer[lcore_id].pending_head.sl_next[0]->expire > cur_time)
- goto done;
-
- /* save start of list of expired timers */
- tim = priv_timer[lcore_id].pending_head.sl_next[0];
-
- /* break the existing list at current time point */
- timer_get_prev_entries(cur_time, lcore_id, prev);
- for (i = priv_timer[lcore_id].curr_skiplist_depth -1; i >= 0; i--) {
- priv_timer[lcore_id].pending_head.sl_next[i] = prev[i]->sl_next[i];
- if (prev[i]->sl_next[i] == NULL)
- priv_timer[lcore_id].curr_skiplist_depth--;
- prev[i]->sl_next[i] = NULL;
- }
-
- /* now scan expired list and call callbacks */
- for ( ; tim != NULL; tim = next_tim) {
- next_tim = tim->sl_next[0];
-
- ret = timer_set_running_state(tim);
-
- /* this timer was not pending, continue */
- if (ret < 0)
- continue;
-
- rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
-
- priv_timer[lcore_id].updated = 0;
-
- /* execute callback function with list unlocked */
- tim->f(tim, tim->arg);
-
- rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
- __TIMER_STAT_ADD(pending, -1);
- /* the timer was stopped or reloaded by the callback
- * function; we have nothing to do here */
- if (priv_timer[lcore_id].updated == 1)
- continue;
-
- if (tim->period == 0) {
- /* remove from done list and mark timer as stopped */
- status.state = RTE_TIMER_STOP;
- status.owner = RTE_TIMER_NO_OWNER;
- rte_wmb();
- tim->status.u32 = status.u32;
- }
- else {
- /* keep it in list and mark timer as pending */
- status.state = RTE_TIMER_PENDING;
- __TIMER_STAT_ADD(pending, 1);
- status.owner = (int16_t)lcore_id;
- rte_wmb();
- tim->status.u32 = status.u32;
- __rte_timer_reset(tim, cur_time + tim->period,
- tim->period, lcore_id, tim->f, tim->arg, 1);
- }
- }
-
- /* update the next to expire timer value */
- priv_timer[lcore_id].pending_head.expire =
- (priv_timer[lcore_id].pending_head.sl_next[0] == NULL) ? 0 :
- priv_timer[lcore_id].pending_head.sl_next[0]->expire;
-done:
- /* job finished, unlock the list lock */
- rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
-}
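
Callbacks only ever run from inside rte_timer_manage(), so every lcore that owns timers must call it from its main loop. A hedged sketch of such a loop (TIMER_RESOLUTION_CYCLES is an assumed application-chosen constant, not part of this library):

#define TIMER_RESOLUTION_CYCLES 20000ULL /* assumed app-level constant */

static int lcore_mainloop(__rte_unused void *arg)
{
    uint64_t prev_tsc = 0, cur_tsc;

    for (;;) {
        cur_tsc = rte_get_timer_cycles();
        if (cur_tsc - prev_tsc > TIMER_RESOLUTION_CYCLES) {
            rte_timer_manage();          /* run expired callbacks */
            prev_tsc = cur_tsc;
        }
        /* ... other per-lcore work, e.g. packet processing ... */
    }
    return 0;
}
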
-
-/* dump statistics about timers */
-void rte_timer_dump_stats(FILE *f)
-{
-#ifdef RTE_LIBRTE_TIMER_DEBUG
- struct rte_timer_debug_stats sum;
- unsigned lcore_id;
-
- memset(&sum, 0, sizeof(sum));
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- sum.reset += priv_timer[lcore_id].stats.reset;
- sum.stop += priv_timer[lcore_id].stats.stop;
- sum.manage += priv_timer[lcore_id].stats.manage;
- sum.pending += priv_timer[lcore_id].stats.pending;
- }
- fprintf(f, "Timer statistics:\n");
- fprintf(f, " reset = %"PRIu64"\n", sum.reset);
- fprintf(f, " stop = %"PRIu64"\n", sum.stop);
- fprintf(f, " manage = %"PRIu64"\n", sum.manage);
- fprintf(f, " pending = %"PRIu64"\n", sum.pending);
-#else
- fprintf(f, "No timer statistics, RTE_LIBRTE_TIMER_DEBUG is disabled\n");
-#endif
-}
diff --git a/src/dpdk_lib18/librte_timer/rte_timer.h b/src/dpdk_lib18/librte_timer/rte_timer.h
deleted file mode 100755
index 4907cf5a..00000000
--- a/src/dpdk_lib18/librte_timer/rte_timer.h
+++ /dev/null
@@ -1,335 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_TIMER_H_
-#define _RTE_TIMER_H_
-
-/**
- * @file
- RTE Timer
- *
- * This library provides a timer service to RTE Data Plane execution
- * units that allows the execution of callback functions asynchronously.
- *
- * - Timers can be periodic or single (one-shot).
- * - The timers can be loaded from one core and executed on another. This has
- * to be specified in the call to rte_timer_reset().
- * - High precision is possible. NOTE: this depends on the frequency of calls
- * to rte_timer_manage(), which checks timer expiration for the local core.
- * - If not used in an application, it can be disabled at compilation time
- * simply by not calling rte_timer_manage(), which improves performance.
- *
- * The timer library uses the rte_get_hpet_cycles() function, which relies
- * on the HPET, when available, to provide a reliable time reference. [HPET
- * routines are provided by the EAL, which falls back to the chip's TSC
- * (time-stamp counter) when the HPET is not available.]
- *
- * This library provides an interface to add, delete and restart a
- * timer. The API is based on the BSD callout(9) API with a few
- * differences.
- *
- * See the RTE architecture documentation for more information about the
- * design of this library.
- */
-
-#include <stdio.h>
-#include <stdint.h>
-#include <stddef.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define RTE_TIMER_STOP 0 /**< State: timer is stopped. */
-#define RTE_TIMER_PENDING 1 /**< State: timer is scheduled. */
-#define RTE_TIMER_RUNNING 2 /**< State: timer function is running. */
-#define RTE_TIMER_CONFIG 3 /**< State: timer is being configured. */
-
-#define RTE_TIMER_NO_OWNER -1 /**< Timer has no owner. */
-
-/**
- * Timer type: Periodic or single (one-shot).
- */
-enum rte_timer_type {
- SINGLE,
- PERIODICAL
-};
-
-/**
- * Timer status: A union of the state (stopped, pending, running,
- * config) and an owner (the id of the lcore that owns the timer).
- */
-union rte_timer_status {
- struct {
- uint16_t state; /**< Stop, pending, running, config. */
- int16_t owner; /**< The lcore that owns the timer. */
- };
- uint32_t u32; /**< To atomic-set status + owner. */
-};
-
-#ifdef RTE_LIBRTE_TIMER_DEBUG
-/**
- * A structure that stores the timer statistics (per-lcore).
- */
-struct rte_timer_debug_stats {
- uint64_t reset; /**< Number of successful calls to rte_timer_reset(). */
- uint64_t stop; /**< Number of successful calls to rte_timer_stop(). */
- uint64_t manage; /**< Number of calls to rte_timer_manage(). */
- uint64_t pending; /**< Number of pending/running timers. */
-};
-#endif
-
-struct rte_timer;
-
-/**
- * Callback function type for timer expiry.
- */
-typedef void (rte_timer_cb_t)(struct rte_timer *, void *);
-
-#define MAX_SKIPLIST_DEPTH 10
-
-/**
- * A structure describing a timer in RTE.
- */
-struct rte_timer
-{
- uint64_t expire; /**< Time when the timer expires. */
- struct rte_timer *sl_next[MAX_SKIPLIST_DEPTH];
- volatile union rte_timer_status status; /**< Status of timer. */
- uint64_t period; /**< Period of timer (0 if not periodic). */
- rte_timer_cb_t *f; /**< Callback function. */
- void *arg; /**< Argument to callback function. */
-};
-
-
-#ifdef __cplusplus
-/**
- * A C++ static initializer for a timer structure.
- */
-#define RTE_TIMER_INITIALIZER { \
- 0, \
- {NULL}, \
- {{RTE_TIMER_STOP, RTE_TIMER_NO_OWNER}}, \
- 0, \
- NULL, \
- NULL, \
- }
-#else
-/**
- * A static initializer for a timer structure.
- */
-#define RTE_TIMER_INITIALIZER { \
- .status = {{ \
- .state = RTE_TIMER_STOP, \
- .owner = RTE_TIMER_NO_OWNER, \
- }}, \
- }
-#endif
-
-/**
- * Initialize the timer library.
- *
- * Initializes internal variables (list, locks and so on) for the RTE
- * timer library.
- */
-void rte_timer_subsystem_init(void);
-
-/**
- * Initialize a timer handle.
- *
- * The rte_timer_init() function initializes the timer handle *tim*
- * for use. No operations can be performed on a timer before it is
- * initialized.
- *
- * @param tim
- * The timer to initialize.
- */
-void rte_timer_init(struct rte_timer *tim);
-
-/**
- * Reset and start the timer associated with the timer handle.
- *
- * The rte_timer_reset() function resets and starts the timer
- * associated with the timer handle *tim*. When the timer expires after
- * *ticks* HPET cycles, the function specified by *fct* will be called
- * with the argument *arg* on core *tim_lcore*.
- *
- * If the timer associated with the timer handle is already running
- * (in the RUNNING state), the function will fail. The user has to check
- * the return value of the function to detect that case.
- *
- * If the timer is being configured on another core (the CONFIG state),
- * it will also fail.
- *
- * If the timer is pending or stopped, it will be rescheduled with the
- * new parameters.
- *
- * @param tim
- * The timer handle.
- * @param ticks
- * The number of cycles (see rte_get_hpet_hz()) before the callback
- * function is called.
- * @param type
- * The type can be either:
- * - PERIODICAL: The timer is automatically reloaded after execution
- * (returns to the PENDING state)
- * - SINGLE: The timer is one-shot, that is, the timer goes to a
- * STOPPED state after execution.
- * @param tim_lcore
- * The ID of the lcore where the timer callback function has to be
- * executed. If tim_lcore is LCORE_ID_ANY, the timer library will
- * launch it on a different core for each call (round-robin).
- * @param fct
- * The callback function of the timer.
- * @param arg
- * The user argument of the callback function.
- * @return
- * - 0: Success; the timer is scheduled.
- * - (-1): Timer is in the RUNNING or CONFIG state.
- */
-int rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
- enum rte_timer_type type, unsigned tim_lcore,
- rte_timer_cb_t fct, void *arg);
-
-
-/**
- * Loop until rte_timer_reset() succeeds.
- *
- * Reset and start the timer associated with the timer handle. Always
- * succeeds. See rte_timer_reset() for details.
- *
- * @param tim
- * The timer handle.
- * @param ticks
- * The number of cycles (see rte_get_hpet_hz()) before the callback
- * function is called.
- * @param type
- * The type can be either:
- * - PERIODICAL: The timer is automatically reloaded after execution
- * (returns to the PENDING state)
- * - SINGLE: The timer is one-shot, that is, the timer goes to a
- * STOPPED state after execution.
- * @param tim_lcore
- * The ID of the lcore where the timer callback function has to be
- * executed. If tim_lcore is LCORE_ID_ANY, the timer library will
- * launch it on a different core for each call (round-robin).
- * @param fct
- * The callback function of the timer.
- * @param arg
- * The user argument of the callback function.
- */
-void
-rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
- enum rte_timer_type type, unsigned tim_lcore,
- rte_timer_cb_t fct, void *arg);
-
-/**
- * Stop a timer.
- *
- * The rte_timer_stop() function stops the timer associated with the
- * timer handle *tim*. It may fail if the timer is currently running or
- * being configured.
- *
- * If the timer is pending or stopped (for instance, already expired),
- * the function will succeed. The timer handle tim must have been
- * initialized using rte_timer_init(), otherwise, undefined behavior
- * will occur.
- *
- * This function can be called safely from a timer callback. If it
- * succeeds, the timer is not referenced anymore by the timer library
- * and the timer structure can be freed (even in the callback
- * function).
- *
- * @param tim
- * The timer handle.
- * @return
- * - 0: Success; the timer is stopped.
- * - (-1): The timer is in the RUNNING or CONFIG state.
- */
-int rte_timer_stop(struct rte_timer *tim);
-
-
-/**
- * Loop until rte_timer_stop() succeeds.
- *
- * After a call to this function, the timer identified by *tim* is
- * stopped. See rte_timer_stop() for details.
- *
- * @param tim
- * The timer handle.
- */
-void rte_timer_stop_sync(struct rte_timer *tim);
-
-/**
- * Test if a timer is pending.
- *
- * The rte_timer_pending() function tests the PENDING status
- * of the timer handle *tim*. A PENDING timer is one that has been
- * scheduled and whose function has not yet been called.
- *
- * @param tim
- * The timer handle.
- * @return
- * - 0: The timer is not pending.
- * - 1: The timer is pending.
- */
-int rte_timer_pending(struct rte_timer *tim);
-
-/**
- * Manage the timer list and execute callback functions.
- *
- * This function must be called periodically from each core's
- * main_loop(). It browses the list of pending timers and runs all
- * timers that have expired.
- *
- * The precision of the timer depends on the call frequency of this
- * function. However, the more often the function is called, the more
- * CPU resources it will use.
- */
-void rte_timer_manage(void);
-
-/**
- * Dump statistics about timers.
- *
- * @param f
- * A pointer to a file for output
- */
-void rte_timer_dump_stats(FILE *f);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_TIMER_H_ */
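A minimal sketch of how the deleted API above is typically driven, assuming an
EAL application (the callback name and setup/loop split are illustrative, not
part of the header):

    #include <rte_timer.h>
    #include <rte_lcore.h>
    #include <rte_cycles.h>

    static struct rte_timer tim;

    /* Signature matches rte_timer_cb_t declared above. */
    static void my_cb(struct rte_timer *t, void *arg)
    {
        (void)t; (void)arg;
        /* timer work goes here */
    }

    /* Called once from main() after EAL init. */
    static void timers_setup(void)
    {
        rte_timer_subsystem_init();   /* once, before any other timer call */
        rte_timer_init(&tim);         /* mandatory before reset/stop */
        /* One-shot timer, one second from now, on this lcore;
         * rte_get_hpet_hz() is the cycle rate the doc above refers to. */
        rte_timer_reset(&tim, rte_get_hpet_hz(), SINGLE,
                        rte_lcore_id(), my_cb, NULL);
    }

    /* Each core's main_loop() must poll for expirations; the timer
     * precision depends directly on how often this is called. */
    static void main_loop(void)
    {
        for (;;)
            rte_timer_manage();
    }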
diff --git a/src/dpdk_lib18/librte_vhost/Makefile b/src/dpdk_lib18/librte_vhost/Makefile
deleted file mode 100755
index c008d64a..00000000
--- a/src/dpdk_lib18/librte_vhost/Makefile
+++ /dev/null
@@ -1,50 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_vhost.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 -D_FILE_OFFSET_BITS=64 -lfuse
-LDFLAGS += -lfuse
-# all sources are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_VHOST) := vhost-net-cdev.c virtio-net.c vhost_rxtx.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_VHOST)-include += rte_virtio_net.h
-
-# dependencies
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_mbuf
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_vhost/eventfd_link/Makefile b/src/dpdk_lib18/librte_vhost/eventfd_link/Makefile
deleted file mode 100755
index fc3927bf..00000000
--- a/src/dpdk_lib18/librte_vhost/eventfd_link/Makefile
+++ /dev/null
@@ -1,39 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-obj-m += eventfd_link.o
-
-
-all:
- make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
-
-clean:
- make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
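As a usage note (assumed, not part of the Makefile): after running make against
the installed kernel headers, the resulting eventfd_link.ko module would be
loaded with insmod, which registers the /dev/eventfd-link misc device that the
vhost CUSE code relies on (see eventfd_link.c below).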
diff --git a/src/dpdk_lib18/librte_vhost/eventfd_link/eventfd_link.c b/src/dpdk_lib18/librte_vhost/eventfd_link/eventfd_link.c
deleted file mode 100755
index 7755dd67..00000000
--- a/src/dpdk_lib18/librte_vhost/eventfd_link/eventfd_link.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*-
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation
- */
-
-#include <linux/eventfd.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/rcupdate.h>
-#include <linux/file.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/mmu_context.h>
-#include <linux/sched.h>
-#include <asm/mmu_context.h>
-#include <linux/fdtable.h>
-
-#include "eventfd_link.h"
-
-
-/*
- * get_files_struct is copied from fs/file.c
- */
-struct files_struct *
-get_files_struct(struct task_struct *task)
-{
- struct files_struct *files;
-
- task_lock(task);
- files = task->files;
- if (files)
- atomic_inc(&files->count);
- task_unlock(task);
-
- return files;
-}
-
-/*
- * put_files_struct is extracted from fs/file.c
- */
-void
-put_files_struct(struct files_struct *files)
-{
- if (atomic_dec_and_test(&files->count))
- BUG();
-}
-
-
-static long
-eventfd_link_ioctl(struct file *f, unsigned int ioctl, unsigned long arg)
-{
- void __user *argp = (void __user *) arg;
- struct task_struct *task_target = NULL;
- struct file *file;
- struct files_struct *files;
- struct fdtable *fdt;
- struct eventfd_copy eventfd_copy;
-
- switch (ioctl) {
- case EVENTFD_COPY:
- if (copy_from_user(&eventfd_copy, argp,
- sizeof(struct eventfd_copy)))
- return -EFAULT;
-
- /*
- * Find the task struct for the target pid
- */
- task_target =
- pid_task(find_vpid(eventfd_copy.target_pid), PIDTYPE_PID);
- if (task_target == NULL) {
- pr_debug("Failed to get mem ctx for target pid\n");
- return -EFAULT;
- }
-
- files = get_files_struct(current);
- if (files == NULL) {
- pr_debug("Failed to get files struct\n");
- return -EFAULT;
- }
-
- rcu_read_lock();
- file = fcheck_files(files, eventfd_copy.source_fd);
- if (file) {
- if (file->f_mode & FMODE_PATH ||
- !atomic_long_inc_not_zero(&file->f_count))
- file = NULL;
- }
- rcu_read_unlock();
- put_files_struct(files);
-
- if (file == NULL) {
- pr_debug("Failed to get file from source pid\n");
- return 0;
- }
-
- /*
- * Release the existing eventfd in the source process
- */
- spin_lock(&files->file_lock);
- filp_close(file, files);
- fdt = files_fdtable(files);
- fdt->fd[eventfd_copy.source_fd] = NULL;
- spin_unlock(&files->file_lock);
-
- /*
- * Find the file struct associated with the target fd.
- */
-
- files = get_files_struct(task_target);
- if (files == NULL) {
- pr_debug("Failed to get files struct\n");
- return -EFAULT;
- }
-
- rcu_read_lock();
- file = fcheck_files(files, eventfd_copy.target_fd);
- if (file) {
- if (file->f_mode & FMODE_PATH ||
- !atomic_long_inc_not_zero(&file->f_count))
- file = NULL;
- }
- rcu_read_unlock();
- put_files_struct(files);
-
- if (file == NULL) {
- pr_debug("Failed to get file from target pid\n");
- return 0;
- }
-
- /*
- * Install the file struct from the target process into the
- * file descriptor of the source process.
- */
-
- fd_install(eventfd_copy.source_fd, file);
-
- return 0;
-
- default:
- return -ENOIOCTLCMD;
- }
-}
-
-static const struct file_operations eventfd_link_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = eventfd_link_ioctl,
-};
-
-
-static struct miscdevice eventfd_link_misc = {
- .name = "eventfd-link",
- .fops = &eventfd_link_fops,
-};
-
-static int __init
-eventfd_link_init(void)
-{
- return misc_register(&eventfd_link_misc);
-}
-
-module_init(eventfd_link_init);
-
-static void __exit
-eventfd_link_exit(void)
-{
- misc_deregister(&eventfd_link_misc);
-}
-
-module_exit(eventfd_link_exit);
-
-MODULE_VERSION("0.0.1");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Anthony Fee");
-MODULE_DESCRIPTION("Link eventfd");
-MODULE_ALIAS("devname:eventfd-link");
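The EVENTFD_COPY ioctl above is driven from userspace roughly as follows. This
is a hedged sketch: the struct eventfd_copy field names are inferred from the
driver code (the exact layout lives in eventfd_link.h, which is not shown), and
link_eventfd is an illustrative helper name:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/types.h>
    #include "eventfd_link.h"   /* EVENTFD_COPY + struct eventfd_copy (assumed) */

    /* Ask the module to replace source_fd in the calling process with the
     * file backing target_fd in process target_pid. */
    static int link_eventfd(int source_fd, int target_fd, pid_t target_pid)
    {
        struct eventfd_copy cp = {
            .source_fd  = source_fd,
            .target_fd  = target_fd,
            .target_pid = target_pid,
        };
        int fd = open("/dev/eventfd-link", O_RDWR);  /* misc device above */
        if (fd < 0)
            return -1;
        int ret = ioctl(fd, EVENTFD_COPY, &cp);
        close(fd);
        return ret;
    }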
diff --git a/src/dpdk_lib18/librte_vhost/libvirt/qemu-wrap.py b/src/dpdk_lib18/librte_vhost/libvirt/qemu-wrap.py
deleted file mode 100755
index e2d68a0e..00000000
--- a/src/dpdk_lib18/librte_vhost/libvirt/qemu-wrap.py
+++ /dev/null
@@ -1,367 +0,0 @@
-#!/usr/bin/python
-#/*
-# * BSD LICENSE
-# *
-# * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# * All rights reserved.
-# *
-# * Redistribution and use in source and binary forms, with or without
-# * modification, are permitted provided that the following conditions
-# * are met:
-# *
-# * * Redistributions of source code must retain the above copyright
-# * notice, this list of conditions and the following disclaimer.
-# * * Redistributions in binary form must reproduce the above copyright
-# * notice, this list of conditions and the following disclaimer in
-# * the documentation and/or other materials provided with the
-# * distribution.
-# * * Neither the name of Intel Corporation nor the names of its
-# * contributors may be used to endorse or promote products derived
-# * from this software without specific prior written permission.
-# *
-# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# */
-
-#####################################################################
-# This script is designed to modify the call to the QEMU emulator
-# to support userspace vhost when starting a guest machine through
-# libvirt with vhost enabled. The steps to enable this are as follows
-# and should be run as root:
-#
-# 1. Place this script in libvirtd's binary search PATH ($PATH).
-#    A good location would be the same directory in which the QEMU
-#    binary is located.
-#
-# 2. Ensure that the script has the same owner/group and file
-# permissions as the QEMU binary
-#
-# 3. Update the VM xml file using "virsh edit VM.xml"
-#
-# 3.a) Set the VM to use the launch script
-#
-# Set the emulator path contained in the
-#      <emulator></emulator> tags
-#
-#      e.g. replace <emulator>/usr/bin/qemu-kvm</emulator>
-#      with    <emulator>/usr/bin/qemu-wrap.py</emulator>
-#
-#   3.b) Set the VM's devices to use vhost-net offload
-#
-#          <interface type="network">
-#          <model type="virtio"/>
-#          <driver name="vhost"/>
-#          </interface>
-#
-# 4. Enable libvirt to access our userspace device file by adding it to
-#    the device controller's cgroup for libvirtd using the following steps:
-#
-# 4.a) In /etc/libvirt/qemu.conf add/edit the following lines:
-# 1) cgroup_controllers = [ ... "devices", ... ]
-# 2) clear_emulator_capabilities = 0
-# 3) user = "root"
-# 4) group = "root"
-# 5) cgroup_device_acl = [
-# "/dev/null", "/dev/full", "/dev/zero",
-# "/dev/random", "/dev/urandom",
-# "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
-# "/dev/rtc", "/dev/hpet", "/dev/net/tun",
-# "/dev/<devbase-name>-<index>",
-# ]
-#
-# 4.b) Disable SELinux or set to permissive mode
-#
-# 4.c) Mount cgroup device controller
-# "mkdir /dev/cgroup"
-# "mount -t cgroup none /dev/cgroup -o devices"
-#
-#   4.d) Set the hugetlbfs_dir variable (optional)
-# VMs using userspace vhost must use hugepage backed
-# memory. This can be enabled in the libvirt XML
-# config by adding a memory backing section to the
-# XML config e.g.
-# <memoryBacking>
-# <hugepages/>
-# </memoryBacking>
-# This memory backing section should be added after the
-# <memory> and <currentMemory> sections. This will add
-# flags "-mem-prealloc -mem-path <path>" to the QEMU
-# command line. The hugetlbfs_mount variable can be used
-# to override the default <path> passed through by libvirt.
-#
-#        If "-mem-prealloc" or "-mem-path <path>" are not passed
-#        through and a vhost device is detected, then these options will
-#        be automatically added by this script. This script will detect
-#        the system hugetlbfs mount point to be used for <path>. The
-#        default <path> for this script can be overridden by the
-# hugetlbfs_dir variable in the configuration section of this script.
-#
-#
-# 4.e) Restart the libvirtd system process
-# e.g. on Fedora "systemctl restart libvirtd.service"
-#
-#
-# 4.f) Edit the Configuration Parameters section of this script
-# to point to the correct emulator location and set any
-#        additional options
-#
-# The script modifies the libvirtd Qemu call by modifying/adding
-# options based on the configuration parameters below.
-# NOTE:
-# emul_path and us_vhost_path must be set
-# All other parameters are optional
-#####################################################################
-
-
-#############################################
-# Configuration Parameters
-#############################################
-#Path to QEMU binary
-emul_path = "/usr/local/bin/qemu-system-x86_64"
-
-#Path to userspace vhost device file
-# This filename should match the --dev-basename and --dev-index parameters of
-# the command used to launch the userspace vhost sample application, e.g.
-# if the sample app launch command is:
-# ./build/vhost-switch ..... --dev-basename usvhost --dev-index 1
-# then this variable should be set to:
-# us_vhost_path = "/dev/usvhost-1"
-us_vhost_path = "/dev/usvhost-1"
-
-#List of additional user defined emulation options. These options will
-#be added to all Qemu calls
-emul_opts_user = []
-
-#List of additional user defined emulation options for vhost only.
-#These options will only be added to vhost enabled guests
-emul_opts_user_vhost = []
-
-#For all VHOST enabled VMs, the VM memory is preallocated from hugetlbfs
-# Set this variable to one to enable this option for all VMs
-use_huge_all = 0
-
-#Instead of autodetecting, override the hugetlbfs directory by setting
-#this variable
-hugetlbfs_dir = ""
-
-#############################################
-
-
-#############################################
-# ****** Do Not Modify Below this Line ******
-#############################################
-
-import sys, os, subprocess
-
-
-#List of open userspace vhost file descriptors
-fd_list = []
-
-#additional virtio device flags when using userspace vhost
-vhost_flags = [ "csum=off",
- "gso=off",
- "guest_tso4=off",
- "guest_tso6=off",
- "guest_ecn=off"
- ]
-
-
-#############################################
-# Find the system hugefile mount point.
-# Note:
-# if multiple hugetlbfs mount points exist
-# then the first one found will be used
-#############################################
-def find_huge_mount():
-
- if (len(hugetlbfs_dir)):
- return hugetlbfs_dir
-
- huge_mount = ""
-
- if (os.access("/proc/mounts", os.F_OK)):
- f = open("/proc/mounts", "r")
- line = f.readline()
- while line:
- line_split = line.split(" ")
- if line_split[2] == 'hugetlbfs':
- huge_mount = line_split[1]
- break
- line = f.readline()
- else:
- print "/proc/mounts not found"
- exit (1)
-
- f.close()
- if len(huge_mount) == 0:
- print "Failed to find hugetlbfs mount point"
- exit (1)
-
- return huge_mount
-
-
-#############################################
-# Get a userspace Vhost file descriptor
-#############################################
-def get_vhost_fd():
-
- if (os.access(us_vhost_path, os.F_OK)):
- fd = os.open( us_vhost_path, os.O_RDWR)
- else:
- print ("US-Vhost file %s not found" %us_vhost_path)
- exit (1)
-
- return fd
-
-
-#############################################
-# Check for vhostfd. If found, replace it
-# with our own vhost fd and append any vhost
-# flags onto the end.
-#############################################
-def modify_netdev_arg(arg):
-
- global fd_list
- vhost_in_use = 0
- s = ''
- new_opts = []
- netdev_opts = arg.split(",")
-
- for opt in netdev_opts:
- #check if vhost is used
- if "vhost" == opt[:5]:
- vhost_in_use = 1
- else:
- new_opts.append(opt)
-
- #if using vhost append vhost options
- if vhost_in_use == 1:
- #append vhost on option
- new_opts.append('vhost=on')
- #append vhostfd option
- new_fd = get_vhost_fd()
- new_opts.append('vhostfd=' + str(new_fd))
- fd_list.append(new_fd)
-
- #concatenate all options
- for opt in new_opts:
- if len(s) > 0:
- s+=','
-
- s+=opt
-
- return s
-
-
-#############################################
-# Main
-#############################################
-def main():
-
- global fd_list
- global vhost_in_use
- new_args = []
- num_cmd_args = len(sys.argv)
- emul_call = ''
- mem_prealloc_set = 0
- mem_path_set = 0
- num = 0
-
- #parse the parameters
- while (num < num_cmd_args):
- arg = sys.argv[num]
-
- #Check netdev +1 parameter for vhostfd
- if arg == '-netdev':
- num_vhost_devs = len(fd_list)
- new_args.append(arg)
-
- num+=1
- arg = sys.argv[num]
- mod_arg = modify_netdev_arg(arg)
- new_args.append(mod_arg)
-
- #append vhost flags if this is a vhost device
- # and -device is the next arg
- # i.e. -device -opt1,-opt2,...,-opt3,%vhost
- if (num_vhost_devs < len(fd_list)):
- num+=1
- arg = sys.argv[num]
- if arg == '-device':
- new_args.append(arg)
- num+=1
- new_arg = sys.argv[num]
- for flag in vhost_flags:
- new_arg = ''.join([new_arg,',',flag])
- new_args.append(new_arg)
- else:
- new_args.append(arg)
- elif arg == '-mem-prealloc':
- mem_prealloc_set = 1
- new_args.append(arg)
- elif arg == '-mem-path':
- mem_path_set = 1
- new_args.append(arg)
-
- else:
- new_args.append(arg)
-
- num+=1
-
- #Set Qemu binary location
- emul_call+=emul_path
- emul_call+=" "
-
- #Add prealloc mem options if using vhost and not already added
- if ((len(fd_list) > 0) and (mem_prealloc_set == 0)):
- emul_call += "-mem-prealloc "
-
- #Add mempath mem options if using vhost and not already added
- if ((len(fd_list) > 0) and (mem_path_set == 0)):
- #Detect and add hugetlbfs mount point
- mp = find_huge_mount()
- mp = "".join(["-mem-path ", mp])
- emul_call += mp
- emul_call += " "
-
-
- #add user options
- for opt in emul_opts_user:
- emul_call += opt
- emul_call += " "
-
- #Add user vhost-only options
- if len(fd_list) > 0:
- for opt in emul_opts_user_vhost:
- emul_call += opt
- emul_call += " "
-
- #Add updated libvirt options
- iter_args = iter(new_args)
- #skip 1st arg i.e. call to this script
- next(iter_args)
- for arg in iter_args:
- emul_call+=str(arg)
- emul_call+= " "
-
- #Call QEMU
- subprocess.call(emul_call, shell=True)
-
-
- #Close usvhost files
- for fd in fd_list:
- os.close(fd)
-
-
-if __name__ == "__main__":
- main()
-
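To illustrate the rewrite performed by modify_netdev_arg() above (the fd
number 7 is illustrative), a libvirt argument pair such as

  -netdev tap,id=hostnet0,vhost=on -device virtio-net-pci,netdev=hostnet0

would be emitted as

  -netdev tap,id=hostnet0,vhost=on,vhostfd=7 -device virtio-net-pci,netdev=hostnet0,csum=off,gso=off,guest_tso4=off,guest_tso6=off,guest_ecn=off

i.e. the script re-appends vhost=on, injects its own vhostfd from the
userspace vhost device, and disables the offload features listed in
vhost_flags.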
diff --git a/src/dpdk_lib18/librte_vhost/rte_virtio_net.h b/src/dpdk_lib18/librte_vhost/rte_virtio_net.h
deleted file mode 100755
index 0bf07c72..00000000
--- a/src/dpdk_lib18/librte_vhost/rte_virtio_net.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _VIRTIO_NET_H_
-#define _VIRTIO_NET_H_
-
-/**
- * @file
- * Interface to vhost net
- */
-
-#include <stdint.h>
-#include <linux/virtio_ring.h>
-#include <linux/virtio_net.h>
-#include <sys/eventfd.h>
-#include <sys/socket.h>
-#include <linux/if.h>
-
-#include <rte_memory.h>
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-
-/* Used to indicate that the device is running on a data core */
-#define VIRTIO_DEV_RUNNING 1
-
-/* Backend value set by guest. */
-#define VIRTIO_DEV_STOPPED -1
-
-
-/* Enum for virtqueue management. */
-enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
-
-#define BUF_VECTOR_MAX 256
-
-/**
- * Structure contains buffer address, length and descriptor index
- * from vring to do scatter RX.
- */
-struct buf_vector {
- uint64_t buf_addr;
- uint32_t buf_len;
- uint32_t desc_idx;
-};
-
-/**
- * Structure contains variables relevant to RX/TX virtqueues.
- */
-struct vhost_virtqueue {
- struct vring_desc *desc; /**< Virtqueue descriptor ring. */
- struct vring_avail *avail; /**< Virtqueue available ring. */
- struct vring_used *used; /**< Virtqueue used ring. */
- uint32_t size; /**< Size of descriptor ring. */
- uint32_t backend; /**< Backend value to determine if device should be started/stopped. */
- uint16_t vhost_hlen; /**< Vhost header length (varies depending on RX merge buffers). */
- volatile uint16_t last_used_idx; /**< Last index used on the available ring */
- volatile uint16_t last_used_idx_res; /**< Used for multiple devices reserving buffers. */
- eventfd_t callfd; /**< Currently unused as polling mode is enabled. */
- eventfd_t kickfd; /**< Used to notify the guest (trigger interrupt). */
- struct buf_vector buf_vec[BUF_VECTOR_MAX]; /**< for scatter RX. */
-} __rte_cache_aligned;
-
-/**
- * Device structure contains all configuration information relating to the device.
- */
-struct virtio_net {
- struct vhost_virtqueue *virtqueue[VIRTIO_QNUM]; /**< Contains all virtqueue information. */
- struct virtio_memory *mem; /**< QEMU memory and memory region information. */
- uint64_t features; /**< Negotiated feature set. */
- uint64_t device_fh; /**< device identifier. */
- uint32_t flags; /**< Device flags. Only used to check if device is running on data core. */
- char ifname[IFNAMSIZ]; /**< Name of the tap device. */
- void *priv; /**< private context */
-} __rte_cache_aligned;
-
-/**
- * Information relating to memory regions including offsets to addresses in QEMU's memory file.
- */
-struct virtio_memory_regions {
- uint64_t guest_phys_address; /**< Base guest physical address of region. */
- uint64_t guest_phys_address_end; /**< End guest physical address of region. */
- uint64_t memory_size; /**< Size of region. */
- uint64_t userspace_address; /**< Base userspace address of region. */
- uint64_t address_offset; /**< Offset of region for address translation. */
-};
-
-
-/**
- * Memory structure includes region and mapping information.
- */
-struct virtio_memory {
- uint64_t base_address; /**< Base QEMU userspace address of the memory file. */
- uint64_t mapped_address; /**< Mapped address of memory file base in our application's memory space. */
- uint64_t mapped_size; /**< Total size of memory file. */
- uint32_t nregions; /**< Number of memory regions. */
- struct virtio_memory_regions regions[0]; /**< Memory region information. */
-};
-
-/**
- * Device operations to add/remove device.
- */
-struct virtio_net_device_ops {
- int (*new_device)(struct virtio_net *); /**< Add device. */
- void (*destroy_device)(volatile struct virtio_net *); /**< Remove device. */
-};
-
-static inline uint16_t __attribute__((always_inline))
-rte_vring_available_entries(struct virtio_net *dev, uint16_t queue_id)
-{
- struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
- return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx_res;
-}
-
-/**
- * Function to convert guest physical addresses to vhost virtual addresses.
- * This is used to convert guest virtio buffer addresses.
- */
-static inline uint64_t __attribute__((always_inline))
-gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
-{
- struct virtio_memory_regions *region;
- uint32_t regionidx;
- uint64_t vhost_va = 0;
-
- for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
- region = &dev->mem->regions[regionidx];
- if ((guest_pa >= region->guest_phys_address) &&
- (guest_pa <= region->guest_phys_address_end)) {
- vhost_va = region->address_offset + guest_pa;
- break;
- }
- }
- return vhost_va;
-}
-
-/**
- * Disable features in feature_mask. Returns 0 on success.
- */
-int rte_vhost_feature_disable(uint64_t feature_mask);
-
-/**
- * Enable features in feature_mask. Returns 0 on success.
- */
-int rte_vhost_feature_enable(uint64_t feature_mask);
-
-/* Returns currently supported vhost features */
-uint64_t rte_vhost_feature_get(void);
-
-int rte_vhost_enable_guest_notification(struct virtio_net *dev, uint16_t queue_id, int enable);
-
-/* Register vhost driver. dev_name could be different for multiple instance support. */
-int rte_vhost_driver_register(const char *dev_name);
-
-/* Register callbacks. */
-int rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const);
-/* Start vhost driver session blocking loop. */
-int rte_vhost_driver_session_start(void);
-
-/**
- * This function adds buffers to the virtio device's RX virtqueue. Buffers can
- * be received from the physical port or from another virtual device. A packet
- * count is returned to indicate the number of packets that were successfully
- * added to the RX queue.
- * @param queue_id
- * virtio queue index in mq case
- * @return
- * num of packets enqueued
- */
-uint16_t rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
- struct rte_mbuf **pkts, uint16_t count);
-
-/**
- * This function gets guest buffers from the virtio device TX virtqueue,
- * constructs host mbufs, copies guest buffer content into them and
- * stores them in pkts to be processed.
- * @param mbuf_pool
- * mbuf_pool where host mbuf is allocated.
- * @param queue_id
- * virtio queue index in mq case.
- * @return
- * num of packets dequeued
- */
-uint16_t rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
- struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);
-
-#endif /* _VIRTIO_NET_H_ */
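A minimal sketch of the control-flow side of this API, assuming an
application-defined callback pair. The flag handling mirrors the
VIRTIO_DEV_RUNNING definition above; "usvhost-1" is an illustrative device
name, and start_vhost is a hypothetical helper:

    #include <rte_virtio_net.h>

    static int new_device(struct virtio_net *dev)
    {
        dev->flags |= VIRTIO_DEV_RUNNING;   /* app bookkeeping: mark active */
        return 0;
    }

    static void destroy_device(volatile struct virtio_net *dev)
    {
        dev->flags &= ~VIRTIO_DEV_RUNNING;  /* data cores must stop using dev */
    }

    static const struct virtio_net_device_ops ops = {
        .new_device     = new_device,
        .destroy_device = destroy_device,
    };

    int start_vhost(void)
    {
        /* Registers /dev/usvhost-1 via CUSE (see vhost-net-cdev.c below). */
        if (rte_vhost_driver_register("usvhost-1") != 0)
            return -1;
        rte_vhost_driver_callback_register(&ops);
        /* Blocking session loop; data cores meanwhile move packets with
         * rte_vhost_enqueue_burst() / rte_vhost_dequeue_burst() on the
         * devices handed to new_device(). */
        return rte_vhost_driver_session_start();
    }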
diff --git a/src/dpdk_lib18/librte_vhost/vhost-net-cdev.c b/src/dpdk_lib18/librte_vhost/vhost-net-cdev.c
deleted file mode 100755
index 57c76cb0..00000000
--- a/src/dpdk_lib18/librte_vhost/vhost-net-cdev.c
+++ /dev/null
@@ -1,389 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <errno.h>
-#include <fuse/cuse_lowlevel.h>
-#include <linux/limits.h>
-#include <linux/vhost.h>
-#include <stdint.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <rte_ethdev.h>
-#include <rte_log.h>
-#include <rte_string_fns.h>
-#include <rte_virtio_net.h>
-
-#include "vhost-net-cdev.h"
-
-#define FUSE_OPT_DUMMY "\0\0"
-#define FUSE_OPT_FORE "-f\0\0"
-#define FUSE_OPT_NOMULTI "-s\0\0"
-
-static const uint32_t default_major = 231;
-static const uint32_t default_minor = 1;
-static const char cuse_device_name[] = "/dev/cuse";
-static const char default_cdev[] = "vhost-net";
-
-static struct fuse_session *session;
-static struct vhost_net_device_ops const *ops;
-
-/*
- * Returns vhost_device_ctx from given fuse_req_t. The index is populated later
- * when the device is added to the device linked list.
- */
-static struct vhost_device_ctx
-fuse_req_to_vhost_ctx(fuse_req_t req, struct fuse_file_info *fi)
-{
- struct vhost_device_ctx ctx;
- struct fuse_ctx const *const req_ctx = fuse_req_ctx(req);
-
- ctx.pid = req_ctx->pid;
- ctx.fh = fi->fh;
-
- return ctx;
-}
-
-/*
- * When the device is created in QEMU it gets initialised here and
- * added to the device linked list.
- */
-static void
-vhost_net_open(fuse_req_t req, struct fuse_file_info *fi)
-{
- struct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);
- int err = 0;
-
- err = ops->new_device(ctx);
- if (err == -1) {
- fuse_reply_err(req, EPERM);
- return;
- }
-
- fi->fh = err;
-
- RTE_LOG(INFO, VHOST_CONFIG,
- "(%"PRIu64") Device configuration started\n", fi->fh);
- fuse_reply_open(req, fi);
-}
-
-/*
- * When QEMU is shutdown or killed the device gets released.
- */
-static void
-vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
-{
- int err = 0;
- struct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);
-
- ops->destroy_device(ctx);
- RTE_LOG(INFO, VHOST_CONFIG, "(%"PRIu64") Device released\n", ctx.fh);
- fuse_reply_err(req, err);
-}
-
-/*
- * Boilerplate code for CUSE IOCTL
- * Implicit arguments: ctx, req, result.
- */
-#define VHOST_IOCTL(func) do { \
- result = (func)(ctx); \
- fuse_reply_ioctl(req, result, NULL, 0); \
-} while (0)
-
-/*
- * Boilerplate IOCTL RETRY
- * Implicit arguments: req.
- */
-#define VHOST_IOCTL_RETRY(size_r, size_w) do { \
- struct iovec iov_r = { arg, (size_r) }; \
- struct iovec iov_w = { arg, (size_w) }; \
- fuse_reply_ioctl_retry(req, &iov_r, \
- (size_r) ? 1 : 0, &iov_w, (size_w) ? 1 : 0);\
-} while (0)
-
-/*
- * Boilerplate code for CUSE Read IOCTL
- * Implicit arguments: ctx, req, result, in_bufsz, in_buf.
- */
-#define VHOST_IOCTL_R(type, var, func) do { \
- if (!in_bufsz) { \
- VHOST_IOCTL_RETRY(sizeof(type), 0);\
- } else { \
- (var) = *(const type*)in_buf; \
- result = func(ctx, &(var)); \
- fuse_reply_ioctl(req, result, NULL, 0);\
- } \
-} while (0)
-
-/*
- * Boilerplate code for CUSE Write IOCTL
- * Implicit arguments: ctx, req, result, out_bufsz.
- */
-#define VHOST_IOCTL_W(type, var, func) do { \
- if (!out_bufsz) { \
- VHOST_IOCTL_RETRY(0, sizeof(type));\
- } else { \
- result = (func)(ctx, &(var));\
- fuse_reply_ioctl(req, result, &(var), sizeof(type));\
- } \
-} while (0)
-
-/*
- * Boilerplate code for CUSE Read/Write IOCTL
- * Implicit arguments: ctx, req, result, in_bufsz, in_buf.
- */
-#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do { \
- if (!in_bufsz) { \
- VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2));\
- } else { \
- (var1) = *(const type1*) (in_buf); \
- result = (func)(ctx, (var1), &(var2)); \
- fuse_reply_ioctl(req, result, &(var2), sizeof(type2));\
- } \
-} while (0)
-
-/*
- * The IOCTLs are handled using CUSE/FUSE in userspace. Depending on the type
- * of IOCTL a buffer is requested to read or to write. This request is handled
- * by FUSE and the buffer is then given to CUSE.
- */
-static void
-vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
- struct fuse_file_info *fi, __rte_unused unsigned flags,
- const void *in_buf, size_t in_bufsz, size_t out_bufsz)
-{
- struct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);
- struct vhost_vring_file file;
- struct vhost_vring_state state;
- struct vhost_vring_addr addr;
- uint64_t features;
- uint32_t index;
- int result = 0;
-
- switch (cmd) {
- case VHOST_NET_SET_BACKEND:
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_backend);
- break;
-
- case VHOST_GET_FEATURES:
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
- VHOST_IOCTL_W(uint64_t, features, ops->get_features);
- break;
-
- case VHOST_SET_FEATURES:
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
- VHOST_IOCTL_R(uint64_t, features, ops->set_features);
- break;
-
- case VHOST_RESET_OWNER:
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
- VHOST_IOCTL(ops->reset_owner);
- break;
-
- case VHOST_SET_OWNER:
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
- VHOST_IOCTL(ops->set_owner);
- break;
-
- case VHOST_SET_MEM_TABLE:
- /*TODO fix race condition.*/
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
- static struct vhost_memory mem_temp;
-
- switch (in_bufsz) {
- case 0:
- VHOST_IOCTL_RETRY(sizeof(struct vhost_memory), 0);
- break;
-
- case sizeof(struct vhost_memory):
- mem_temp = *(const struct vhost_memory *) in_buf;
-
- if (mem_temp.nregions > 0) {
- VHOST_IOCTL_RETRY(sizeof(struct vhost_memory) +
- (sizeof(struct vhost_memory_region) *
- mem_temp.nregions), 0);
- } else {
- result = -1;
- fuse_reply_ioctl(req, result, NULL, 0);
- }
- break;
-
- default:
- result = ops->set_mem_table(ctx,
- in_buf, mem_temp.nregions);
- if (result)
- fuse_reply_err(req, EINVAL);
- else
- fuse_reply_ioctl(req, result, NULL, 0);
- }
- break;
-
- case VHOST_SET_VRING_NUM:
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_state, state,
- ops->set_vring_num);
- break;
-
- case VHOST_SET_VRING_BASE:
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_state, state,
- ops->set_vring_base);
- break;
-
- case VHOST_GET_VRING_BASE:
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
- VHOST_IOCTL_RW(uint32_t, index,
- struct vhost_vring_state, state, ops->get_vring_base);
- break;
-
- case VHOST_SET_VRING_ADDR:
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_addr, addr,
- ops->set_vring_addr);
- break;
-
- case VHOST_SET_VRING_KICK:
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_file, file,
- ops->set_vring_kick);
- break;
-
- case VHOST_SET_VRING_CALL:
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_file, file,
- ops->set_vring_call);
- break;
-
- default:
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") IOCTL: DOESN NOT EXIST\n", ctx.fh);
- result = -1;
- fuse_reply_ioctl(req, result, NULL, 0);
- }
-
- if (result < 0)
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: FAIL\n", ctx.fh);
- else
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: SUCCESS\n", ctx.fh);
-}
-
-/*
- * The structure holding the open, release and ioctl function pointers.
- */
-static const struct cuse_lowlevel_ops vhost_net_ops = {
- .open = vhost_net_open,
- .release = vhost_net_release,
- .ioctl = vhost_net_ioctl,
-};
-
-/*
- * cuse_info is populated and used to register the cuse device.
- * vhost_net_device_ops are also passed when the device is registered in app.
- */
-int
-rte_vhost_driver_register(const char *dev_name)
-{
- struct cuse_info cuse_info;
- char device_name[PATH_MAX] = "";
- char char_device_name[PATH_MAX] = "";
- const char *device_argv[] = { device_name };
-
- char fuse_opt_dummy[] = FUSE_OPT_DUMMY;
- char fuse_opt_fore[] = FUSE_OPT_FORE;
- char fuse_opt_nomulti[] = FUSE_OPT_NOMULTI;
- char *fuse_argv[] = {fuse_opt_dummy, fuse_opt_fore, fuse_opt_nomulti};
-
- if (access(cuse_device_name, R_OK | W_OK) < 0) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "char device %s can't be accessed, maybe not exist\n",
- cuse_device_name);
- return -1;
- }
-
- /*
- * The device name is created. This is passed to QEMU so that it can
- * register the device with our application.
- */
- snprintf(device_name, PATH_MAX, "DEVNAME=%s", dev_name);
- snprintf(char_device_name, PATH_MAX, "/dev/%s", dev_name);
-
- /* Check if device already exists. */
- if (access(char_device_name, F_OK) != -1) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "char device %s already exists\n", char_device_name);
- return -1;
- }
-
- memset(&cuse_info, 0, sizeof(cuse_info));
- cuse_info.dev_major = default_major;
- cuse_info.dev_minor = default_minor;
- cuse_info.dev_info_argc = 1;
- cuse_info.dev_info_argv = device_argv;
- cuse_info.flags = CUSE_UNRESTRICTED_IOCTL;
-
- ops = get_virtio_net_callbacks();
-
- session = cuse_lowlevel_setup(3, fuse_argv,
- &cuse_info, &vhost_net_ops, 0, NULL);
- if (session == NULL)
- return -1;
-
- return 0;
-}
-
-/**
- * The CUSE session is launched allowing the application to receive open,
- * release and ioctl calls.
- */
-int
-rte_vhost_driver_session_start(void)
-{
- fuse_session_loop(session);
-
- return 0;
-}
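To make the IOCTL boilerplate above concrete, here is a manual expansion of
one case — VHOST_IOCTL_R(uint64_t, features, ops->set_features) from
VHOST_SET_FEATURES — showing the two-pass CUSE retry protocol (this is just
the macro unfolded, not new behavior):

    /* Pass 1: no input buffer yet -- ask CUSE to retry the ioctl with
     * sizeof(uint64_t) bytes of input and no output. */
    if (!in_bufsz) {
        struct iovec iov_r = { arg, sizeof(uint64_t) };
        struct iovec iov_w = { arg, 0 };
        fuse_reply_ioctl_retry(req, &iov_r, 1, &iov_w, 0);
    } else {
        /* Pass 2: the feature bits arrived in in_buf; hand them to the
         * virtio-net layer and complete the ioctl. */
        features = *(const uint64_t *)in_buf;
        result = ops->set_features(ctx, &features);
        fuse_reply_ioctl(req, result, NULL, 0);
    }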
diff --git a/src/dpdk_lib18/librte_vhost/vhost-net-cdev.h b/src/dpdk_lib18/librte_vhost/vhost-net-cdev.h
deleted file mode 100755
index 03a5c575..00000000
--- a/src/dpdk_lib18/librte_vhost/vhost-net-cdev.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _VHOST_NET_CDEV_H_
-#define _VHOST_NET_CDEV_H_
-#include <stdint.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <linux/vhost.h>
-
-#include <rte_log.h>
-
-/* Macros for printing using RTE_LOG */
-#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
-#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER1
-
-#ifdef RTE_LIBRTE_VHOST_DEBUG
-#define VHOST_MAX_PRINT_BUFF 6072
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args)
-#define PRINT_PACKET(device, addr, size, header) do { \
- char *pkt_addr = (char *)(addr); \
- unsigned int index; \
- char packet[VHOST_MAX_PRINT_BUFF]; \
- \
- if ((header)) \
- snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \
- else \
- snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \
- for (index = 0; index < (size); index++) { \
- snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
- "%02hhx ", pkt_addr[index]); \
- } \
- snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
- \
- LOG_DEBUG(VHOST_DATA, "%s", packet); \
-} while (0)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
-#define PRINT_PACKET(device, addr, size, header) do {} while (0)
-#endif
-
-
-/*
- * Structure used to identify device context.
- */
-struct vhost_device_ctx {
- pid_t pid; /* PID of process calling the IOCTL. */
- uint64_t fh; /* Populated with fi->fh to track the device index. */
-};
-
-/*
- * Structure contains function pointers to be defined in virtio-net.c. These
- * functions are called in CUSE context and are used to configure devices.
- */
-struct vhost_net_device_ops {
- int (*new_device)(struct vhost_device_ctx);
- void (*destroy_device)(struct vhost_device_ctx);
-
- int (*get_features)(struct vhost_device_ctx, uint64_t *);
- int (*set_features)(struct vhost_device_ctx, uint64_t *);
-
- int (*set_mem_table)(struct vhost_device_ctx, const void *, uint32_t);
-
- int (*set_vring_num)(struct vhost_device_ctx, struct vhost_vring_state *);
- int (*set_vring_addr)(struct vhost_device_ctx, struct vhost_vring_addr *);
- int (*set_vring_base)(struct vhost_device_ctx, struct vhost_vring_state *);
- int (*get_vring_base)(struct vhost_device_ctx, uint32_t, struct vhost_vring_state *);
-
- int (*set_vring_kick)(struct vhost_device_ctx, struct vhost_vring_file *);
- int (*set_vring_call)(struct vhost_device_ctx, struct vhost_vring_file *);
-
- int (*set_backend)(struct vhost_device_ctx, struct vhost_vring_file *);
-
- int (*set_owner)(struct vhost_device_ctx);
- int (*reset_owner)(struct vhost_device_ctx);
-};
-
-
-struct vhost_net_device_ops const *get_virtio_net_callbacks(void);
-#endif /* _VHOST_NET_CDEV_H_ */
diff --git a/src/dpdk_lib18/librte_vhost/vhost_rxtx.c b/src/dpdk_lib18/librte_vhost/vhost_rxtx.c
deleted file mode 100755
index ccfd82f4..00000000
--- a/src/dpdk_lib18/librte_vhost/vhost_rxtx.c
+++ /dev/null
@@ -1,730 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdint.h>
-#include <linux/virtio_net.h>
-
-#include <rte_mbuf.h>
-#include <rte_memcpy.h>
-#include <rte_virtio_net.h>
-
-#include "vhost-net-cdev.h"
-
-#define MAX_PKT_BURST 32
-
-/**
- * This function adds buffers to the virtio device's RX virtqueue. Buffers can
- * be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that were successfully
- * added to the RX queue. This function works when mergeable is disabled.
- */
-static inline uint32_t __attribute__((always_inline))
-virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
- struct rte_mbuf **pkts, uint32_t count)
-{
- struct vhost_virtqueue *vq;
- struct vring_desc *desc;
- struct rte_mbuf *buff;
- /* The virtio_hdr is initialised to 0. */
- struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
- uint64_t buff_addr = 0;
- uint64_t buff_hdr_addr = 0;
- uint32_t head[MAX_PKT_BURST], packet_len = 0;
- uint32_t head_idx, packet_success = 0;
- uint16_t avail_idx, res_cur_idx;
- uint16_t res_base_idx, res_end_idx;
- uint16_t free_entries;
- uint8_t success = 0;
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
- if (unlikely(queue_id != VIRTIO_RXQ)) {
- LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
- return 0;
- }
-
- vq = dev->virtqueue[VIRTIO_RXQ];
- count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
-
- /*
- * As many data cores may want access to available buffers,
- * they need to be reserved.
- */
- do {
- res_base_idx = vq->last_used_idx_res;
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
-
- free_entries = (avail_idx - res_base_idx);
- /*check that we have enough buffers*/
- if (unlikely(count > free_entries))
- count = free_entries;
-
- if (count == 0)
- return 0;
-
- res_end_idx = res_base_idx + count;
- /* vq->last_used_idx_res is atomically updated. */
- /* TODO: Allow to disable cmpset if no concurrency in application. */
- success = rte_atomic16_cmpset(&vq->last_used_idx_res,
- res_base_idx, res_end_idx);
- } while (unlikely(success == 0));
- res_cur_idx = res_base_idx;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
- dev->device_fh, res_cur_idx, res_end_idx);
-
- /* Prefetch available ring to retrieve indexes. */
- rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
-
- /* Retrieve all of the head indexes first to avoid caching issues. */
- for (head_idx = 0; head_idx < count; head_idx++)
- head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) &
- (vq->size - 1)];
-
- /*Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
-
- while (res_cur_idx != res_end_idx) {
- /* Get descriptor from available ring */
- desc = &vq->desc[head[packet_success]];
-
- buff = pkts[packet_success];
-
- /* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
- buff_addr = gpa_to_vva(dev, desc->addr);
- /* Prefetch buffer address. */
- rte_prefetch0((void *)(uintptr_t)buff_addr);
-
- /* Copy virtio_hdr to packet and increment buffer address */
- buff_hdr_addr = buff_addr;
- packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;
-
- /*
- * If the descriptors are chained the header and data are
- * placed in separate buffers.
- */
- if (desc->flags & VRING_DESC_F_NEXT) {
- desc->len = vq->vhost_hlen;
- desc = &vq->desc[desc->next];
- /* Buffer address translation. */
- buff_addr = gpa_to_vva(dev, desc->addr);
- desc->len = rte_pktmbuf_data_len(buff);
- } else {
- buff_addr += vq->vhost_hlen;
- desc->len = packet_len;
- }
-
- /* Update used ring with desc information */
- vq->used->ring[res_cur_idx & (vq->size - 1)].id =
- head[packet_success];
- vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
-
- /* Copy mbuf data to buffer */
- /* FIXME for sg mbuf and the case that desc couldn't hold the mbuf data */
- rte_memcpy((void *)(uintptr_t)buff_addr,
- rte_pktmbuf_mtod(buff, const void *),
- rte_pktmbuf_data_len(buff));
- PRINT_PACKET(dev, (uintptr_t)buff_addr,
- rte_pktmbuf_data_len(buff), 0);
-
- res_cur_idx++;
- packet_success++;
-
- rte_memcpy((void *)(uintptr_t)buff_hdr_addr,
- (const void *)&virtio_hdr, vq->vhost_hlen);
-
- PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1);
-
- if (res_cur_idx < res_end_idx) {
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
- }
- }
-
- rte_compiler_barrier();
-
- /* Wait until it's our turn to add our buffer to the used ring. */
- while (unlikely(vq->last_used_idx != res_base_idx))
- rte_pause();
-
- *(volatile uint16_t *)&vq->used->idx += count;
- vq->last_used_idx = res_end_idx;
-
- /* Kick the guest if necessary. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write((int)vq->kickfd, 1);
- return count;
-}
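The do/while near the top of virtio_dev_rx() above is what lets several data cores enqueue into the same virtqueue concurrently: each core claims a contiguous range of ring slots with a 16-bit compare-and-set before it touches any descriptor. A minimal sketch of just that reservation step, reusing the field names from the deleted code (the helper itself is illustrative, not part of the library):

	/* Claim up to `count` slots; returns how many were reserved and
	 * stores the starting index in *base (0 means no buffers were
	 * available). */
	static uint16_t
	reserve_slots(struct vhost_virtqueue *vq, uint16_t count, uint16_t *base)
	{
		uint16_t avail, free_entries;

		do {
			*base = vq->last_used_idx_res;
			avail = *((volatile uint16_t *)&vq->avail->idx);
			free_entries = avail - *base;	/* uint16_t wrap is intended */
			if (count > free_entries)
				count = free_entries;
			if (count == 0)
				return 0;
			/* Retry if another core advanced last_used_idx_res. */
		} while (rte_atomic16_cmpset(&vq->last_used_idx_res,
					     *base, (uint16_t)(*base + count)) == 0);
		return count;
	}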
-
-static inline uint32_t __attribute__((always_inline))
-copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
- uint16_t res_end_idx, struct rte_mbuf *pkt)
-{
- uint32_t vec_idx = 0;
- uint32_t entry_success = 0;
- struct vhost_virtqueue *vq;
- /* The virtio_hdr is initialised to 0. */
- struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {
- {0, 0, 0, 0, 0, 0}, 0};
- uint16_t cur_idx = res_base_idx;
- uint64_t vb_addr = 0;
- uint64_t vb_hdr_addr = 0;
- uint32_t seg_offset = 0;
- uint32_t vb_offset = 0;
- uint32_t seg_avail;
- uint32_t vb_avail;
- uint32_t cpy_len, entry_len;
-
- if (pkt == NULL)
- return 0;
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| "
- "End Index %d\n",
- dev->device_fh, cur_idx, res_end_idx);
-
- /*
- * Convert from gpa to vva
- * (guest physical addr -> vhost virtual addr)
- */
- vq = dev->virtqueue[VIRTIO_RXQ];
- vb_addr =
- gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
- vb_hdr_addr = vb_addr;
-
- /* Prefetch buffer address. */
- rte_prefetch0((void *)(uintptr_t)vb_addr);
-
- virtio_hdr.num_buffers = res_end_idx - res_base_idx;
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") RX: Num merge buffers %d\n",
- dev->device_fh, virtio_hdr.num_buffers);
-
- rte_memcpy((void *)(uintptr_t)vb_hdr_addr,
- (const void *)&virtio_hdr, vq->vhost_hlen);
-
- PRINT_PACKET(dev, (uintptr_t)vb_hdr_addr, vq->vhost_hlen, 1);
-
- seg_avail = rte_pktmbuf_data_len(pkt);
- vb_offset = vq->vhost_hlen;
- vb_avail =
- vq->buf_vec[vec_idx].buf_len - vq->vhost_hlen;
-
- entry_len = vq->vhost_hlen;
-
- if (vb_avail == 0) {
- uint32_t desc_idx =
- vq->buf_vec[vec_idx].desc_idx;
- vq->desc[desc_idx].len = vq->vhost_hlen;
-
- if ((vq->desc[desc_idx].flags
- & VRING_DESC_F_NEXT) == 0) {
- /* Update used ring with desc information */
- vq->used->ring[cur_idx & (vq->size - 1)].id
- = vq->buf_vec[vec_idx].desc_idx;
- vq->used->ring[cur_idx & (vq->size - 1)].len
- = entry_len;
-
- entry_len = 0;
- cur_idx++;
- entry_success++;
- }
-
- vec_idx++;
- vb_addr =
- gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
-
- /* Prefetch buffer address. */
- rte_prefetch0((void *)(uintptr_t)vb_addr);
- vb_offset = 0;
- vb_avail = vq->buf_vec[vec_idx].buf_len;
- }
-
- cpy_len = RTE_MIN(vb_avail, seg_avail);
-
- while (cpy_len > 0) {
- /* Copy mbuf data to vring buffer */
- rte_memcpy((void *)(uintptr_t)(vb_addr + vb_offset),
- (const void *)(rte_pktmbuf_mtod(pkt, char*) + seg_offset),
- cpy_len);
-
- PRINT_PACKET(dev,
- (uintptr_t)(vb_addr + vb_offset),
- cpy_len, 0);
-
- seg_offset += cpy_len;
- vb_offset += cpy_len;
- seg_avail -= cpy_len;
- vb_avail -= cpy_len;
- entry_len += cpy_len;
-
- if (seg_avail != 0) {
- /*
-			 * The virtio buffer in this vring
-			 * entry has reached its end, but the
-			 * mbuf segment is not complete yet.
- */
- if ((vq->desc[vq->buf_vec[vec_idx].desc_idx].flags &
- VRING_DESC_F_NEXT) == 0) {
- /* Update used ring with desc information */
- vq->used->ring[cur_idx & (vq->size - 1)].id
- = vq->buf_vec[vec_idx].desc_idx;
- vq->used->ring[cur_idx & (vq->size - 1)].len
- = entry_len;
- entry_len = 0;
- cur_idx++;
- entry_success++;
- }
-
- vec_idx++;
- vb_addr = gpa_to_vva(dev,
- vq->buf_vec[vec_idx].buf_addr);
- vb_offset = 0;
- vb_avail = vq->buf_vec[vec_idx].buf_len;
- cpy_len = RTE_MIN(vb_avail, seg_avail);
- } else {
- /*
-			 * The current segment is complete; continue to
-			 * check whether the whole packet is complete.
- */
- pkt = pkt->next;
- if (pkt != NULL) {
- /*
- * There are more segments.
- */
- if (vb_avail == 0) {
- /*
-					 * The current vring buffer is
-					 * used up; fetch the next buffer
-					 * from buf_vec.
- */
- uint32_t desc_idx =
- vq->buf_vec[vec_idx].desc_idx;
- vq->desc[desc_idx].len = vb_offset;
-
- if ((vq->desc[desc_idx].flags &
- VRING_DESC_F_NEXT) == 0) {
- uint16_t wrapped_idx =
- cur_idx & (vq->size - 1);
- /*
- * Update used ring with the
- * descriptor information
- */
- vq->used->ring[wrapped_idx].id
- = desc_idx;
- vq->used->ring[wrapped_idx].len
- = entry_len;
- entry_success++;
- entry_len = 0;
- cur_idx++;
- }
-
- /* Get next buffer from buf_vec. */
- vec_idx++;
- vb_addr = gpa_to_vva(dev,
- vq->buf_vec[vec_idx].buf_addr);
- vb_avail =
- vq->buf_vec[vec_idx].buf_len;
- vb_offset = 0;
- }
-
- seg_offset = 0;
- seg_avail = rte_pktmbuf_data_len(pkt);
- cpy_len = RTE_MIN(vb_avail, seg_avail);
- } else {
- /*
-				 * The whole packet is complete.
- */
- uint32_t desc_idx =
- vq->buf_vec[vec_idx].desc_idx;
- vq->desc[desc_idx].len = vb_offset;
-
- while (vq->desc[desc_idx].flags &
- VRING_DESC_F_NEXT) {
- desc_idx = vq->desc[desc_idx].next;
- vq->desc[desc_idx].len = 0;
- }
-
- /* Update used ring with desc information */
- vq->used->ring[cur_idx & (vq->size - 1)].id
- = vq->buf_vec[vec_idx].desc_idx;
- vq->used->ring[cur_idx & (vq->size - 1)].len
- = entry_len;
- entry_len = 0;
- cur_idx++;
- entry_success++;
- seg_avail = 0;
- cpy_len = RTE_MIN(vb_avail, seg_avail);
- }
- }
- }
-
- return entry_success;
-}
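copy_from_mbuf_to_vring() above is at heart a two-cursor copy: seg_offset/seg_avail walk the mbuf chain while vb_offset/vb_avail walk the guest buffers, each pass moves cpy_len = RTE_MIN(vb_avail, seg_avail) bytes, and whichever side runs dry is refilled. Stripped of the vring bookkeeping, the pattern reduces to this self-contained sketch (the chunk type and helper are illustrative):

	#include <stddef.h>
	#include <string.h>

	struct chunk { void *buf; size_t len; };

	/* Copy a chain of source chunks into a chain of destination chunks;
	 * returns the number of bytes copied. */
	static size_t
	copy_chunks(struct chunk *dst, size_t ndst,
		    const struct chunk *src, size_t nsrc)
	{
		size_t di = 0, si = 0, doff = 0, soff = 0, total = 0;

		while (di < ndst && si < nsrc) {
			size_t d = dst[di].len - doff;
			size_t s = src[si].len - soff;
			size_t cpy = d < s ? d : s;	/* cpy_len = RTE_MIN(...) */

			memcpy((char *)dst[di].buf + doff,
			       (const char *)src[si].buf + soff, cpy);
			doff += cpy; soff += cpy; total += cpy;

			if (doff == dst[di].len) { di++; doff = 0; }	/* guest buffer full */
			if (soff == src[si].len) { si++; soff = 0; }	/* mbuf segment done */
		}
		return total;
	}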
-
-/*
- * This function works for mergeable RX.
- */
-static inline uint32_t __attribute__((always_inline))
-virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
- struct rte_mbuf **pkts, uint32_t count)
-{
- struct vhost_virtqueue *vq;
- uint32_t pkt_idx = 0, entry_success = 0;
- uint16_t avail_idx, res_cur_idx;
- uint16_t res_base_idx, res_end_idx;
- uint8_t success = 0;
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
- dev->device_fh);
- if (unlikely(queue_id != VIRTIO_RXQ)) {
- LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
- }
-
- vq = dev->virtqueue[VIRTIO_RXQ];
- count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
-
- if (count == 0)
- return 0;
-
- for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
- uint32_t secure_len = 0;
- uint16_t need_cnt;
- uint32_t vec_idx = 0;
- uint32_t pkt_len = pkts[pkt_idx]->pkt_len + vq->vhost_hlen;
- uint16_t i, id;
-
- do {
- /*
- * As many data cores may want access to available
- * buffers, they need to be reserved.
- */
- res_base_idx = vq->last_used_idx_res;
- res_cur_idx = res_base_idx;
-
- do {
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
- if (unlikely(res_cur_idx == avail_idx)) {
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") Failed "
- "to get enough desc from "
- "vring\n",
- dev->device_fh);
- return pkt_idx;
- } else {
- uint16_t wrapped_idx =
- (res_cur_idx) & (vq->size - 1);
- uint32_t idx =
- vq->avail->ring[wrapped_idx];
- uint8_t next_desc;
-
- do {
- next_desc = 0;
- secure_len += vq->desc[idx].len;
- if (vq->desc[idx].flags &
- VRING_DESC_F_NEXT) {
- idx = vq->desc[idx].next;
- next_desc = 1;
- }
- } while (next_desc);
-
- res_cur_idx++;
- }
- } while (pkt_len > secure_len);
-
- /* vq->last_used_idx_res is atomically updated. */
- success = rte_atomic16_cmpset(&vq->last_used_idx_res,
- res_base_idx,
- res_cur_idx);
- } while (success == 0);
-
- id = res_base_idx;
- need_cnt = res_cur_idx - res_base_idx;
-
- for (i = 0; i < need_cnt; i++, id++) {
- uint16_t wrapped_idx = id & (vq->size - 1);
- uint32_t idx = vq->avail->ring[wrapped_idx];
- uint8_t next_desc;
- do {
- next_desc = 0;
- vq->buf_vec[vec_idx].buf_addr =
- vq->desc[idx].addr;
- vq->buf_vec[vec_idx].buf_len =
- vq->desc[idx].len;
- vq->buf_vec[vec_idx].desc_idx = idx;
- vec_idx++;
-
- if (vq->desc[idx].flags & VRING_DESC_F_NEXT) {
- idx = vq->desc[idx].next;
- next_desc = 1;
- }
- } while (next_desc);
- }
-
- res_end_idx = res_cur_idx;
-
- entry_success = copy_from_mbuf_to_vring(dev, res_base_idx,
- res_end_idx, pkts[pkt_idx]);
-
- rte_compiler_barrier();
-
- /*
- * Wait until it's our turn to add our buffer
- * to the used ring.
- */
- while (unlikely(vq->last_used_idx != res_base_idx))
- rte_pause();
-
- *(volatile uint16_t *)&vq->used->idx += entry_success;
- vq->last_used_idx = res_end_idx;
-
- /* Kick the guest if necessary. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write((int)vq->kickfd, 1);
- }
-
- return count;
-}
-
-uint16_t
-rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
- struct rte_mbuf **pkts, uint16_t count)
-{
- if (unlikely(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)))
- return virtio_dev_merge_rx(dev, queue_id, pkts, count);
- else
- return virtio_dev_rx(dev, queue_id, pkts, count);
-}
-
-uint16_t
-rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
- struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
-{
- struct rte_mbuf *m, *prev;
- struct vhost_virtqueue *vq;
- struct vring_desc *desc;
- uint64_t vb_addr = 0;
- uint32_t head[MAX_PKT_BURST];
- uint32_t used_idx;
- uint32_t i;
- uint16_t free_entries, entry_success = 0;
- uint16_t avail_idx;
-
- if (unlikely(queue_id != VIRTIO_TXQ)) {
- LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
- return 0;
- }
-
- vq = dev->virtqueue[VIRTIO_TXQ];
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
-
- /* If there are no available buffers then return. */
- if (vq->last_used_idx == avail_idx)
- return 0;
-
- LOG_DEBUG(VHOST_DATA, "%s (%"PRIu64")\n", __func__,
- dev->device_fh);
-
- /* Prefetch available ring to retrieve head indexes. */
- rte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);
-
-	/* Get the number of free entries in the ring. */
- free_entries = (avail_idx - vq->last_used_idx);
-
- free_entries = RTE_MIN(free_entries, count);
- /* Limit to MAX_PKT_BURST. */
- free_entries = RTE_MIN(free_entries, MAX_PKT_BURST);
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n",
- dev->device_fh, free_entries);
- /* Retrieve all of the head indexes first to avoid caching issues. */
- for (i = 0; i < free_entries; i++)
- head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
-
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[entry_success]]);
- rte_prefetch0(&vq->used->ring[vq->last_used_idx & (vq->size - 1)]);
-
- while (entry_success < free_entries) {
- uint32_t vb_avail, vb_offset;
- uint32_t seg_avail, seg_offset;
- uint32_t cpy_len;
- uint32_t seg_num = 0;
- struct rte_mbuf *cur;
- uint8_t alloc_err = 0;
-
- desc = &vq->desc[head[entry_success]];
-
- /* Discard first buffer as it is the virtio header */
- desc = &vq->desc[desc->next];
-
- /* Buffer address translation. */
- vb_addr = gpa_to_vva(dev, desc->addr);
- /* Prefetch buffer address. */
- rte_prefetch0((void *)(uintptr_t)vb_addr);
-
- used_idx = vq->last_used_idx & (vq->size - 1);
-
- if (entry_success < (free_entries - 1)) {
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[entry_success+1]]);
- rte_prefetch0(&vq->used->ring[(used_idx + 1) & (vq->size - 1)]);
- }
-
- /* Update used index buffer information. */
- vq->used->ring[used_idx].id = head[entry_success];
- vq->used->ring[used_idx].len = 0;
-
- vb_offset = 0;
- vb_avail = desc->len;
- /* Allocate an mbuf and populate the structure. */
- m = rte_pktmbuf_alloc(mbuf_pool);
- if (unlikely(m == NULL)) {
- RTE_LOG(ERR, VHOST_DATA,
- "Failed to allocate memory for mbuf.\n");
- return entry_success;
- }
- seg_offset = 0;
- seg_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
- cpy_len = RTE_MIN(vb_avail, seg_avail);
-
- PRINT_PACKET(dev, (uintptr_t)vb_addr, desc->len, 0);
-
- seg_num++;
- cur = m;
- prev = m;
- while (cpy_len != 0) {
- rte_memcpy((void *)(rte_pktmbuf_mtod(cur, char *) + seg_offset),
- (void *)((uintptr_t)(vb_addr + vb_offset)),
- cpy_len);
-
- seg_offset += cpy_len;
- vb_offset += cpy_len;
- vb_avail -= cpy_len;
- seg_avail -= cpy_len;
-
- if (vb_avail != 0) {
- /*
-				 * The mbuf segment is full, while the
-				 * virtio buffer in the TX vring still
-				 * has more data to be copied.
- */
- cur->data_len = seg_offset;
- m->pkt_len += seg_offset;
- /* Allocate mbuf and populate the structure. */
- cur = rte_pktmbuf_alloc(mbuf_pool);
- if (unlikely(cur == NULL)) {
- RTE_LOG(ERR, VHOST_DATA, "Failed to "
- "allocate memory for mbuf.\n");
- rte_pktmbuf_free(m);
- alloc_err = 1;
- break;
- }
-
- seg_num++;
- prev->next = cur;
- prev = cur;
- seg_offset = 0;
- seg_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
- } else {
- if (desc->flags & VRING_DESC_F_NEXT) {
- /*
-					 * There are more virtio buffers in
-					 * the same vring entry to be copied.
- */
- if (seg_avail == 0) {
- /*
-						 * The current segment has no
-						 * room to accommodate more
-						 * data.
- */
- cur->data_len = seg_offset;
- m->pkt_len += seg_offset;
- /*
- * Allocate an mbuf and
- * populate the structure.
- */
- cur = rte_pktmbuf_alloc(mbuf_pool);
- if (unlikely(cur == NULL)) {
- RTE_LOG(ERR,
- VHOST_DATA,
- "Failed to "
- "allocate memory "
- "for mbuf\n");
- rte_pktmbuf_free(m);
- alloc_err = 1;
- break;
- }
- seg_num++;
- prev->next = cur;
- prev = cur;
- seg_offset = 0;
- seg_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
- }
-
- desc = &vq->desc[desc->next];
-
- /* Buffer address translation. */
- vb_addr = gpa_to_vva(dev, desc->addr);
- /* Prefetch buffer address. */
- rte_prefetch0((void *)(uintptr_t)vb_addr);
- vb_offset = 0;
- vb_avail = desc->len;
-
- PRINT_PACKET(dev, (uintptr_t)vb_addr,
- desc->len, 0);
- } else {
- /* The whole packet completes. */
- cur->data_len = seg_offset;
- m->pkt_len += seg_offset;
- vb_avail = 0;
- }
- }
-
- cpy_len = RTE_MIN(vb_avail, seg_avail);
- }
-
- if (unlikely(alloc_err == 1))
- break;
-
- m->nb_segs = seg_num;
-
- pkts[entry_success] = m;
- vq->last_used_idx++;
- entry_success++;
- }
-
- rte_compiler_barrier();
- vq->used->idx += entry_success;
- /* Kick guest if required. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write((int)vq->kickfd, 1);
- return entry_success;
-}
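For context, these two burst calls are the whole data-path API of this library; an application typically pairs them with a NIC queue. A hypothetical forwarding step (port_id, queue 0, dev and mbuf_pool are assumed to be set up elsewhere, and BURST mirrors the library's internal MAX_PKT_BURST of 32):

	#include <rte_ethdev.h>
	#include <rte_mbuf.h>
	#include <rte_virtio_net.h>

	#define BURST 32

	static void
	forward_once(struct virtio_net *dev, struct rte_mempool *mbuf_pool,
		     uint8_t port_id)
	{
		struct rte_mbuf *pkts[BURST];
		uint16_t i, nb;

		/* NIC -> guest: enqueue copies the data into guest memory,
		 * so the mbufs remain ours to free. */
		nb = rte_eth_rx_burst(port_id, 0, pkts, BURST);
		if (nb) {
			rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts, nb);
			for (i = 0; i < nb; i++)
				rte_pktmbuf_free(pkts[i]);
		}

		/* Guest -> NIC: dequeue allocates fresh mbufs from mbuf_pool. */
		nb = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool,
					     pkts, BURST);
		if (nb) {
			uint16_t sent = rte_eth_tx_burst(port_id, 0, pkts, nb);
			while (sent < nb)	/* free whatever the NIC refused */
				rte_pktmbuf_free(pkts[sent++]);
		}
	}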
diff --git a/src/dpdk_lib18/librte_vhost/virtio-net.c b/src/dpdk_lib18/librte_vhost/virtio-net.c
deleted file mode 100755
index b041849d..00000000
--- a/src/dpdk_lib18/librte_vhost/virtio-net.c
+++ /dev/null
@@ -1,1163 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <dirent.h>
-#include <fuse/cuse_lowlevel.h>
-#include <linux/vhost.h>
-#include <linux/virtio_net.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <sys/eventfd.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <unistd.h>
-
-#include <sys/socket.h>
-#include <linux/if_tun.h>
-#include <linux/if.h>
-
-#include <rte_ethdev.h>
-#include <rte_log.h>
-#include <rte_string_fns.h>
-#include <rte_memory.h>
-#include <rte_virtio_net.h>
-
-#include "vhost-net-cdev.h"
-#include "eventfd_link/eventfd_link.h"
-
-/*
- * Device linked list structure for configuration.
- */
-struct virtio_net_config_ll {
- struct virtio_net dev; /* Virtio device.*/
- struct virtio_net_config_ll *next; /* Next dev on linked list.*/
-};
-
-const char eventfd_cdev[] = "/dev/eventfd-link";
-
-/* device ops to add/remove device to/from data core. */
-static struct virtio_net_device_ops const *notify_ops;
-/* root address of the linked list of managed virtio devices */
-static struct virtio_net_config_ll *ll_root;
-
-/* Features supported by this lib. */
-#define VHOST_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
- (1ULL << VIRTIO_NET_F_CTRL_RX))
-static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
-
-/* Line size for reading maps file. */
-static const uint32_t BUFSIZE = PATH_MAX;
-
-/* Size of prot char array in procmap. */
-#define PROT_SZ 5
-
-/* Number of elements in procmap struct. */
-#define PROCMAP_SZ 8
-
-/* Structure containing information gathered from maps file. */
-struct procmap {
- uint64_t va_start; /* Start virtual address in file. */
- uint64_t len; /* Size of file. */
- uint64_t pgoff; /* Not used. */
- uint32_t maj; /* Not used. */
- uint32_t min; /* Not used. */
- uint32_t ino; /* Not used. */
- char prot[PROT_SZ]; /* Not used. */
- char fname[PATH_MAX]; /* File name. */
-};
-
-/*
- * Converts QEMU virtual address to Vhost virtual address. This function is
- * used to convert the ring addresses to our address space.
- */
-static uint64_t
-qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
-{
- struct virtio_memory_regions *region;
- uint64_t vhost_va = 0;
- uint32_t regionidx = 0;
-
- /* Find the region where the address lives. */
- for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
- region = &dev->mem->regions[regionidx];
- if ((qemu_va >= region->userspace_address) &&
- (qemu_va <= region->userspace_address +
- region->memory_size)) {
- vhost_va = dev->mem->mapped_address + qemu_va -
- dev->mem->base_address;
- break;
- }
- }
- return vhost_va;
-}
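A worked example of the translation above, with a single region and illustrative addresses (all numbers are made up):

	/*
	 * region->userspace_address = 0x7f0000000000   (QEMU's view)
	 * mem->base_address         = 0x7f0000000000
	 * mem->mapped_address       = 0x400000000000   (our mmap of the file)
	 *
	 * qva_to_vva(dev, 0x7f0000002000)
	 *   = mapped_address + (qemu_va - base_address)
	 *   = 0x400000000000 + 0x2000
	 *   = 0x400000002000
	 */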
-
-/*
- * Locate the file containing QEMU's memory space and
- * map it to our address space.
- */
-static int
-host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
- pid_t pid, uint64_t addr)
-{
- struct dirent *dptr = NULL;
- struct procmap procmap;
- DIR *dp = NULL;
- int fd;
- int i;
- char memfile[PATH_MAX];
- char mapfile[PATH_MAX];
- char procdir[PATH_MAX];
- char resolved_path[PATH_MAX];
- char *path = NULL;
- FILE *fmap;
- void *map;
- uint8_t found = 0;
- char line[BUFSIZE];
-	char dlm[] = "-   :   ";
- char *str, *sp, *in[PROCMAP_SZ];
- char *end = NULL;
-
- /* Path where mem files are located. */
- snprintf(procdir, PATH_MAX, "/proc/%u/fd/", pid);
- /* Maps file used to locate mem file. */
- snprintf(mapfile, PATH_MAX, "/proc/%u/maps", pid);
-
- fmap = fopen(mapfile, "r");
- if (fmap == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to open maps file for pid %d\n",
- dev->device_fh, pid);
- return -1;
- }
-
-	/* Read through the maps file until we find our base_address. */
- while (fgets(line, BUFSIZE, fmap) != 0) {
- str = line;
- errno = 0;
- /* Split line into fields. */
- for (i = 0; i < PROCMAP_SZ; i++) {
- in[i] = strtok_r(str, &dlm[i], &sp);
- if ((in[i] == NULL) || (errno != 0)) {
- fclose(fmap);
- return -1;
- }
- str = NULL;
- }
-
- /* Convert/Copy each field as needed. */
- procmap.va_start = strtoull(in[0], &end, 16);
- if ((in[0] == '\0') || (end == NULL) || (*end != '\0') ||
- (errno != 0)) {
- fclose(fmap);
- return -1;
- }
-
- procmap.len = strtoull(in[1], &end, 16);
- if ((in[1] == '\0') || (end == NULL) || (*end != '\0') ||
- (errno != 0)) {
- fclose(fmap);
- return -1;
- }
-
- procmap.pgoff = strtoull(in[3], &end, 16);
- if ((in[3] == '\0') || (end == NULL) || (*end != '\0') ||
- (errno != 0)) {
- fclose(fmap);
- return -1;
- }
-
- procmap.maj = strtoul(in[4], &end, 16);
- if ((in[4] == '\0') || (end == NULL) || (*end != '\0') ||
- (errno != 0)) {
- fclose(fmap);
- return -1;
- }
-
- procmap.min = strtoul(in[5], &end, 16);
- if ((in[5] == '\0') || (end == NULL) || (*end != '\0') ||
- (errno != 0)) {
- fclose(fmap);
- return -1;
- }
-
- procmap.ino = strtoul(in[6], &end, 16);
- if ((in[6] == '\0') || (end == NULL) || (*end != '\0') ||
- (errno != 0)) {
- fclose(fmap);
- return -1;
- }
-
- memcpy(&procmap.prot, in[2], PROT_SZ);
- memcpy(&procmap.fname, in[7], PATH_MAX);
-
- if (procmap.va_start == addr) {
- procmap.len = procmap.len - procmap.va_start;
- found = 1;
- break;
- }
- }
- fclose(fmap);
-
- if (!found) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to find memory file in pid %d maps file\n",
- dev->device_fh, pid);
- return -1;
- }
-
- /* Find the guest memory file among the process fds. */
- dp = opendir(procdir);
- if (dp == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Cannot open pid %d process directory\n",
- dev->device_fh, pid);
- return -1;
- }
-
- found = 0;
-
- /* Read the fd directory contents. */
- while (NULL != (dptr = readdir(dp))) {
- snprintf(memfile, PATH_MAX, "/proc/%u/fd/%s",
- pid, dptr->d_name);
- path = realpath(memfile, resolved_path);
- if ((path == NULL) && (strlen(resolved_path) == 0)) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to resolve fd directory\n",
- dev->device_fh);
- closedir(dp);
- return -1;
- }
- if (strncmp(resolved_path, procmap.fname,
- strnlen(procmap.fname, PATH_MAX)) == 0) {
- found = 1;
- break;
- }
- }
-
- closedir(dp);
-
- if (found == 0) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to find memory file for pid %d\n",
- dev->device_fh, pid);
- return -1;
- }
- /* Open the shared memory file and map the memory into this process. */
- fd = open(memfile, O_RDWR);
-
- if (fd == -1) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to open %s for pid %d\n",
- dev->device_fh, memfile, pid);
- return -1;
- }
-
- map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE,
- MAP_POPULATE|MAP_SHARED, fd, 0);
- close(fd);
-
- if (map == MAP_FAILED) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Error mapping the file %s for pid %d\n",
- dev->device_fh, memfile, pid);
- return -1;
- }
-
- /* Store the memory address and size in the device data structure */
- mem->mapped_address = (uint64_t)(uintptr_t)map;
- mem->mapped_size = procmap.len;
-
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") Mem File: %s->%s - Size: %llu - VA: %p\n",
- dev->device_fh,
- memfile, resolved_path,
- (unsigned long long)mem->mapped_size, map);
-
- return 0;
-}
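For reference, the lines parsed above follow the standard /proc/<pid>/maps layout; the shrinking-delimiter trick (strtok_r over successive suffixes of dlm[]) splits them so the fields land as follows (the example line is illustrative):

	/*
	 *   7f1c00000000-7f1c40000000 rw-s 00000000 00:0f 12345 /dev/hugepages/qemu
	 *   in[0]=start  in[1]=end  in[2]=perms  in[3]=offset
	 *   in[4]=major  in[5]=minor  in[6]=inode  in[7]=pathname
	 *
	 * Note that procmap.len first receives the end address and is only
	 * converted to a length (end - start) once the matching line is found.
	 */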
-
-/*
- * Retrieves an entry from the devices configuration linked list.
- */
-static struct virtio_net_config_ll *
-get_config_ll_entry(struct vhost_device_ctx ctx)
-{
- struct virtio_net_config_ll *ll_dev = ll_root;
-
- /* Loop through linked list until the device_fh is found. */
- while (ll_dev != NULL) {
- if (ll_dev->dev.device_fh == ctx.fh)
- return ll_dev;
- ll_dev = ll_dev->next;
- }
-
- return NULL;
-}
-
-/*
- * Searches the configuration core linked list and
- * retrieves the device if it exists.
- */
-static struct virtio_net *
-get_device(struct vhost_device_ctx ctx)
-{
- struct virtio_net_config_ll *ll_dev;
-
- ll_dev = get_config_ll_entry(ctx);
-
- if (ll_dev)
- return &ll_dev->dev;
-
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Device not found in linked list.\n", ctx.fh);
- return NULL;
-}
-
-/*
- * Add entry containing a device to the device configuration linked list.
- */
-static void
-add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
-{
- struct virtio_net_config_ll *ll_dev = ll_root;
-
-	/* If ll_dev is NULL this is the first device, so we take the else branch. */
- if (ll_dev) {
- /* If the 1st device_fh != 0 then we insert our device here. */
- if (ll_dev->dev.device_fh != 0) {
- new_ll_dev->dev.device_fh = 0;
- new_ll_dev->next = ll_dev;
- ll_root = new_ll_dev;
- } else {
- /*
-			 * Walk the linked list until we find an unused
-			 * device_fh and insert the device at that entry.
- */
- while ((ll_dev->next != NULL) &&
- (ll_dev->dev.device_fh ==
- (ll_dev->next->dev.device_fh - 1)))
- ll_dev = ll_dev->next;
-
- new_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;
- new_ll_dev->next = ll_dev->next;
- ll_dev->next = new_ll_dev;
- }
- } else {
- ll_root = new_ll_dev;
- ll_root->dev.device_fh = 0;
- }
-
-}
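A worked example of the device_fh assignment above (illustrative values):

	/*
	 * With devices {0, 1, 3} already on the list, the walk stops at
	 * fh 1 (because 1 != 3 - 1), so the new device gets fh 2 and is
	 * linked between entries 1 and 3. With an empty list the first
	 * device simply becomes the root with fh 0.
	 */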
-
-/*
- * Unmap any memory, close any file descriptors and
- * free any memory owned by a device.
- */
-static void
-cleanup_device(struct virtio_net *dev)
-{
- /* Unmap QEMU memory file if mapped. */
- if (dev->mem) {
- munmap((void *)(uintptr_t)dev->mem->mapped_address,
- (size_t)dev->mem->mapped_size);
- free(dev->mem);
- }
-
- /* Close any event notifiers opened by device. */
- if (dev->virtqueue[VIRTIO_RXQ]->callfd)
- close((int)dev->virtqueue[VIRTIO_RXQ]->callfd);
- if (dev->virtqueue[VIRTIO_RXQ]->kickfd)
- close((int)dev->virtqueue[VIRTIO_RXQ]->kickfd);
- if (dev->virtqueue[VIRTIO_TXQ]->callfd)
- close((int)dev->virtqueue[VIRTIO_TXQ]->callfd);
- if (dev->virtqueue[VIRTIO_TXQ]->kickfd)
- close((int)dev->virtqueue[VIRTIO_TXQ]->kickfd);
-}
-
-/*
- * Release virtqueues and device memory.
- */
-static void
-free_device(struct virtio_net_config_ll *ll_dev)
-{
- /* Free any malloc'd memory */
- free(ll_dev->dev.virtqueue[VIRTIO_RXQ]);
- free(ll_dev->dev.virtqueue[VIRTIO_TXQ]);
- free(ll_dev);
-}
-
-/*
- * Remove an entry from the device configuration linked list.
- */
-static struct virtio_net_config_ll *
-rm_config_ll_entry(struct virtio_net_config_ll *ll_dev,
- struct virtio_net_config_ll *ll_dev_last)
-{
- /* First remove the device and then clean it up. */
- if (ll_dev == ll_root) {
- ll_root = ll_dev->next;
- cleanup_device(&ll_dev->dev);
- free_device(ll_dev);
- return ll_root;
- } else {
- if (likely(ll_dev_last != NULL)) {
- ll_dev_last->next = ll_dev->next;
- cleanup_device(&ll_dev->dev);
- free_device(ll_dev);
- return ll_dev_last->next;
- } else {
- cleanup_device(&ll_dev->dev);
- free_device(ll_dev);
- RTE_LOG(ERR, VHOST_CONFIG,
- "Remove entry from config_ll failed\n");
- return NULL;
- }
- }
-}
-
-/*
- * Initialise all variables in device structure.
- */
-static void
-init_device(struct virtio_net *dev)
-{
- uint64_t vq_offset;
-
- /*
-	 * Virtqueues have already been malloc'd, so
-	 * we don't want to set them to NULL.
- */
- vq_offset = offsetof(struct virtio_net, mem);
-
- /* Set everything to 0. */
- memset((void *)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
- (sizeof(struct virtio_net) - (size_t)vq_offset));
- memset(dev->virtqueue[VIRTIO_RXQ], 0, sizeof(struct vhost_virtqueue));
- memset(dev->virtqueue[VIRTIO_TXQ], 0, sizeof(struct vhost_virtqueue));
-
- /* Backends are set to -1 indicating an inactive device. */
- dev->virtqueue[VIRTIO_RXQ]->backend = VIRTIO_DEV_STOPPED;
- dev->virtqueue[VIRTIO_TXQ]->backend = VIRTIO_DEV_STOPPED;
-}
-
-/*
- * Function is called from the CUSE open function. The device structure is
- * initialised and a new entry is added to the device configuration linked
- * list.
- */
-static int
-new_device(struct vhost_device_ctx ctx)
-{
- struct virtio_net_config_ll *new_ll_dev;
- struct vhost_virtqueue *virtqueue_rx, *virtqueue_tx;
-
- /* Setup device and virtqueues. */
- new_ll_dev = malloc(sizeof(struct virtio_net_config_ll));
- if (new_ll_dev == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to allocate memory for dev.\n",
- ctx.fh);
- return -1;
- }
-
- virtqueue_rx = malloc(sizeof(struct vhost_virtqueue));
- if (virtqueue_rx == NULL) {
- free(new_ll_dev);
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to allocate memory for rxq.\n",
- ctx.fh);
- return -1;
- }
-
- virtqueue_tx = malloc(sizeof(struct vhost_virtqueue));
- if (virtqueue_tx == NULL) {
- free(virtqueue_rx);
- free(new_ll_dev);
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to allocate memory for txq.\n",
- ctx.fh);
- return -1;
- }
-
- new_ll_dev->dev.virtqueue[VIRTIO_RXQ] = virtqueue_rx;
- new_ll_dev->dev.virtqueue[VIRTIO_TXQ] = virtqueue_tx;
-
- /* Initialise device and virtqueues. */
- init_device(&new_ll_dev->dev);
-
- new_ll_dev->next = NULL;
-
- /* Add entry to device configuration linked list. */
- add_config_ll_entry(new_ll_dev);
-
- return new_ll_dev->dev.device_fh;
-}
-
-/*
- * Function is called from the CUSE release function. This function will
- * cleanup the device and remove it from device configuration linked list.
- */
-static void
-destroy_device(struct vhost_device_ctx ctx)
-{
- struct virtio_net_config_ll *ll_dev_cur_ctx, *ll_dev_last = NULL;
- struct virtio_net_config_ll *ll_dev_cur = ll_root;
-
- /* Find the linked list entry for the device to be removed. */
- ll_dev_cur_ctx = get_config_ll_entry(ctx);
- while (ll_dev_cur != NULL) {
- /*
-		 * Once the linked list entry for the device
-		 * is found, it is removed.
- */
- if (ll_dev_cur == ll_dev_cur_ctx) {
- /*
- * If the device is running on a data core then call
- * the function to remove it from the data core.
- */
- if ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))
- notify_ops->destroy_device(&(ll_dev_cur->dev));
- ll_dev_cur = rm_config_ll_entry(ll_dev_cur,
- ll_dev_last);
- } else {
- ll_dev_last = ll_dev_cur;
- ll_dev_cur = ll_dev_cur->next;
- }
- }
-}
-
-/*
- * Called from CUSE IOCTL: VHOST_SET_OWNER
- * This function currently just returns success, failing only
- * if the device has not been initialised.
- */
-static int
-set_owner(struct vhost_device_ctx ctx)
-{
- struct virtio_net *dev;
-
- dev = get_device(ctx);
- if (dev == NULL)
- return -1;
-
- return 0;
-}
-
-/*
- * Called from CUSE IOCTL: VHOST_RESET_OWNER
- */
-static int
-reset_owner(struct vhost_device_ctx ctx)
-{
- struct virtio_net_config_ll *ll_dev;
-
- ll_dev = get_config_ll_entry(ctx);
-
- cleanup_device(&ll_dev->dev);
- init_device(&ll_dev->dev);
-
- return 0;
-}
-
-/*
- * Called from CUSE IOCTL: VHOST_GET_FEATURES
- * The features that we support are requested.
- */
-static int
-get_features(struct vhost_device_ctx ctx, uint64_t *pu)
-{
- struct virtio_net *dev;
-
- dev = get_device(ctx);
- if (dev == NULL)
- return -1;
-
- /* Send our supported features. */
- *pu = VHOST_FEATURES;
- return 0;
-}
-
-/*
- * Called from CUSE IOCTL: VHOST_SET_FEATURES
- * We receive the negotiated features supported by us and the virtio device.
- */
-static int
-set_features(struct vhost_device_ctx ctx, uint64_t *pu)
-{
- struct virtio_net *dev;
-
- dev = get_device(ctx);
- if (dev == NULL)
- return -1;
- if (*pu & ~VHOST_FEATURES)
- return -1;
-
- /* Store the negotiated feature list for the device. */
- dev->features = *pu;
-
-	/* Set the vhost_hlen depending on whether VIRTIO_NET_F_MRG_RXBUF is set. */
- if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") Mergeable RX buffers enabled\n",
- dev->device_fh);
- dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
- sizeof(struct virtio_net_hdr_mrg_rxbuf);
- dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
- sizeof(struct virtio_net_hdr_mrg_rxbuf);
- } else {
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") Mergeable RX buffers disabled\n",
- dev->device_fh);
- dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
- sizeof(struct virtio_net_hdr);
- dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
- sizeof(struct virtio_net_hdr);
- }
- return 0;
-}
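The two header sizes chosen above differ by a single field:

	/*
	 * sizeof(struct virtio_net_hdr)           == 10 bytes
	 * sizeof(struct virtio_net_hdr_mrg_rxbuf) == 12 bytes
	 * (the mergeable variant appends a uint16_t num_buffers), so every
	 * packet on the ring is preceded by a 10- or 12-byte header.
	 */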
-
-
-/*
- * Called from CUSE IOCTL: VHOST_SET_MEM_TABLE
- * This function creates and populates the memory structure for the device.
- * This includes storing offsets used to translate buffer addresses.
- */
-static int
-set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
- uint32_t nregions)
-{
- struct virtio_net *dev;
- struct vhost_memory_region *mem_regions;
- struct virtio_memory *mem;
- uint64_t size = offsetof(struct vhost_memory, regions);
- uint32_t regionidx, valid_regions;
-
- dev = get_device(ctx);
- if (dev == NULL)
- return -1;
-
- if (dev->mem) {
- munmap((void *)(uintptr_t)dev->mem->mapped_address,
- (size_t)dev->mem->mapped_size);
- free(dev->mem);
- }
-
- /* Malloc the memory structure depending on the number of regions. */
- mem = calloc(1, sizeof(struct virtio_memory) +
- (sizeof(struct virtio_memory_regions) * nregions));
- if (mem == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to allocate memory for dev->mem.\n",
- dev->device_fh);
- return -1;
- }
-
- mem->nregions = nregions;
-
- mem_regions = (void *)(uintptr_t)
- ((uint64_t)(uintptr_t)mem_regions_addr + size);
-
- for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
- /* Populate the region structure for each region. */
- mem->regions[regionidx].guest_phys_address =
- mem_regions[regionidx].guest_phys_addr;
- mem->regions[regionidx].guest_phys_address_end =
- mem->regions[regionidx].guest_phys_address +
- mem_regions[regionidx].memory_size;
- mem->regions[regionidx].memory_size =
- mem_regions[regionidx].memory_size;
- mem->regions[regionidx].userspace_address =
- mem_regions[regionidx].userspace_addr;
-
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n", dev->device_fh,
- regionidx,
- (void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
- (void *)(uintptr_t)mem->regions[regionidx].userspace_address,
- mem->regions[regionidx].memory_size);
-
-		/* Set the base address mapping. */
- if (mem->regions[regionidx].guest_phys_address == 0x0) {
- mem->base_address =
- mem->regions[regionidx].userspace_address;
- /* Map VM memory file */
- if (host_memory_map(dev, mem, ctx.pid,
- mem->base_address) != 0) {
- free(mem);
- return -1;
- }
- }
- }
-
- /* Check that we have a valid base address. */
- if (mem->base_address == 0) {
- RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find base address of qemu memory file.\n", dev->device_fh);
- free(mem);
- return -1;
- }
-
- /*
- * Check if all of our regions have valid mappings.
- * Usually one does not exist in the QEMU memory file.
- */
- valid_regions = mem->nregions;
- for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
- if ((mem->regions[regionidx].userspace_address <
- mem->base_address) ||
- (mem->regions[regionidx].userspace_address >
- (mem->base_address + mem->mapped_size)))
- valid_regions--;
- }
-
- /*
- * If a region does not have a valid mapping,
- * we rebuild our memory struct to contain only valid entries.
- */
- if (valid_regions != mem->nregions) {
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Not all memory regions exist in the QEMU mem file. Re-populating mem structure\n",
- dev->device_fh);
-
- /*
- * Re-populate the memory structure with only valid regions.
- * Invalid regions are over-written with memmove.
- */
- valid_regions = 0;
-
- for (regionidx = mem->nregions; 0 != regionidx--;) {
- if ((mem->regions[regionidx].userspace_address <
- mem->base_address) ||
- (mem->regions[regionidx].userspace_address >
- (mem->base_address + mem->mapped_size))) {
- memmove(&mem->regions[regionidx],
- &mem->regions[regionidx + 1],
- sizeof(struct virtio_memory_regions) *
- valid_regions);
- } else {
- valid_regions++;
- }
- }
- }
- mem->nregions = valid_regions;
- dev->mem = mem;
-
- /*
- * Calculate the address offset for each region.
- * This offset is used to identify the vhost virtual address
- * corresponding to a QEMU guest physical address.
- */
- for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
- dev->mem->regions[regionidx].address_offset =
- dev->mem->regions[regionidx].userspace_address -
- dev->mem->base_address +
- dev->mem->mapped_address -
- dev->mem->regions[regionidx].guest_phys_address;
-
- }
- return 0;
-}
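The address_offset precomputed above is what keeps the data-path translation cheap: gpa_to_vva() (declared in rte_virtio_net.h) reduces to a region scan plus one addition. Roughly, as a sketch with the bounds test simplified:

	static inline uint64_t
	gpa_to_vva_sketch(struct virtio_net *dev, uint64_t guest_pa)
	{
		struct virtio_memory_regions *region;
		uint32_t i;

		for (i = 0; i < dev->mem->nregions; i++) {
			region = &dev->mem->regions[i];
			if (guest_pa >= region->guest_phys_address &&
			    guest_pa <  region->guest_phys_address_end)
				return guest_pa + region->address_offset;
		}
		return 0; /* address not covered by any region */
	}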
-
-/*
- * Called from CUSE IOCTL: VHOST_SET_VRING_NUM
- * The virtio device sends us the size of the descriptor ring.
- */
-static int
-set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
-{
- struct virtio_net *dev;
-
- dev = get_device(ctx);
- if (dev == NULL)
- return -1;
-
- /* State->index refers to the queue index. The txq is 1, rxq is 0. */
- dev->virtqueue[state->index]->size = state->num;
-
- return 0;
-}
-
-/*
- * Called from CUSE IOCTL: VHOST_SET_VRING_ADDR
- * The virtio device sends us the desc, used and avail ring addresses.
- * This function then converts these to our address space.
- */
-static int
-set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
-{
- struct virtio_net *dev;
- struct vhost_virtqueue *vq;
-
- dev = get_device(ctx);
- if (dev == NULL)
- return -1;
-
- /* addr->index refers to the queue index. The txq 1, rxq is 0. */
- vq = dev->virtqueue[addr->index];
-
- /* The addresses are converted from QEMU virtual to Vhost virtual. */
- vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
- addr->desc_user_addr);
- if (vq->desc == 0) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to find desc ring address.\n",
- dev->device_fh);
- return -1;
- }
-
- vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
- addr->avail_user_addr);
- if (vq->avail == 0) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to find avail ring address.\n",
- dev->device_fh);
- return -1;
- }
-
- vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
- addr->used_user_addr);
- if (vq->used == 0) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to find used ring address.\n",
- dev->device_fh);
- return -1;
- }
-
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n",
- dev->device_fh, vq->desc);
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n",
- dev->device_fh, vq->avail);
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n",
- dev->device_fh, vq->used);
-
- return 0;
-}
-
-/*
- * Called from CUSE IOCTL: VHOST_SET_VRING_BASE
- * The virtio device sends us the available ring last used index.
- */
-static int
-set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
-{
- struct virtio_net *dev;
-
- dev = get_device(ctx);
- if (dev == NULL)
- return -1;
-
- /* State->index refers to the queue index. The txq is 1, rxq is 0. */
- dev->virtqueue[state->index]->last_used_idx = state->num;
- dev->virtqueue[state->index]->last_used_idx_res = state->num;
-
- return 0;
-}
-
-/*
- * Called from CUSE IOCTL: VHOST_GET_VRING_BASE
- * We send the virtio device our available ring last used index.
- */
-static int
-get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
- struct vhost_vring_state *state)
-{
- struct virtio_net *dev;
-
- dev = get_device(ctx);
- if (dev == NULL)
- return -1;
-
- state->index = index;
- /* State->index refers to the queue index. The txq is 1, rxq is 0. */
- state->num = dev->virtqueue[state->index]->last_used_idx;
-
- return 0;
-}
-
-/*
- * This function uses the eventfd_link kernel module to copy an eventfd file
- * descriptor provided by QEMU in to our process space.
- */
-static int
-eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)
-{
- int eventfd_link, ret;
-
- /* Open the character device to the kernel module. */
- eventfd_link = open(eventfd_cdev, O_RDWR);
- if (eventfd_link < 0) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") eventfd_link module is not loaded\n",
- dev->device_fh);
- return -1;
- }
-
- /* Call the IOCTL to copy the eventfd. */
- ret = ioctl(eventfd_link, EVENTFD_COPY, eventfd_copy);
- close(eventfd_link);
-
- if (ret < 0) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") EVENTFD_COPY ioctl failed\n",
- dev->device_fh);
- return -1;
- }
-
- return 0;
-}
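As an aside: the eventfd_link module exists only because one process cannot normally adopt another process's file descriptor. On kernels far newer than this code (Linux 5.6+), the same hand-off could be done without a custom module; a hypothetical modern equivalent of the EVENTFD_COPY ioctl:

	#include <sys/syscall.h>
	#include <unistd.h>

	/* Grab a remote process's fd directly via pidfd_getfd(2). */
	static int
	grab_remote_fd(pid_t remote_pid, int remote_fd)
	{
		int pidfd = syscall(SYS_pidfd_open, remote_pid, 0);
		if (pidfd < 0)
			return -1;
		int local_fd = syscall(SYS_pidfd_getfd, pidfd, remote_fd, 0);
		close(pidfd);
		return local_fd; /* -1 on failure */
	}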
-
-/*
- * Called from CUSE IOCTL: VHOST_SET_VRING_CALL
- * The virtio device sends an eventfd to interrupt the guest. This fd gets
- * copied into our process space.
- */
-static int
-set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
-{
- struct virtio_net *dev;
- struct eventfd_copy eventfd_kick;
- struct vhost_virtqueue *vq;
-
- dev = get_device(ctx);
- if (dev == NULL)
- return -1;
-
- /* file->index refers to the queue index. The txq is 1, rxq is 0. */
- vq = dev->virtqueue[file->index];
-
- if (vq->kickfd)
- close((int)vq->kickfd);
-
- /* Populate the eventfd_copy structure and call eventfd_copy. */
- vq->kickfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
- eventfd_kick.source_fd = vq->kickfd;
- eventfd_kick.target_fd = file->fd;
- eventfd_kick.target_pid = ctx.pid;
-
- if (eventfd_copy(dev, &eventfd_kick))
- return -1;
-
- return 0;
-}
-
-/*
- * Called from CUSE IOCTL: VHOST_SET_VRING_KICK
- * The virtio device sends an eventfd that it can use to notify us.
- * This fd gets copied into our process space.
- */
-static int
-set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
-{
- struct virtio_net *dev;
- struct eventfd_copy eventfd_call;
- struct vhost_virtqueue *vq;
-
- dev = get_device(ctx);
- if (dev == NULL)
- return -1;
-
- /* file->index refers to the queue index. The txq is 1, rxq is 0. */
- vq = dev->virtqueue[file->index];
-
- if (vq->callfd)
- close((int)vq->callfd);
-
- /* Populate the eventfd_copy structure and call eventfd_copy. */
- vq->callfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
- eventfd_call.source_fd = vq->callfd;
- eventfd_call.target_fd = file->fd;
- eventfd_call.target_pid = ctx.pid;
-
- if (eventfd_copy(dev, &eventfd_call))
- return -1;
-
- return 0;
-}
-
-/*
- * Function to get the tap device name from the provided file descriptor and
- * save it in the device structure.
- */
-static int
-get_ifname(struct virtio_net *dev, int tap_fd, int pid)
-{
- struct eventfd_copy fd_tap;
- struct ifreq ifr;
- uint32_t size, ifr_size;
- int ret;
-
- fd_tap.source_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
- fd_tap.target_fd = tap_fd;
- fd_tap.target_pid = pid;
-
- if (eventfd_copy(dev, &fd_tap))
- return -1;
-
- ret = ioctl(fd_tap.source_fd, TUNGETIFF, &ifr);
-
- if (close(fd_tap.source_fd) < 0)
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") fd close failed\n",
- dev->device_fh);
-
- if (ret >= 0) {
- ifr_size = strnlen(ifr.ifr_name, sizeof(ifr.ifr_name));
- size = ifr_size > sizeof(dev->ifname) ?
- sizeof(dev->ifname) : ifr_size;
-
- strncpy(dev->ifname, ifr.ifr_name, size);
- } else
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") TUNGETIFF ioctl failed\n",
- dev->device_fh);
-
- return 0;
-}
-
-/*
- * Called from CUSE IOCTL: VHOST_NET_SET_BACKEND
- * To complete device initialisation when the virtio driver is loaded,
- * we are provided with a valid fd for a tap device (not used by us).
- * If this happens then we can add the device to a data core.
- * When the virtio driver is removed we get fd=-1.
- * At that point we remove the device from the data core.
- * The device will still exist in the device configuration linked list.
- */
-static int
-set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
-{
- struct virtio_net *dev;
-
- dev = get_device(ctx);
- if (dev == NULL)
- return -1;
-
- /* file->index refers to the queue index. The txq is 1, rxq is 0. */
- dev->virtqueue[file->index]->backend = file->fd;
-
- /*
- * If the device isn't already running and both backend fds are set,
- * we add the device.
- */
- if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
- if (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) &&
- ((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED)) {
- get_ifname(dev, file->fd, ctx.pid);
- return notify_ops->new_device(dev);
- }
- /* Otherwise we remove it. */
- } else
- if (file->fd == VIRTIO_DEV_STOPPED)
- notify_ops->destroy_device(dev);
- return 0;
-}
-
-/*
- * Function pointers are set for the device operations to allow CUSE to call
- * functions when an IOCTL, device_add or device_release is received.
- */
-static const struct vhost_net_device_ops vhost_device_ops = {
- .new_device = new_device,
- .destroy_device = destroy_device,
-
- .get_features = get_features,
- .set_features = set_features,
-
- .set_mem_table = set_mem_table,
-
- .set_vring_num = set_vring_num,
- .set_vring_addr = set_vring_addr,
- .set_vring_base = set_vring_base,
- .get_vring_base = get_vring_base,
-
- .set_vring_kick = set_vring_kick,
- .set_vring_call = set_vring_call,
-
- .set_backend = set_backend,
-
- .set_owner = set_owner,
- .reset_owner = reset_owner,
-};
-
-/*
- * Called by main to setup callbacks when registering CUSE device.
- */
-struct vhost_net_device_ops const *
-get_virtio_net_callbacks(void)
-{
- return &vhost_device_ops;
-}
-
-int rte_vhost_enable_guest_notification(struct virtio_net *dev,
- uint16_t queue_id, int enable)
-{
- if (enable) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "guest notification isn't supported.\n");
- return -1;
- }
-
- dev->virtqueue[queue_id]->used->flags =
- enable ? 0 : VRING_USED_F_NO_NOTIFY;
- return 0;
-}
-
-uint64_t rte_vhost_feature_get(void)
-{
- return VHOST_FEATURES;
-}
-
-int rte_vhost_feature_disable(uint64_t feature_mask)
-{
- VHOST_FEATURES = VHOST_FEATURES & ~feature_mask;
- return 0;
-}
-
-int rte_vhost_feature_enable(uint64_t feature_mask)
-{
- if ((feature_mask & VHOST_SUPPORTED_FEATURES) == feature_mask) {
- VHOST_FEATURES = VHOST_FEATURES | feature_mask;
- return 0;
- }
- return -1;
-}
-
-/*
- * Register ops so that we can add/remove device to data core.
- */
-int
-rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const ops)
-{
- notify_ops = ops;
-
- return 0;
-}
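A hypothetical registration at application start-up (the ops struct and both callbacks are illustrative; the signatures follow struct virtio_net_device_ops in rte_virtio_net.h):

	static int
	my_new_device(struct virtio_net *dev)
	{
		/* e.g. assign the device to a data core and mark it running */
		dev->flags |= VIRTIO_DEV_RUNNING;
		return 0;
	}

	static void
	my_destroy_device(volatile struct virtio_net *dev)
	{
		/* e.g. drain the device's queues and release the core */
		dev->flags &= ~VIRTIO_DEV_RUNNING;
	}

	static const struct virtio_net_device_ops my_ops = {
		.new_device     = my_new_device,
		.destroy_device = my_destroy_device,
	};

	/* somewhere in main(), before registering the CUSE device: */
	rte_vhost_driver_callback_register(&my_ops);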
diff --git a/src/main_dpdk.cpp b/src/main_dpdk.cpp
index 198f8d72..7327ec53 100755
--- a/src/main_dpdk.cpp
+++ b/src/main_dpdk.cpp
@@ -65,9 +65,9 @@ limitations under the License.
#include <../linux_dpdk/version.h>
extern "C" {
- #include <dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_type.h>
+#include <dpdk22/drivers/net/ixgbe/base/ixgbe_type.h>
}
-#include <dpdk_lib18/librte_pmd_e1000/e1000/e1000_regs.h>
+#include <dpdk22/drivers/net/e1000/base/e1000_regs.h>
#include <zmq.h>
#include <stdio.h>
#include <unistd.h>
@@ -107,8 +107,6 @@ typedef struct rte_mbuf * (*rte_mbuf_convert_to_one_seg_t)(struct rte_mbuf *m);
struct rte_mbuf * rte_mbuf_convert_to_one_seg(struct rte_mbuf *m);
extern "C" int vmxnet3_xmit_set_callback(rte_mbuf_convert_to_one_seg_t cb);
-
-
#define RTE_TEST_TX_DESC_DEFAULT 512
#define RTE_TEST_RX_DESC_DROP 0
@@ -307,7 +305,7 @@ public:
virtual int wait_for_stable_link();
private:
void add_rules(CPhyEthIF * _if,
- enum rte_eth_flow_type type,
+ uint16_t type,
uint8_t ttl);
};
@@ -1037,6 +1035,9 @@ struct port_cfg_t {
/* enable FDIR */
inline void update_global_config_fdir_10g_1g(void){
+#if 0
+	// FIXME: port the FDIR 10G/1G configuration to the DPDK 2.2
+	// flow director API
m_port_conf.fdir_conf.mode=RTE_FDIR_MODE_PERFECT;
m_port_conf.fdir_conf.pballoc=RTE_FDIR_PBALLOC_64K;
m_port_conf.fdir_conf.status=RTE_FDIR_NO_REPORT_STATUS;
@@ -1053,8 +1054,8 @@ struct port_cfg_t {
m_port_conf.fdir_conf.flexbytes_offset+=(4/2);
}
m_port_conf.fdir_conf.drop_queue=1;
+#endif
}
-
inline void update_global_config_fdir_40g(void){
m_port_conf.fdir_conf.mode=RTE_FDIR_MODE_PERFECT;
m_port_conf.fdir_conf.pballoc=RTE_FDIR_PBALLOC_64K;
@@ -3580,10 +3581,13 @@ int CGlobalTRex::ixgbe_prob_init(void){
CTRexExtendedDriverDb::Ins()->set_driver_name(dev_info.driver_name);
+#if 0
+	// FIXME: re-enable once vmxnet3_xmit_set_callback is ported to DPDK 2.2
/* register driver callback to convert multi-seg mbufs to a single seg */
if (strcmp(dev_info.driver_name,"rte_vmxnet3_pmd")==0 ) {
vmxnet3_xmit_set_callback(rte_mbuf_convert_to_one_seg);
}
+#endif
m_port_cfg.update_var();
@@ -4898,7 +4902,10 @@ void CTRexExtendedDriverBase10G::update_configuration(port_cfg_t * cfg){
}
int CTRexExtendedDriverBase10G::configure_rx_filter_rules(CPhyEthIF * _if){
+
+	// FIXME: 10G FDIR rx filter rules not yet ported to DPDK 2.2
/* 10Gb/sec 82599 */
+#if 0
uint8_t port_id=_if->get_rte_port_id();
uint16_t hops = get_rx_check_hops();
@@ -4906,7 +4913,7 @@ int CTRexExtendedDriverBase10G::configure_rx_filter_rules(CPhyEthIF * _if){
/* set the mask only for flex-data */
- rte_fdir_masks fdir_mask;
+ rte_eth_fdir_masks fdir_mask;
memset(&fdir_mask,0,sizeof(rte_fdir_masks));
fdir_mask.flexbytes=1;
//fdir_mask.dst_port_mask=0xffff; /* enable of
@@ -4977,6 +4984,7 @@ int CTRexExtendedDriverBase10G::configure_rx_filter_rules(CPhyEthIF * _if){
rte_exit(EXIT_FAILURE, " ERROR rte_eth_dev_fdir_add_perfect_filter : %d\n",res);
}
}
+#endif
return (0);
}
@@ -5053,7 +5061,7 @@ void CTRexExtendedDriverBase40G::update_configuration(port_cfg_t * cfg){
/* Add rule to send packets with protocol 'type', and ttl 'ttl' to rx queue 1 */
void CTRexExtendedDriverBase40G::add_rules(CPhyEthIF * _if,
- enum rte_eth_flow_type type,
+ uint16_t type,
uint8_t ttl){
uint8_t port_id = _if->get_port_id();
int ret=rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
@@ -5074,11 +5082,13 @@ void CTRexExtendedDriverBase40G::add_rules(CPhyEthIF * _if,
filter.soft_id=0;
filter.input.flow_type = type;
- filter.input.ttl=ttl;
+	/* FIXME: TTL matching not yet ported to the DPDK 2.2 filter API
+ filter.input.flow.ip4_flow.ttl=ttl;
- if (type == RTE_ETH_FLOW_TYPE_IPV4_OTHER) {
+ if (type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
filter.input.flow.ip4_flow.l4_proto = IPPROTO_ICMP; // In this case we want filter for icmp packets
}
+ */
/* We want to place latency packets in queue 1 */
ret=rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
@@ -5097,10 +5107,10 @@ int CTRexExtendedDriverBase40G::configure_rx_filter_rules(CPhyEthIF * _if){
int i;
for (i=0; i<2; i++) {
uint8_t ttl=0xff-i-hops;
- add_rules(_if,RTE_ETH_FLOW_TYPE_UDPV4,ttl);
- add_rules(_if,RTE_ETH_FLOW_TYPE_TCPV4,ttl);
- add_rules(_if,RTE_ETH_FLOW_TYPE_UDPV6,ttl);
- add_rules(_if,RTE_ETH_FLOW_TYPE_TCPV6,ttl);
+ add_rules(_if,RTE_ETH_FLOW_NONFRAG_IPV4_UDP, ttl);
+ add_rules(_if,RTE_ETH_FLOW_NONFRAG_IPV4_TCP, ttl);
+ add_rules(_if,RTE_ETH_FLOW_NONFRAG_IPV6_UDP, ttl);
+ add_rules(_if,RTE_ETH_FLOW_NONFRAG_IPV6_TCP, ttl);
}
return (0);
@@ -5110,8 +5120,8 @@ int CTRexExtendedDriverBase40G::configure_rx_filter_rules(CPhyEthIF * _if){
int CTRexExtendedDriverBase40G::configure_drop_queue(CPhyEthIF * _if){
/* Configure queue for latency packets */
- add_rules(_if,RTE_ETH_FLOW_TYPE_IPV4_OTHER,255);
- add_rules(_if,RTE_ETH_FLOW_TYPE_SCTPV4,255);
+ add_rules(_if,RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, 255);
+ add_rules(_if,RTE_ETH_FLOW_NONFRAG_IPV4_SCTP, 255);
return (0);
}
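The renames in the two hunks above reflect a DPDK API change: 2.2 dropped enum rte_eth_flow_type in favour of plain uint16_t RTE_ETH_FLOW_* ids. The mapping used throughout this patch:

	RTE_ETH_FLOW_TYPE_UDPV4      ->  RTE_ETH_FLOW_NONFRAG_IPV4_UDP
	RTE_ETH_FLOW_TYPE_TCPV4      ->  RTE_ETH_FLOW_NONFRAG_IPV4_TCP
	RTE_ETH_FLOW_TYPE_UDPV6      ->  RTE_ETH_FLOW_NONFRAG_IPV6_UDP
	RTE_ETH_FLOW_TYPE_TCPV6      ->  RTE_ETH_FLOW_NONFRAG_IPV6_TCP
	RTE_ETH_FLOW_TYPE_SCTPV4     ->  RTE_ETH_FLOW_NONFRAG_IPV4_SCTP
	RTE_ETH_FLOW_TYPE_IPV4_OTHER ->  RTE_ETH_FLOW_NONFRAG_IPV4_OTHER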
diff --git a/src/pal/linux_dpdk/dpdk22/rte_config.h b/src/pal/linux_dpdk/dpdk22/rte_config.h
new file mode 100644
index 00000000..01d9b7a1
--- /dev/null
+++ b/src/pal/linux_dpdk/dpdk22/rte_config.h
@@ -0,0 +1,322 @@
+#ifndef __RTE_CONFIG_H
+#define __RTE_CONFIG_H
+#undef RTE_EXEC_ENV
+#define RTE_EXEC_ENV "linuxapp"
+#undef RTE_EXEC_ENV_LINUXAPP
+#define RTE_EXEC_ENV_LINUXAPP 1
+#undef RTE_FORCE_INTRINSICS
+#undef RTE_ARCH_STRICT_ALIGN
+#undef RTE_BUILD_SHARED_LIB
+#undef RTE_BUILD_COMBINE_LIBS
+#undef RTE_NEXT_ABI
+#define RTE_NEXT_ABI 1
+#undef RTE_LIBRTE_EAL
+#define RTE_LIBRTE_EAL 1
+#undef RTE_MAX_LCORE
+#define RTE_MAX_LCORE 128
+#undef RTE_MAX_NUMA_NODES
+#define RTE_MAX_NUMA_NODES 8
+#undef RTE_MAX_MEMSEG
+#define RTE_MAX_MEMSEG 256
+#undef RTE_MAX_MEMZONE
+#define RTE_MAX_MEMZONE 2560
+#undef RTE_MAX_TAILQ
+#define RTE_MAX_TAILQ 32
+#undef RTE_LOG_LEVEL
+#define RTE_LOG_LEVEL 8
+#undef RTE_LOG_HISTORY
+#define RTE_LOG_HISTORY 256
+#undef RTE_LIBEAL_USE_HPET
+#undef RTE_EAL_ALLOW_INV_SOCKET_ID
+#undef RTE_EAL_ALWAYS_PANIC_ON_ERROR
+#undef RTE_EAL_IGB_UIO
+#define RTE_EAL_IGB_UIO 1
+#undef RTE_EAL_VFIO
+#define RTE_EAL_VFIO 1
+#undef RTE_MALLOC_DEBUG
+#undef RTE_EAL_PMD_PATH
+#define RTE_EAL_PMD_PATH ""
+#undef RTE_PCI_CONFIG
+#undef RTE_PCI_EXTENDED_TAG
+#define RTE_PCI_EXTENDED_TAG ""
+#undef RTE_PCI_MAX_READ_REQUEST_SIZE
+#define RTE_PCI_MAX_READ_REQUEST_SIZE 0
+#undef RTE_LIBRTE_EAL_LINUXAPP
+#define RTE_LIBRTE_EAL_LINUXAPP 1
+#undef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
+#define RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT 1
+#undef RTE_LIBRTE_KVARGS
+#define RTE_LIBRTE_KVARGS 1
+#undef RTE_LIBRTE_ETHER
+#define RTE_LIBRTE_ETHER 1
+#undef RTE_LIBRTE_ETHDEV_DEBUG
+#undef RTE_MAX_ETHPORTS
+#define RTE_MAX_ETHPORTS 32
+#undef RTE_MAX_QUEUES_PER_PORT
+#define RTE_MAX_QUEUES_PER_PORT 1024
+#undef RTE_LIBRTE_IEEE1588
+#undef RTE_ETHDEV_QUEUE_STAT_CNTRS
+#define RTE_ETHDEV_QUEUE_STAT_CNTRS 16
+#undef RTE_ETHDEV_RXTX_CALLBACKS
+#define RTE_ETHDEV_RXTX_CALLBACKS 1
+#undef RTE_NIC_BYPASS
+#undef RTE_LIBRTE_EM_PMD
+#define RTE_LIBRTE_EM_PMD 1
+#undef RTE_LIBRTE_IGB_PMD
+#define RTE_LIBRTE_IGB_PMD 1
+#undef RTE_LIBRTE_E1000_DEBUG_INIT
+#undef RTE_LIBRTE_E1000_DEBUG_RX
+#undef RTE_LIBRTE_E1000_DEBUG_TX
+#undef RTE_LIBRTE_E1000_DEBUG_TX_FREE
+#undef RTE_LIBRTE_E1000_DEBUG_DRIVER
+#undef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
+#undef RTE_LIBRTE_IXGBE_PMD
+#define RTE_LIBRTE_IXGBE_PMD 1
+#undef RTE_LIBRTE_IXGBE_DEBUG_INIT
+#undef RTE_LIBRTE_IXGBE_DEBUG_RX
+#undef RTE_LIBRTE_IXGBE_DEBUG_TX
+#undef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
+#undef RTE_LIBRTE_IXGBE_DEBUG_DRIVER
+#undef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
+#undef RTE_IXGBE_INC_VECTOR
+#define RTE_IXGBE_INC_VECTOR 1
+#undef RTE_IXGBE_RX_OLFLAGS_ENABLE
+#define RTE_IXGBE_RX_OLFLAGS_ENABLE 1
+#undef RTE_LIBRTE_I40E_PMD
+#define RTE_LIBRTE_I40E_PMD 1
+#undef RTE_LIBRTE_I40E_DEBUG_INIT
+#undef RTE_LIBRTE_I40E_DEBUG_RX
+#undef RTE_LIBRTE_I40E_DEBUG_TX
+#undef RTE_LIBRTE_I40E_DEBUG_TX_FREE
+#undef RTE_LIBRTE_I40E_DEBUG_DRIVER
+#undef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+#define RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC 1
+#undef RTE_LIBRTE_I40E_INC_VECTOR
+#undef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+#define RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE 1
+#undef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#undef RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF
+#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF 64
+#undef RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF
+#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4
+#undef RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM
+#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM 4
+#undef RTE_LIBRTE_I40E_ITR_INTERVAL
+#define RTE_LIBRTE_I40E_ITR_INTERVAL -1
+#undef RTE_LIBRTE_FM10K_PMD
+#define RTE_LIBRTE_FM10K_PMD 1
+#undef RTE_LIBRTE_FM10K_DEBUG_INIT
+#undef RTE_LIBRTE_FM10K_DEBUG_RX
+#undef RTE_LIBRTE_FM10K_DEBUG_TX
+#undef RTE_LIBRTE_FM10K_DEBUG_TX_FREE
+#undef RTE_LIBRTE_FM10K_DEBUG_DRIVER
+#undef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+#define RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE 1
+#undef RTE_LIBRTE_FM10K_INC_VECTOR
+#define RTE_LIBRTE_FM10K_INC_VECTOR 1
+#undef RTE_LIBRTE_MLX4_PMD
+#undef RTE_LIBRTE_MLX4_DEBUG
+#undef RTE_LIBRTE_MLX4_SGE_WR_N
+#define RTE_LIBRTE_MLX4_SGE_WR_N 4
+#undef RTE_LIBRTE_MLX4_MAX_INLINE
+#define RTE_LIBRTE_MLX4_MAX_INLINE 0
+#undef RTE_LIBRTE_MLX4_TX_MP_CACHE
+#define RTE_LIBRTE_MLX4_TX_MP_CACHE 8
+#undef RTE_LIBRTE_MLX4_SOFT_COUNTERS
+#define RTE_LIBRTE_MLX4_SOFT_COUNTERS 1
+#undef RTE_LIBRTE_MLX5_PMD
+#undef RTE_LIBRTE_MLX5_DEBUG
+#undef RTE_LIBRTE_MLX5_SGE_WR_N
+#define RTE_LIBRTE_MLX5_SGE_WR_N 4
+#undef RTE_LIBRTE_MLX5_MAX_INLINE
+#define RTE_LIBRTE_MLX5_MAX_INLINE 0
+#undef RTE_LIBRTE_MLX5_TX_MP_CACHE
+#define RTE_LIBRTE_MLX5_TX_MP_CACHE 8
+#undef RTE_LIBRTE_BNX2X_PMD
+#undef RTE_LIBRTE_BNX2X_DEBUG
+#undef RTE_LIBRTE_BNX2X_DEBUG_INIT
+#undef RTE_LIBRTE_BNX2X_DEBUG_RX
+#undef RTE_LIBRTE_BNX2X_DEBUG_TX
+#undef RTE_LIBRTE_BNX2X_MF_SUPPORT
+#undef RTE_LIBRTE_CXGBE_PMD
+#define RTE_LIBRTE_CXGBE_PMD 1
+#undef RTE_LIBRTE_CXGBE_DEBUG
+#undef RTE_LIBRTE_CXGBE_DEBUG_REG
+#undef RTE_LIBRTE_CXGBE_DEBUG_MBOX
+#undef RTE_LIBRTE_CXGBE_DEBUG_TX
+#undef RTE_LIBRTE_CXGBE_DEBUG_RX
+#undef RTE_LIBRTE_ENIC_PMD
+#define RTE_LIBRTE_ENIC_PMD 1
+#undef RTE_LIBRTE_ENIC_DEBUG
+#undef RTE_LIBRTE_PMD_SZEDATA2
+#undef RTE_LIBRTE_VIRTIO_PMD
+#define RTE_LIBRTE_VIRTIO_PMD 1
+#undef RTE_LIBRTE_VIRTIO_DEBUG_INIT
+#undef RTE_LIBRTE_VIRTIO_DEBUG_RX
+#undef RTE_LIBRTE_VIRTIO_DEBUG_TX
+#undef RTE_LIBRTE_VIRTIO_DEBUG_DRIVER
+#undef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
+#undef RTE_LIBRTE_VMXNET3_PMD
+#define RTE_LIBRTE_VMXNET3_PMD 1
+#undef RTE_LIBRTE_VMXNET3_DEBUG_INIT
+#undef RTE_LIBRTE_VMXNET3_DEBUG_RX
+#undef RTE_LIBRTE_VMXNET3_DEBUG_TX
+#undef RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE
+#undef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
+#undef RTE_LIBRTE_PMD_RING
+#define RTE_LIBRTE_PMD_RING 1
+#undef RTE_PMD_RING_MAX_RX_RINGS
+#define RTE_PMD_RING_MAX_RX_RINGS 16
+#undef RTE_PMD_RING_MAX_TX_RINGS
+#define RTE_PMD_RING_MAX_TX_RINGS 16
+#undef RTE_LIBRTE_PMD_PCAP
+#define RTE_LIBRTE_PMD_PCAP 1
+#undef RTE_LIBRTE_PMD_BOND
+#define RTE_LIBRTE_PMD_BOND 1
+#undef RTE_LIBRTE_BOND_DEBUG_ALB
+#undef RTE_LIBRTE_BOND_DEBUG_ALB_L1
+#undef RTE_LIBRTE_PMD_AF_PACKET
+#define RTE_LIBRTE_PMD_AF_PACKET 1
+#undef RTE_LIBRTE_PMD_XENVIRT
+#undef RTE_LIBRTE_PMD_NULL
+#define RTE_LIBRTE_PMD_NULL 1
+#undef RTE_PMD_PACKET_PREFETCH
+#define RTE_PMD_PACKET_PREFETCH 1
+#undef RTE_LIBRTE_CRYPTODEV
+#define RTE_LIBRTE_CRYPTODEV 1
+#undef RTE_LIBRTE_CRYPTODEV_DEBUG
+#undef RTE_CRYPTO_MAX_DEVS
+#define RTE_CRYPTO_MAX_DEVS 64
+#undef RTE_CRYPTODEV_NAME_LEN
+#define RTE_CRYPTODEV_NAME_LEN 64
+#undef RTE_LIBRTE_PMD_QAT
+#undef RTE_LIBRTE_PMD_QAT_DEBUG_INIT
+#undef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+#undef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+#undef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+#undef RTE_QAT_PMD_MAX_NB_SESSIONS
+#define RTE_QAT_PMD_MAX_NB_SESSIONS 2048
+#undef RTE_LIBRTE_PMD_AESNI_MB
+#undef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+#undef RTE_AESNI_MB_PMD_MAX_NB_QUEUE_PAIRS
+#define RTE_AESNI_MB_PMD_MAX_NB_QUEUE_PAIRS 8
+#undef RTE_AESNI_MB_PMD_MAX_NB_SESSIONS
+#define RTE_AESNI_MB_PMD_MAX_NB_SESSIONS 2048
+#undef RTE_LIBRTE_RING
+#define RTE_LIBRTE_RING 1
+#undef RTE_LIBRTE_RING_DEBUG
+#undef RTE_RING_SPLIT_PROD_CONS
+#undef RTE_RING_PAUSE_REP_COUNT
+#define RTE_RING_PAUSE_REP_COUNT 0
+#undef RTE_LIBRTE_MEMPOOL
+#define RTE_LIBRTE_MEMPOOL 1
+#undef RTE_MEMPOOL_CACHE_MAX_SIZE
+#define RTE_MEMPOOL_CACHE_MAX_SIZE 512
+#undef RTE_LIBRTE_MEMPOOL_DEBUG
+#undef RTE_LIBRTE_MBUF
+#define RTE_LIBRTE_MBUF 1
+#undef RTE_LIBRTE_MBUF_DEBUG
+#undef RTE_MBUF_REFCNT_ATOMIC
+#define RTE_MBUF_REFCNT_ATOMIC 1
+#undef RTE_PKTMBUF_HEADROOM
+/* DPDK's default headroom is 128; TRex uses a smaller value: */
+#define RTE_PKTMBUF_HEADROOM 16
+#undef RTE_LIBRTE_MBUF_OFFLOAD
+#define RTE_LIBRTE_MBUF_OFFLOAD 1
+#undef RTE_LIBRTE_MBUF_OFFLOAD_DEBUG
+#undef RTE_LIBRTE_TIMER
+#define RTE_LIBRTE_TIMER 1
+#undef RTE_LIBRTE_TIMER_DEBUG
+#undef RTE_LIBRTE_CFGFILE
+#define RTE_LIBRTE_CFGFILE 1
+#undef RTE_LIBRTE_CMDLINE
+#define RTE_LIBRTE_CMDLINE 1
+#undef RTE_LIBRTE_CMDLINE_DEBUG
+#undef RTE_LIBRTE_HASH
+#define RTE_LIBRTE_HASH 1
+#undef RTE_LIBRTE_HASH_DEBUG
+#undef RTE_LIBRTE_JOBSTATS
+#define RTE_LIBRTE_JOBSTATS 1
+#undef RTE_LIBRTE_LPM
+#define RTE_LIBRTE_LPM 1
+#undef RTE_LIBRTE_LPM_DEBUG
+#undef RTE_LIBRTE_ACL
+#define RTE_LIBRTE_ACL 1
+#undef RTE_LIBRTE_ACL_DEBUG
+#undef RTE_LIBRTE_POWER
+#define RTE_LIBRTE_POWER 1
+#undef RTE_LIBRTE_POWER_DEBUG
+#undef RTE_MAX_LCORE_FREQS
+#define RTE_MAX_LCORE_FREQS 64
+#undef RTE_LIBRTE_NET
+#define RTE_LIBRTE_NET 1
+#undef RTE_LIBRTE_IP_FRAG
+#define RTE_LIBRTE_IP_FRAG 1
+#undef RTE_LIBRTE_IP_FRAG_DEBUG
+#undef RTE_LIBRTE_IP_FRAG_MAX_FRAG
+#define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
+#undef RTE_LIBRTE_IP_FRAG_TBL_STAT
+#undef RTE_LIBRTE_METER
+#define RTE_LIBRTE_METER 1
+#undef RTE_LIBRTE_SCHED
+#define RTE_LIBRTE_SCHED 1
+#undef RTE_SCHED_DEBUG
+#undef RTE_SCHED_RED
+#undef RTE_SCHED_COLLECT_STATS
+#undef RTE_SCHED_SUBPORT_TC_OV
+#undef RTE_SCHED_PORT_N_GRINDERS
+#define RTE_SCHED_PORT_N_GRINDERS 8
+#undef RTE_SCHED_VECTOR
+#undef RTE_LIBRTE_DISTRIBUTOR
+#define RTE_LIBRTE_DISTRIBUTOR 1
+#undef RTE_LIBRTE_REORDER
+#define RTE_LIBRTE_REORDER 1
+#undef RTE_LIBRTE_PORT
+#define RTE_LIBRTE_PORT 1
+#undef RTE_PORT_STATS_COLLECT
+#undef RTE_LIBRTE_TABLE
+#define RTE_LIBRTE_TABLE 1
+#undef RTE_TABLE_STATS_COLLECT
+#undef RTE_LIBRTE_PIPELINE
+#define RTE_LIBRTE_PIPELINE 1
+#undef RTE_PIPELINE_STATS_COLLECT
+#undef RTE_LIBRTE_KNI
+#define RTE_LIBRTE_KNI 1
+#undef RTE_KNI_KMOD
+#define RTE_KNI_KMOD 1
+#undef RTE_KNI_PREEMPT_DEFAULT
+#define RTE_KNI_PREEMPT_DEFAULT 1
+#undef RTE_KNI_KO_DEBUG
+#undef RTE_KNI_VHOST
+#undef RTE_KNI_VHOST_MAX_CACHE_SIZE
+#define RTE_KNI_VHOST_MAX_CACHE_SIZE 1024
+#undef RTE_KNI_VHOST_VNET_HDR_EN
+#undef RTE_KNI_VHOST_DEBUG_RX
+#undef RTE_KNI_VHOST_DEBUG_TX
+#undef RTE_LIBRTE_VHOST
+#define RTE_LIBRTE_VHOST 1
+#undef RTE_LIBRTE_VHOST_USER
+#define RTE_LIBRTE_VHOST_USER 1
+#undef RTE_LIBRTE_VHOST_NUMA
+#undef RTE_LIBRTE_VHOST_DEBUG
+#undef RTE_LIBRTE_XEN_DOM0
+#undef RTE_INSECURE_FUNCTION_WARNING
+#undef RTE_APP_TEST
+#define RTE_APP_TEST 1
+#undef RTE_TEST_PMD
+#define RTE_TEST_PMD 1
+#undef RTE_TEST_PMD_RECORD_CORE_CYCLES
+#undef RTE_TEST_PMD_RECORD_BURST_STATS
+#undef RTE_MACHINE
+#define RTE_MACHINE "native"
+#undef RTE_ARCH
+#define RTE_ARCH "x86_64"
+#undef RTE_ARCH_X86_64
+#define RTE_ARCH_X86_64 1
+#undef RTE_ARCH_64
+#define RTE_ARCH_64 1
+#undef RTE_TOOLCHAIN
+#define RTE_TOOLCHAIN "gcc"
+#undef RTE_TOOLCHAIN_GCC
+#define RTE_TOOLCHAIN_GCC 1
+#endif /* __RTE_CONFIG_H */